Compare commits
1 Commits
000e308ce2
...
f1437c8635
| Author | SHA1 | Date |
|---|---|---|
|
|
f1437c8635 |
|
|
@@ -1,33 +1,33 @@
|
|||
module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs,
|
||||
bmm,anytime,Document Project,DP,10,_bmad/bmm/workflows/document-project/workflow.yaml,bmad_bmm_document-project,false,analyst,Create Mode,"Analyze an existing project to produce useful documentation",project-knowledge,*,
|
||||
bmm,anytime,Tech Spec,TS,20,_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md,bmad_bmm_tech-spec,false,quick-flow-solo-dev,Create Mode,"Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method. Quick one-off tasks small changes simple apps utilities without extensive planning",planning_artifacts,"tech spec",
|
||||
bmm,anytime,Quick Dev,QD,30,_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md,bmad_bmm_quick-dev,false,quick-flow-solo-dev,Create Mode,"Quick one-off tasks small changes simple apps utilities without extensive planning - Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method, unless the user is already working through the implementation phase and just requests a one-off thing not already in the plan",,,
|
||||
bmm,anytime,Correct Course,CC,40,_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml,bmad_bmm_correct-course,false,sm,Create Mode,"Anytime: Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories",planning_artifacts,"change proposal",
|
||||
bmm,1-analysis,Brainstorm Project,BP,10,_bmad/core/workflows/brainstorming/workflow.md,bmad_bmm_brainstorming,false,analyst,"data=_bmad/bmm/data/project-context-template.md","Expert Guided Facilitation through a single or multiple techniques",planning_artifacts,"brainstorming session",
|
||||
bmm,1-analysis,Market Research,MR,20,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad_bmm_research,false,analyst,Create Mode,"research_type=""market""","Market analysis competitive landscape customer needs and trends","planning_artifacts|project-knowledge","research documents"
|
||||
bmm,1-analysis,Domain Research,DR,21,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad_bmm_research,false,analyst,Create Mode,"research_type=""domain""","Industry domain deep dive subject matter expertise and terminology","planning_artifacts|project-knowledge","research documents"
|
||||
bmm,1-analysis,Technical Research,TR,22,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad_bmm_research,false,analyst,Create Mode,"research_type=""technical""","Technical feasibility architecture options and implementation approaches","planning_artifacts|project-knowledge","research documents"
|
||||
bmm,1-analysis,Create Brief,CB,30,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad_bmm_create-brief,false,analyst,Create Mode,"A guided experience to nail down your product idea",planning_artifacts,"product brief",
|
||||
bmm,1-analysis,Validate Brief,VB,40,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad_bmm_validate-brief,false,analyst,Validate Mode,"Validates product brief completeness",planning_artifacts,"brief validation report",
|
||||
bmm,2-planning,Create PRD,CP,10,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad_bmm_create-prd,true,pm,Create Mode,"Expert led facilitation to produce your Product Requirements Document",planning_artifacts,prd,
|
||||
bmm,2-planning,Validate PRD,VP,20,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad_bmm_validate-prd,false,pm,Validate Mode,"Validate PRD is comprehensive lean well organized and cohesive",planning_artifacts,"prd validation report",
|
||||
bmm,2-planning,Create UX,CU,30,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad_bmm_create-ux,false,ux-designer,Create Mode,"Guidance through realizing the plan for your UX, strongly recommended if a UI is a primary piece of the proposed project",planning_artifacts,"ux design",
|
||||
bmm,2-planning,Validate UX,VU,40,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad_bmm_validate-ux,false,ux-designer,Validate Mode,"Validates UX design deliverables",planning_artifacts,"ux validation report",
|
||||
,,Create Dataflow,CDF,50,_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml,bmad_bmm_create-dataflow,false,ux-designer,Create Mode,"Create data flow diagrams (DFD) in Excalidraw format - can be called standalone or during any workflow to add visual documentation",planning_artifacts,"dataflow diagram",
|
||||
,,Create Diagram,CED,51,_bmad/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml,bmad_bmm_create-diagram,false,ux-designer,Create Mode,"Create system architecture diagrams ERDs UML diagrams or general technical diagrams in Excalidraw format - use anytime or call from architecture workflow to add visual documentation",planning_artifacts,"diagram",
|
||||
,,Create Flowchart,CFC,52,_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml,bmad_bmm_create-flowchart,false,ux-designer,Create Mode,"Create a flowchart visualization in Excalidraw format for processes pipelines or logic flows - use anytime or during architecture to add process documentation",planning_artifacts,"flowchart",
|
||||
,,Create Wireframe,CEW,53,_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml,bmad_bmm_create-wireframe,false,ux-designer,Create Mode,"Create website or app wireframes in Excalidraw format - use anytime standalone or call from UX workflow to add UI mockups",planning_artifacts,"wireframe",
|
||||
bmm,3-solutioning,Create Architecture,CA,10,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad_bmm_create-architecture,true,architect,Create Mode,"Guided Workflow to document technical decisions",planning_artifacts,architecture,
|
||||
bmm,3-solutioning,Validate Architecture,VA,20,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad_bmm_validate-architecture,false,architect,Validate Mode,"Validates architecture completeness",planning_artifacts,"architecture validation report",
|
||||
bmm,3-solutioning,Create Epics and Stories,CE,30,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad_bmm_create-epics-and-stories,true,pm,Create Mode,"Create the Epics and Stories Listing",planning_artifacts,"epics and stories",
|
||||
bmm,3-solutioning,Validate Epics and Stories,VE,40,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad_bmm_validate-epics-and-stories,false,pm,Validate Mode,"Validates epics and stories completeness",planning_artifacts,"epics validation report",
|
||||
bmm,3-solutioning,Test Design,TD,50,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad_bmm_test-design,false,tea,Create Mode,"Create comprehensive test scenarios ahead of development, recommended if strict test compliance or assurance is needed. Very critical for distributed applications with separate front ends and backends outside of a monorepo.",planning_artifacts,"test design",
|
||||
bmm,3-solutioning,Validate Test Design,VT,60,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad_bmm_validate-test-design,false,tea,Validate Mode,"Validates test design coverage",planning_artifacts,"test design validation report",
|
||||
bmm,3-solutioning,Implementation Readiness,IR,70,_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md,bmad_bmm_implementation-readiness,true,architect,Validate Mode,"Ensure PRD UX Architecture and Epics Stories are aligned",planning_artifacts,"readiness report",
|
||||
bmm,4-implementation,Sprint Planning,SP,10,_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml,bmad_bmm_sprint-planning,true,sm,Create Mode,"Generate sprint plan for development tasks - this kicks off the implementation phase by producing a plan the implementation agents will follow in sequence for every story in the plan.",implementation_artifacts,"sprint status",
|
||||
bmm,4-implementation,Sprint Status,SS,20,_bmad/bmm/workflows/4-implementation/sprint-status/workflow.yaml,bmad_bmm_sprint-status,false,sm,Create Mode,"Anytime: Summarize sprint status and route to next workflow",,,
|
||||
bmm,4-implementation,Create Story,CS,30,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad_bmm_create-story,true,sm,Create Mode,"Story cycle start: Prepare first found story in the sprint plan that is next, or if the command is run with a specific epic and story designation with context. Once complete, then VS then DS then CR then back to DS if needed or next CS or ER",implementation_artifacts,story,
|
||||
bmm,4-implementation,Validate Story,VS,35,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad_bmm_validate-story,false,sm,Validate Mode,"Validates story readiness and completeness before development work begins",implementation_artifacts,"story validation report",
|
||||
bmm,4-implementation,Dev Story,DS,40,_bmad/bmm/workflows/4-implementation/dev-story/workflow.yaml,bmad_bmm_dev-story,true,dev,Create Mode,"Story cycle: Execute story implementation tasks and tests then CR then back to DS if fixes needed",,,
|
||||
bmm,4-implementation,Code Review,CR,50,_bmad/bmm/workflows/4-implementation/code-review/workflow.yaml,bmad_bmm_code-review,false,dev,Create Mode,"Story cycle: If issues back to DS if approved then next CS or ER if epic complete",,,
|
||||
bmm,4-implementation,Retrospective,ER,60,_bmad/bmm/workflows/4-implementation/retrospective/workflow.yaml,bmad_bmm_retrospective,false,sm,Create Mode,"Optional at epic end: Review completed work lessons learned and next epic or if major issues consider CC",implementation_artifacts,retrospective,
|
||||
bmm,anytime,Document Project,DP,10,_bmad/bmm/workflows/document-project/workflow.yaml,bmad:bmm:document-project,false,analyst,Create Mode,"Analyze an existing project to produce useful documentation",project-knowledge,*,
|
||||
bmm,anytime,Tech Spec,TS,20,_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md,bmad:bmm:tech-spec,false,quick-flow-solo-dev,Create Mode,"Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method. Quick one-off tasks small changes simple apps utilities without extensive planning",planning_artifacts,"tech spec",
|
||||
bmm,anytime,Quick Dev,QD,30,_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md,bmad:bmm:quick-dev,false,quick-flow-solo-dev,Create Mode,"Quick one-off tasks small changes simple apps utilities without extensive planning - Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method, unless the user is already working through the implementation phase and just requests a one-off thing not already in the plan",,,
|
||||
bmm,anytime,Correct Course,CC,40,_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml,bmad:bmm:correct-course,false,sm,Create Mode,"Anytime: Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories",planning_artifacts,"change proposal",
|
||||
bmm,1-analysis,Brainstorm Project,BP,10,_bmad/core/workflows/brainstorming/workflow.md,bmad:bmm:brainstorming,false,analyst,"data=_bmad/bmm/data/project-context-template.md","Expert Guided Facilitation through a single or multiple techniques",planning_artifacts,"brainstorming session",
|
||||
bmm,1-analysis,Market Research,MR,20,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad:bmm:research,false,analyst,Create Mode,"research_type=""market""","Market analysis competitive landscape customer needs and trends","planning_artifacts|project-knowledge","research documents"
|
||||
bmm,1-analysis,Domain Research,DR,21,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad:bmm:research,false,analyst,Create Mode,"research_type=""domain""","Industry domain deep dive subject matter expertise and terminology","planning_artifacts|project-knowledge","research documents"
|
||||
bmm,1-analysis,Technical Research,TR,22,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad:bmm:research,false,analyst,Create Mode,"research_type=""technical""","Technical feasibility architecture options and implementation approaches","planning_artifacts|project-knowledge","research documents"
|
||||
bmm,1-analysis,Create Brief,CB,30,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad:bmm:create-brief,false,analyst,Create Mode,"A guided experience to nail down your product idea",planning_artifacts,"product brief",
|
||||
bmm,1-analysis,Validate Brief,VB,40,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad:bmm:validate-brief,false,analyst,Validate Mode,"Validates product brief completeness",planning_artifacts,"brief validation report",
|
||||
bmm,2-planning,Create PRD,CP,10,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad:bmm:create-prd,true,pm,Create Mode,"Expert led facilitation to produce your Product Requirements Document",planning_artifacts,prd,
|
||||
bmm,2-planning,Validate PRD,VP,20,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad:bmm:validate-prd,false,pm,Validate Mode,"Validate PRD is comprehensive lean well organized and cohesive",planning_artifacts,"prd validation report",
|
||||
bmm,2-planning,Create UX,CU,30,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad:bmm:create-ux,false,ux-designer,Create Mode,"Guidance through realizing the plan for your UX, strongly recommended if a UI is a primary piece of the proposed project",planning_artifacts,"ux design",
|
||||
bmm,2-planning,Validate UX,VU,40,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad:bmm:validate-ux,false,ux-designer,Validate Mode,"Validates UX design deliverables",planning_artifacts,"ux validation report",
|
||||
,,Create Dataflow,CDF,50,_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml,bmad:bmm:create-dataflow,false,ux-designer,Create Mode,"Create data flow diagrams (DFD) in Excalidraw format - can be called standalone or during any workflow to add visual documentation",planning_artifacts,"dataflow diagram",
|
||||
,,Create Diagram,CED,51,_bmad/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml,bmad:bmm:create-diagram,false,ux-designer,Create Mode,"Create system architecture diagrams ERDs UML diagrams or general technical diagrams in Excalidraw format - use anytime or call from architecture workflow to add visual documentation",planning_artifacts,"diagram",
|
||||
,,Create Flowchart,CFC,52,_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml,bmad:bmm:create-flowchart,false,ux-designer,Create Mode,"Create a flowchart visualization in Excalidraw format for processes pipelines or logic flows - use anytime or during architecture to add process documentation",planning_artifacts,"flowchart",
|
||||
,,Create Wireframe,CEW,53,_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml,bmad:bmm:create-wireframe,false,ux-designer,Create Mode,"Create website or app wireframes in Excalidraw format - use anytime standalone or call from UX workflow to add UI mockups",planning_artifacts,"wireframe",
|
||||
bmm,3-solutioning,Create Architecture,CA,10,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad:bmm:create-architecture,true,architect,Create Mode,"Guided Workflow to document technical decisions",planning_artifacts,architecture,
|
||||
bmm,3-solutioning,Validate Architecture,VA,20,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad:bmm:validate-architecture,false,architect,Validate Mode,"Validates architecture completeness",planning_artifacts,"architecture validation report",
|
||||
bmm,3-solutioning,Create Epics and Stories,CE,30,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad:bmm:create-epics-and-stories,true,pm,Create Mode,"Create the Epics and Stories Listing",planning_artifacts,"epics and stories",
|
||||
bmm,3-solutioning,Validate Epics and Stories,VE,40,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad:bmm:validate-epics-and-stories,false,pm,Validate Mode,"Validates epics and stories completeness",planning_artifacts,"epics validation report",
|
||||
bmm,3-solutioning,Test Design,TD,50,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad:bmm:test-design,false,tea,Create Mode,"Create comprehensive test scenarios ahead of development, recommended if strict test compliance or assurance is needed. Very critical for distributed applications with separate front ends and backends outside of a monorepo.",planning_artifacts,"test design",
|
||||
bmm,3-solutioning,Validate Test Design,VT,60,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad:bmm:validate-test-design,false,tea,Validate Mode,"Validates test design coverage",planning_artifacts,"test design validation report",
|
||||
bmm,3-solutioning,Implementation Readiness,IR,70,_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md,bmad:bmm:implementation-readiness,true,architect,Validate Mode,"Ensure PRD UX Architecture and Epics Stories are aligned",planning_artifacts,"readiness report",
|
||||
bmm,4-implementation,Sprint Planning,SP,10,_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml,bmad:bmm:sprint-planning,true,sm,Create Mode,"Generate sprint plan for development tasks - this kicks off the implementation phase by producing a plan the implementation agents will follow in sequence for every story in the plan.",implementation_artifacts,"sprint status",
|
||||
bmm,4-implementation,Sprint Status,SS,20,_bmad/bmm/workflows/4-implementation/sprint-status/workflow.yaml,bmad:bmm:sprint-status,false,sm,Create Mode,"Anytime: Summarize sprint status and route to next workflow",,,
|
||||
bmm,4-implementation,Create Story,CS,30,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad:bmm:create-story,true,sm,Create Mode,"Story cycle start: Prepare first found story in the sprint plan that is next, or if the command is run with a specific epic and story designation with context. Once complete, then VS then DS then CR then back to DS if needed or next CS or ER",implementation_artifacts,story,
|
||||
bmm,4-implementation,Validate Story,VS,35,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad:bmm:validate-story,false,sm,Validate Mode,"Validates story readiness and completeness before development work begins",implementation_artifacts,"story validation report",
|
||||
bmm,4-implementation,Dev Story,DS,40,_bmad/bmm/workflows/4-implementation/dev-story/workflow.yaml,bmad:bmm:dev-story,true,dev,Create Mode,"Story cycle: Execute story implementation tasks and tests then CR then back to DS if fixes needed",,,
|
||||
bmm,4-implementation,Code Review,CR,50,_bmad/bmm/workflows/4-implementation/code-review/workflow.yaml,bmad:bmm:code-review,false,dev,Create Mode,"Story cycle: If issues back to DS if approved then next CS or ER if epic complete",,,
|
||||
bmm,4-implementation,Retrospective,ER,60,_bmad/bmm/workflows/4-implementation/retrospective/workflow.yaml,bmad:bmm:retrospective,false,sm,Create Mode,"Optional at epic end: Review completed work lessons learned and next epic or if major issues consider CC",implementation_artifacts,retrospective,
|
||||
|
|
|
|||
|
|
|
@@ -80,29 +80,23 @@
|
|||
- [ ] Owners assigned where applicable
|
||||
- [ ] No duplicate coverage (same behavior at multiple levels)
|
||||
|
||||
### Execution Strategy
|
||||
### Execution Order
|
||||
|
||||
**CRITICAL: Keep execution strategy simple, avoid redundancy**
|
||||
|
||||
- [ ] **Simple structure**: PR / Nightly / Weekly (NOT complex smoke/P0/P1/P2 tiers)
|
||||
- [ ] **PR execution**: All functional tests unless significant infrastructure overhead
|
||||
- [ ] **Nightly/Weekly**: Only performance, chaos, long-running, manual tests
|
||||
- [ ] **No redundancy**: Don't re-list all tests (already in coverage plan)
|
||||
- [ ] **Philosophy stated**: "Run everything in PRs if <15 min, defer only if expensive/long"
|
||||
- [ ] **Playwright parallelization noted**: 100s of tests in 10-15 min
|
||||
- [ ] Smoke tests defined (<5 min target)
|
||||
- [ ] P0 tests listed (<10 min target)
|
||||
- [ ] P1 tests listed (<30 min target)
|
||||
- [ ] P2/P3 tests listed (<60 min target)
|
||||
- [ ] Order optimizes for fast feedback
|
||||
|
||||
### Resource Estimates
|
||||
|
||||
**CRITICAL: Use intervals/ranges, NOT exact numbers**
|
||||
|
||||
- [ ] P0 effort provided as interval range (e.g., "~25-40 hours" NOT "36 hours")
|
||||
- [ ] P1 effort provided as interval range (e.g., "~20-35 hours" NOT "27 hours")
|
||||
- [ ] P2 effort provided as interval range (e.g., "~10-30 hours" NOT "15.5 hours")
|
||||
- [ ] P3 effort provided as interval range (e.g., "~2-5 hours" NOT "2.5 hours")
|
||||
- [ ] Total effort provided as interval range (e.g., "~55-110 hours" NOT "81 hours")
|
||||
- [ ] Timeline provided as week range (e.g., "~1.5-3 weeks" NOT "11 days")
|
||||
- [ ] Estimates include setup time and account for complexity variations
|
||||
- [ ] **No false precision**: Avoid exact calculations like "18 tests × 2 hours = 36 hours"
|
||||
- [ ] P0 hours calculated (count × 2 hours)
|
||||
- [ ] P1 hours calculated (count × 1 hour)
|
||||
- [ ] P2 hours calculated (count × 0.5 hours)
|
||||
- [ ] P3 hours calculated (count × 0.25 hours)
|
||||
- [ ] Total hours summed
|
||||
- [ ] Days estimate provided (hours / 8)
|
||||
- [ ] Estimates include setup time
|
||||
|
||||
### Quality Gate Criteria
|
||||
|
||||
|
|
@@ -132,16 +126,11 @@
|
|||
|
||||
### Priority Assignment Accuracy
|
||||
|
||||
**CRITICAL: Priority classification is separate from execution timing**
|
||||
|
||||
- [ ] **Priority sections (P0/P1/P2/P3) do NOT include execution context** (e.g., no "Run on every commit" in headers)
|
||||
- [ ] **Priority sections have only "Criteria" and "Purpose"** (no "Execution:" field)
|
||||
- [ ] **Execution Strategy section** is separate and handles timing based on infrastructure overhead
|
||||
- [ ] P0: Truly blocks core functionality + High-risk (≥6) + No workaround
|
||||
- [ ] P1: Important features + Medium-risk (3-4) + Common workflows
|
||||
- [ ] P2: Secondary features + Low-risk (1-2) + Edge cases
|
||||
- [ ] P3: Nice-to-have + Exploratory + Benchmarks
|
||||
- [ ] **Note at top of Test Coverage Plan**: Clarifies P0/P1/P2/P3 = priority/risk, NOT execution timing
|
||||
- [ ] P0: Truly blocks core functionality
|
||||
- [ ] P0: High-risk (score ≥6)
|
||||
- [ ] P0: No workaround exists
|
||||
- [ ] P1: Important but not blocking
|
||||
- [ ] P2/P3: Nice-to-have or edge cases
|
||||
|
||||
### Test Level Selection
|
||||
|
||||
|
|
@@ -187,90 +176,58 @@
|
|||
- [ ] 🚨 BLOCKERS - Team Must Decide (Sprint 0 critical path items)
|
||||
- [ ] ⚠️ HIGH PRIORITY - Team Should Validate (recommendations for approval)
|
||||
- [ ] 📋 INFO ONLY - Solutions Provided (no decisions needed)
|
||||
- [ ] **Risk Assessment** section - **ACTIONABLE**
|
||||
- [ ] **Risk Assessment** section
|
||||
- [ ] Total risks identified count
|
||||
- [ ] High-priority risks table (score ≥6) with all columns: Risk ID, Category, Description, Probability, Impact, Score, Mitigation, Owner, Timeline
|
||||
- [ ] Medium and low-priority risks tables
|
||||
- [ ] Risk category legend included
|
||||
- [ ] **Testability Concerns and Architectural Gaps** section - **ACTIONABLE**
|
||||
- [ ] **Sub-section: 🚨 ACTIONABLE CONCERNS** at TOP
|
||||
- [ ] Blockers to Fast Feedback table (WHAT architecture must provide)
|
||||
- [ ] Architectural Improvements Needed (WHAT must be changed)
|
||||
- [ ] Each concern has: Owner, Timeline, Impact
|
||||
- [ ] **Sub-section: Testability Assessment Summary** at BOTTOM (FYI)
|
||||
- [ ] What Works Well (passing items)
|
||||
- [ ] Accepted Trade-offs (no action required)
|
||||
- [ ] This section only included if worth mentioning; otherwise omitted
|
||||
- [ ] **Testability Concerns** section (if system has architectural constraints)
|
||||
- [ ] Blockers to fast feedback table
|
||||
- [ ] Explanation of why standard CI/CD may not apply (if applicable)
|
||||
- [ ] Tiered testing strategy table (if forced by architecture)
|
||||
- [ ] Architectural improvements needed (or acknowledgment system supports testing well)
|
||||
- [ ] **Risk Mitigation Plans** for all high-priority risks (≥6)
|
||||
- [ ] Each plan has: Strategy (numbered steps), Owner, Timeline, Status, Verification
|
||||
- [ ] **Only Backend/DevOps/Arch/Security mitigations** (production code changes)
|
||||
- [ ] QA-owned mitigations belong in QA doc instead
|
||||
- [ ] **Assumptions and Dependencies** section
|
||||
- [ ] **Architectural assumptions only** (SLO targets, replication lag, system design)
|
||||
- [ ] Assumptions list (numbered)
|
||||
- [ ] Dependencies list with required dates
|
||||
- [ ] Risks to plan with impact and contingency
|
||||
- [ ] QA execution assumptions belong in QA doc instead
|
||||
- [ ] **NO test implementation code** (long examples belong in QA doc)
|
||||
- [ ] **NO test scripts** (no Playwright test(...) blocks, no assertions, no test setup code)
|
||||
- [ ] **NO NFR test examples** (NFR sections describe WHAT to test, not HOW to test)
|
||||
- [ ] **NO test scenario checklists** (belong in QA doc)
|
||||
- [ ] **NO bloat or repetition** (consolidate repeated notes, avoid over-explanation)
|
||||
- [ ] **Cross-references to QA doc** where appropriate (instead of duplication)
|
||||
- [ ] **RECIPE SECTIONS NOT IN ARCHITECTURE DOC:**
|
||||
- [ ] NO "Test Levels Strategy" section (unit/integration/E2E split belongs in QA doc only)
|
||||
- [ ] NO "NFR Testing Approach" section with detailed test procedures (belongs in QA doc only)
|
||||
- [ ] NO "Test Environment Requirements" section (belongs in QA doc only)
|
||||
- [ ] NO "Recommendations for Sprint 0" section with test framework setup (belongs in QA doc only)
|
||||
- [ ] NO "Quality Gate Criteria" section (pass rates, coverage targets belong in QA doc only)
|
||||
- [ ] NO "Tool Selection" section (Playwright, k6, etc. belongs in QA doc only)
|
||||
- [ ] **Cross-references to QA doc** where appropriate
|
||||
|
||||
### test-design-qa.md
|
||||
|
||||
**NEW STRUCTURE (streamlined from 375 to ~287 lines):**
|
||||
|
||||
- [ ] **Purpose statement** at top (test execution recipe)
|
||||
- [ ] **Executive Summary** with risk summary and coverage summary
|
||||
- [ ] **Dependencies & Test Blockers** section in POSITION 2 (right after Executive Summary)
|
||||
- [ ] Backend/Architecture dependencies listed (what QA needs from other teams)
|
||||
- [ ] QA infrastructure setup listed (factories, fixtures, environments)
|
||||
- [ ] Code example with playwright-utils if config.tea_use_playwright_utils is true
|
||||
- [ ] Test from '@seontechnologies/playwright-utils/api-request/fixtures'
|
||||
- [ ] Expect from '@playwright/test' (playwright-utils does not re-export expect)
|
||||
- [ ] Code examples include assertions (no unused imports)
|
||||
- [ ] **Risk Assessment** section (brief, references Architecture doc)
|
||||
- [ ] High-priority risks table
|
||||
- [ ] Medium/low-priority risks table
|
||||
- [ ] Each risk shows "QA Test Coverage" column (how QA validates)
|
||||
- [ ] **Purpose statement** at top (execution recipe for QA team)
|
||||
- [ ] **Quick Reference for QA** section
|
||||
- [ ] Before You Start checklist
|
||||
- [ ] Test Execution Order
|
||||
- [ ] Need Help? guidance
|
||||
- [ ] **System Architecture Summary** (brief overview of services and data flow)
|
||||
- [ ] **Test Environment Requirements** in early section (section 1-3, NOT buried at end)
|
||||
- [ ] Table with Local/Dev/Staging environments
|
||||
- [ ] Key principles listed (shared DB, randomization, parallel-safe, self-cleaning, shift-left)
|
||||
- [ ] Code example provided
|
||||
- [ ] **Testability Assessment** with prerequisites checklist
|
||||
- [ ] References Architecture doc blockers (not duplication)
|
||||
- [ ] **Test Levels Strategy** with unit/integration/E2E split
|
||||
- [ ] System type identified
|
||||
- [ ] Recommended split percentages with rationale
|
||||
- [ ] Test count summary (P0/P1/P2/P3 totals)
|
||||
- [ ] **Test Coverage Plan** with P0/P1/P2/P3 sections
|
||||
- [ ] Priority sections have ONLY "Criteria" (no execution context)
|
||||
- [ ] Note at top: "P0/P1/P2/P3 = priority, NOT execution timing"
|
||||
- [ ] Test tables with columns: Test ID | Requirement | Test Level | Risk Link | Notes
|
||||
- [ ] **Execution Strategy** section (organized by TOOL TYPE)
|
||||
- [ ] Every PR: Playwright tests (~10-15 min)
|
||||
- [ ] Nightly: k6 performance tests (~30-60 min)
|
||||
- [ ] Weekly: Chaos & long-running (~hours)
|
||||
- [ ] Philosophy: "Run everything in PRs unless expensive/long-running"
|
||||
- [ ] **QA Effort Estimate** section (QA effort ONLY)
|
||||
- [ ] Interval-based estimates (e.g., "~1-2 weeks" NOT "36 hours")
|
||||
- [ ] NO DevOps, Backend, Data Eng, Finance effort
|
||||
- [ ] NO Sprint breakdowns (too prescriptive)
|
||||
- [ ] **Appendix A: Code Examples & Tagging**
|
||||
- [ ] **Appendix B: Knowledge Base References**
|
||||
|
||||
**REMOVED SECTIONS (bloat):**
|
||||
- [ ] ❌ NO Quick Reference section (bloat)
|
||||
- [ ] ❌ NO System Architecture Summary (bloat)
|
||||
- [ ] ❌ NO Test Environment Requirements as separate section (integrated into Dependencies)
|
||||
- [ ] ❌ NO Testability Assessment section (bloat - covered in Dependencies)
|
||||
- [ ] ❌ NO Test Levels Strategy section (bloat - obvious from test scenarios)
|
||||
- [ ] ❌ NO NFR Readiness Summary (bloat)
|
||||
- [ ] ❌ NO Quality Gate Criteria section (teams decide for themselves)
|
||||
- [ ] ❌ NO Follow-on Workflows section (bloat - BMAD commands self-explanatory)
|
||||
- [ ] ❌ NO Approval section (unnecessary formality)
|
||||
- [ ] ❌ NO Infrastructure/DevOps/Finance effort tables (out of scope)
|
||||
- [ ] ❌ NO Sprint 0/1/2/3 breakdown tables (too prescriptive)
|
||||
- [ ] ❌ NO Next Steps section (bloat)
|
||||
- [ ] Each priority has: Execution details, Purpose, Criteria, Test Count
|
||||
- [ ] Detailed test scenarios WITH CHECKBOXES
|
||||
- [ ] Coverage table with columns: Requirement | Test Level | Risk Link | Test Count | Owner | Notes
|
||||
- [ ] **Sprint 0 Setup Requirements**
|
||||
- [ ] Architecture/Backend blockers listed with cross-references to Architecture doc
|
||||
- [ ] QA Test Infrastructure section (factories, fixtures)
|
||||
- [ ] Test Environments section (Local, CI/CD, Staging, Production)
|
||||
- [ ] Sprint 0 NFR Gates checklist
|
||||
- [ ] Sprint 1 Items clearly separated
|
||||
- [ ] **NFR Readiness Summary** (reference to Architecture doc, not duplication)
|
||||
- [ ] Table with NFR categories, status, evidence, blocker, next action
|
||||
- [ ] **Cross-references to Architecture doc** (not duplication)
|
||||
- [ ] **NO architectural theory** (just reference Architecture doc)
|
||||
|
||||
### Cross-Document Consistency
|
||||
|
||||
|
|
@ -281,40 +238,6 @@
|
|||
- [ ] Dates and authors match across documents
|
||||
- [ ] ADR and PRD references consistent
|
||||
|
||||
### Document Quality (Anti-Bloat Check)
|
||||
|
||||
**CRITICAL: Check for bloat and repetition across BOTH documents**
|
||||
|
||||
- [ ] **No repeated notes 10+ times** (e.g., "Timing is pessimistic until R-005 fixed" on every section)
|
||||
- [ ] **Repeated information consolidated** (write once at top, reference briefly if needed)
|
||||
- [ ] **No excessive detail** that doesn't add value (obvious concepts, redundant examples)
|
||||
- [ ] **Focus on unique/critical info** (only document what's different from standard practice)
|
||||
- [ ] **Architecture doc**: Concerns-focused, NOT implementation-focused
|
||||
- [ ] **QA doc**: Implementation-focused, NOT theory-focused
|
||||
- [ ] **Clear separation**: Architecture = WHAT and WHY, QA = HOW
|
||||
- [ ] **Professional tone**: No AI slop markers
|
||||
- [ ] Avoid excessive ✅/❌ emojis (use sparingly, only when adding clarity)
|
||||
- [ ] Avoid "absolutely", "excellent", "fantastic", overly enthusiastic language
|
||||
- [ ] Write professionally and directly
|
||||
- [ ] **Architecture doc length**: Target ~150-200 lines max (focus on actionable concerns only)
|
||||
- [ ] **QA doc length**: Keep concise, remove bloat sections
|
||||
|
||||
### Architecture Doc Structure (Actionable-First Principle)
|
||||
|
||||
**CRITICAL: Validate structure follows actionable-first, FYI-last principle**
|
||||
|
||||
- [ ] **Actionable sections at TOP:**
|
||||
- [ ] Quick Guide (🚨 BLOCKERS first, then ⚠️ HIGH PRIORITY, then 📋 INFO ONLY last)
|
||||
- [ ] Risk Assessment (high-priority risks ≥6 at top)
|
||||
- [ ] Testability Concerns (concerns/blockers at top, passing items at bottom)
|
||||
- [ ] Risk Mitigation Plans (for high-priority risks ≥6)
|
||||
- [ ] **FYI sections at BOTTOM:**
|
||||
- [ ] Testability Assessment Summary (what works well - only if worth mentioning)
|
||||
- [ ] Assumptions and Dependencies
|
||||
- [ ] **ASRs categorized correctly:**
|
||||
- [ ] Actionable ASRs included in 🚨 or ⚠️ sections
|
||||
- [ ] FYI ASRs included in 📋 section or omitted if obvious
|
||||
|
||||
## Completion Criteria
|
||||
|
||||
**All must be true:**
|
||||
|
|
@ -372,20 +295,9 @@ If workflow fails:
|
|||
|
||||
- **Solution**: Use test pyramid - E2E for critical paths only
|
||||
|
||||
**Issue**: Resource estimates too high or too precise
|
||||
**Issue**: Resource estimates too high
|
||||
|
||||
- **Solution**:
|
||||
- Invest in fixtures/factories to reduce per-test setup time
|
||||
- Use interval ranges (e.g., "~55-105 hours") instead of exact numbers (e.g., "81 hours")
|
||||
- Widen intervals if high uncertainty exists
|
||||
|
||||
**Issue**: Execution order section too complex or redundant
|
||||
|
||||
- **Solution**:
|
||||
- Default: Run everything in PRs (<15 min with Playwright parallelization)
|
||||
- Only defer to nightly/weekly if expensive (k6, chaos, 4+ hour tests)
|
||||
- Don't create smoke/P0/P1/P2/P3 tier structure
|
||||
- Don't re-list all tests (already in coverage plan)
|
||||
- **Solution**: Invest in fixtures/factories to reduce per-test setup time
|
||||
|
||||
### Best Practices
|
||||
|
||||
|
|
@ -393,9 +305,7 @@ If workflow fails:
|
|||
- High-priority risks (≥6) require immediate mitigation
|
||||
- P0 tests should cover <10% of total scenarios
|
||||
- Avoid testing same behavior at multiple levels
|
||||
- **Use interval-based estimates** (e.g., "~25-40 hours") instead of exact numbers to avoid false precision and provide flexibility
|
||||
- **Keep execution strategy simple**: Default to "run everything in PRs" (<15 min with Playwright), only defer if expensive/long-running
|
||||
- **Avoid execution order redundancy**: Don't create complex tier structures or re-list tests
|
||||
- Include smoke tests (P0 subset) for fast feedback
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -157,13 +157,7 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
|
||||
1. **Review Architecture for Testability**
|
||||
|
||||
**STRUCTURE PRINCIPLE: CONCERNS FIRST, PASSING ITEMS LAST**
|
||||
|
||||
Evaluate architecture against these criteria and structure output as:
|
||||
1. **Testability Concerns** (ACTIONABLE - what's broken/missing)
|
||||
2. **Testability Assessment Summary** (FYI - what works well)
|
||||
|
||||
**Testability Criteria:**
|
||||
Evaluate architecture against these criteria:
|
||||
|
||||
**Controllability:**
|
||||
- Can we control system state for testing? (API seeding, factories, database reset)
|
||||
|
|
@ -180,18 +174,8 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
- Can we reproduce failures? (deterministic waits, HAR capture, seed data)
|
||||
- Are components loosely coupled? (mockable, testable boundaries)
|
||||
|
||||
**In Architecture Doc Output:**
|
||||
- **Section A: Testability Concerns** (TOP) - List what's BROKEN or MISSING
|
||||
- Example: "No API for test data seeding → Cannot parallelize tests"
|
||||
- Example: "Hardcoded DB connection → Cannot test in CI"
|
||||
- **Section B: Testability Assessment Summary** (BOTTOM) - List what PASSES
|
||||
- Example: "✅ API-first design supports test isolation"
|
||||
- Only include if worth mentioning; otherwise omit this section entirely
|
||||
|
||||
2. **Identify Architecturally Significant Requirements (ASRs)**
|
||||
|
||||
**CRITICAL: ASRs must indicate if ACTIONABLE or FYI**
|
||||
|
||||
From PRD NFRs and architecture decisions, identify quality requirements that:
|
||||
- Drive architecture decisions (e.g., "Must handle 10K concurrent users" → caching architecture)
|
||||
- Pose testability challenges (e.g., "Sub-second response time" → performance test infrastructure)
|
||||
|
|
@ -199,60 +183,21 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
|
||||
Score each ASR using risk matrix (probability × impact).
|
||||
|
||||
**In Architecture Doc, categorize ASRs:**
|
||||
- **ACTIONABLE ASRs** (require architecture changes): Include in "Quick Guide" 🚨 or ⚠️ sections
|
||||
- **FYI ASRs** (already satisfied by architecture): Include in "Quick Guide" 📋 section OR omit if obvious
|
||||
|
||||
**Example:**
|
||||
- ASR-001 (Score 9): "Multi-region deployment requires region-specific test infrastructure" → **ACTIONABLE** (goes in 🚨 BLOCKERS)
|
||||
- ASR-002 (Score 4): "OAuth 2.1 authentication already implemented in ADR-5" → **FYI** (goes in 📋 INFO ONLY or omit)
|
||||
|
||||
**Structure Principle:** Actionable ASRs at TOP, FYI ASRs at BOTTOM (or omit)
|
||||
|
||||
3. **Define Test Levels Strategy**
|
||||
|
||||
**IMPORTANT: This section goes in QA doc ONLY, NOT in Architecture doc**
|
||||
|
||||
Based on architecture (mobile, web, API, microservices, monolith):
|
||||
- Recommend unit/integration/E2E split (e.g., 70/20/10 for API-heavy, 40/30/30 for UI-heavy)
|
||||
- Identify test environment needs (local, staging, ephemeral, production-like)
|
||||
- Define testing approach per technology (Playwright for web, Maestro for mobile, k6 for performance)
|
||||
|
||||
**In Architecture doc:** Only mention test level split if it's an ACTIONABLE concern
|
||||
- Example: "API response time <100ms requires load testing infrastructure" (concern)
|
||||
- DO NOT include full test level strategy table in Architecture doc
|
||||
4. **Assess NFR Testing Approach**
|
||||
|
||||
4. **Assess NFR Requirements (MINIMAL in Architecture Doc)**
|
||||
|
||||
**CRITICAL: NFR testing approach is a RECIPE - belongs in QA doc ONLY**
|
||||
|
||||
**In Architecture Doc:**
|
||||
- Only mention NFRs if they create testability CONCERNS
|
||||
- Focus on WHAT architecture must provide, not HOW to test
|
||||
- Keep it brief - 1-2 sentences per NFR category at most
|
||||
|
||||
**Example - Security NFR in Architecture doc (if there's a concern):**
|
||||
✅ CORRECT (concern-focused, brief, WHAT/WHY only):
|
||||
- "System must prevent cross-customer data access (GDPR requirement). Requires test infrastructure for multi-tenant isolation in Sprint 0."
|
||||
- "OAuth tokens must expire after 1 hour (ADR-5). Requires test harness for token expiration validation."
|
||||
|
||||
❌ INCORRECT (too detailed, belongs in QA doc):
|
||||
- Full table of security test scenarios
|
||||
- Test scripts with code examples
|
||||
- Detailed test procedures
|
||||
- Tool selection (e.g., "use Playwright E2E + OWASP ZAP")
|
||||
- Specific test approaches (e.g., "Test approach: Playwright E2E for auth/authz")
|
||||
|
||||
**In QA Doc (full NFR testing approach):**
|
||||
- **Security**: Full test scenarios, tooling (Playwright + OWASP ZAP), test procedures
|
||||
- **Performance**: Load/stress/spike test scenarios, k6 scripts, SLO thresholds
|
||||
- **Reliability**: Error handling tests, retry logic validation, circuit breaker tests
|
||||
For each NFR category:
|
||||
- **Security**: Auth/authz tests, OWASP validation, secret handling (Playwright E2E + security tools)
|
||||
- **Performance**: Load/stress/spike testing with k6, SLO/SLA thresholds
|
||||
- **Reliability**: Error handling, retries, circuit breakers, health checks (Playwright + API tests)
|
||||
- **Maintainability**: Coverage targets, code quality gates, observability validation
|
||||
|
||||
**Rule of Thumb:**
|
||||
- Architecture doc: "What NFRs exist and what concerns they create" (1-2 sentences)
|
||||
- QA doc: "How to test those NFRs" (full sections with tables, code, procedures)
|
||||
|
||||
5. **Flag Testability Concerns**
|
||||
|
||||
Identify architecture decisions that harm testability:
|
||||
|
|
@ -283,54 +228,22 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
**Standard Structures (REQUIRED):**
|
||||
|
||||
**test-design-architecture.md sections (in this order):**
|
||||
|
||||
**STRUCTURE PRINCIPLE: Actionable items FIRST, FYI items LAST**
|
||||
|
||||
1. Executive Summary (scope, business context, architecture, risk summary)
|
||||
2. Quick Guide (🚨 BLOCKERS / ⚠️ HIGH PRIORITY / 📋 INFO ONLY)
|
||||
3. Risk Assessment (high/medium/low-priority risks with scoring) - **ACTIONABLE**
|
||||
4. Testability Concerns and Architectural Gaps - **ACTIONABLE** (what arch team must do)
|
||||
- Sub-section: Blockers to Fast Feedback (ACTIONABLE - concerns FIRST)
|
||||
- Sub-section: Architectural Improvements Needed (ACTIONABLE)
|
||||
- Sub-section: Testability Assessment Summary (FYI - passing items LAST, only if worth mentioning)
|
||||
5. Risk Mitigation Plans (detailed for high-priority risks ≥6) - **ACTIONABLE**
|
||||
6. Assumptions and Dependencies - **FYI**
|
||||
|
||||
**SECTIONS THAT DO NOT BELONG IN ARCHITECTURE DOC:**
|
||||
- ❌ Test Levels Strategy (unit/integration/E2E split) - This is a RECIPE, belongs in QA doc ONLY
|
||||
- ❌ NFR Testing Approach with test examples - This is a RECIPE, belongs in QA doc ONLY
|
||||
- ❌ Test Environment Requirements - This is a RECIPE, belongs in QA doc ONLY
|
||||
- ❌ Recommendations for Sprint 0 (test framework setup, factories) - This is a RECIPE, belongs in QA doc ONLY
|
||||
- ❌ Quality Gate Criteria (pass rates, coverage targets) - This is a RECIPE, belongs in QA doc ONLY
|
||||
- ❌ Tool Selection (Playwright, k6, etc.) - This is a RECIPE, belongs in QA doc ONLY
|
||||
|
||||
**WHAT BELONGS IN ARCHITECTURE DOC:**
|
||||
- ✅ Testability CONCERNS (what makes it hard to test)
|
||||
- ✅ Architecture GAPS (what's missing for testability)
|
||||
- ✅ What architecture team must DO (blockers, improvements)
|
||||
- ✅ Risks and mitigation plans
|
||||
- ✅ ASRs (Architecturally Significant Requirements) - but clarify if FYI or actionable
|
||||
3. Risk Assessment (high/medium/low-priority risks with scoring)
|
||||
4. Testability Concerns and Architectural Gaps (if system has constraints)
|
||||
5. Risk Mitigation Plans (detailed for high-priority risks ≥6)
|
||||
6. Assumptions and Dependencies
|
||||
|
||||
**test-design-qa.md sections (in this order):**
|
||||
1. Executive Summary (risk summary, coverage summary)
|
||||
2. **Dependencies & Test Blockers** (CRITICAL: RIGHT AFTER SUMMARY - what QA needs from other teams)
|
||||
3. Risk Assessment (scored risks with categories - reference Arch doc, don't duplicate)
|
||||
4. Test Coverage Plan (P0/P1/P2/P3 with detailed scenarios + checkboxes)
|
||||
5. **Execution Strategy** (SIMPLE: Organized by TOOL TYPE: PR (Playwright) / Nightly (k6) / Weekly (chaos/manual))
|
||||
6. QA Effort Estimate (QA effort ONLY - no DevOps, Data Eng, Finance, Backend)
|
||||
7. Appendices (code examples with playwright-utils, tagging strategy, knowledge base refs)
|
||||
|
||||
**SECTIONS TO EXCLUDE FROM QA DOC:**
|
||||
- ❌ Quality Gate Criteria (pass/fail thresholds - teams decide for themselves)
|
||||
- ❌ Follow-on Workflows (bloat - BMAD commands are self-explanatory)
|
||||
- ❌ Approval section (unnecessary formality)
|
||||
- ❌ Test Environment Requirements (remove as separate section - integrate into Dependencies if needed)
|
||||
- ❌ NFR Readiness Summary (bloat - covered in Risk Assessment)
|
||||
- ❌ Testability Assessment (bloat - covered in Dependencies)
|
||||
- ❌ Test Levels Strategy (bloat - obvious from test scenarios)
|
||||
- ❌ Sprint breakdowns (too prescriptive)
|
||||
- ❌ Infrastructure/DevOps/Data Eng effort tables (out of scope)
|
||||
- ❌ Mitigation plans for non-QA work (belongs in Arch doc)
|
||||
1. Quick Reference for QA (Before You Start, Execution Order, Need Help)
|
||||
2. System Architecture Summary (brief overview)
|
||||
3. Test Environment Requirements (MOVE UP - section 3, NOT buried at end)
|
||||
4. Testability Assessment (lightweight prerequisites checklist)
|
||||
5. Test Levels Strategy (unit/integration/E2E split with rationale)
|
||||
6. Test Coverage Plan (P0/P1/P2/P3 with detailed scenarios + checkboxes)
|
||||
7. Sprint 0 Setup Requirements (blockers, infrastructure, environments)
|
||||
8. NFR Readiness Summary (reference to Architecture doc)
|
||||
|
||||
**Content Guidelines:**
|
||||
|
||||
|
|
@ -339,46 +252,26 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
- ✅ Clear ownership (each blocker/ASR has owner + timeline)
|
||||
- ✅ Testability requirements (what architecture must support)
|
||||
- ✅ Mitigation plans (for each high-risk item ≥6)
|
||||
- ✅ Brief conceptual examples ONLY if needed to clarify architecture concerns (5-10 lines max)
|
||||
- ✅ **Target length**: ~150-200 lines max (focus on actionable concerns only)
|
||||
- ✅ **Professional tone**: Avoid AI slop (excessive ✅/❌ emojis, "absolutely", "excellent", overly enthusiastic language)
|
||||
- ✅ Short code examples (5-10 lines max showing what to support)
|
||||
|
||||
**Architecture doc (DON'T) - CRITICAL:**
|
||||
- ❌ NO test scripts or test implementation code AT ALL - This is a communication doc for architects, not a testing guide
|
||||
- ❌ NO Playwright test examples (e.g., test('...', async ({ request }) => ...))
|
||||
- ❌ NO assertion logic (e.g., expect(...).toBe(...))
|
||||
- ❌ NO test scenario checklists with checkboxes (belongs in QA doc)
|
||||
- ❌ NO implementation details about HOW QA will test
|
||||
- ❌ Focus on CONCERNS, not IMPLEMENTATION
|
||||
**Architecture doc (DON'T):**
|
||||
- ❌ NO long test code examples (belongs in QA doc)
|
||||
- ❌ NO test scenario checklists (belongs in QA doc)
|
||||
- ❌ NO implementation details (how QA will test)
|
||||
|
||||
**QA doc (DO):**
|
||||
- ✅ Test scenario recipes (clear P0/P1/P2/P3 with checkboxes)
|
||||
- ✅ Full test implementation code samples when helpful
|
||||
- ✅ **IMPORTANT: If config.tea_use_playwright_utils is true, ALL code samples MUST use @seontechnologies/playwright-utils fixtures and utilities**
|
||||
- ✅ Import test fixtures from '@seontechnologies/playwright-utils/api-request/fixtures'
|
||||
- ✅ Import expect from '@playwright/test' (playwright-utils does not re-export expect)
|
||||
- ✅ Use apiRequest fixture with schema validation, retry logic, and structured responses
|
||||
- ✅ Dependencies & Test Blockers section RIGHT AFTER Executive Summary (what QA needs from other teams)
|
||||
- ✅ **QA effort estimates ONLY** (no DevOps, Data Eng, Finance, Backend effort - out of scope)
|
||||
- ✅ Environment setup (Sprint 0 checklist with blockers)
|
||||
- ✅ Tool setup (factories, fixtures, frameworks)
|
||||
- ✅ Cross-references to Architecture doc (not duplication)
|
||||
- ✅ **Professional tone**: Avoid AI slop (excessive ✅/❌ emojis, "absolutely", "excellent", overly enthusiastic language)
|
||||
|
||||
**QA doc (DON'T):**
|
||||
- ❌ NO architectural theory (just reference Architecture doc)
|
||||
- ❌ NO ASR explanations (link to Architecture doc instead)
|
||||
- ❌ NO duplicate risk assessments (reference Architecture doc)
|
||||
- ❌ NO Quality Gate Criteria section (teams decide pass/fail thresholds for themselves)
|
||||
- ❌ NO Follow-on Workflows section (bloat - BMAD commands are self-explanatory)
|
||||
- ❌ NO Approval section (unnecessary formality)
|
||||
- ❌ NO effort estimates for other teams (DevOps, Backend, Data Eng, Finance - out of scope, QA effort only)
|
||||
- ❌ NO Sprint breakdowns (too prescriptive - e.g., "Sprint 0: 40 hours, Sprint 1: 48 hours")
|
||||
- ❌ NO mitigation plans for Backend/Arch/DevOps work (those belong in Architecture doc)
|
||||
- ❌ NO architectural assumptions or debates (those belong in Architecture doc)
|
||||
|
||||
**Anti-Patterns to Avoid (Cross-Document Redundancy):**
|
||||
|
||||
**CRITICAL: NO BLOAT, NO REPETITION, NO OVERINFO**
|
||||
|
||||
❌ **DON'T duplicate OAuth requirements:**
|
||||
- Architecture doc: Explain OAuth 2.1 flow in detail
|
||||
- QA doc: Re-explain why OAuth 2.1 is required
|
||||
|
|
@ -387,24 +280,6 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
- Architecture doc: "ASR-1: OAuth 2.1 required (see QA doc for 12 test scenarios)"
|
||||
- QA doc: "OAuth tests: 12 P0 scenarios (see Architecture doc R-001 for risk details)"
|
||||
|
||||
❌ **DON'T repeat the same note 10+ times:**
|
||||
- Example: "Timing is pessimistic until R-005 is fixed" repeated on every P0, P1, P2 section
|
||||
- This creates bloat and makes docs hard to read
|
||||
|
||||
✅ **DO consolidate repeated information:**
|
||||
- Write once at the top: "**Note**: All timing estimates are pessimistic pending R-005 resolution"
|
||||
- Reference briefly if needed: "(pessimistic timing)"
|
||||
|
||||
❌ **DON'T include excessive detail that doesn't add value:**
|
||||
- Long explanations of obvious concepts
|
||||
- Redundant examples showing the same pattern
|
||||
- Over-documentation of standard practices
|
||||
|
||||
✅ **DO focus on what's unique or critical:**
|
||||
- Document only what's different from standard practice
|
||||
- Highlight critical decisions and risks
|
||||
- Keep explanations concise and actionable
|
||||
|
||||
**Markdown Cross-Reference Syntax Examples:**
|
||||
|
||||
```markdown
|
||||
|
|
@ -455,24 +330,6 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
- Cross-reference between docs (no duplication)
|
||||
- Validate against checklist.md (System-Level Mode section)
|
||||
|
||||
**Common Over-Engineering to Avoid:**
|
||||
|
||||
**In QA Doc:**
|
||||
1. ❌ Quality gate thresholds ("P0 must be 100%, P1 ≥95%") - Let teams decide for themselves
|
||||
2. ❌ Effort estimates for other teams - QA doc should only estimate QA effort
|
||||
3. ❌ Sprint breakdowns ("Sprint 0: 40 hours, Sprint 1: 48 hours") - Too prescriptive
|
||||
4. ❌ Approval sections - Unnecessary formality
|
||||
5. ❌ Assumptions about architecture (SLO targets, replication lag) - These are architectural concerns, belong in Arch doc
|
||||
6. ❌ Mitigation plans for Backend/Arch/DevOps - Those belong in Arch doc
|
||||
7. ❌ Follow-on workflows section - Bloat, BMAD commands are self-explanatory
|
||||
8. ❌ NFR Readiness Summary - Bloat, covered in Risk Assessment
|
||||
|
||||
**Test Coverage Numbers Reality Check:**
|
||||
- With Playwright parallelization, running ALL Playwright tests is as fast as running just P0
|
||||
- Don't split Playwright tests by priority into different CI gates - it adds no value
|
||||
- Tool type matters, not priority labels
|
||||
- Defer based on infrastructure cost, not importance
|
||||
|
||||
**After System-Level Mode:** Workflow COMPLETE. System-level outputs (test-design-architecture.md + test-design-qa.md) are written in this step. Steps 2-4 are epic-level only - do NOT execute them in system-level mode.
|
||||
|
||||
---
|
||||
|
|
@ -683,51 +540,12 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
|
||||
8. **Plan Mitigations**
|
||||
|
||||
**CRITICAL: Mitigation placement depends on WHO does the work**
|
||||
|
||||
For each high-priority risk:
|
||||
- Define mitigation strategy
|
||||
- Assign owner (dev, QA, ops)
|
||||
- Set timeline
|
||||
- Update residual risk expectation
|
||||
|
||||
**Mitigation Plan Placement:**
|
||||
|
||||
**Architecture Doc:**
|
||||
- Mitigations owned by Backend, DevOps, Architecture, Security, Data Eng
|
||||
- Example: "Add authorization layer for customer-scoped access" (Backend work)
|
||||
- Example: "Configure AWS Fault Injection Simulator" (DevOps work)
|
||||
- Example: "Define CloudWatch log schema for backfill events" (Architecture work)
|
||||
|
||||
**QA Doc:**
|
||||
- Mitigations owned by QA (test development work)
|
||||
- Example: "Create factories for test data with randomization" (QA work)
|
||||
- Example: "Implement polling with retry for async validation" (QA test code)
|
||||
- Brief reference to Architecture doc mitigations (don't duplicate)
|
||||
|
||||
**Rule of Thumb:**
|
||||
- If mitigation requires production code changes → Architecture doc
|
||||
- If mitigation is test infrastructure/code → QA doc
|
||||
- If mitigation involves multiple teams → Architecture doc with QA validation approach
|
||||
|
||||
**Assumptions Placement:**
|
||||
|
||||
**Architecture Doc:**
|
||||
- Architectural assumptions (SLO targets, replication lag, system design assumptions)
|
||||
- Example: "P95 <500ms inferred from <2s timeout (requires Product approval)"
|
||||
- Example: "Multi-region replication lag <1s assumed (ADR doesn't specify SLA)"
|
||||
- Example: "Cache hit ratio >80% assumed (not in PRD/ADR)"
|
||||
|
||||
**QA Doc:**
|
||||
- Test execution assumptions (test infrastructure readiness, test data availability)
|
||||
- Example: "Assumes test factories already created"
|
||||
- Example: "Assumes CI/CD pipeline configured"
|
||||
- Brief reference to Architecture doc for architectural assumptions
|
||||
|
||||
**Rule of Thumb:**
|
||||
- If assumption is about system architecture/design → Architecture doc
|
||||
- If assumption is about test infrastructure/execution → QA doc
|
||||
|
||||
---
|
||||
|
||||
## Step 3: Design Test Coverage
|
||||
|
|
@ -776,8 +594,6 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
|
||||
3. **Assign Priority Levels**
|
||||
|
||||
**CRITICAL: P0/P1/P2/P3 indicates priority and risk level, NOT execution timing**
|
||||
|
||||
**Knowledge Base Reference**: `test-priorities-matrix.md`
|
||||
|
||||
**P0 (Critical)**:
|
||||
|
|
@ -785,28 +601,25 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
- High-risk areas (score ≥6)
|
||||
- Revenue-impacting
|
||||
- Security-critical
|
||||
- No workaround exists
|
||||
- Affects majority of users
|
||||
- **Run on every commit**
|
||||
|
||||
**P1 (High)**:
|
||||
- Important user features
|
||||
- Medium-risk areas (score 3-4)
|
||||
- Common workflows
|
||||
- Workaround exists but difficult
|
||||
- **Run on PR to main**
|
||||
|
||||
**P2 (Medium)**:
|
||||
- Secondary features
|
||||
- Low-risk areas (score 1-2)
|
||||
- Edge cases
|
||||
- Regression prevention
|
||||
- **Run nightly or weekly**
|
||||
|
||||
**P3 (Low)**:
|
||||
- Nice-to-have
|
||||
- Exploratory
|
||||
- Performance benchmarks
|
||||
- Documentation validation
|
||||
|
||||
**NOTE:** Priority classification is separate from execution timing. A P1 test might run in PRs if it's fast, or nightly if it requires expensive infrastructure (e.g., k6 performance test). See "Execution Strategy" section for timing guidance.
|
||||
- **Run on-demand**
|
||||
|
||||
4. **Outline Data and Tooling Prerequisites**
|
||||
|
||||
|
|
@ -816,55 +629,13 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
- Environment setup
|
||||
- Tools and dependencies
|
||||
|
||||
5. **Define Execution Strategy** (Keep It Simple)
|
||||
5. **Define Execution Order**
|
||||
|
||||
**IMPORTANT: Avoid over-engineering execution order**
|
||||
|
||||
**Default Philosophy:**
|
||||
- Run **everything** in PRs if total duration <15 minutes
|
||||
- Playwright is fast with parallelization (100s of tests in ~10-15 min)
|
||||
- Only defer to nightly/weekly if there's significant overhead:
|
||||
- Performance tests (k6, load testing) - expensive infrastructure
|
||||
- Chaos engineering - requires special setup (AWS FIS)
|
||||
- Long-running tests - endurance (4+ hours), disaster recovery
|
||||
- Manual tests - require human intervention
|
||||
|
||||
**Simple Execution Strategy (Organized by TOOL TYPE):**
|
||||
|
||||
```markdown
|
||||
## Execution Strategy
|
||||
|
||||
**Philosophy**: Run everything in PRs unless significant infrastructure overhead.
|
||||
Playwright with parallelization is extremely fast (100s of tests in ~10-15 min).
|
||||
|
||||
**Organized by TOOL TYPE:**
|
||||
|
||||
### Every PR: Playwright Tests (~10-15 min)
|
||||
All functional tests (from any priority level):
|
||||
- All E2E, API, integration, unit tests using Playwright
|
||||
- Parallelized across {N} shards
|
||||
- Total: ~{N} tests (includes P0, P1, P2, P3)
|
||||
|
||||
### Nightly: k6 Performance Tests (~30-60 min)
|
||||
All performance tests (from any priority level):
|
||||
- Load, stress, spike, endurance
|
||||
- Reason: Expensive infrastructure, long-running (10-40 min per test)
|
||||
|
||||
### Weekly: Chaos & Long-Running (~hours)
|
||||
Special infrastructure tests (from any priority level):
|
||||
- Multi-region failover, disaster recovery, endurance
|
||||
- Reason: Very expensive, very long (4+ hours)
|
||||
```
|
||||
|
||||
**KEY INSIGHT: Organize by TOOL TYPE, not priority**
|
||||
- Playwright (fast, cheap) → PR
|
||||
- k6 (expensive, long) → Nightly
|
||||
- Chaos/Manual (very expensive, very long) → Weekly
|
||||
|
||||
**Avoid:**
|
||||
- ❌ Don't organize by priority (smoke → P0 → P1 → P2 → P3)
|
||||
- ❌ Don't say "P1 runs on PR to main" (some P1 are Playwright/PR, some are k6/Nightly)
|
||||
- ❌ Don't create artificial tiers - organize by tool type and infrastructure overhead
|
||||
Recommend test execution sequence:
|
||||
1. **Smoke tests** (P0 subset, <5 min)
|
||||
2. **P0 tests** (critical paths, <10 min)
|
||||
3. **P1 tests** (important features, <30 min)
|
||||
4. **P2/P3 tests** (full regression, <60 min)
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -890,66 +661,34 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
|||
| Login flow | E2E | P0 | R-001 | 3 | QA |
|
||||
```
|
||||
|
||||
3. **Document Execution Strategy** (Simple, Not Redundant)
|
||||
|
||||
**IMPORTANT: Keep execution strategy simple and avoid redundancy**
|
||||
3. **Document Execution Order**
|
||||
|
||||
```markdown
|
||||
## Execution Strategy
|
||||
### Smoke Tests (<5 min)
|
||||
|
||||
**Default: Run all functional tests in PRs (~10-15 min)**
|
||||
- All Playwright tests (parallelized across 4 shards)
|
||||
- Includes E2E, API, integration, unit tests
|
||||
- Total: ~{N} tests
|
||||
- Login successful
|
||||
- Dashboard loads
|
||||
|
||||
**Nightly: Performance & Infrastructure tests**
|
||||
- k6 load/stress/spike tests (~30-60 min)
|
||||
- Reason: Expensive infrastructure, long-running
|
||||
### P0 Tests (<10 min)
|
||||
|
||||
**Weekly: Chaos & Disaster Recovery**
|
||||
- Endurance tests (4+ hours)
|
||||
- Multi-region failover (requires AWS FIS)
|
||||
- Backup restore validation
|
||||
- Reason: Special infrastructure, very long-running
|
||||
- [Full P0 list]
|
||||
|
||||
### P1 Tests (<30 min)
|
||||
|
||||
- [Full P1 list]
|
||||
```
|
||||
|
||||
**DO NOT:**
|
||||
- ❌ Create redundant smoke/P0/P1/P2/P3 tier structure
|
||||
- ❌ List all tests again in execution order (already in coverage plan)
|
||||
- ❌ Split tests by priority unless there's infrastructure overhead
|
||||
|
||||
4. **Include Resource Estimates**
|
||||
|
||||
**IMPORTANT: Use intervals/ranges, not exact numbers**
|
||||
|
||||
Provide rough estimates with intervals to avoid false precision:
|
||||
|
||||
```markdown
|
||||
### Test Effort Estimates
|
||||
|
||||
- P0 scenarios: 15 tests (~1.5-2.5 hours each) = **~25-40 hours**
|
||||
- P1 scenarios: 25 tests (~0.75-1.5 hours each) = **~20-35 hours**
|
||||
- P2 scenarios: 40 tests (~0.25-0.75 hours each) = **~10-30 hours**
|
||||
- **Total:** **~55-105 hours** (~1.5-3 weeks with 1 QA engineer)
|
||||
- P0 scenarios: 15 tests × 2 hours = 30 hours
|
||||
- P1 scenarios: 25 tests × 1 hour = 25 hours
|
||||
- P2 scenarios: 40 tests × 0.5 hour = 20 hours
|
||||
- **Total:** 75 hours (~10 days)
|
||||
```
|
||||
|
||||
**Why intervals:**
|
||||
- Avoids false precision (estimates are never exact)
|
||||
- Provides flexibility for complexity variations
|
||||
- Accounts for unknowns and dependencies
|
||||
- More realistic and less prescriptive
|
||||
|
||||
**Guidelines:**
|
||||
- P0 tests: 1.5-2.5 hours each (complex setup, security, performance)
|
||||
- P1 tests: 0.75-1.5 hours each (standard integration, API tests)
|
||||
- P2 tests: 0.25-0.75 hours each (edge cases, simple validation)
|
||||
- P3 tests: 0.1-0.5 hours each (exploratory, documentation)
|
||||
|
||||
**Express totals as:**
|
||||
- Hour ranges: "~55-105 hours"
|
||||
- Week ranges: "~1.5-3 weeks"
|
||||
- Avoid: Exact numbers like "75 hours" or "11 days"
|
||||
|
||||
5. **Add Gate Criteria**
|
||||
|
||||
```markdown
|
||||
|
|
|
|||
|
|
@ -108,51 +108,54 @@
|
|||
|
||||
### Testability Concerns and Architectural Gaps
|
||||
|
||||
**🚨 ACTIONABLE CONCERNS - Architecture Team Must Address**
|
||||
**IMPORTANT**: {If system has constraints, explain them. If standard CI/CD achievable, state that.}
|
||||
|
||||
{If system has critical testability concerns, list them here. If architecture supports testing well, state "No critical testability concerns identified" and skip to Testability Assessment Summary}
|
||||
#### Blockers to Fast Feedback
|
||||
|
||||
#### 1. Blockers to Fast Feedback (WHAT WE NEED FROM ARCHITECTURE)
|
||||
| Blocker | Impact | Current Mitigation | Ideal Solution |
|
||||
|---------|--------|-------------------|----------------|
|
||||
| **{Blocker name}** | {Impact description} | {How we're working around it} | {What architecture should provide} |
|
||||
|
||||
| Concern | Impact | What Architecture Must Provide | Owner | Timeline |
|
||||
|---------|--------|--------------------------------|-------|----------|
|
||||
| **{Concern name}** | {Impact on testing} | {Specific architectural change needed} | {Team} | {Sprint} |
|
||||
#### Why This Matters
|
||||
|
||||
**Example:**
|
||||
- **No API for test data seeding** → Cannot parallelize tests → Provide POST /test/seed endpoint (Backend, Sprint 0)
|
||||
**Standard CI/CD expectations:**
|
||||
- Full test suite on every commit (~5-15 min feedback)
|
||||
- Parallel test execution (isolated test data per worker)
|
||||
- Ephemeral test environments (spin up → test → tear down)
|
||||
- Fast feedback loop (devs stay in flow state)
|
||||
|
||||
#### 2. Architectural Improvements Needed (WHAT SHOULD BE CHANGED)
|
||||
**Current reality for {Feature}:**
|
||||
- {Actual situation - what's different from standard}
|
||||
|
||||
{List specific improvements that would make the system more testable}
|
||||
#### Tiered Testing Strategy
|
||||
|
||||
{If forced by architecture, explain. If standard approach works, state that.}
|
||||
|
||||
| Tier | When | Duration | Coverage | Why Not Full Suite? |
|
||||
|------|------|----------|----------|---------------------|
|
||||
| **Smoke** | Every commit | <5 min | {N} tests | Fast feedback, catch build-breaking changes |
|
||||
| **P0** | Every commit | ~{X} min | ~{N} tests | Critical paths, security-critical flows |
|
||||
| **P1** | PR to main | ~{X} min | ~{N} tests | Important features, algorithm accuracy |
|
||||
| **P2/P3** | Nightly | ~{X} min | ~{N} tests | Edge cases, performance, NFR |
|
||||
|
||||
**Note**: {Any timing assumptions or constraints}
|
||||
|
||||
#### Architectural Improvements Needed
|
||||
|
||||
{If system has technical debt affecting testing, list improvements. If architecture supports testing well, acknowledge that.}
|
||||
|
||||
1. **{Improvement name}**
|
||||
- **Current problem**: {What's wrong}
|
||||
- **Required change**: {What architecture must do}
|
||||
- **Impact if not fixed**: {Consequences}
|
||||
- **Owner**: {Team}
|
||||
- **Timeline**: {Sprint}
|
||||
- {What to change}
|
||||
- **Impact**: {How it improves testing}
|
||||
|
||||
---
|
||||
#### Acceptance of Trade-offs
|
||||
|
||||
### Testability Assessment Summary
|
||||
For {Feature} Phase 1, the team accepts:
|
||||
- **{Trade-off 1}** ({Reasoning})
|
||||
- **{Trade-off 2}** ({Reasoning})
|
||||
- ⚠️ **{Known limitation}** ({Why acceptable for now})
|
||||
|
||||
**📊 CURRENT STATE - FYI**
|
||||
|
||||
{Only include this section if there are passing items worth mentioning. Otherwise omit.}
|
||||
|
||||
#### What Works Well
|
||||
|
||||
- ✅ {Passing item 1} (e.g., "API-first design supports parallel test execution")
|
||||
- ✅ {Passing item 2} (e.g., "Feature flags enable test isolation")
|
||||
- ✅ {Passing item 3}
|
||||
|
||||
#### Accepted Trade-offs (No Action Required)
|
||||
|
||||
For {Feature} Phase 1, the following trade-offs are acceptable:
|
||||
- **{Trade-off 1}** - {Why acceptable for now}
|
||||
- **{Trade-off 2}** - {Why acceptable for now}
|
||||
|
||||
{This is technical debt OR acceptable for Phase 1} that {should be revisited post-GA OR maintained as-is}
|
||||
This is {**technical debt** OR **acceptable for Phase 1**} that should be {revisited post-GA OR maintained as-is}.
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -1,286 +1,314 @@
|
|||
# Test Design for QA: {Feature Name}
|
||||
|
||||
**Purpose:** Test execution recipe for QA team. Defines what to test, how to test it, and what QA needs from other teams.
|
||||
**Purpose:** Test execution recipe for QA team. Defines test scenarios, coverage plan, tooling, and Sprint 0 setup requirements. Use this as your implementation guide after architectural blockers are resolved.
|
||||
|
||||
**Date:** {date}
|
||||
**Author:** {author}
|
||||
**Status:** Draft
|
||||
**Status:** Draft / Ready for Implementation
|
||||
**Project:** {project_name}
|
||||
|
||||
**Related:** See Architecture doc (test-design-architecture.md) for testability concerns and architectural blockers.
|
||||
**PRD Reference:** {prd_link}
|
||||
**ADR Reference:** {adr_link}
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
## Quick Reference for QA
|
||||
|
||||
**Scope:** {Brief description of testing scope}
|
||||
**Before You Start:**
|
||||
- [ ] Review Architecture doc (test-design-architecture.md) - understand blockers and risks
|
||||
- [ ] Verify Sprint 0 blockers resolved (see Sprint 0 section below)
|
||||
- [ ] Confirm test infrastructure ready (factories, fixtures, environments)
|
||||
|
||||
**Risk Summary:**
|
||||
- Total Risks: {N} ({X} high-priority score ≥6, {Y} medium, {Z} low)
|
||||
- Critical Categories: {Categories with most high-priority risks}
|
||||
**Test Execution Order:**
|
||||
1. **Smoke tests** (<5 min) - Fast feedback on critical paths
|
||||
2. **P0 tests** (~{X} min) - Critical paths, security-critical flows
|
||||
3. **P1 tests** (~{X} min) - Important features, algorithm accuracy
|
||||
4. **P2/P3 tests** (~{X} min) - Edge cases, performance, NFR
|
||||
|
||||
**Coverage Summary:**
|
||||
- P0 tests: ~{N} (critical paths, security)
|
||||
- P1 tests: ~{N} (important features, integration)
|
||||
- P2 tests: ~{N} (edge cases, regression)
|
||||
- P3 tests: ~{N} (exploratory, benchmarks)
|
||||
- **Total**: ~{N} tests (~{X}-{Y} weeks with 1 QA)
|
||||
**Need Help?**
|
||||
- Blockers: See Architecture doc "Quick Guide" for mitigation plans
|
||||
- Test scenarios: See "Test Coverage Plan" section below
|
||||
- Sprint 0 setup: See "Sprint 0 Setup Requirements" section
|
||||
|
||||
---
|
||||
|
||||
## Dependencies & Test Blockers
|
||||
## System Architecture Summary
|
||||
|
||||
**CRITICAL:** QA cannot proceed without these items from other teams.
|
||||
**Data Pipeline:**
|
||||
{Brief description of system flow}
|
||||
|
||||
### Backend/Architecture Dependencies (Sprint 0)
|
||||
**Key Services:**
|
||||
- **{Service 1}**: {Purpose and key responsibilities}
|
||||
- **{Service 2}**: {Purpose and key responsibilities}
|
||||
- **{Service 3}**: {Purpose and key responsibilities}
|
||||
|
||||
**Source:** See Architecture doc "Quick Guide" for detailed mitigation plans
|
||||
**Data Stores:**
|
||||
- **{Database 1}**: {What it stores}
|
||||
- **{Database 2}**: {What it stores}
|
||||
|
||||
1. **{Dependency 1}** - {Team} - {Timeline}
|
||||
- {What QA needs}
|
||||
- {Why it blocks testing}
|
||||
**Expected Scale** (from ADR):
|
||||
- {Key metrics: RPS, volume, users, etc.}
|
||||
|
||||
2. **{Dependency 2}** - {Team} - {Timeline}
|
||||
- {What QA needs}
|
||||
- {Why it blocks testing}
|
||||
---
|
||||
|
||||
### QA Infrastructure Setup (Sprint 0)
|
||||
## Test Environment Requirements
|
||||
|
||||
1. **Test Data Factories** - QA
|
||||
- {Entity} factory with faker-based randomization
|
||||
- Auto-cleanup fixtures for parallel safety
|
||||
**{Company} Standard:** Shared DB per Environment with Randomization (Shift-Left)
|
||||
|
||||
2. **Test Environments** - QA
|
||||
- Local: {Setup details}
|
||||
- CI/CD: {Setup details}
|
||||
- Staging: {Setup details}
|
||||
| Environment | Database | Test Data Strategy | Purpose |
|
||||
|-------------|----------|-------------------|---------|
|
||||
| **Local** | {DB} (shared) | Randomized (faker), auto-cleanup | Local development |
|
||||
| **Dev (CI)** | {DB} (shared) | Randomized (faker), auto-cleanup | PR validation |
|
||||
| **Staging** | {DB} (shared) | Randomized (faker), auto-cleanup | Pre-production, E2E |
|
||||
|
||||
**Example factory pattern:**
|
||||
**Key Principles:**
|
||||
- **Shared database per environment** (no ephemeral)
|
||||
- **Randomization for isolation** (faker-based unique IDs)
|
||||
- **Parallel-safe** (concurrent test runs don't conflict)
|
||||
- **Self-cleaning** (tests delete their own data)
|
||||
- **Shift-left** (test against real DBs early)
|
||||
|
||||
**Example:**
|
||||
|
||||
```typescript
|
||||
import { test } from '@seontechnologies/playwright-utils/api-request/fixtures';
|
||||
import { expect } from '@playwright/test';
|
||||
import { faker } from '@faker-js/faker';
|
||||
import { faker } from "@faker-js/faker";
|
||||
|
||||
test('example test @p0', async ({ apiRequest }) => {
|
||||
test("example with randomized test data @p0", async ({ apiRequest }) => {
|
||||
const testData = {
|
||||
id: `test-${faker.string.uuid()}`,
|
||||
email: faker.internet.email(),
|
||||
customerId: `test-customer-${faker.string.alphanumeric(8)}`,
|
||||
// ... unique test data
|
||||
};
|
||||
|
||||
const { status } = await apiRequest({
|
||||
method: 'POST',
|
||||
path: '/api/resource',
|
||||
body: testData,
|
||||
});
|
||||
|
||||
expect(status).toBe(201);
|
||||
// Seed, test, cleanup
|
||||
});
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Risk Assessment
|
||||
## Testability Assessment
|
||||
|
||||
**Note:** Full risk details in Architecture doc. This section summarizes risks relevant to QA test planning.
|
||||
**Prerequisites from Architecture Doc:**
|
||||
|
||||
### High-Priority Risks (Score ≥6)
|
||||
Verify these blockers are resolved before test development:
|
||||
- [ ] {Blocker 1} (see Architecture doc Quick Guide → 🚨 BLOCKERS)
|
||||
- [ ] {Blocker 2}
|
||||
- [ ] {Blocker 3}
|
||||
|
||||
| Risk ID | Category | Description | Score | QA Test Coverage |
|
||||
|---------|----------|-------------|-------|------------------|
|
||||
| **{R-ID}** | {CAT} | {Brief description} | **{Score}** | {How QA validates this risk} |
|
||||
**If Prerequisites Not Met:** Coordinate with Architecture team (see Architecture doc for mitigation plans and owner assignments)
|
||||
|
||||
### Medium/Low-Priority Risks
|
||||
---
|
||||
|
||||
| Risk ID | Category | Description | Score | QA Test Coverage |
|
||||
|---------|----------|-------------|-------|------------------|
|
||||
| {R-ID} | {CAT} | {Brief description} | {Score} | {How QA validates this risk} |
|
||||
## Test Levels Strategy
|
||||
|
||||
**System Type:** {API-heavy / UI-heavy / Mixed backend system}
|
||||
|
||||
**Recommended Split:**
|
||||
- **Unit Tests: {X}%** - {What to unit test}
|
||||
- **Integration/API Tests: {X}%** - ⭐ **PRIMARY FOCUS** - {What to integration test}
|
||||
- **E2E Tests: {X}%** - {What to E2E test}
|
||||
|
||||
**Rationale:** {Why this split makes sense for this system}
|
||||
|
||||
**Test Count Summary:**
|
||||
- P0: ~{N} tests - Critical paths, run on every commit
|
||||
- P1: ~{N} tests - Important features, run on PR to main
|
||||
- P2: ~{N} tests - Edge cases, run nightly/weekly
|
||||
- P3: ~{N} tests - Exploratory, run on-demand
|
||||
- **Total: ~{N} tests** (~{X} weeks for 1 QA, ~{Y} weeks for 2 QAs)
|
||||
|
||||
---
|
||||
|
||||
## Test Coverage Plan
|
||||
|
||||
**IMPORTANT:** P0/P1/P2/P3 = **priority and risk level** (what to focus on if time-constrained), NOT execution timing. See "Execution Strategy" for when tests run.
|
||||
**Repository Note:** {Where tests live - backend repo, admin panel repo, etc. - and how CI pipelines are organized}
|
||||
|
||||
### P0 (Critical)
|
||||
### P0 (Critical) - Run on every commit (~{X} min)
|
||||
|
||||
**Criteria:** Blocks core functionality + High risk (≥6) + No workaround + Affects majority of users
|
||||
**Execution:** CI/CD on every commit, parallel workers, smoke tests first (<5 min)
|
||||
|
||||
| Test ID | Requirement | Test Level | Risk Link | Notes |
|
||||
|---------|-------------|------------|-----------|-------|
|
||||
| **P0-001** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||
| **P0-002** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||
**Purpose:** Critical path validation - catch build-breaking changes and security violations immediately
|
||||
|
||||
**Total P0:** ~{N} tests
|
||||
**Criteria:** Blocks core functionality OR High risk (≥6) OR No workaround
|
||||
|
||||
**Key Smoke Tests** (subset of P0, run first for fast feedback):
|
||||
- {Smoke test 1} - {Duration}
|
||||
- {Smoke test 2} - {Duration}
|
||||
- {Smoke test 3} - {Duration}
|
||||
|
||||
| Requirement | Test Level | Risk Link | Test Count | Owner | Notes |
|
||||
|-------------|------------|-----------|------------|-------|-------|
|
||||
| {Requirement 1} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
||||
| {Requirement 2} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
||||
|
||||
**Total P0:** ~{N} tests (~{X} weeks)
|
||||
|
||||
#### P0 Test Scenarios (Detailed)
|
||||
|
||||
**1. {Test Category} ({N} tests) - {CRITICALITY if applicable}**
|
||||
|
||||
- [ ] {Scenario 1 with checkbox}
|
||||
- [ ] {Scenario 2}
|
||||
- [ ] {Scenario 3}
|
||||
|
||||
**2. {Test Category 2} ({N} tests)**
|
||||
|
||||
- [ ] {Scenario 1}
|
||||
- [ ] {Scenario 2}
|
||||
|
||||
{Continue for all P0 categories}
|
||||
|
||||
---
|
||||
|
||||
### P1 (High)
|
||||
### P1 (High) - Run on PR to main (~{X} min additional)
|
||||
|
||||
**Criteria:** Important features + Medium risk (3-4) + Common workflows + Workaround exists but difficult
|
||||
**Execution:** CI/CD on pull requests to main branch, runs after P0 passes, parallel workers
|
||||
|
||||
| Test ID | Requirement | Test Level | Risk Link | Notes |
|
||||
|---------|-------------|------------|-----------|-------|
|
||||
| **P1-001** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||
| **P1-002** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||
**Purpose:** Important feature coverage - algorithm accuracy, complex workflows, Admin Panel interactions
|
||||
|
||||
**Total P1:** ~{N} tests
|
||||
**Criteria:** Important features OR Medium risk (3-4) OR Common workflows
|
||||
|
||||
| Requirement | Test Level | Risk Link | Test Count | Owner | Notes |
|
||||
|-------------|------------|-----------|------------|-------|-------|
|
||||
| {Requirement 1} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
||||
| {Requirement 2} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
||||
|
||||
**Total P1:** ~{N} tests (~{X} weeks)
|
||||
|
||||
#### P1 Test Scenarios (Detailed)
|
||||
|
||||
**1. {Test Category} ({N} tests)**
|
||||
|
||||
- [ ] {Scenario 1}
|
||||
- [ ] {Scenario 2}
|
||||
|
||||
{Continue for all P1 categories}
|
||||
|
||||
---
|
||||
|
||||
### P2 (Medium)
|
||||
### P2 (Medium) - Run nightly/weekly (~{X} min)
|
||||
|
||||
**Criteria:** Secondary features + Low risk (1-2) + Edge cases + Regression prevention
|
||||
**Execution:** Scheduled nightly run (or weekly for P3), full infrastructure, sequential execution acceptable
|
||||
|
||||
| Test ID | Requirement | Test Level | Risk Link | Notes |
|
||||
|---------|-------------|------------|-----------|-------|
|
||||
| **P2-001** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||
**Purpose:** Edge case coverage, error handling, data integrity validation - slow feedback acceptable
|
||||
|
||||
**Total P2:** ~{N} tests
|
||||
**Criteria:** Secondary features OR Low risk (1-2) OR Edge cases
|
||||
|
||||
| Requirement | Test Level | Risk Link | Test Count | Owner | Notes |
|
||||
|-------------|------------|-----------|------------|-------|-------|
|
||||
| {Requirement 1} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
||||
| {Requirement 2} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
||||
|
||||
**Total P2:** ~{N} tests (~{X} weeks)
|
||||
|
||||
---
|
||||
|
||||
### P3 (Low)
|
||||
### P3 (Low) - Run on-demand (exploratory)
|
||||
|
||||
**Criteria:** Nice-to-have + Exploratory + Performance benchmarks + Documentation validation
|
||||
**Execution:** Manual trigger or weekly scheduled run, performance testing
|
||||
|
||||
| Test ID | Requirement | Test Level | Notes |
|
||||
|---------|-------------|------------|-------|
|
||||
| **P3-001** | {Requirement} | {Level} | {Notes} |
|
||||
**Purpose:** Full regression, performance benchmarks, accessibility validation - no time pressure
|
||||
|
||||
**Total P3:** ~{N} tests
|
||||
**Criteria:** Nice-to-have OR Exploratory OR Performance benchmarks
|
||||
|
||||
| Requirement | Test Level | Test Count | Owner | Notes |
|
||||
|-------------|------------|------------|-------|-------|
|
||||
| {Requirement 1} | {Level} | {N} | QA | {Notes} |
|
||||
| {Requirement 2} | {Level} | {N} | QA | {Notes} |
|
||||
|
||||
**Total P3:** ~{N} tests (~{X} days)
|
||||
|
||||
---
|
||||
|
||||
## Execution Strategy
|
||||
### Coverage Matrix (Requirements → Tests)
|
||||
|
||||
**Philosophy:** Run everything in PRs unless there's significant infrastructure overhead. Playwright with parallelization is extremely fast (100s of tests in ~10-15 min).
|
||||
|
||||
**Organized by TOOL TYPE:**
|
||||
|
||||
### Every PR: Playwright Tests (~10-15 min)
|
||||
|
||||
**All functional tests** (from any priority level):
|
||||
- All E2E, API, integration, unit tests using Playwright
|
||||
- Parallelized across {N} shards
|
||||
- Total: ~{N} Playwright tests (includes P0, P1, P2, P3)
|
||||
|
||||
**Why run in PRs:** Fast feedback, no expensive infrastructure
|
||||
|
||||
### Nightly: k6 Performance Tests (~30-60 min)
|
||||
|
||||
**All performance tests** (from any priority level):
|
||||
- Load, stress, spike, endurance tests
|
||||
- Total: ~{N} k6 tests (may include P0, P1, P2)
|
||||
|
||||
**Why defer to nightly:** Expensive infrastructure (k6 Cloud), long-running (10-40 min per test)
|
||||
|
||||
### Weekly: Chaos & Long-Running (~hours)
|
||||
|
||||
**Special infrastructure tests** (from any priority level):
|
||||
- Multi-region failover (requires AWS Fault Injection Simulator)
|
||||
- Disaster recovery (backup restore, 4+ hours)
|
||||
- Endurance tests (4+ hours runtime)
|
||||
|
||||
**Why defer to weekly:** Very expensive infrastructure, very long-running, infrequent validation sufficient
|
||||
|
||||
**Manual tests** (excluded from automation):
|
||||
- DevOps validation (deployment, monitoring)
|
||||
- Finance validation (cost alerts)
|
||||
- Documentation validation
|
||||
| Requirement | Test Level | Priority | Risk Link | Test Count | Owner |
|
||||
|-------------|------------|----------|-----------|------------|-------|
|
||||
| {Requirement 1} | {Level} | {P0-P3} | {R-ID} | {N} | {Owner} |
|
||||
| {Requirement 2} | {Level} | {P0-P3} | {R-ID} | {N} | {Owner} |
|
||||
|
||||
---
|
||||
|
||||
## QA Effort Estimate
|
||||
## Sprint 0 Setup Requirements
|
||||
|
||||
**QA test development effort only** (excludes DevOps, Backend, Data Eng, Finance work):
|
||||
**IMPORTANT:** These items **BLOCK test development**. Complete in Sprint 0 before QA can write tests.
|
||||
|
||||
| Priority | Count | Effort Range | Notes |
|
||||
|----------|-------|--------------|-------|
|
||||
| P0 | ~{N} | ~{X}-{Y} weeks | Complex setup (security, performance, multi-step) |
|
||||
| P1 | ~{N} | ~{X}-{Y} weeks | Standard coverage (integration, API tests) |
|
||||
| P2 | ~{N} | ~{X}-{Y} days | Edge cases, simple validation |
|
||||
| P3 | ~{N} | ~{X}-{Y} days | Exploratory, benchmarks |
|
||||
| **Total** | ~{N} | **~{X}-{Y} weeks** | **1 QA engineer, full-time** |
|
||||
### Architecture/Backend Blockers (from Architecture doc)
|
||||
|
||||
**Assumptions:**
|
||||
- Includes test design, implementation, debugging, CI integration
|
||||
- Excludes ongoing maintenance (~10% effort)
|
||||
- Assumes test infrastructure (factories, fixtures) ready
|
||||
**Source:** See Architecture doc "Quick Guide" for detailed mitigation plans
|
||||
|
||||
**Dependencies from other teams:**
|
||||
- See "Dependencies & Test Blockers" section for what QA needs from Backend, DevOps, Data Eng
|
||||
1. **{Blocker 1}** 🚨 **BLOCKER** - {Owner}
|
||||
- {What needs to be provided}
|
||||
- **Details:** Architecture doc {Risk-ID} mitigation plan
|
||||
|
||||
2. **{Blocker 2}** 🚨 **BLOCKER** - {Owner}
|
||||
- {What needs to be provided}
|
||||
- **Details:** Architecture doc {Risk-ID} mitigation plan
|
||||
|
||||
### QA Test Infrastructure
|
||||
|
||||
1. **{Factory/Fixture Name}** - QA
|
||||
- Faker-based generator: `{function_signature}`
|
||||
- Auto-cleanup after tests
|
||||
|
||||
2. **{Entity} Fixtures** - QA
|
||||
- Seed scripts for {states/scenarios}
|
||||
- Isolated {id_pattern} per test
|
||||
|
||||
### Test Environments
|
||||
|
||||
**Local:** {Setup details - Docker, LocalStack, etc.}
|
||||
|
||||
**CI/CD:** {Setup details - shared infrastructure, parallel workers, artifacts}
|
||||
|
||||
**Staging:** {Setup details - shared multi-tenant, nightly E2E}
|
||||
|
||||
**Production:** {Setup details - feature flags, canary transactions}
|
||||
|
||||
**Sprint 0 NFR Gates** (MUST complete before integration testing):
|
||||
- [ ] {Gate 1}: {Description} (Owner) 🚨
|
||||
- [ ] {Gate 2}: {Description} (Owner) 🚨
|
||||
- [ ] {Gate 3}: {Description} (Owner) 🚨
|
||||
|
||||
### Sprint 1 Items (Not Sprint 0)
|
||||
|
||||
- **{Item 1}** ({Owner}): {Description}
|
||||
- **{Item 2}** ({Owner}): {Description}
|
||||
|
||||
**Sprint 1 NFR Gates** (MUST complete before GA):
|
||||
- [ ] {Gate 1}: {Description} (Owner)
|
||||
- [ ] {Gate 2}: {Description} (Owner)
|
||||
|
||||
---
|
||||
|
||||
## Appendix A: Code Examples & Tagging
|
||||
## NFR Readiness Summary
|
||||
|
||||
**Playwright Tags for Selective Execution:**
|
||||
**Based on Architecture Doc Risk Assessment**
|
||||
|
||||
```typescript
|
||||
import { test } from '@seontechnologies/playwright-utils/api-request/fixtures';
|
||||
import { expect } from '@playwright/test';
|
||||
| NFR Category | Status | Evidence Status | Blocker | Next Action |
|
||||
|--------------|--------|-----------------|---------|-------------|
|
||||
| **Testability & Automation** | {Status} | {Evidence} | {Sprint} | {Action} |
|
||||
| **Test Data Strategy** | {Status} | {Evidence} | {Sprint} | {Action} |
|
||||
| **Scalability & Availability** | {Status} | {Evidence} | {Sprint} | {Action} |
|
||||
| **Disaster Recovery** | {Status} | {Evidence} | {Sprint} | {Action} |
|
||||
| **Security** | {Status} | {Evidence} | {Sprint} | {Action} |
|
||||
| **Monitorability, Debuggability & Manageability** | {Status} | {Evidence} | {Sprint} | {Action} |
|
||||
| **QoS & QoE** | {Status} | {Evidence} | {Sprint} | {Action} |
|
||||
| **Deployability** | {Status} | {Evidence} | {Sprint} | {Action} |
|
||||
|
||||
// P0 critical test
|
||||
test('@P0 @API @Security unauthenticated request returns 401', async ({ apiRequest }) => {
|
||||
const { status, body } = await apiRequest({
|
||||
method: 'POST',
|
||||
path: '/api/endpoint',
|
||||
body: { data: 'test' },
|
||||
skipAuth: true,
|
||||
});
|
||||
|
||||
expect(status).toBe(401);
|
||||
expect(body.error).toContain('unauthorized');
|
||||
});
|
||||
|
||||
// P1 integration test
|
||||
test('@P1 @Integration data syncs correctly', async ({ apiRequest }) => {
|
||||
// Seed data
|
||||
await apiRequest({
|
||||
method: 'POST',
|
||||
path: '/api/seed',
|
||||
body: { /* test data */ },
|
||||
});
|
||||
|
||||
// Validate
|
||||
const { status, body } = await apiRequest({
|
||||
method: 'GET',
|
||||
path: '/api/resource',
|
||||
});
|
||||
|
||||
expect(status).toBe(200);
|
||||
expect(body).toHaveProperty('data');
|
||||
});
|
||||
```
|
||||
|
||||
**Run specific tags:**
|
||||
|
||||
```bash
|
||||
# Run only P0 tests
|
||||
npx playwright test --grep @P0
|
||||
|
||||
# Run P0 + P1 tests
|
||||
npx playwright test --grep "@P0|@P1"
|
||||
|
||||
# Run only security tests
|
||||
npx playwright test --grep @Security
|
||||
|
||||
# Run all Playwright tests in PR (default)
|
||||
npx playwright test
|
||||
```
|
||||
**Total:** {N} PASS, {N} CONCERNS across {N} categories
|
||||
|
||||
---
|
||||
|
||||
## Appendix B: Knowledge Base References
|
||||
**End of QA Document**
|
||||
|
||||
- **Risk Governance**: `risk-governance.md` - Risk scoring methodology
|
||||
- **Test Priorities Matrix**: `test-priorities-matrix.md` - P0-P3 criteria
|
||||
- **Test Levels Framework**: `test-levels-framework.md` - E2E vs API vs Unit selection
|
||||
- **Test Quality**: `test-quality.md` - Definition of Done (no hard waits, <300 lines, <1.5 min)
|
||||
**Next Steps for QA Team:**
|
||||
1. Verify Sprint 0 blockers resolved (coordinate with Architecture team if not)
|
||||
2. Set up test infrastructure (factories, fixtures, environments)
|
||||
3. Begin test implementation following priority order (P0 → P1 → P2 → P3)
|
||||
4. Run smoke tests first for fast feedback
|
||||
5. Track progress using test scenario checklists above
|
||||
|
||||
---
|
||||
|
||||
**Generated by:** BMad TEA Agent
|
||||
**Workflow:** `_bmad/bmm/testarch/test-design`
|
||||
**Version:** 4.0 (BMad v6)
|
||||
**Next Steps for Architecture Team:**
|
||||
1. Monitor Sprint 0 blocker resolution
|
||||
2. Provide support for QA infrastructure setup if needed
|
||||
3. Review test results and address any newly discovered testability gaps
|
||||
|
|
|
|||
|
|
@ -1,11 +1,11 @@
|
|||
module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs
|
||||
core,,Advanced Elicitation,AE,10,_bmad/core/workflows/advanced-elicitation/workflow.xml,bmad_advanced-elicitation,false,,,"Apply elicitation methods iteratively to enhance content being generated, presenting options and allowing reshuffle or full method listing for comprehensive content improvement",,
|
||||
core,,Brainstorming,BS,20,_bmad/core/workflows/brainstorming/workflow.md,bmad_brainstorming,false,analyst,,Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods,{output_folder}/brainstorming/brainstorming-session-{{date}}.md,,
|
||||
core,,Party Mode,PM,30,_bmad/core/workflows/party-mode/workflow.md,bmad_party-mode,false,party-mode facilitator,,Orchestrates group discussions between all installed BMAD agents enabling natural multi-agent conversations,,
|
||||
core,,bmad-help,BH,40,_bmad/core/tasks/bmad-help.md,bmad_help,false,system,,Get unstuck by showing what workflow steps come next or answering questions about what to do in the BMad Method,,
|
||||
core,,Index Docs,ID,50,_bmad/core/tasks/index-docs.xml,bmad_index-docs,false,llm,,Generates or updates an index.md of all documents in the specified directory,,
|
||||
core,,Execute Workflow,WF,60,_bmad/core/tasks/workflow.xml,bmad_workflow,false,llm,,Execute given workflow by loading its configuration following instructions and producing output,,
|
||||
core,,Shard Document,SD,70,_bmad/core/tasks/shard-doc.xml,bmad_shard-doc,false,llm,,Splits large markdown documents into smaller organized files based on level 2 sections,,
|
||||
core,,Editorial Review - Prose,EP,80,_bmad/core/tasks/editorial-review-prose.xml,bmad_editorial-review-prose,false,llm,reader_type,Clinical copy-editor that reviews text for communication issues,,"three-column markdown table with suggested fixes",
|
||||
core,,Editorial Review - Structure,ES,90,_bmad/core/tasks/editorial-review-structure.xml,bmad_editorial-review-structure,false,llm,,Structural editor that proposes cuts reorganization and simplification while preserving comprehension,,
|
||||
core,,Adversarial Review (General),AR,100,_bmad/core/tasks/review-adversarial-general.xml,bmad_review-adversarial-general,false,llm,,Cynically review content and produce findings,,
|
||||
core,,Advanced Elicitation,AE,10,_bmad/core/workflows/advanced-elicitation/workflow.xml,bmad:advanced-elicitation,false,,,"Apply elicitation methods iteratively to enhance content being generated, presenting options and allowing reshuffle or full method listing for comprehensive content improvement",,
|
||||
core,,Brainstorming,BS,20,_bmad/core/workflows/brainstorming/workflow.md,bmad:brainstorming,false,analyst,,Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods,{output_folder}/brainstorming/brainstorming-session-{{date}}.md,,
|
||||
core,,Party Mode,PM,30,_bmad/core/workflows/party-mode/workflow.md,bmad:party-mode,false,party-mode facilitator,,Orchestrates group discussions between all installed BMAD agents enabling natural multi-agent conversations,,
|
||||
core,,bmad-help,BH,40,_bmad/core/tasks/bmad-help.md,bmad:help,false,system,,Get unstuck by showing what workflow steps come next or answering questions about what to do in the BMad Method,,
|
||||
core,,Index Docs,ID,50,_bmad/core/tasks/index-docs.xml,bmad:index-docs,false,llm,,Generates or updates an index.md of all documents in the specified directory,,
|
||||
core,,Execute Workflow,WF,60,_bmad/core/tasks/workflow.xml,bmad:workflow,false,llm,,Execute given workflow by loading its configuration following instructions and producing output,,
|
||||
core,,Shard Document,SD,70,_bmad/core/tasks/shard-doc.xml,bmad:shard-doc,false,llm,,Splits large markdown documents into smaller organized files based on level 2 sections,,
|
||||
core,,Editorial Review - Prose,EP,80,_bmad/core/tasks/editorial-review-prose.xml,bmad:editorial-review-prose,false,llm,reader_type,Clinical copy-editor that reviews text for communication issues,,"three-column markdown table with suggested fixes",
|
||||
core,,Editorial Review - Structure,ES,90,_bmad/core/tasks/editorial-review-structure.xml,bmad:editorial-review-structure,false,llm,,Structural editor that proposes cuts reorganization and simplification while preserving comprehension,,
|
||||
core,,Adversarial Review (General),AR,100,_bmad/core/tasks/review-adversarial-general.xml,bmad:review-adversarial-general,false,llm,,Cynically review content and produce findings,,
|
||||
|
|
|
|||
|
Can't render this file because it has a wrong number of fields in line 3.
|
|
|
@ -6,8 +6,6 @@
|
|||
|
||||
<inputs>
|
||||
<input name="content" desc="Content to review - diff, spec, story, doc, or any artifact" />
|
||||
<input name="also_consider" required="false"
|
||||
desc="Optional areas to keep in mind during review alongside normal adversarial analysis" />
|
||||
</inputs>
|
||||
|
||||
<llm critical="true">
|
||||
|
|
|
|||
|
|
@ -1,56 +0,0 @@
|
|||
# Adversarial Review Test Suite
|
||||
|
||||
Tests for the `also_consider` optional input in `review-adversarial-general.xml`.
|
||||
|
||||
## Purpose
|
||||
|
||||
Evaluate whether the `also_consider` input gently nudges the reviewer toward specific areas without overriding normal adversarial analysis.
|
||||
|
||||
## Test Content
|
||||
|
||||
All tests use `sample-content.md` - a deliberately imperfect User Authentication API doc with:
|
||||
|
||||
- Vague error handling section
|
||||
- Missing rate limit details
|
||||
- No token expiration info
|
||||
- Password in plain text example
|
||||
- Missing authentication headers
|
||||
- No error response examples
|
||||
|
||||
## Running Tests
|
||||
|
||||
For each test case in `test-cases.yaml`, invoke the adversarial review task.
|
||||
|
||||
### Manual Test Invocation
|
||||
|
||||
```
|
||||
Review this content using the adversarial review task:
|
||||
|
||||
<content>
|
||||
[paste sample-content.md]
|
||||
</content>
|
||||
|
||||
<also_consider>
|
||||
[paste items from test case, or omit for TC01]
|
||||
</also_consider>
|
||||
```
|
||||
|
||||
## Evaluation Criteria
|
||||
|
||||
For each test, note:
|
||||
|
||||
1. **Total findings** - Still hitting ~10 issues?
|
||||
2. **Distribution** - Are findings spread across concerns or clustered?
|
||||
3. **Relevance** - Do findings relate to `also_consider` items when provided?
|
||||
4. **Balance** - Are `also_consider` findings elevated over others, or naturally mixed?
|
||||
5. **Quality** - Are findings actionable regardless of source?
|
||||
|
||||
## Expected Outcomes
|
||||
|
||||
- **TC01 (baseline)**: Generic spread of findings
|
||||
- **TC02-TC05 (domain-focused)**: Some findings align with domain, others still organic
|
||||
- **TC06 (single item)**: Light influence, not dominant
|
||||
- **TC07 (vague items)**: Minimal change from baseline
|
||||
- **TC08 (specific items)**: Direct answers if gaps exist
|
||||
- **TC09 (mixed)**: Balanced across domains
|
||||
- **TC10 (contradictory)**: Graceful handling
|
||||
|
|
@ -1,46 +0,0 @@
|
|||
# User Authentication API
|
||||
|
||||
## Overview
|
||||
|
||||
This API provides endpoints for user authentication and session management.
|
||||
|
||||
## Endpoints
|
||||
|
||||
### POST /api/auth/login
|
||||
|
||||
Authenticates a user and returns a token.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{
|
||||
"email": "user@example.com",
|
||||
"password": "password123"
|
||||
}
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"token": "eyJhbGciOiJIUzI1NiIs...",
|
||||
"user": {
|
||||
"id": 1,
|
||||
"email": "user@example.com"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### POST /api/auth/logout
|
||||
|
||||
Logs out the current user.
|
||||
|
||||
### GET /api/auth/me
|
||||
|
||||
Returns the current user's profile.
|
||||
|
||||
## Error Handling
|
||||
|
||||
Errors return appropriate HTTP status codes.
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
Rate limiting is applied to prevent abuse.
|
||||
|
|
@ -1,103 +0,0 @@
|
|||
# Test Cases for review-adversarial-general.xml with also_consider input
|
||||
#
|
||||
# Purpose: Evaluate how the optional also_consider input influences review findings
|
||||
# Content: All tests use sample-content.md (User Authentication API docs)
|
||||
#
|
||||
# To run: Manually invoke the task with each configuration and compare outputs
|
||||
|
||||
test_cases:
|
||||
# BASELINE - No also_consider
|
||||
- id: TC01
|
||||
name: "Baseline - no also_consider"
|
||||
description: "Control test with no also_consider input"
|
||||
also_consider: null
|
||||
expected_behavior: "Generic adversarial findings across all aspects"
|
||||
|
||||
# DOCUMENTATION-FOCUSED
|
||||
- id: TC02
|
||||
name: "Documentation - reader confusion"
|
||||
description: "Nudge toward documentation UX issues"
|
||||
also_consider:
|
||||
- What would confuse a first-time reader?
|
||||
- What questions are left unanswered?
|
||||
- What could be interpreted multiple ways?
|
||||
- What jargon is unexplained?
|
||||
expected_behavior: "More findings about clarity, completeness, reader experience"
|
||||
|
||||
- id: TC03
|
||||
name: "Documentation - examples and usage"
|
||||
description: "Nudge toward practical usage gaps"
|
||||
also_consider:
|
||||
- Missing code examples
|
||||
- Unclear usage patterns
|
||||
- Edge cases not documented
|
||||
expected_behavior: "More findings about practical application gaps"
|
||||
|
||||
# SECURITY-FOCUSED
|
||||
- id: TC04
|
||||
name: "Security review"
|
||||
description: "Nudge toward security concerns"
|
||||
also_consider:
|
||||
- Authentication vulnerabilities
|
||||
- Token handling issues
|
||||
- Input validation gaps
|
||||
- Information disclosure risks
|
||||
expected_behavior: "More security-related findings"
|
||||
|
||||
# API DESIGN-FOCUSED
|
||||
- id: TC05
|
||||
name: "API design"
|
||||
description: "Nudge toward API design best practices"
|
||||
also_consider:
|
||||
- REST conventions not followed
|
||||
- Inconsistent response formats
|
||||
- Missing pagination or filtering
|
||||
- Versioning concerns
|
||||
expected_behavior: "More API design pattern findings"
|
||||
|
||||
# SINGLE ITEM
|
||||
- id: TC06
|
||||
name: "Single item - error handling"
|
||||
description: "Test with just one also_consider item"
|
||||
also_consider:
|
||||
- Error handling completeness
|
||||
expected_behavior: "Some emphasis on error handling while still covering other areas"
|
||||
|
||||
# BROAD/VAGUE
|
||||
- id: TC07
|
||||
name: "Broad items"
|
||||
description: "Test with vague also_consider items"
|
||||
also_consider:
|
||||
- Quality issues
|
||||
- Things that seem off
|
||||
expected_behavior: "Minimal change from baseline - items too vague to steer"
|
||||
|
||||
# VERY SPECIFIC
|
||||
- id: TC08
|
||||
name: "Very specific items"
|
||||
description: "Test with highly specific also_consider items"
|
||||
also_consider:
|
||||
- Is the JWT token expiration documented?
|
||||
- Are refresh token mechanics explained?
|
||||
- What happens on concurrent sessions?
|
||||
expected_behavior: "Specific findings addressing these exact questions if gaps exist"
|
||||
|
||||
# MIXED DOMAINS
|
||||
- id: TC09
|
||||
name: "Mixed domain concerns"
|
||||
description: "Test with items from different domains"
|
||||
also_consider:
|
||||
- Security vulnerabilities
|
||||
- Reader confusion points
|
||||
- API design inconsistencies
|
||||
- Performance implications
|
||||
expected_behavior: "Balanced findings across multiple domains"
|
||||
|
||||
# CONTRADICTORY/UNUSUAL
|
||||
- id: TC10
|
||||
name: "Contradictory items"
|
||||
description: "Test resilience with odd inputs"
|
||||
also_consider:
|
||||
- Things that are too detailed
|
||||
- Things that are not detailed enough
|
||||
expected_behavior: "Reviewer handles gracefully, finds issues in both directions"
|
||||
|
|
@ -1,65 +0,0 @@
|
|||
const chalk = require('chalk');
|
||||
const path = require('node:path');
|
||||
const { Installer } = require('../installers/lib/core/installer');
|
||||
const { Manifest } = require('../installers/lib/core/manifest');
|
||||
const { UI } = require('../lib/ui');
|
||||
|
||||
const installer = new Installer();
|
||||
const manifest = new Manifest();
|
||||
const ui = new UI();
|
||||
|
||||
module.exports = {
|
||||
command: 'status',
|
||||
description: 'Display BMAD installation status and module versions',
|
||||
options: [],
|
||||
action: async (options) => {
|
||||
try {
|
||||
// Find the bmad directory
|
||||
const projectDir = process.cwd();
|
||||
const { bmadDir } = await installer.findBmadDir(projectDir);
|
||||
|
||||
// Check if bmad directory exists
|
||||
const fs = require('fs-extra');
|
||||
if (!(await fs.pathExists(bmadDir))) {
|
||||
console.log(chalk.yellow('No BMAD installation found in the current directory.'));
|
||||
console.log(chalk.dim(`Expected location: ${bmadDir}`));
|
||||
console.log(chalk.dim('\nRun "bmad install" to set up a new installation.'));
|
||||
process.exit(0);
|
||||
return;
|
||||
}
|
||||
|
||||
// Read manifest
|
||||
const manifestData = await manifest._readRaw(bmadDir);
|
||||
|
||||
if (!manifestData) {
|
||||
console.log(chalk.yellow('No BMAD installation manifest found.'));
|
||||
console.log(chalk.dim('\nRun "bmad install" to set up a new installation.'));
|
||||
process.exit(0);
|
||||
return;
|
||||
}
|
||||
|
||||
// Get installation info
|
||||
const installation = manifestData.installation || {};
|
||||
const modules = manifestData.modules || [];
|
||||
|
||||
// Check for available updates (only for external modules)
|
||||
const availableUpdates = await manifest.checkForUpdates(bmadDir);
|
||||
|
||||
// Display status
|
||||
ui.displayStatus({
|
||||
installation,
|
||||
modules,
|
||||
availableUpdates,
|
||||
bmadDir,
|
||||
});
|
||||
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error(chalk.red('Status check failed:'), error.message);
|
||||
if (process.env.BMAD_DEBUG) {
|
||||
console.error(chalk.dim(error.stack));
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
},
|
||||
};
|
||||
|
|
@ -10,7 +10,6 @@ modules:
|
|||
description: "Agent, Workflow and Module Builder"
|
||||
defaultSelected: false
|
||||
type: bmad-org
|
||||
npmPackage: bmad-builder
|
||||
|
||||
bmad-creative-intelligence-suite:
|
||||
url: https://github.com/bmad-code-org/bmad-module-creative-intelligence-suite
|
||||
|
|
@ -20,7 +19,6 @@ modules:
|
|||
description: "Creative tools for writing, brainstorming, and more"
|
||||
defaultSelected: false
|
||||
type: bmad-org
|
||||
npmPackage: bmad-creative-intelligence-suite
|
||||
|
||||
bmad-game-dev-studio:
|
||||
url: https://github.com/bmad-code-org/bmad-module-game-dev-studio.git
|
||||
|
|
@ -30,7 +28,6 @@ modules:
|
|||
description: "Game development agents and workflows"
|
||||
defaultSelected: false
|
||||
type: bmad-org
|
||||
npmPackage: bmad-game-dev-studio
|
||||
|
||||
# TODO: Enable once fixes applied:
|
||||
|
||||
|
|
|
|||
|
|
@ -534,71 +534,18 @@ class ManifestGenerator {
|
|||
|
||||
/**
|
||||
* Write main manifest as YAML with installation info only
|
||||
* Fetches fresh version info for all modules
|
||||
* @returns {string} Path to the manifest file
|
||||
*/
|
||||
async writeMainManifest(cfgDir) {
|
||||
const manifestPath = path.join(cfgDir, 'manifest.yaml');
|
||||
|
||||
// Read existing manifest to preserve install date
|
||||
let existingInstallDate = null;
|
||||
const existingModulesMap = new Map();
|
||||
|
||||
if (await fs.pathExists(manifestPath)) {
|
||||
try {
|
||||
const existingContent = await fs.readFile(manifestPath, 'utf8');
|
||||
const existingManifest = yaml.parse(existingContent);
|
||||
|
||||
// Preserve original install date
|
||||
if (existingManifest.installation?.installDate) {
|
||||
existingInstallDate = existingManifest.installation.installDate;
|
||||
}
|
||||
|
||||
// Build map of existing modules for quick lookup
|
||||
if (existingManifest.modules && Array.isArray(existingManifest.modules)) {
|
||||
for (const m of existingManifest.modules) {
|
||||
if (typeof m === 'object' && m.name) {
|
||||
existingModulesMap.set(m.name, m);
|
||||
} else if (typeof m === 'string') {
|
||||
existingModulesMap.set(m, { installDate: existingInstallDate });
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// If we can't read existing manifest, continue with defaults
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch fresh version info for all modules
|
||||
const { Manifest } = require('./manifest');
|
||||
const manifestObj = new Manifest();
|
||||
const updatedModules = [];
|
||||
|
||||
for (const moduleName of this.modules) {
|
||||
// Get fresh version info from source
|
||||
const versionInfo = await manifestObj.getModuleVersionInfo(moduleName, this.bmadDir);
|
||||
|
||||
// Get existing install date if available
|
||||
const existing = existingModulesMap.get(moduleName);
|
||||
|
||||
updatedModules.push({
|
||||
name: moduleName,
|
||||
version: versionInfo.version,
|
||||
installDate: existing?.installDate || new Date().toISOString(),
|
||||
lastUpdated: new Date().toISOString(),
|
||||
source: versionInfo.source,
|
||||
npmPackage: versionInfo.npmPackage,
|
||||
repoUrl: versionInfo.repoUrl,
|
||||
});
|
||||
}
|
||||
|
||||
const manifest = {
|
||||
installation: {
|
||||
version: packageJson.version,
|
||||
installDate: existingInstallDate || new Date().toISOString(),
|
||||
installDate: new Date().toISOString(),
|
||||
lastUpdated: new Date().toISOString(),
|
||||
},
|
||||
modules: updatedModules,
|
||||
modules: this.modules, // Include ALL modules (standard and custom)
|
||||
ides: this.selectedIdes,
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
const path = require('node:path');
|
||||
const fs = require('fs-extra');
|
||||
const crypto = require('node:crypto');
|
||||
const { getProjectRoot } = require('../../../lib/project-root');
|
||||
|
||||
class Manifest {
|
||||
/**
|
||||
|
|
@ -17,35 +16,14 @@ class Manifest {
|
|||
// Ensure _config directory exists
|
||||
await fs.ensureDir(path.dirname(manifestPath));
|
||||
|
||||
// Get the BMad version from package.json
|
||||
const bmadVersion = data.version || require(path.join(process.cwd(), 'package.json')).version;
|
||||
|
||||
// Convert module list to new detailed format
|
||||
const moduleDetails = [];
|
||||
if (data.modules && Array.isArray(data.modules)) {
|
||||
for (const moduleName of data.modules) {
|
||||
// Core and BMM modules use the BMad version
|
||||
const moduleVersion = moduleName === 'core' || moduleName === 'bmm' ? bmadVersion : null;
|
||||
const now = data.installDate || new Date().toISOString();
|
||||
|
||||
moduleDetails.push({
|
||||
name: moduleName,
|
||||
version: moduleVersion,
|
||||
installDate: now,
|
||||
lastUpdated: now,
|
||||
source: moduleName === 'core' || moduleName === 'bmm' ? 'built-in' : 'unknown',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Structure the manifest data
|
||||
const manifestData = {
|
||||
installation: {
|
||||
version: bmadVersion,
|
||||
version: data.version || require(path.join(process.cwd(), 'package.json')).version,
|
||||
installDate: data.installDate || new Date().toISOString(),
|
||||
lastUpdated: data.lastUpdated || new Date().toISOString(),
|
||||
},
|
||||
modules: moduleDetails,
|
||||
modules: data.modules || [],
|
||||
ides: data.ides || [],
|
||||
};
|
||||
|
||||
|
|
@ -79,23 +57,12 @@ class Manifest {
|
|||
const content = await fs.readFile(yamlPath, 'utf8');
|
||||
const manifestData = yaml.parse(content);
|
||||
|
||||
// Handle new detailed module format
|
||||
const modules = manifestData.modules || [];
|
||||
|
||||
// For backward compatibility: if modules is an array of strings (old format),
|
||||
// the calling code may need the array of names
|
||||
const moduleNames = modules.map((m) => (typeof m === 'string' ? m : m.name));
|
||||
|
||||
// Check if we have the new detailed format
|
||||
const hasDetailedModules = modules.length > 0 && typeof modules[0] === 'object';
|
||||
|
||||
// Flatten the structure for compatibility with existing code
|
||||
return {
|
||||
version: manifestData.installation?.version,
|
||||
installDate: manifestData.installation?.installDate,
|
||||
lastUpdated: manifestData.installation?.lastUpdated,
|
||||
modules: moduleNames, // Simple array of module names for backward compatibility
|
||||
modulesDetailed: hasDetailedModules ? modules : null, // New detailed format
|
||||
modules: manifestData.modules || [], // All modules (standard and custom)
|
||||
customModules: manifestData.customModules || [], // Keep for backward compatibility
|
||||
ides: manifestData.ides || [],
|
||||
};
|
||||
|
|
@ -115,92 +82,28 @@ class Manifest {
|
|||
*/
|
||||
async update(bmadDir, updates, installedFiles = null) {
|
||||
const yaml = require('yaml');
|
||||
const manifest = (await this._readRaw(bmadDir)) || {
|
||||
installation: {},
|
||||
modules: [],
|
||||
ides: [],
|
||||
const manifest = (await this.read(bmadDir)) || {};
|
||||
|
||||
// Merge updates
|
||||
Object.assign(manifest, updates);
|
||||
manifest.lastUpdated = new Date().toISOString();
|
||||
|
||||
// Convert back to structured format for YAML
|
||||
const manifestData = {
|
||||
installation: {
|
||||
version: manifest.version,
|
||||
installDate: manifest.installDate,
|
||||
lastUpdated: manifest.lastUpdated,
|
||||
},
|
||||
modules: manifest.modules || [], // All modules (standard and custom)
|
||||
ides: manifest.ides || [],
|
||||
};
|
||||
|
||||
// Handle module updates
|
||||
if (updates.modules) {
|
||||
// If modules is being updated, we need to preserve detailed module info
|
||||
const existingDetailed = manifest.modules || [];
|
||||
const incomingNames = updates.modules;
|
||||
|
||||
// Build updated modules array
|
||||
const updatedModules = [];
|
||||
for (const name of incomingNames) {
|
||||
const existing = existingDetailed.find((m) => m.name === name);
|
||||
if (existing) {
|
||||
// Preserve existing details, update lastUpdated if this module is being updated
|
||||
updatedModules.push({
|
||||
...existing,
|
||||
lastUpdated: new Date().toISOString(),
|
||||
});
|
||||
} else {
|
||||
// New module - add with minimal details
|
||||
updatedModules.push({
|
||||
name,
|
||||
version: null,
|
||||
installDate: new Date().toISOString(),
|
||||
lastUpdated: new Date().toISOString(),
|
||||
source: 'unknown',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
manifest.modules = updatedModules;
|
||||
}
|
||||
|
||||
// Merge other updates
|
||||
if (updates.version) {
|
||||
manifest.installation.version = updates.version;
|
||||
}
|
||||
if (updates.installDate) {
|
||||
manifest.installation.installDate = updates.installDate;
|
||||
}
|
||||
manifest.installation.lastUpdated = new Date().toISOString();
|
||||
|
||||
if (updates.ides) {
|
||||
manifest.ides = updates.ides;
|
||||
}
|
||||
|
||||
// Handle per-module version updates
|
||||
if (updates.moduleVersions) {
|
||||
for (const [moduleName, versionInfo] of Object.entries(updates.moduleVersions)) {
|
||||
const moduleIndex = manifest.modules.findIndex((m) => m.name === moduleName);
|
||||
if (moduleIndex !== -1) {
|
||||
manifest.modules[moduleIndex] = {
|
||||
...manifest.modules[moduleIndex],
|
||||
...versionInfo,
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle adding a new module with version info
|
||||
if (updates.addModule) {
|
||||
const { name, version, source, npmPackage, repoUrl } = updates.addModule;
|
||||
const existing = manifest.modules.find((m) => m.name === name);
|
||||
if (!existing) {
|
||||
manifest.modules.push({
|
||||
name,
|
||||
version: version || null,
|
||||
installDate: new Date().toISOString(),
|
||||
lastUpdated: new Date().toISOString(),
|
||||
source: source || 'external',
|
||||
npmPackage: npmPackage || null,
|
||||
repoUrl: repoUrl || null,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const manifestPath = path.join(bmadDir, '_config', 'manifest.yaml');
|
||||
await fs.ensureDir(path.dirname(manifestPath));
|
||||
|
||||
// Clean the manifest data to remove any non-serializable values
|
||||
const cleanManifestData = structuredClone(manifest);
|
||||
const cleanManifestData = structuredClone(manifestData);
|
||||
|
||||
const yamlContent = yaml.stringify(cleanManifestData, {
|
||||
indent: 2,
|
||||
|
|
@ -212,61 +115,16 @@ class Manifest {
|
|||
const content = yamlContent.endsWith('\n') ? yamlContent : yamlContent + '\n';
|
||||
await fs.writeFile(manifestPath, content, 'utf8');
|
||||
|
||||
// Return the flattened format for compatibility
|
||||
return this._flattenManifest(manifest);
|
||||
return manifest;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read raw manifest data without flattening
|
||||
* @param {string} bmadDir - Path to bmad directory
|
||||
* @returns {Object|null} Raw manifest data or null if not found
|
||||
*/
|
||||
async _readRaw(bmadDir) {
|
||||
const yamlPath = path.join(bmadDir, '_config', 'manifest.yaml');
|
||||
const yaml = require('yaml');
|
||||
|
||||
if (await fs.pathExists(yamlPath)) {
|
||||
try {
|
||||
const content = await fs.readFile(yamlPath, 'utf8');
|
||||
return yaml.parse(content);
|
||||
} catch (error) {
|
||||
console.error('Failed to read YAML manifest:', error.message);
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Flatten manifest for backward compatibility
|
||||
* @param {Object} manifest - Raw manifest data
|
||||
* @returns {Object} Flattened manifest
|
||||
*/
|
||||
_flattenManifest(manifest) {
|
||||
const modules = manifest.modules || [];
|
||||
const moduleNames = modules.map((m) => (typeof m === 'string' ? m : m.name));
|
||||
const hasDetailedModules = modules.length > 0 && typeof modules[0] === 'object';
|
||||
|
||||
return {
|
||||
version: manifest.installation?.version,
|
||||
installDate: manifest.installation?.installDate,
|
||||
lastUpdated: manifest.installation?.lastUpdated,
|
||||
modules: moduleNames,
|
||||
modulesDetailed: hasDetailedModules ? modules : null,
|
||||
customModules: manifest.customModules || [],
|
||||
ides: manifest.ides || [],
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a module to the manifest with optional version info
|
||||
* If module already exists, update its version info
|
||||
* Add a module to the manifest
|
||||
* @param {string} bmadDir - Path to bmad directory
|
||||
* @param {string} moduleName - Module name to add
|
||||
* @param {Object} options - Optional version info
|
||||
*/
|
||||
async addModule(bmadDir, moduleName, options = {}) {
|
||||
const manifest = await this._readRaw(bmadDir);
|
||||
async addModule(bmadDir, moduleName) {
|
||||
const manifest = await this.read(bmadDir);
|
||||
if (!manifest) {
|
||||
throw new Error('No manifest found');
|
||||
}
|
||||
|
|
@ -275,33 +133,10 @@ class Manifest {
|
|||
manifest.modules = [];
|
||||
}
|
||||
|
||||
const existingIndex = manifest.modules.findIndex((m) => m.name === moduleName);
|
||||
|
||||
if (existingIndex === -1) {
|
||||
// Module doesn't exist, add it
|
||||
manifest.modules.push({
|
||||
name: moduleName,
|
||||
version: options.version || null,
|
||||
installDate: new Date().toISOString(),
|
||||
lastUpdated: new Date().toISOString(),
|
||||
source: options.source || 'unknown',
|
||||
npmPackage: options.npmPackage || null,
|
||||
repoUrl: options.repoUrl || null,
|
||||
});
|
||||
} else {
|
||||
// Module exists, update its version info
|
||||
const existing = manifest.modules[existingIndex];
|
||||
manifest.modules[existingIndex] = {
|
||||
...existing,
|
||||
version: options.version === undefined ? existing.version : options.version,
|
||||
source: options.source || existing.source,
|
||||
npmPackage: options.npmPackage === undefined ? existing.npmPackage : options.npmPackage,
|
||||
repoUrl: options.repoUrl === undefined ? existing.repoUrl : options.repoUrl,
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
if (!manifest.modules.includes(moduleName)) {
|
||||
manifest.modules.push(moduleName);
|
||||
await this.update(bmadDir, { modules: manifest.modules });
|
||||
}
|
||||
|
||||
await this._writeRaw(bmadDir, manifest);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -310,93 +145,18 @@ class Manifest {
|
|||
* @param {string} moduleName - Module name to remove
|
||||
*/
|
||||
async removeModule(bmadDir, moduleName) {
|
||||
const manifest = await this._readRaw(bmadDir);
|
||||
const manifest = await this.read(bmadDir);
|
||||
if (!manifest || !manifest.modules) {
|
||||
return;
|
||||
}
|
||||
|
||||
const index = manifest.modules.findIndex((m) => m.name === moduleName);
|
||||
const index = manifest.modules.indexOf(moduleName);
|
||||
if (index !== -1) {
|
||||
manifest.modules.splice(index, 1);
|
||||
await this._writeRaw(bmadDir, manifest);
|
||||
await this.update(bmadDir, { modules: manifest.modules });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update a single module's version info
|
||||
* @param {string} bmadDir - Path to bmad directory
|
||||
* @param {string} moduleName - Module name
|
||||
* @param {Object} versionInfo - Version info to update
|
||||
*/
|
||||
async updateModuleVersion(bmadDir, moduleName, versionInfo) {
|
||||
const manifest = await this._readRaw(bmadDir);
|
||||
if (!manifest || !manifest.modules) {
|
||||
return;
|
||||
}
|
||||
|
||||
const index = manifest.modules.findIndex((m) => m.name === moduleName);
|
||||
if (index !== -1) {
|
||||
manifest.modules[index] = {
|
||||
...manifest.modules[index],
|
||||
...versionInfo,
|
||||
lastUpdated: new Date().toISOString(),
|
||||
};
|
||||
await this._writeRaw(bmadDir, manifest);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get version info for a specific module
|
||||
* @param {string} bmadDir - Path to bmad directory
|
||||
* @param {string} moduleName - Module name
|
||||
* @returns {Object|null} Module version info or null
|
||||
*/
|
||||
async getModuleVersion(bmadDir, moduleName) {
|
||||
const manifest = await this._readRaw(bmadDir);
|
||||
if (!manifest || !manifest.modules) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return manifest.modules.find((m) => m.name === moduleName) || null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all modules with their version info
|
||||
* @param {string} bmadDir - Path to bmad directory
|
||||
* @returns {Array} Array of module info objects
|
||||
*/
|
||||
async getAllModuleVersions(bmadDir) {
|
||||
const manifest = await this._readRaw(bmadDir);
|
||||
if (!manifest || !manifest.modules) {
|
||||
return [];
|
||||
}
|
||||
|
||||
return manifest.modules;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write raw manifest data to file
|
||||
* @param {string} bmadDir - Path to bmad directory
|
||||
* @param {Object} manifestData - Raw manifest data to write
|
||||
*/
|
||||
async _writeRaw(bmadDir, manifestData) {
|
||||
const yaml = require('yaml');
|
||||
const manifestPath = path.join(bmadDir, '_config', 'manifest.yaml');
|
||||
|
||||
await fs.ensureDir(path.dirname(manifestPath));
|
||||
|
||||
const cleanManifestData = structuredClone(manifestData);
|
||||
|
||||
const yamlContent = yaml.stringify(cleanManifestData, {
|
||||
indent: 2,
|
||||
lineWidth: 0,
|
||||
sortKeys: false,
|
||||
});
|
||||
|
||||
const content = yamlContent.endsWith('\n') ? yamlContent : yamlContent + '\n';
|
||||
await fs.writeFile(manifestPath, content, 'utf8');
|
||||
}
|
||||
|
||||
/**
|
||||
* Add an IDE configuration to the manifest
|
||||
* @param {string} bmadDir - Path to bmad directory
|
||||
|
|
@ -825,212 +585,6 @@ class Manifest {
|
|||
await this.update(bmadDir, { customModules: manifest.customModules });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get module version info from source
|
||||
* @param {string} moduleName - Module name/code
|
||||
* @param {string} bmadDir - Path to bmad directory
|
||||
* @param {string} moduleSourcePath - Optional source path for custom modules
|
||||
* @returns {Object} Version info object with version, source, npmPackage, repoUrl
|
||||
*/
|
||||
async getModuleVersionInfo(moduleName, bmadDir, moduleSourcePath = null) {
|
||||
const os = require('node:os');
|
||||
|
||||
// Built-in modules use BMad version (only core and bmm are in BMAD-METHOD repo)
|
||||
if (['core', 'bmm'].includes(moduleName)) {
|
||||
const bmadVersion = require(path.join(getProjectRoot(), 'package.json')).version;
|
||||
return {
|
||||
version: bmadVersion,
|
||||
source: 'built-in',
|
||||
npmPackage: null,
|
||||
repoUrl: null,
|
||||
};
|
||||
}
|
||||
|
||||
// Check if this is an external official module
|
||||
const { ExternalModuleManager } = require('../modules/external-manager');
|
||||
const extMgr = new ExternalModuleManager();
|
||||
const moduleInfo = await extMgr.getModuleByCode(moduleName);
|
||||
|
||||
if (moduleInfo) {
|
||||
// External module - try to get version from npm registry first, then fall back to cache
|
||||
let version = null;
|
||||
|
||||
if (moduleInfo.npmPackage) {
|
||||
// Fetch version from npm registry
|
||||
try {
|
||||
version = await this.fetchNpmVersion(moduleInfo.npmPackage);
|
||||
} catch {
|
||||
// npm fetch failed, try cache as fallback
|
||||
}
|
||||
}
|
||||
|
||||
// If npm didn't work, try reading from cached repo's package.json
|
||||
if (!version) {
|
||||
const cacheDir = path.join(os.homedir(), '.bmad', 'cache', 'external-modules', moduleName);
|
||||
const packageJsonPath = path.join(cacheDir, 'package.json');
|
||||
|
||||
if (await fs.pathExists(packageJsonPath)) {
|
||||
try {
|
||||
const pkg = require(packageJsonPath);
|
||||
version = pkg.version;
|
||||
} catch (error) {
|
||||
console.warn(`Failed to read package.json for ${moduleName}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
version: version,
|
||||
source: 'external',
|
||||
npmPackage: moduleInfo.npmPackage || null,
|
||||
repoUrl: moduleInfo.url || null,
|
||||
};
|
||||
}
|
||||
|
||||
// Custom module - check cache directory
|
||||
const cacheDir = path.join(bmadDir, '_config', 'custom', moduleName);
|
||||
const moduleYamlPath = path.join(cacheDir, 'module.yaml');
|
||||
|
||||
if (await fs.pathExists(moduleYamlPath)) {
|
||||
try {
|
||||
const yamlContent = await fs.readFile(moduleYamlPath, 'utf8');
|
||||
const moduleConfig = yaml.parse(yamlContent);
|
||||
return {
|
||||
version: moduleConfig.version || null,
|
||||
source: 'custom',
|
||||
npmPackage: moduleConfig.npmPackage || null,
|
||||
repoUrl: moduleConfig.repoUrl || null,
|
||||
};
|
||||
} catch (error) {
|
||||
console.warn(`Failed to read module.yaml for ${moduleName}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Unknown module
|
||||
return {
|
||||
version: null,
|
||||
source: 'unknown',
|
||||
npmPackage: null,
|
||||
repoUrl: null,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch latest version from npm for a package
|
||||
* @param {string} packageName - npm package name
|
||||
* @returns {string|null} Latest version or null
|
||||
*/
|
||||
async fetchNpmVersion(packageName) {
|
||||
try {
|
||||
const https = require('node:https');
|
||||
const { execSync } = require('node:child_process');
|
||||
|
||||
// Try using npm view first (more reliable)
|
||||
try {
|
||||
const result = execSync(`npm view ${packageName} version`, {
|
||||
encoding: 'utf8',
|
||||
stdio: 'pipe',
|
||||
timeout: 10_000,
|
||||
});
|
||||
return result.trim();
|
||||
} catch {
|
||||
// Fallback to npm registry API
|
||||
return new Promise((resolve, reject) => {
|
||||
https
|
||||
.get(`https://registry.npmjs.org/${packageName}`, (res) => {
|
||||
let data = '';
|
||||
res.on('data', (chunk) => (data += chunk));
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const pkg = JSON.parse(data);
|
||||
resolve(pkg['dist-tags']?.latest || pkg.version || null);
|
||||
} catch {
|
||||
resolve(null);
|
||||
}
|
||||
});
|
||||
})
|
||||
.on('error', () => resolve(null));
|
||||
});
|
||||
}
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for available updates for installed modules
|
||||
* @param {string} bmadDir - Path to bmad directory
|
||||
* @returns {Array} Array of update info objects
|
||||
*/
|
||||
async checkForUpdates(bmadDir) {
|
||||
const modules = await this.getAllModuleVersions(bmadDir);
|
||||
const updates = [];
|
||||
|
||||
for (const module of modules) {
|
||||
if (!module.npmPackage) {
|
||||
continue; // Skip modules without npm package (built-in)
|
||||
}
|
||||
|
||||
const latestVersion = await this.fetchNpmVersion(module.npmPackage);
|
||||
if (!latestVersion) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (module.version !== latestVersion) {
|
||||
updates.push({
|
||||
name: module.name,
|
||||
installedVersion: module.version,
|
||||
latestVersion: latestVersion,
|
||||
npmPackage: module.npmPackage,
|
||||
updateAvailable: true,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return updates;
|
||||
}
|
||||
|
||||
/**
|
||||
* Compare two semantic versions
|
||||
* @param {string} v1 - First version
|
||||
* @param {string} v2 - Second version
|
||||
* @returns {number} -1 if v1 < v2, 0 if v1 == v2, 1 if v1 > v2
|
||||
*/
|
||||
compareVersions(v1, v2) {
|
||||
if (!v1 || !v2) return 0;
|
||||
|
||||
const normalize = (v) => {
|
||||
// Remove leading 'v' if present
|
||||
v = v.replace(/^v/, '');
|
||||
// Handle prerelease tags
|
||||
const parts = v.split('-');
|
||||
const main = parts[0].split('.');
|
||||
const prerelease = parts[1];
|
||||
return { main, prerelease };
|
||||
};
|
||||
|
||||
const n1 = normalize(v1);
|
||||
const n2 = normalize(v2);
|
||||
|
||||
// Compare main version parts
|
||||
for (let i = 0; i < 3; i++) {
|
||||
const num1 = parseInt(n1.main[i] || '0', 10);
|
||||
const num2 = parseInt(n2.main[i] || '0', 10);
|
||||
if (num1 !== num2) {
|
||||
return num1 < num2 ? -1 : 1;
|
||||
}
|
||||
}
|
||||
|
||||
// If main versions are equal, compare prerelease
|
||||
if (n1.prerelease && n2.prerelease) {
|
||||
return n1.prerelease < n2.prerelease ? -1 : n1.prerelease > n2.prerelease ? 1 : 0;
|
||||
}
|
||||
if (n1.prerelease) return -1; // Prerelease is older than stable
|
||||
if (n2.prerelease) return 1; // Stable is newer than prerelease
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { Manifest };
|
||||
|
|
|
|||
|
|
@ -2,9 +2,9 @@
|
|||
|
||||
## Overview
|
||||
|
||||
Standardize IDE installers to use **flat file naming** with **underscores** (Windows-compatible) and centralize duplicated code in shared utilities.
|
||||
Standardize IDE installers to use **flat file naming** and centralize duplicated code in shared utilities.
|
||||
|
||||
**Key Rule: All IDEs use underscore format for Windows compatibility (colons don't work on Windows).**
|
||||
**Key Rule: Only folder-based IDEs convert to colon format. IDEs already using dashes keep using dashes.**
|
||||
|
||||
## Current State Analysis
|
||||
|
||||
|
|
@ -15,10 +15,10 @@ Standardize IDE installers to use **flat file naming** with **underscores** (Win
|
|||
| **claude-code** | Hierarchical | `.claude/commands/bmad/{module}/agents/{name}.md` |
|
||||
| **cursor** | Hierarchical | `.cursor/commands/bmad/{module}/agents/{name}.md` |
|
||||
| **crush** | Hierarchical | `.crush/commands/bmad/{module}/agents/{name}.md` |
|
||||
| **antigravity** | Flattened (underscores) | `.agent/workflows/bmad_module_agents_name.md` |
|
||||
| **codex** | Flattened (underscores) | `~/.codex/prompts/bmad_module_agents_name.md` |
|
||||
| **cline** | Flattened (underscores) | `.clinerules/workflows/bmad_module_type_name.md` |
|
||||
| **roo** | Flattened (underscores) | `.roo/commands/bmad_module_agent_name.md` |
|
||||
| **antigravity** | Flattened (dashes) | `.agent/workflows/bmad-module-agents-name.md` |
|
||||
| **codex** | Flattened (dashes) | `~/.codex/prompts/bmad-module-agents-name.md` |
|
||||
| **cline** | Flattened (dashes) | `.clinerules/workflows/bmad-module-type-name.md` |
|
||||
| **roo** | Flattened (dashes) | `.roo/commands/bmad-{module}-agent-{name}.md` |
|
||||
| **auggie** | Hybrid | `.augment/commands/bmad/agents/{module}-{name}.md` |
|
||||
| **iflow** | Hybrid | `.iflow/commands/bmad/agents/{module}-{name}.md` |
|
||||
| **trae** | Different (rules) | `.trae/rules/bmad-agent-{module}-{name}.md` |
|
||||
|
|
@ -40,24 +40,35 @@ All currently create artifacts with **nested relative paths** like `{module}/age
|
|||
|
||||
## Target Standardization
|
||||
|
||||
### For All IDEs (underscore format - Windows-compatible)
|
||||
### For Folder-Based IDEs (convert to colon format)
|
||||
|
||||
**IDEs affected:** claude-code, cursor, crush, antigravity, codex, cline, roo
|
||||
**IDEs affected:** claude-code, cursor, crush
|
||||
|
||||
```
|
||||
Format: bmad_{module}_{type}_{name}.md
|
||||
Format: bmad:{module}:{type}:{name}.md
|
||||
|
||||
Examples:
|
||||
- Agent: bmad_bmm_agents_pm.md
|
||||
- Agent: bmad_core_agents_dev.md
|
||||
- Workflow: bmad_bmm_workflows_correct-course.md
|
||||
- Task: bmad_bmm_tasks_bmad-help.md
|
||||
- Tool: bmad_core_tools_code-review.md
|
||||
- Custom: bmad_custom_agents_fred-commit-poet.md
|
||||
- Agent: bmad:bmm:agents:pm.md
|
||||
- Agent: bmad:core:agents:dev.md
|
||||
- Workflow: bmad:bmm:workflows:correct-course.md
|
||||
- Task: bmad:bmm:tasks:bmad-help.md
|
||||
- Tool: bmad:core:tools:code-review.md
|
||||
- Custom: bmad:custom:agents:fred-commit-poet.md
|
||||
```
|
||||
|
||||
**Note:** Type segments (agents, workflows, tasks, tools) are filtered out from names:
|
||||
- `bmm/agents/pm.md` → `bmad_bmm_pm.md` (not `bmad_bmm_agents_pm.md`)
|
||||
### For Already-Flat IDEs (keep using dashes)
|
||||
|
||||
**IDEs affected:** antigravity, codex, cline, roo
|
||||
|
||||
```
|
||||
Format: bmad-{module}-{type}-{name}.md
|
||||
|
||||
Examples:
|
||||
- Agent: bmad-bmm-agents-pm.md
|
||||
- Workflow: bmad-bmm-workflows-correct-course.md
|
||||
- Task: bmad-bmm-tasks-bmad-help.md
|
||||
- Custom: bmad-custom-agents-fred-commit-poet.md
|
||||
```
|
||||
|
||||
### For Hybrid IDEs (keep as-is)
|
||||
|
||||
|
|
@ -77,50 +88,57 @@ These use `{module}-{name}.md` format within subdirectories - keep as-is.
|
|||
|
||||
```javascript
|
||||
/**
|
||||
* Convert hierarchical path to flat underscore-separated name (Windows-compatible)
|
||||
* Convert hierarchical path to flat colon-separated name (for folder-based IDEs)
|
||||
* @param {string} module - Module name (e.g., 'bmm', 'core')
|
||||
* @param {string} type - Artifact type ('agents', 'workflows', 'tasks', 'tools') - filtered out
|
||||
* @param {string} type - Artifact type ('agents', 'workflows', 'tasks', 'tools')
|
||||
* @param {string} name - Artifact name (e.g., 'pm', 'correct-course')
|
||||
* @returns {string} Flat filename like 'bmad_bmm_pm.md'
|
||||
* @returns {string} Flat filename like 'bmad:bmm:agents:pm.md'
|
||||
*/
|
||||
function toUnderscoreName(module, type, name) {
|
||||
return `bmad_${module}_${name}.md`;
|
||||
function toColonName(module, type, name) {
|
||||
return `bmad:${module}:${type}:${name}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert relative path to flat underscore-separated name (Windows-compatible)
|
||||
* Convert relative path to flat colon-separated name (for folder-based IDEs)
|
||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
||||
* @returns {string} Flat filename like 'bmad_bmm_pm.md'
|
||||
* @returns {string} Flat filename like 'bmad:bmm:agents:pm.md'
|
||||
*/
|
||||
function toUnderscorePath(relativePath) {
|
||||
function toColonPath(relativePath) {
|
||||
const withoutExt = relativePath.replace('.md', '');
|
||||
const parts = withoutExt.split(/[\/\\]/);
|
||||
// Filter out type segments (agents, workflows, tasks, tools)
|
||||
const filtered = parts.filter((p) => !TYPE_SEGMENTS.includes(p));
|
||||
return `bmad_${filtered.join('_')}.md`;
|
||||
return `bmad:${parts.join(':')}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create custom agent underscore name
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Flat filename like 'bmad_custom_fred-commit-poet.md'
|
||||
* Convert hierarchical path to flat dash-separated name (for flat IDEs)
|
||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
||||
* @returns {string} Flat filename like 'bmad-bmm-agents-pm.md'
|
||||
*/
|
||||
function customAgentUnderscoreName(agentName) {
|
||||
return `bmad_custom_${agentName}.md`;
|
||||
function toDashPath(relativePath) {
|
||||
const withoutExt = relativePath.replace('.md', '');
|
||||
const parts = withoutExt.split(/[\/\\]/);
|
||||
return `bmad-${parts.join('-')}.md`;
|
||||
}
|
||||
|
||||
// Backward compatibility aliases
|
||||
const toColonName = toUnderscoreName;
|
||||
const toColonPath = toUnderscorePath;
|
||||
const toDashPath = toUnderscorePath;
|
||||
const customAgentColonName = customAgentUnderscoreName;
|
||||
const customAgentDashName = customAgentUnderscoreName;
|
||||
/**
|
||||
* Create custom agent colon name
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Flat filename like 'bmad:custom:agents:fred-commit-poet.md'
|
||||
*/
|
||||
function customAgentColonName(agentName) {
|
||||
return `bmad:custom:agents:${agentName}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create custom agent dash name
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Flat filename like 'bmad-custom-agents-fred-commit-poet.md'
|
||||
*/
|
||||
function customAgentDashName(agentName) {
|
||||
return `bmad-custom-agents-${agentName}.md`;
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
toUnderscoreName,
|
||||
toUnderscorePath,
|
||||
customAgentUnderscoreName,
|
||||
// Backward compatibility
|
||||
toColonName,
|
||||
toColonPath,
|
||||
toDashPath,
|
||||
|
|
@ -139,26 +157,34 @@ module.exports = {
|
|||
**Changes:**
|
||||
1. Import path utilities
|
||||
2. Change `relativePath` to use flat format
|
||||
3. Add method `writeColonArtifacts()` for folder-based IDEs (uses underscore)
|
||||
4. Add method `writeDashArtifacts()` for flat IDEs (uses underscore)
|
||||
3. Add method `writeColonArtifacts()` for folder-based IDEs
|
||||
4. Add method `writeDashArtifacts()` for flat IDEs
|
||||
|
||||
### Phase 3: Update All IDEs
|
||||
### Phase 3: Update Folder-Based IDEs
|
||||
|
||||
**Files to modify:**
|
||||
- `claude-code.js`
|
||||
- `cursor.js`
|
||||
- `crush.js`
|
||||
|
||||
**Changes:**
|
||||
1. Import `toColonPath`, `customAgentColonName` from path-utils
|
||||
2. Change from hierarchical to flat colon naming
|
||||
3. Update cleanup to handle flat structure
|
||||
|
||||
### Phase 4: Update Flat IDEs
|
||||
|
||||
**Files to modify:**
|
||||
- `antigravity.js`
|
||||
- `codex.js`
|
||||
- `cline.js`
|
||||
- `roo.js`
|
||||
|
||||
**Changes:**
|
||||
1. Import utilities from path-utils
|
||||
2. Change from hierarchical to flat underscore naming
|
||||
3. Update cleanup to handle flat structure (`startsWith('bmad')`)
|
||||
1. Import `toDashPath`, `customAgentDashName` from path-utils
|
||||
2. Replace local `flattenFilename()` with shared `toDashPath()`
|
||||
|
||||
### Phase 4: Update Base Class
|
||||
### Phase 5: Update Base Class
|
||||
|
||||
**File:** `_base-ide.js`
|
||||
|
||||
|
|
@ -169,23 +195,24 @@ module.exports = {
|
|||
## Migration Checklist
|
||||
|
||||
### New Files
|
||||
- [x] Create `shared/path-utils.js`
|
||||
- [ ] Create `shared/path-utils.js`
|
||||
|
||||
### All IDEs (convert to underscore format)
|
||||
- [x] Update `shared/agent-command-generator.js` - update for underscore
|
||||
- [x] Update `shared/task-tool-command-generator.js` - update for underscore
|
||||
- [x] Update `shared/workflow-command-generator.js` - update for underscore
|
||||
- [x] Update `claude-code.js` - convert to underscore format
|
||||
- [x] Update `cursor.js` - convert to underscore format
|
||||
- [x] Update `crush.js` - convert to underscore format
|
||||
- [ ] Update `antigravity.js` - use underscore format
|
||||
- [ ] Update `codex.js` - use underscore format
|
||||
- [ ] Update `cline.js` - use underscore format
|
||||
- [ ] Update `roo.js` - use underscore format
|
||||
### Folder-Based IDEs (convert to colon format)
|
||||
- [ ] Update `shared/agent-command-generator.js` - add `writeColonArtifacts()`
|
||||
- [ ] Update `shared/task-tool-command-generator.js` - add `writeColonArtifacts()`
|
||||
- [ ] Update `shared/workflow-command-generator.js` - add `writeColonArtifacts()`
|
||||
- [ ] Update `claude-code.js` - convert to colon format
|
||||
- [ ] Update `cursor.js` - convert to colon format
|
||||
- [ ] Update `crush.js` - convert to colon format
|
||||
|
||||
### CSV Command Files
|
||||
- [x] Update `src/core/module-help.csv` - change colons to underscores
|
||||
- [x] Update `src/bmm/module-help.csv` - change colons to underscores
|
||||
### Flat IDEs (standardize dash format)
|
||||
- [ ] Update `shared/agent-command-generator.js` - add `writeDashArtifacts()`
|
||||
- [ ] Update `shared/task-tool-command-generator.js` - add `writeDashArtifacts()`
|
||||
- [ ] Update `shared/workflow-command-generator.js` - add `writeDashArtifacts()`
|
||||
- [ ] Update `antigravity.js` - use shared `toDashPath()`
|
||||
- [ ] Update `codex.js` - use shared `toDashPath()`
|
||||
- [ ] Update `cline.js` - use shared `toDashPath()`
|
||||
- [ ] Update `roo.js` - use shared `toDashPath()`
|
||||
|
||||
### Base Class
|
||||
- [ ] Update `_base-ide.js` - add deprecation notice
|
||||
|
|
@ -201,8 +228,7 @@ module.exports = {
|
|||
|
||||
## Notes
|
||||
|
||||
1. **Filter type segments**: agents, workflows, tasks, tools are filtered out from flat names
|
||||
2. **Underscore format**: Universal underscore format for Windows compatibility
|
||||
1. **Keep segments**: agents, workflows, tasks, tools all become part of the flat name
|
||||
2. **Colon vs Dash**: Colons for folder-based IDEs converting to flat, dashes for already-flat IDEs
|
||||
3. **Custom agents**: Follow the same pattern as regular agents
|
||||
4. **Backward compatibility**: Old function names kept as aliases
|
||||
5. **Cleanup**: Will remove old `bmad:` format files on next install
|
||||
4. **Backward compatibility**: Cleanup will remove old folder structure
|
||||
|
|
|
|||
|
|
@ -127,8 +127,8 @@ class AntigravitySetup extends BaseIdeSetup {
|
|||
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
|
||||
// Write agent launcher files with FLATTENED naming using shared utility
|
||||
// Antigravity ignores directory structure, so we flatten to: bmad_module_name.md
|
||||
// This creates slash commands like /bmad_bmm_dev instead of /dev
|
||||
// Antigravity ignores directory structure, so we flatten to: bmad-module-name.md
|
||||
// This creates slash commands like /bmad-bmm-dev instead of /dev
|
||||
const agentCount = await agentGen.writeDashArtifacts(bmadWorkflowsDir, agentArtifacts);
|
||||
|
||||
// Process Antigravity specific injections for installed modules
|
||||
|
|
@ -167,7 +167,7 @@ class AntigravitySetup extends BaseIdeSetup {
|
|||
);
|
||||
}
|
||||
console.log(chalk.dim(` - Workflows directory: ${path.relative(projectDir, bmadWorkflowsDir)}`));
|
||||
console.log(chalk.yellow(`\n Note: Antigravity uses flattened slash commands (e.g., /bmad_module_agents_name)`));
|
||||
console.log(chalk.yellow(`\n Note: Antigravity uses flattened slash commands (e.g., /bmad-module-agents-name)`));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
|
|
@ -455,7 +455,7 @@ usage: |
|
|||
|
||||
⚠️ **IMPORTANT**: Run @${agentPath} to load the complete agent before using this launcher!`;
|
||||
|
||||
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
||||
const fileName = customAgentDashName(agentName);
|
||||
const launcherPath = path.join(bmadWorkflowsDir, fileName);
|
||||
|
||||
|
|
|
|||
|
|
@ -92,12 +92,12 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
|||
async cleanup(projectDir) {
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
// Remove any bmad* files from the commands directory (cleans up old bmad: and bmad- formats)
|
||||
// Remove any bmad:* files from the commands directory
|
||||
if (await fs.pathExists(commandsDir)) {
|
||||
const entries = await fs.readdir(commandsDir);
|
||||
let removedCount = 0;
|
||||
for (const entry of entries) {
|
||||
if (entry.startsWith('bmad')) {
|
||||
if (entry.startsWith('bmad:')) {
|
||||
await fs.remove(path.join(commandsDir, entry));
|
||||
removedCount++;
|
||||
}
|
||||
|
|
@ -151,16 +151,16 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
|||
const commandsDir = path.join(claudeDir, this.commandsDir);
|
||||
await this.ensureDir(commandsDir);
|
||||
|
||||
// Use underscore format: files written directly to commands dir (no bmad subfolder)
|
||||
// Creates: .claude/commands/bmad_bmm_pm.md
|
||||
// Use colon format: files written directly to commands dir (no bmad subfolder)
|
||||
// Creates: .claude/commands/bmad:bmm:pm.md
|
||||
|
||||
// Generate agent launchers using AgentCommandGenerator
|
||||
// This creates small launcher files that reference the actual agents in _bmad/
|
||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
|
||||
// Write agent launcher files using flat underscore naming
|
||||
// Creates files like: bmad_bmm_pm.md
|
||||
// Write agent launcher files using flat colon naming
|
||||
// Creates files like: bmad:bmm:pm.md
|
||||
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
||||
|
||||
// Process Claude Code specific injections for installed modules
|
||||
|
|
@ -182,8 +182,8 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
|||
const workflowGen = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
|
||||
|
||||
// Write workflow-command artifacts using flat underscore naming
|
||||
// Creates files like: bmad_bmm_correct-course.md
|
||||
// Write workflow-command artifacts using flat colon naming
|
||||
// Creates files like: bmad:bmm:correct-course.md
|
||||
const workflowCommandCount = await workflowGen.writeColonArtifacts(commandsDir, workflowArtifacts);
|
||||
|
||||
// Generate task and tool commands from manifests (if they exist)
|
||||
|
|
@ -490,7 +490,7 @@ You must fully embody this agent's persona and follow all activation instruction
|
|||
</agent-activation>
|
||||
`;
|
||||
|
||||
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||
// Use colon format: bmad:custom:agents:fred-commit-poet.md
|
||||
// Written directly to commands dir (no bmad subfolder)
|
||||
const launcherName = customAgentColonName(agentName);
|
||||
const launcherPath = path.join(commandsDir, launcherName);
|
||||
|
|
|
|||
|
|
@ -57,8 +57,8 @@ class ClineSetup extends BaseIdeSetup {
|
|||
console.log(chalk.cyan(' BMAD workflows are available as slash commands in Cline'));
|
||||
console.log(chalk.dim(' Usage:'));
|
||||
console.log(chalk.dim(' - Type / to see available commands'));
|
||||
console.log(chalk.dim(' - All BMAD items start with "bmad_"'));
|
||||
console.log(chalk.dim(' - Example: /bmad_bmm_pm'));
|
||||
console.log(chalk.dim(' - All BMAD items start with "bmad-"'));
|
||||
console.log(chalk.dim(' - Example: /bmad-bmm-pm'));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
|
|
@ -81,7 +81,7 @@ class ClineSetup extends BaseIdeSetup {
|
|||
}
|
||||
|
||||
const entries = await fs.readdir(workflowsDir);
|
||||
return entries.some((entry) => entry.startsWith('bmad'));
|
||||
return entries.some((entry) => entry.startsWith('bmad-'));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -146,7 +146,7 @@ class ClineSetup extends BaseIdeSetup {
|
|||
}
|
||||
|
||||
/**
|
||||
* Flatten file path to bmad_module_type_name.md format
|
||||
* Flatten file path to bmad-module-type-name.md format
|
||||
* Uses shared toDashPath utility
|
||||
*/
|
||||
flattenFilename(relativePath) {
|
||||
|
|
@ -180,7 +180,7 @@ class ClineSetup extends BaseIdeSetup {
|
|||
const entries = await fs.readdir(destDir);
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.startsWith('bmad')) {
|
||||
if (!entry.startsWith('bmad-')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
@ -246,7 +246,7 @@ The agent will follow the persona and instructions from the main agent file.
|
|||
|
||||
*Generated by BMAD Method*`;
|
||||
|
||||
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
||||
const fileName = customAgentDashName(agentName);
|
||||
const launcherPath = path.join(workflowsDir, fileName);
|
||||
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ class CodexSetup extends BaseIdeSetup {
|
|||
await fs.ensureDir(destDir);
|
||||
await this.clearOldBmadFiles(destDir);
|
||||
|
||||
// Collect artifacts and write using underscore format
|
||||
// Collect artifacts and write using DASH format
|
||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
const agentCount = await agentGen.writeDashArtifacts(destDir, agentArtifacts);
|
||||
|
|
@ -115,7 +115,7 @@ class CodexSetup extends BaseIdeSetup {
|
|||
const { artifacts: workflowArtifacts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
|
||||
const workflowCount = await workflowGenerator.writeDashArtifacts(destDir, workflowArtifacts);
|
||||
|
||||
// Also write tasks using underscore format
|
||||
// Also write tasks using dash format
|
||||
const ttGen = new TaskToolCommandGenerator();
|
||||
const tasksWritten = await ttGen.writeDashArtifacts(destDir, taskArtifacts);
|
||||
|
||||
|
|
@ -155,7 +155,7 @@ class CodexSetup extends BaseIdeSetup {
|
|||
// Check global location
|
||||
if (await fs.pathExists(globalDir)) {
|
||||
const entries = await fs.readdir(globalDir);
|
||||
if (entries.some((entry) => entry.startsWith('bmad'))) {
|
||||
if (entries.some((entry) => entry.startsWith('bmad-'))) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
@ -163,7 +163,7 @@ class CodexSetup extends BaseIdeSetup {
|
|||
// Check project-specific location
|
||||
if (await fs.pathExists(projectSpecificDir)) {
|
||||
const entries = await fs.readdir(projectSpecificDir);
|
||||
if (entries.some((entry) => entry.startsWith('bmad'))) {
|
||||
if (entries.some((entry) => entry.startsWith('bmad-'))) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
@ -256,7 +256,7 @@ class CodexSetup extends BaseIdeSetup {
|
|||
const entries = await fs.readdir(destDir);
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.startsWith('bmad')) {
|
||||
if (!entry.startsWith('bmad-')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
@ -292,7 +292,7 @@ class CodexSetup extends BaseIdeSetup {
|
|||
chalk.dim(" To use with other projects, you'd need to copy the _bmad dir"),
|
||||
'',
|
||||
chalk.green(' ✓ You can now use /commands in Codex CLI'),
|
||||
chalk.dim(' Example: /bmad_bmm_pm'),
|
||||
chalk.dim(' Example: /bmad-bmm-pm'),
|
||||
chalk.dim(' Type / to see all available commands'),
|
||||
'',
|
||||
chalk.bold.cyan('═'.repeat(70)),
|
||||
|
|
@ -397,7 +397,7 @@ You must fully embody this agent's persona and follow all activation instruction
|
|||
</agent-activation>
|
||||
`;
|
||||
|
||||
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
||||
const fileName = customAgentDashName(agentName);
|
||||
const launcherPath = path.join(destDir, fileName);
|
||||
await fs.writeFile(launcherPath, launcherContent, 'utf8');
|
||||
|
|
|
|||
|
|
@ -35,26 +35,26 @@ class CrushSetup extends BaseIdeSetup {
|
|||
const commandsDir = path.join(crushDir, this.commandsDir);
|
||||
await this.ensureDir(commandsDir);
|
||||
|
||||
// Use underscore format: files written directly to commands dir (no bmad subfolder)
|
||||
// Creates: .crush/commands/bmad_bmm_pm.md
|
||||
// Use colon format: files written directly to commands dir (no bmad subfolder)
|
||||
// Creates: .crush/commands/bmad:bmm:pm.md
|
||||
|
||||
// Generate agent launchers
|
||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
|
||||
// Write agent launcher files using flat underscore naming
|
||||
// Creates files like: bmad_bmm_pm.md
|
||||
// Write agent launcher files using flat colon naming
|
||||
// Creates files like: bmad:bmm:pm.md
|
||||
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
||||
|
||||
// Get ALL workflows using the new workflow command generator
|
||||
const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: workflowArtifacts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
|
||||
|
||||
// Write workflow-command artifacts using flat underscore naming
|
||||
// Creates files like: bmad_bmm_correct-course.md
|
||||
// Write workflow-command artifacts using flat colon naming
|
||||
// Creates files like: bmad:bmm:correct-course.md
|
||||
const workflowCount = await workflowGenerator.writeColonArtifacts(commandsDir, workflowArtifacts);
|
||||
|
||||
// Generate task and tool commands using flat underscore naming
|
||||
// Generate task and tool commands using flat colon naming
|
||||
const taskToolGen = new TaskToolCommandGenerator();
|
||||
const taskToolResult = await taskToolGen.generateColonTaskToolCommands(projectDir, bmadDir, commandsDir);
|
||||
|
||||
|
|
@ -81,11 +81,11 @@ class CrushSetup extends BaseIdeSetup {
|
|||
async cleanup(projectDir) {
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
// Remove any bmad* files from the commands directory (cleans up old bmad: and bmad- formats)
|
||||
// Remove any bmad:* files from the commands directory
|
||||
if (await fs.pathExists(commandsDir)) {
|
||||
const entries = await fs.readdir(commandsDir);
|
||||
for (const entry of entries) {
|
||||
if (entry.startsWith('bmad')) {
|
||||
if (entry.startsWith('bmad:')) {
|
||||
await fs.remove(path.join(commandsDir, entry));
|
||||
}
|
||||
}
|
||||
|
|
@ -129,7 +129,7 @@ The agent will follow the persona and instructions from the main agent file.
|
|||
|
||||
*Generated by BMAD Method*`;
|
||||
|
||||
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||
// Use colon format: bmad:custom:agents:fred-commit-poet.md
|
||||
// Written directly to commands dir (no bmad subfolder)
|
||||
const launcherName = customAgentColonName(agentName);
|
||||
const launcherPath = path.join(commandsDir, launcherName);
|
||||
|
|
|
|||
|
|
@ -25,11 +25,11 @@ class CursorSetup extends BaseIdeSetup {
|
|||
const fs = require('fs-extra');
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
// Remove any bmad* files from the commands directory (cleans up old bmad: and bmad- formats)
|
||||
// Remove any bmad:* files from the commands directory
|
||||
if (await fs.pathExists(commandsDir)) {
|
||||
const entries = await fs.readdir(commandsDir);
|
||||
for (const entry of entries) {
|
||||
if (entry.startsWith('bmad')) {
|
||||
if (entry.startsWith('bmad:')) {
|
||||
await fs.remove(path.join(commandsDir, entry));
|
||||
}
|
||||
}
|
||||
|
|
@ -59,24 +59,24 @@ class CursorSetup extends BaseIdeSetup {
|
|||
const commandsDir = path.join(cursorDir, this.commandsDir);
|
||||
await this.ensureDir(commandsDir);
|
||||
|
||||
// Use underscore format: files written directly to commands dir (no bmad subfolder)
|
||||
// Creates: .cursor/commands/bmad_bmm_pm.md
|
||||
// Use colon format: files written directly to commands dir (no bmad subfolder)
|
||||
// Creates: .cursor/commands/bmad:bmm:pm.md
|
||||
|
||||
// Generate agent launchers using AgentCommandGenerator
|
||||
// This creates small launcher files that reference the actual agents in _bmad/
|
||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
|
||||
// Write agent launcher files using flat underscore naming
|
||||
// Creates files like: bmad_bmm_pm.md
|
||||
// Write agent launcher files using flat colon naming
|
||||
// Creates files like: bmad:bmm:pm.md
|
||||
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
||||
|
||||
// Generate workflow commands from manifest (if it exists)
|
||||
const workflowGen = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
|
||||
|
||||
// Write workflow-command artifacts using flat underscore naming
|
||||
// Creates files like: bmad_bmm_correct-course.md
|
||||
// Write workflow-command artifacts using flat colon naming
|
||||
// Creates files like: bmad:bmm:correct-course.md
|
||||
const workflowCommandCount = await workflowGen.writeColonArtifacts(commandsDir, workflowArtifacts);
|
||||
|
||||
// Generate task and tool commands from manifests (if they exist)
|
||||
|
|
@ -144,7 +144,7 @@ description: '${agentName} agent'
|
|||
${launcherContent}
|
||||
`;
|
||||
|
||||
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||
// Use colon format: bmad:custom:agents:fred-commit-poet.md
|
||||
// Written directly to commands dir (no bmad subfolder)
|
||||
const launcherName = customAgentColonName(agentName);
|
||||
const launcherPath = path.join(commandsDir, launcherName);
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ class GeminiSetup extends BaseIdeSetup {
|
|||
await this.writeFile(tomlPath, tomlContent);
|
||||
agentCount++;
|
||||
|
||||
console.log(chalk.green(` ✓ Added agent: /bmad_agents_${artifact.module}_${artifact.name}`));
|
||||
console.log(chalk.green(` ✓ Added agent: /bmad:agents:${artifact.module}:${artifact.name}`));
|
||||
}
|
||||
|
||||
// Install tasks as TOML files with bmad- prefix (flat structure)
|
||||
|
|
@ -100,7 +100,7 @@ class GeminiSetup extends BaseIdeSetup {
|
|||
await this.writeFile(tomlPath, tomlContent);
|
||||
taskCount++;
|
||||
|
||||
console.log(chalk.green(` ✓ Added task: /bmad_tasks_${task.module}_${task.name}`));
|
||||
console.log(chalk.green(` ✓ Added task: /bmad:tasks:${task.module}:${task.name}`));
|
||||
}
|
||||
|
||||
// Install workflows as TOML files with bmad- prefix (flat structure)
|
||||
|
|
@ -116,7 +116,7 @@ class GeminiSetup extends BaseIdeSetup {
|
|||
await this.writeFile(tomlPath, tomlContent);
|
||||
workflowCount++;
|
||||
|
||||
console.log(chalk.green(` ✓ Added workflow: /bmad_workflows_${artifact.module}_${workflowName}`));
|
||||
console.log(chalk.green(` ✓ Added workflow: /bmad:workflows:${artifact.module}:${workflowName}`));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -125,9 +125,9 @@ class GeminiSetup extends BaseIdeSetup {
|
|||
console.log(chalk.dim(` - ${taskCount} tasks configured`));
|
||||
console.log(chalk.dim(` - ${workflowCount} workflows configured`));
|
||||
console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));
|
||||
console.log(chalk.dim(` - Agent activation: /bmad_agents_{agent-name}`));
|
||||
console.log(chalk.dim(` - Task activation: /bmad_tasks_{task-name}`));
|
||||
console.log(chalk.dim(` - Workflow activation: /bmad_workflows_{workflow-name}`));
|
||||
console.log(chalk.dim(` - Agent activation: /bmad:agents:{agent-name}`));
|
||||
console.log(chalk.dim(` - Task activation: /bmad:tasks:{task-name}`));
|
||||
console.log(chalk.dim(` - Workflow activation: /bmad:workflows:{workflow-name}`));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
|
|
@ -233,12 +233,12 @@ ${contentWithoutFrontmatter}
|
|||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
if (await fs.pathExists(commandsDir)) {
|
||||
// Remove any bmad* files (cleans up old bmad- and bmad: formats)
|
||||
// Only remove files that start with bmad- prefix
|
||||
const files = await fs.readdir(commandsDir);
|
||||
let removed = 0;
|
||||
|
||||
for (const file of files) {
|
||||
if (file.startsWith('bmad') && file.endsWith('.toml')) {
|
||||
if (file.startsWith('bmad-') && file.endsWith('.toml')) {
|
||||
await fs.remove(path.join(commandsDir, file));
|
||||
removed++;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -275,7 +275,7 @@ ${cleanContent}
|
|||
let removed = 0;
|
||||
|
||||
for (const file of files) {
|
||||
if (file.startsWith('bmad') && file.endsWith('.chatmode.md')) {
|
||||
if (file.startsWith('bmad-') && file.endsWith('.chatmode.md')) {
|
||||
await fs.remove(path.join(chatmodesDir, file));
|
||||
removed++;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ class KiroCliSetup extends BaseIdeSetup {
|
|||
// Remove existing BMad agents
|
||||
const files = await fs.readdir(bmadAgentsDir);
|
||||
for (const file of files) {
|
||||
if (file.startsWith('bmad')) {
|
||||
if (file.startsWith('bmad-') || file.includes('bmad')) {
|
||||
await fs.remove(path.join(bmadAgentsDir, file));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -185,7 +185,7 @@ class OpenCodeSetup extends BaseIdeSetup {
|
|||
if (await fs.pathExists(agentsDir)) {
|
||||
const files = await fs.readdir(agentsDir);
|
||||
for (const file of files) {
|
||||
if (file.startsWith('bmad') && file.endsWith('.md')) {
|
||||
if (file.startsWith('bmad-') && file.endsWith('.md')) {
|
||||
await fs.remove(path.join(agentsDir, file));
|
||||
removed++;
|
||||
}
|
||||
|
|
@ -196,7 +196,7 @@ class OpenCodeSetup extends BaseIdeSetup {
|
|||
if (await fs.pathExists(commandsDir)) {
|
||||
const files = await fs.readdir(commandsDir);
|
||||
for (const file of files) {
|
||||
if (file.startsWith('bmad') && file.endsWith('.md')) {
|
||||
if (file.startsWith('bmad-') && file.endsWith('.md')) {
|
||||
await fs.remove(path.join(commandsDir, file));
|
||||
removed++;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ class QwenSetup extends BaseIdeSetup {
|
|||
await this.writeFile(targetPath, tomlContent);
|
||||
|
||||
agentCount++;
|
||||
console.log(chalk.green(` ✓ Added agent: /bmad_${artifact.module}_agents_${artifact.name}`));
|
||||
console.log(chalk.green(` ✓ Added agent: /bmad:${artifact.module}:agents:${artifact.name}`));
|
||||
}
|
||||
|
||||
// Create TOML files for each task
|
||||
|
|
@ -90,7 +90,7 @@ class QwenSetup extends BaseIdeSetup {
|
|||
await this.writeFile(targetPath, content);
|
||||
|
||||
taskCount++;
|
||||
console.log(chalk.green(` ✓ Added task: /bmad_${task.module}_tasks_${task.name}`));
|
||||
console.log(chalk.green(` ✓ Added task: /bmad:${task.module}:tasks:${task.name}`));
|
||||
}
|
||||
|
||||
// Create TOML files for each tool
|
||||
|
|
@ -106,7 +106,7 @@ class QwenSetup extends BaseIdeSetup {
|
|||
await this.writeFile(targetPath, content);
|
||||
|
||||
toolCount++;
|
||||
console.log(chalk.green(` ✓ Added tool: /bmad_${tool.module}_tools_${tool.name}`));
|
||||
console.log(chalk.green(` ✓ Added tool: /bmad:${tool.module}:tools:${tool.name}`));
|
||||
}
|
||||
|
||||
// Create TOML files for each workflow
|
||||
|
|
@ -122,7 +122,7 @@ class QwenSetup extends BaseIdeSetup {
|
|||
await this.writeFile(targetPath, content);
|
||||
|
||||
workflowCount++;
|
||||
console.log(chalk.green(` ✓ Added workflow: /bmad_${workflow.module}_workflows_${workflow.name}`));
|
||||
console.log(chalk.green(` ✓ Added workflow: /bmad:${workflow.module}:workflows:${workflow.name}`));
|
||||
}
|
||||
|
||||
console.log(chalk.green(`✓ ${this.name} configured:`));
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ class RooSetup extends BaseIdeSetup {
|
|||
let skippedCount = 0;
|
||||
|
||||
for (const artifact of agentArtifacts) {
|
||||
// Use shared toDashPath to get consistent naming: bmad_bmm_name.md
|
||||
// Use shared toDashPath to get consistent naming: bmad-bmm-name.md
|
||||
const commandName = toDashPath(artifact.relativePath).replace('.md', '');
|
||||
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
||||
|
||||
|
|
@ -169,7 +169,7 @@ class RooSetup extends BaseIdeSetup {
|
|||
let removedCount = 0;
|
||||
|
||||
for (const file of files) {
|
||||
if (file.startsWith('bmad') && file.endsWith('.md')) {
|
||||
if (file.startsWith('bmad-') && file.endsWith('.md')) {
|
||||
await fs.remove(path.join(rooCommandsDir, file));
|
||||
removedCount++;
|
||||
}
|
||||
|
|
@ -192,7 +192,7 @@ class RooSetup extends BaseIdeSetup {
|
|||
let removedCount = 0;
|
||||
|
||||
for (const line of lines) {
|
||||
if (/^\s*- slug: bmad/.test(line)) {
|
||||
if (/^\s*- slug: bmad-/.test(line)) {
|
||||
skipMode = true;
|
||||
removedCount++;
|
||||
} else if (skipMode && /^\s*- slug: /.test(line)) {
|
||||
|
|
@ -224,7 +224,7 @@ class RooSetup extends BaseIdeSetup {
|
|||
const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
await this.ensureDir(rooCommandsDir);
|
||||
|
||||
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
||||
const commandName = customAgentDashName(agentName).replace('.md', '');
|
||||
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
||||
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
|||
const subagentsDir = path.join(rovoDevDir, this.subagentsDir);
|
||||
if (await fs.pathExists(subagentsDir)) {
|
||||
const entries = await fs.readdir(subagentsDir);
|
||||
const bmadFiles = entries.filter((file) => file.startsWith('bmad') && file.endsWith('.md'));
|
||||
const bmadFiles = entries.filter((file) => file.startsWith('bmad-') && file.endsWith('.md'));
|
||||
|
||||
for (const file of bmadFiles) {
|
||||
await fs.remove(path.join(subagentsDir, file));
|
||||
|
|
@ -48,7 +48,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
|||
const workflowsDir = path.join(rovoDevDir, this.workflowsDir);
|
||||
if (await fs.pathExists(workflowsDir)) {
|
||||
const entries = await fs.readdir(workflowsDir);
|
||||
const bmadFiles = entries.filter((file) => file.startsWith('bmad') && file.endsWith('.md'));
|
||||
const bmadFiles = entries.filter((file) => file.startsWith('bmad-') && file.endsWith('.md'));
|
||||
|
||||
for (const file of bmadFiles) {
|
||||
await fs.remove(path.join(workflowsDir, file));
|
||||
|
|
@ -59,7 +59,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
|||
const referencesDir = path.join(rovoDevDir, this.referencesDir);
|
||||
if (await fs.pathExists(referencesDir)) {
|
||||
const entries = await fs.readdir(referencesDir);
|
||||
const bmadFiles = entries.filter((file) => file.startsWith('bmad') && file.endsWith('.md'));
|
||||
const bmadFiles = entries.filter((file) => file.startsWith('bmad-') && file.endsWith('.md'));
|
||||
|
||||
for (const file of bmadFiles) {
|
||||
await fs.remove(path.join(referencesDir, file));
|
||||
|
|
@ -249,7 +249,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
|||
if (await fs.pathExists(subagentsDir)) {
|
||||
try {
|
||||
const entries = await fs.readdir(subagentsDir);
|
||||
if (entries.some((entry) => entry.startsWith('bmad') && entry.endsWith('.md'))) {
|
||||
if (entries.some((entry) => entry.startsWith('bmad-') && entry.endsWith('.md'))) {
|
||||
return true;
|
||||
}
|
||||
} catch {
|
||||
|
|
@ -262,7 +262,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
|||
if (await fs.pathExists(workflowsDir)) {
|
||||
try {
|
||||
const entries = await fs.readdir(workflowsDir);
|
||||
if (entries.some((entry) => entry.startsWith('bmad') && entry.endsWith('.md'))) {
|
||||
if (entries.some((entry) => entry.startsWith('bmad-') && entry.endsWith('.md'))) {
|
||||
return true;
|
||||
}
|
||||
} catch {
|
||||
|
|
@ -275,7 +275,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
|||
if (await fs.pathExists(referencesDir)) {
|
||||
try {
|
||||
const entries = await fs.readdir(referencesDir);
|
||||
if (entries.some((entry) => entry.startsWith('bmad') && entry.endsWith('.md'))) {
|
||||
if (entries.some((entry) => entry.startsWith('bmad-') && entry.endsWith('.md'))) {
|
||||
return true;
|
||||
}
|
||||
} catch {
|
||||
|
|
|
|||
|
|
@ -94,8 +94,8 @@ class AgentCommandGenerator {
|
|||
}
|
||||
|
||||
/**
|
||||
* Write agent launcher artifacts using underscore format (Windows-compatible)
|
||||
* Creates flat files like: bmad_bmm_pm.md
|
||||
* Write agent launcher artifacts using COLON format (for folder-based IDEs)
|
||||
* Creates flat files like: bmad:bmm:pm.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Agent launcher artifacts
|
||||
|
|
@ -106,7 +106,7 @@ class AgentCommandGenerator {
|
|||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'agent-launcher') {
|
||||
// Convert relativePath to underscore format: bmm/agents/pm.md → bmad_bmm_pm.md
|
||||
// Convert relativePath to colon format: bmm/agents/pm.md → bmad:bmm:pm.md
|
||||
const flatName = toColonPath(artifact.relativePath);
|
||||
const launcherPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(launcherPath));
|
||||
|
|
@ -119,8 +119,8 @@ class AgentCommandGenerator {
|
|||
}
|
||||
|
||||
/**
|
||||
* Write agent launcher artifacts using underscore format (Windows-compatible)
|
||||
* Creates flat files like: bmad_bmm_pm.md
|
||||
* Write agent launcher artifacts using DASH format (for flat IDEs)
|
||||
* Creates flat files like: bmad-bmm-pm.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Agent launcher artifacts
|
||||
|
|
@ -131,7 +131,7 @@ class AgentCommandGenerator {
|
|||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'agent-launcher') {
|
||||
// Convert relativePath to underscore format: bmm/agents/pm.md → bmad_bmm_pm.md
|
||||
// Convert relativePath to dash format: bmm/agents/pm.md → bmad-bmm-pm.md
|
||||
const flatName = toDashPath(artifact.relativePath);
|
||||
const launcherPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(launcherPath));
|
||||
|
|
@ -144,18 +144,18 @@ class AgentCommandGenerator {
|
|||
}
|
||||
|
||||
/**
|
||||
* Get the custom agent name in underscore format (Windows-compatible)
|
||||
* Get the custom agent name in colon format
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Underscore-formatted filename
|
||||
* @returns {string} Colon-formatted filename
|
||||
*/
|
||||
getCustomAgentColonName(agentName) {
|
||||
return customAgentColonName(agentName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the custom agent name in underscore format (Windows-compatible)
|
||||
* Get the custom agent name in dash format
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Underscore-formatted filename
|
||||
* @returns {string} Dash-formatted filename
|
||||
*/
|
||||
getCustomAgentDashName(agentName) {
|
||||
return customAgentDashName(agentName);
|
||||
|
|
|
|||
|
|
@ -2,72 +2,109 @@
|
|||
* Path transformation utilities for IDE installer standardization
|
||||
*
|
||||
* Provides utilities to convert hierarchical paths to flat naming conventions.
|
||||
* - Underscore format (bmad_module_name.md) - Windows-compatible universal format
|
||||
* - Colon format (bmad:module:name.md) for folder-based IDEs converting to flat
|
||||
* - Dash format (bmad-module-name.md) for already-flat IDEs
|
||||
*/
|
||||
|
||||
// Type segments to filter out from paths
|
||||
const TYPE_SEGMENTS = ['agents', 'workflows', 'tasks', 'tools'];
|
||||
|
||||
/**
|
||||
* Convert hierarchical path to flat underscore-separated name
|
||||
* Converts: 'bmm/agents/pm.md' → 'bmad_bmm_pm.md'
|
||||
* Converts: 'bmm/workflows/correct-course.md' → 'bmad_bmm_correct-course.md'
|
||||
* Convert hierarchical path to flat colon-separated name (for folder-based IDEs)
|
||||
* Converts: 'bmm/agents/pm.md' → 'bmad:bmm:pm.md'
|
||||
* Converts: 'bmm/workflows/correct-course.md' → 'bmad:bmm:correct-course.md'
|
||||
*
|
||||
* @param {string} module - Module name (e.g., 'bmm', 'core')
|
||||
* @param {string} type - Artifact type ('agents', 'workflows', 'tasks', 'tools') - filtered out
|
||||
* @param {string} name - Artifact name (e.g., 'pm', 'correct-course')
|
||||
* @returns {string} Flat filename like 'bmad_bmm_pm.md'
|
||||
* @returns {string} Flat filename like 'bmad:bmm:pm.md'
|
||||
*/
|
||||
function toUnderscoreName(module, type, name) {
|
||||
return `bmad_${module}_${name}.md`;
|
||||
function toColonName(module, type, name) {
|
||||
return `bmad:${module}:${name}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert relative path to flat underscore-separated name
|
||||
* Converts: 'bmm/agents/pm.md' → 'bmad_bmm_pm.md'
|
||||
* Converts: 'bmm/workflows/correct-course.md' → 'bmad_bmm_correct-course.md'
|
||||
* Convert relative path to flat colon-separated name (for folder-based IDEs)
|
||||
* Converts: 'bmm/agents/pm.md' → 'bmad:bmm:pm.md'
|
||||
* Converts: 'bmm/workflows/correct-course.md' → 'bmad:bmm:correct-course.md'
|
||||
*
|
||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
||||
* @returns {string} Flat filename like 'bmad_bmm_pm.md'
|
||||
* @returns {string} Flat filename like 'bmad:bmm:pm.md'
|
||||
*/
|
||||
function toUnderscorePath(relativePath) {
|
||||
function toColonPath(relativePath) {
|
||||
const withoutExt = relativePath.replace('.md', '');
|
||||
const parts = withoutExt.split(/[/\\]/);
|
||||
// Filter out type segments (agents, workflows, tasks, tools)
|
||||
const filtered = parts.filter((p) => !TYPE_SEGMENTS.includes(p));
|
||||
return `bmad_${filtered.join('_')}.md`;
|
||||
return `bmad:${filtered.join(':')}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create custom agent underscore name
|
||||
* Creates: 'bmad_custom_fred-commit-poet.md'
|
||||
* Convert hierarchical path to flat dash-separated name (for flat IDEs)
|
||||
* Converts: 'bmm/agents/pm.md' → 'bmad-bmm-pm.md'
|
||||
* Converts: 'bmm/workflows/correct-course.md' → 'bmad-bmm-correct-course.md'
|
||||
*
|
||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
||||
* @returns {string} Flat filename like 'bmad-bmm-pm.md'
|
||||
*/
|
||||
function toDashPath(relativePath) {
|
||||
const withoutExt = relativePath.replace('.md', '');
|
||||
const parts = withoutExt.split(/[/\\]/);
|
||||
// Filter out type segments (agents, workflows, tasks, tools)
|
||||
const filtered = parts.filter((p) => !TYPE_SEGMENTS.includes(p));
|
||||
return `bmad-${filtered.join('-')}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create custom agent colon name (for folder-based IDEs)
|
||||
* Creates: 'bmad:custom:fred-commit-poet.md'
|
||||
*
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Flat filename like 'bmad_custom_fred-commit-poet.md'
|
||||
* @returns {string} Flat filename like 'bmad:custom:fred-commit-poet.md'
|
||||
*/
|
||||
function customAgentUnderscoreName(agentName) {
|
||||
return `bmad_custom_${agentName}.md`;
|
||||
function customAgentColonName(agentName) {
|
||||
return `bmad:custom:${agentName}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a filename uses underscore format
|
||||
* @param {string} filename - Filename to check
|
||||
* @returns {boolean} True if filename uses underscore format
|
||||
*/
|
||||
function isUnderscoreFormat(filename) {
|
||||
return filename.startsWith('bmad_') && filename.includes('_');
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract parts from an underscore-formatted filename
|
||||
* Parses: 'bmad_bmm_pm.md' → { prefix: 'bmad', module: 'bmm', name: 'pm' }
|
||||
* Create custom agent dash name (for flat IDEs)
|
||||
* Creates: 'bmad-custom-fred-commit-poet.md'
|
||||
*
|
||||
* @param {string} filename - Underscore-formatted filename
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Flat filename like 'bmad-custom-fred-commit-poet.md'
|
||||
*/
|
||||
function customAgentDashName(agentName) {
|
||||
return `bmad-custom-${agentName}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a filename uses colon format
|
||||
* @param {string} filename - Filename to check
|
||||
* @returns {boolean} True if filename uses colon format
|
||||
*/
|
||||
function isColonFormat(filename) {
|
||||
return filename.includes('bmad:') && filename.includes(':');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a filename uses dash format
|
||||
* @param {string} filename - Filename to check
|
||||
* @returns {boolean} True if filename uses dash format
|
||||
*/
|
||||
function isDashFormat(filename) {
|
||||
return filename.startsWith('bmad-') && !filename.includes(':');
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract parts from a colon-formatted filename
|
||||
* Parses: 'bmad:bmm:pm.md' → { prefix: 'bmad', module: 'bmm', name: 'pm' }
|
||||
*
|
||||
* @param {string} filename - Colon-formatted filename
|
||||
* @returns {Object|null} Parsed parts or null if invalid format
|
||||
*/
|
||||
function parseUnderscoreName(filename) {
|
||||
function parseColonName(filename) {
|
||||
const withoutExt = filename.replace('.md', '');
|
||||
const parts = withoutExt.split('_');
|
||||
const parts = withoutExt.split(':');
|
||||
|
||||
if (parts.length < 3 || parts[0] !== 'bmad') {
|
||||
return null;
|
||||
|
|
@ -76,28 +113,33 @@ function parseUnderscoreName(filename) {
|
|||
return {
|
||||
prefix: parts[0],
|
||||
module: parts[1],
|
||||
name: parts.slice(2).join('_'), // Handle names that might contain underscores
|
||||
name: parts.slice(2).join(':'), // Handle names that might contain colons
|
||||
};
|
||||
}
|
||||
|
||||
// Backward compatibility aliases (deprecated)
|
||||
const toColonName = toUnderscoreName;
|
||||
const toColonPath = toUnderscorePath;
|
||||
const toDashPath = toUnderscorePath;
|
||||
const customAgentColonName = customAgentUnderscoreName;
|
||||
const customAgentDashName = customAgentUnderscoreName;
|
||||
const isColonFormat = isUnderscoreFormat;
|
||||
const isDashFormat = isUnderscoreFormat;
|
||||
const parseColonName = parseUnderscoreName;
|
||||
const parseDashName = parseUnderscoreName;
|
||||
/**
|
||||
* Extract parts from a dash-formatted filename
|
||||
* Parses: 'bmad-bmm-pm.md' → { prefix: 'bmad', module: 'bmm', name: 'pm' }
|
||||
*
|
||||
* @param {string} filename - Dash-formatted filename
|
||||
* @returns {Object|null} Parsed parts or null if invalid format
|
||||
*/
|
||||
function parseDashName(filename) {
|
||||
const withoutExt = filename.replace('.md', '');
|
||||
const parts = withoutExt.split('-');
|
||||
|
||||
if (parts.length < 3 || parts[0] !== 'bmad') {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
prefix: parts[0],
|
||||
module: parts[1],
|
||||
name: parts.slice(2).join('-'), // Handle names that might contain dashes
|
||||
};
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
toUnderscoreName,
|
||||
toUnderscorePath,
|
||||
customAgentUnderscoreName,
|
||||
isUnderscoreFormat,
|
||||
parseUnderscoreName,
|
||||
// Backward compatibility aliases
|
||||
toColonName,
|
||||
toColonPath,
|
||||
toDashPath,
|
||||
|
|
|
|||
|
|
@ -117,8 +117,8 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
}
|
||||
|
||||
/**
|
||||
* Generate task and tool commands using underscore format (Windows-compatible)
|
||||
* Creates flat files like: bmad_bmm_bmad-help.md
|
||||
* Generate task and tool commands using COLON format (for folder-based IDEs)
|
||||
* Creates flat files like: bmad:bmm:bmad-help.md
|
||||
*
|
||||
* @param {string} projectDir - Project directory
|
||||
* @param {string} bmadDir - BMAD installation directory
|
||||
|
|
@ -138,7 +138,7 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
// Generate command files for tasks
|
||||
for (const task of standaloneTasks) {
|
||||
const commandContent = this.generateCommandContent(task, 'task');
|
||||
// Use underscore format: bmad_bmm_name.md
|
||||
// Use colon format: bmad:bmm:name.md
|
||||
const flatName = toColonName(task.module, 'tasks', task.name);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
|
|
@ -149,7 +149,7 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
// Generate command files for tools
|
||||
for (const tool of standaloneTools) {
|
||||
const commandContent = this.generateCommandContent(tool, 'tool');
|
||||
// Use underscore format: bmad_bmm_name.md
|
||||
// Use colon format: bmad:bmm:name.md
|
||||
const flatName = toColonName(tool.module, 'tools', tool.name);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
|
|
@ -165,8 +165,8 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
}
|
||||
|
||||
/**
|
||||
* Generate task and tool commands using underscore format (Windows-compatible)
|
||||
* Creates flat files like: bmad_bmm_bmad-help.md
|
||||
* Generate task and tool commands using DASH format (for flat IDEs)
|
||||
* Creates flat files like: bmad-bmm-bmad-help.md
|
||||
*
|
||||
* @param {string} projectDir - Project directory
|
||||
* @param {string} bmadDir - BMAD installation directory
|
||||
|
|
@ -186,7 +186,7 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
// Generate command files for tasks
|
||||
for (const task of standaloneTasks) {
|
||||
const commandContent = this.generateCommandContent(task, 'task');
|
||||
// Use underscore format: bmad_bmm_name.md
|
||||
// Use dash format: bmad-bmm-name.md
|
||||
const flatName = toDashPath(`${task.module}/tasks/${task.name}.md`);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
|
|
@ -197,7 +197,7 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
// Generate command files for tools
|
||||
for (const tool of standaloneTools) {
|
||||
const commandContent = this.generateCommandContent(tool, 'tool');
|
||||
// Use underscore format: bmad_bmm_name.md
|
||||
// Use dash format: bmad-bmm-name.md
|
||||
const flatName = toDashPath(`${tool.module}/tools/${tool.name}.md`);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
|
|
@ -213,8 +213,8 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
}
|
||||
|
||||
/**
|
||||
* Write task/tool artifacts using underscore format (Windows-compatible)
|
||||
* Creates flat files like: bmad_bmm_bmad-help.md
|
||||
* Write task/tool artifacts using COLON format (for folder-based IDEs)
|
||||
* Creates flat files like: bmad:bmm:bmad-help.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Task/tool artifacts with relativePath
|
||||
|
|
@ -226,7 +226,7 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'task' || artifact.type === 'tool') {
|
||||
const commandContent = this.generateCommandContent(artifact, artifact.type);
|
||||
// Use underscore format: bmad_module_name.md
|
||||
// Use colon format: bmad:module:name.md
|
||||
const flatName = toColonPath(artifact.relativePath);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
|
|
@ -239,8 +239,8 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
}
|
||||
|
||||
/**
|
||||
* Write task/tool artifacts using underscore format (Windows-compatible)
|
||||
* Creates flat files like: bmad_bmm_bmad-help.md
|
||||
* Write task/tool artifacts using DASH format (for flat IDEs)
|
||||
* Creates flat files like: bmad-bmm-bmad-help.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Task/tool artifacts with relativePath
|
||||
|
|
@ -252,7 +252,7 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'task' || artifact.type === 'tool') {
|
||||
const commandContent = this.generateCommandContent(artifact, artifact.type);
|
||||
// Use underscore format: bmad_module_name.md
|
||||
// Use dash format: bmad-module-name.md
|
||||
const flatName = toDashPath(artifact.relativePath);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
|
|
|
|||
|
|
@ -240,8 +240,8 @@ When running any workflow:
|
|||
}
|
||||
|
||||
/**
|
||||
* Write workflow command artifacts using underscore format (Windows-compatible)
|
||||
* Creates flat files like: bmad_bmm_correct-course.md
|
||||
* Write workflow command artifacts using COLON format (for folder-based IDEs)
|
||||
* Creates flat files like: bmad:bmm:correct-course.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Workflow artifacts
|
||||
|
|
@ -252,7 +252,7 @@ When running any workflow:
|
|||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'workflow-command') {
|
||||
// Convert relativePath to underscore format: bmm/workflows/correct-course.md → bmad_bmm_correct-course.md
|
||||
// Convert relativePath to colon format: bmm/workflows/correct-course.md → bmad:bmm:correct-course.md
|
||||
const flatName = toColonPath(artifact.relativePath);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
|
|
@ -265,8 +265,8 @@ When running any workflow:
|
|||
}
|
||||
|
||||
/**
|
||||
* Write workflow command artifacts using underscore format (Windows-compatible)
|
||||
* Creates flat files like: bmad_bmm_correct-course.md
|
||||
* Write workflow command artifacts using DASH format (for flat IDEs)
|
||||
* Creates flat files like: bmad-bmm-correct-course.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Workflow artifacts
|
||||
|
|
@ -277,7 +277,7 @@ When running any workflow:
|
|||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'workflow-command') {
|
||||
// Convert relativePath to underscore format: bmm/workflows/correct-course.md → bmad_bmm_correct-course.md
|
||||
// Convert relativePath to dash format: bmm/workflows/correct-course.md → bmad-bmm-correct-course.md
|
||||
const flatName = toDashPath(artifact.relativePath);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
|
|
|
|||
|
|
@ -246,12 +246,12 @@ Part of the BMAD ${workflow.module.toUpperCase()} module.
|
|||
const rulesPath = path.join(projectDir, this.configDir, this.rulesDir);
|
||||
|
||||
if (await fs.pathExists(rulesPath)) {
|
||||
// Remove any bmad* files (cleans up old bmad- and bmad: formats)
|
||||
// Only remove files that start with bmad- prefix
|
||||
const files = await fs.readdir(rulesPath);
|
||||
let removed = 0;
|
||||
|
||||
for (const file of files) {
|
||||
if (file.startsWith('bmad') && file.endsWith('.md')) {
|
||||
if (file.startsWith('bmad-') && file.endsWith('.md')) {
|
||||
await fs.remove(path.join(rulesPath, file));
|
||||
removed++;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -54,7 +54,6 @@ class ExternalModuleManager {
|
|||
description: moduleConfig.description || '',
|
||||
defaultSelected: moduleConfig.defaultSelected === true,
|
||||
type: moduleConfig.type || 'community', // bmad-org or community
|
||||
npmPackage: moduleConfig.npmPackage || null, // Include npm package name
|
||||
isExternal: true,
|
||||
});
|
||||
}
|
||||
|
|
@ -96,7 +95,6 @@ class ExternalModuleManager {
|
|||
description: moduleConfig.description || '',
|
||||
defaultSelected: moduleConfig.defaultSelected === true,
|
||||
type: moduleConfig.type || 'community', // bmad-org or community
|
||||
npmPackage: moduleConfig.npmPackage || null, // Include npm package name
|
||||
isExternal: true,
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -371,9 +371,9 @@ class ModuleManager {
|
|||
const fetchSpinner = ora(`Fetching ${moduleInfo.name}...`).start();
|
||||
try {
|
||||
const currentRef = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
|
||||
// Fetch and reset to remote - works better with shallow clones than pull
|
||||
execSync('git fetch origin --depth 1', { cwd: moduleCacheDir, stdio: 'pipe' });
|
||||
execSync('git reset --hard origin/HEAD', { cwd: moduleCacheDir, stdio: 'pipe' });
|
||||
execSync('git fetch --depth 1', { cwd: moduleCacheDir, stdio: 'pipe' });
|
||||
execSync('git checkout -f', { cwd: moduleCacheDir, stdio: 'pipe' });
|
||||
execSync('git pull --ff-only', { cwd: moduleCacheDir, stdio: 'pipe' });
|
||||
const newRef = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
|
||||
|
||||
fetchSpinner.succeed(`Fetched ${moduleInfo.name}`);
|
||||
|
|
@ -555,23 +555,10 @@ class ModuleManager {
|
|||
await this.runModuleInstaller(moduleName, bmadDir, options);
|
||||
}
|
||||
|
||||
// Capture version info for manifest
|
||||
const { Manifest } = require('../core/manifest');
|
||||
const manifestObj = new Manifest();
|
||||
const versionInfo = await manifestObj.getModuleVersionInfo(moduleName, bmadDir, sourcePath);
|
||||
|
||||
await manifestObj.addModule(bmadDir, moduleName, {
|
||||
version: versionInfo.version,
|
||||
source: versionInfo.source,
|
||||
npmPackage: versionInfo.npmPackage,
|
||||
repoUrl: versionInfo.repoUrl,
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
module: moduleName,
|
||||
path: targetPath,
|
||||
versionInfo,
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1586,131 +1586,6 @@ class UI {
|
|||
|
||||
return proceed === 'proceed';
|
||||
}
|
||||
|
||||
/**
|
||||
* Display module versions with update availability
|
||||
* @param {Array} modules - Array of module info objects with version info
|
||||
* @param {Array} availableUpdates - Array of available updates
|
||||
*/
|
||||
displayModuleVersions(modules, availableUpdates = []) {
|
||||
console.log('');
|
||||
console.log(chalk.cyan.bold('📦 Module Versions'));
|
||||
console.log(chalk.gray('─'.repeat(80)));
|
||||
|
||||
// Group modules by source
|
||||
const builtIn = modules.filter((m) => m.source === 'built-in');
|
||||
const external = modules.filter((m) => m.source === 'external');
|
||||
const custom = modules.filter((m) => m.source === 'custom');
|
||||
const unknown = modules.filter((m) => m.source === 'unknown');
|
||||
|
||||
const displayGroup = (group, title) => {
|
||||
if (group.length === 0) return;
|
||||
|
||||
console.log(chalk.yellow(`\n${title}`));
|
||||
for (const module of group) {
|
||||
const updateInfo = availableUpdates.find((u) => u.name === module.name);
|
||||
const versionDisplay = module.version || chalk.gray('unknown');
|
||||
|
||||
if (updateInfo) {
|
||||
console.log(
|
||||
` ${chalk.cyan(module.name.padEnd(20))} ${versionDisplay} → ${chalk.green(updateInfo.latestVersion)} ${chalk.green('↑')}`,
|
||||
);
|
||||
} else {
|
||||
console.log(` ${chalk.cyan(module.name.padEnd(20))} ${versionDisplay} ${chalk.gray('✓')}`);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
displayGroup(builtIn, 'Built-in Modules');
|
||||
displayGroup(external, 'External Modules (Official)');
|
||||
displayGroup(custom, 'Custom Modules');
|
||||
displayGroup(unknown, 'Other Modules');
|
||||
|
||||
console.log('');
|
||||
}
|
||||
|
||||
/**
|
||||
* Prompt user to select which modules to update
|
||||
* @param {Array} availableUpdates - Array of available updates
|
||||
* @returns {Array} Selected module names to update
|
||||
*/
|
||||
async promptUpdateSelection(availableUpdates) {
|
||||
if (availableUpdates.length === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
console.log('');
|
||||
console.log(chalk.cyan.bold('🔄 Available Updates'));
|
||||
console.log(chalk.gray('─'.repeat(80)));
|
||||
|
||||
const choices = availableUpdates.map((update) => ({
|
||||
name: `${update.name} ${chalk.dim(`(v${update.installedVersion} → v${update.latestVersion})`)}`,
|
||||
value: update.name,
|
||||
checked: true, // Default to selecting all updates
|
||||
}));
|
||||
|
||||
// Add "Update All" and "Cancel" options
|
||||
const action = await prompts.select({
|
||||
message: 'How would you like to proceed?',
|
||||
choices: [
|
||||
{ name: 'Update all available modules', value: 'all' },
|
||||
{ name: 'Select specific modules to update', value: 'select' },
|
||||
{ name: 'Skip updates for now', value: 'skip' },
|
||||
],
|
||||
default: 'all',
|
||||
});
|
||||
|
||||
if (action === 'all') {
|
||||
return availableUpdates.map((u) => u.name);
|
||||
}
|
||||
|
||||
if (action === 'skip') {
|
||||
return [];
|
||||
}
|
||||
|
||||
// Allow specific selection
|
||||
const selected = await prompts.multiselect({
|
||||
message: `Select modules to update ${chalk.dim('(↑/↓ navigates, SPACE toggles, ENTER to confirm)')}:`,
|
||||
choices: choices,
|
||||
required: true,
|
||||
});
|
||||
|
||||
return selected || [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Display status of all installed modules
|
||||
* @param {Object} statusData - Status data with modules, installation info, and available updates
|
||||
*/
|
||||
displayStatus(statusData) {
|
||||
const { installation, modules, availableUpdates, bmadDir } = statusData;
|
||||
|
||||
console.log('');
|
||||
console.log(chalk.cyan.bold('📋 BMAD Status'));
|
||||
console.log(chalk.gray('─'.repeat(80)));
|
||||
|
||||
// Installation info
|
||||
console.log(chalk.yellow('\nInstallation'));
|
||||
console.log(` ${chalk.gray('Version:'.padEnd(20))} ${installation.version || chalk.gray('unknown')}`);
|
||||
console.log(` ${chalk.gray('Location:'.padEnd(20))} ${bmadDir}`);
|
||||
console.log(` ${chalk.gray('Installed:'.padEnd(20))} ${new Date(installation.installDate).toLocaleDateString()}`);
|
||||
console.log(
|
||||
` ${chalk.gray('Last Updated:'.padEnd(20))} ${installation.lastUpdated ? new Date(installation.lastUpdated).toLocaleDateString() : chalk.gray('unknown')}`,
|
||||
);
|
||||
|
||||
// Module versions
|
||||
this.displayModuleVersions(modules, availableUpdates);
|
||||
|
||||
// Update summary
|
||||
if (availableUpdates.length > 0) {
|
||||
console.log(chalk.yellow.bold(`\n⚠️ ${availableUpdates.length} update(s) available`));
|
||||
console.log(chalk.dim(` Run 'bmad install' and select "Quick Update" to update`));
|
||||
} else {
|
||||
console.log(chalk.green.bold('\n✓ All modules are up to date'));
|
||||
}
|
||||
|
||||
console.log('');
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { UI };
|
||||
|
|
|
|||
Loading…
Reference in New Issue