Compare commits
5 Commits
f1437c8635
...
000e308ce2
| Author | SHA1 | Date |
|---|---|---|
|
|
000e308ce2 | |
|
|
48881f86a6 | |
|
|
efbe839a0a | |
|
|
3f9ad4868c | |
|
|
aad132c9b1 |
|
|
@ -1,33 +1,33 @@
|
||||||
module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs,
|
module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs,
|
||||||
bmm,anytime,Document Project,DP,10,_bmad/bmm/workflows/document-project/workflow.yaml,bmad:bmm:document-project,false,analyst,Create Mode,"Analyze an existing project to produce useful documentation",project-knowledge,*,
|
bmm,anytime,Document Project,DP,10,_bmad/bmm/workflows/document-project/workflow.yaml,bmad_bmm_document-project,false,analyst,Create Mode,"Analyze an existing project to produce useful documentation",project-knowledge,*,
|
||||||
bmm,anytime,Tech Spec,TS,20,_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md,bmad:bmm:tech-spec,false,quick-flow-solo-dev,Create Mode,"Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method. Quick one-off tasks small changes simple apps utilities without extensive planning",planning_artifacts,"tech spec",
|
bmm,anytime,Tech Spec,TS,20,_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md,bmad_bmm_tech-spec,false,quick-flow-solo-dev,Create Mode,"Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method. Quick one-off tasks small changes simple apps utilities without extensive planning",planning_artifacts,"tech spec",
|
||||||
bmm,anytime,Quick Dev,QD,30,_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md,bmad:bmm:quick-dev,false,quick-flow-solo-dev,Create Mode,"Quick one-off tasks small changes simple apps utilities without extensive planning - Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method, unless the user is already working through the implementation phase and just requests a one-off thing not already in the plan",,,
|
bmm,anytime,Quick Dev,QD,30,_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md,bmad_bmm_quick-dev,false,quick-flow-solo-dev,Create Mode,"Quick one-off tasks small changes simple apps utilities without extensive planning - Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method, unless the user is already working through the implementation phase and just requests a one-off thing not already in the plan",,,
|
||||||
bmm,anytime,Correct Course,CC,40,_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml,bmad:bmm:correct-course,false,sm,Create Mode,"Anytime: Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories",planning_artifacts,"change proposal",
|
bmm,anytime,Correct Course,CC,40,_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml,bmad_bmm_correct-course,false,sm,Create Mode,"Anytime: Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories",planning_artifacts,"change proposal",
|
||||||
bmm,1-analysis,Brainstorm Project,BP,10,_bmad/core/workflows/brainstorming/workflow.md,bmad:bmm:brainstorming,false,analyst,"data=_bmad/bmm/data/project-context-template.md","Expert Guided Facilitation through a single or multiple techniques",planning_artifacts,"brainstorming session",
|
bmm,1-analysis,Brainstorm Project,BP,10,_bmad/core/workflows/brainstorming/workflow.md,bmad_bmm_brainstorming,false,analyst,"data=_bmad/bmm/data/project-context-template.md","Expert Guided Facilitation through a single or multiple techniques",planning_artifacts,"brainstorming session",
|
||||||
bmm,1-analysis,Market Research,MR,20,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad:bmm:research,false,analyst,Create Mode,"research_type=""market""","Market analysis competitive landscape customer needs and trends","planning_artifacts|project-knowledge","research documents"
|
bmm,1-analysis,Market Research,MR,20,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad_bmm_research,false,analyst,Create Mode,"research_type=""market""","Market analysis competitive landscape customer needs and trends","planning_artifacts|project-knowledge","research documents"
|
||||||
bmm,1-analysis,Domain Research,DR,21,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad:bmm:research,false,analyst,Create Mode,"research_type=""domain""","Industry domain deep dive subject matter expertise and terminology","planning_artifacts|project-knowledge","research documents"
|
bmm,1-analysis,Domain Research,DR,21,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad_bmm_research,false,analyst,Create Mode,"research_type=""domain""","Industry domain deep dive subject matter expertise and terminology","planning_artifacts|project-knowledge","research documents"
|
||||||
bmm,1-analysis,Technical Research,TR,22,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad:bmm:research,false,analyst,Create Mode,"research_type=""technical""","Technical feasibility architecture options and implementation approaches","planning_artifacts|project-knowledge","research documents"
|
bmm,1-analysis,Technical Research,TR,22,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad_bmm_research,false,analyst,Create Mode,"research_type=""technical""","Technical feasibility architecture options and implementation approaches","planning_artifacts|project-knowledge","research documents"
|
||||||
bmm,1-analysis,Create Brief,CB,30,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad:bmm:create-brief,false,analyst,Create Mode,"A guided experience to nail down your product idea",planning_artifacts,"product brief",
|
bmm,1-analysis,Create Brief,CB,30,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad_bmm_create-brief,false,analyst,Create Mode,"A guided experience to nail down your product idea",planning_artifacts,"product brief",
|
||||||
bmm,1-analysis,Validate Brief,VB,40,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad:bmm:validate-brief,false,analyst,Validate Mode,"Validates product brief completeness",planning_artifacts,"brief validation report",
|
bmm,1-analysis,Validate Brief,VB,40,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad_bmm_validate-brief,false,analyst,Validate Mode,"Validates product brief completeness",planning_artifacts,"brief validation report",
|
||||||
bmm,2-planning,Create PRD,CP,10,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad:bmm:create-prd,true,pm,Create Mode,"Expert led facilitation to produce your Product Requirements Document",planning_artifacts,prd,
|
bmm,2-planning,Create PRD,CP,10,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad_bmm_create-prd,true,pm,Create Mode,"Expert led facilitation to produce your Product Requirements Document",planning_artifacts,prd,
|
||||||
bmm,2-planning,Validate PRD,VP,20,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad:bmm:validate-prd,false,pm,Validate Mode,"Validate PRD is comprehensive lean well organized and cohesive",planning_artifacts,"prd validation report",
|
bmm,2-planning,Validate PRD,VP,20,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad_bmm_validate-prd,false,pm,Validate Mode,"Validate PRD is comprehensive lean well organized and cohesive",planning_artifacts,"prd validation report",
|
||||||
bmm,2-planning,Create UX,CU,30,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad:bmm:create-ux,false,ux-designer,Create Mode,"Guidance through realizing the plan for your UX, strongly recommended if a UI is a primary piece of the proposed project",planning_artifacts,"ux design",
|
bmm,2-planning,Create UX,CU,30,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad_bmm_create-ux,false,ux-designer,Create Mode,"Guidance through realizing the plan for your UX, strongly recommended if a UI is a primary piece of the proposed project",planning_artifacts,"ux design",
|
||||||
bmm,2-planning,Validate UX,VU,40,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad:bmm:validate-ux,false,ux-designer,Validate Mode,"Validates UX design deliverables",planning_artifacts,"ux validation report",
|
bmm,2-planning,Validate UX,VU,40,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad_bmm_validate-ux,false,ux-designer,Validate Mode,"Validates UX design deliverables",planning_artifacts,"ux validation report",
|
||||||
,,Create Dataflow,CDF,50,_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml,bmad:bmm:create-dataflow,false,ux-designer,Create Mode,"Create data flow diagrams (DFD) in Excalidraw format - can be called standalone or during any workflow to add visual documentation",planning_artifacts,"dataflow diagram",
|
,,Create Dataflow,CDF,50,_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml,bmad_bmm_create-dataflow,false,ux-designer,Create Mode,"Create data flow diagrams (DFD) in Excalidraw format - can be called standalone or during any workflow to add visual documentation",planning_artifacts,"dataflow diagram",
|
||||||
,,Create Diagram,CED,51,_bmad/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml,bmad:bmm:create-diagram,false,ux-designer,Create Mode,"Create system architecture diagrams ERDs UML diagrams or general technical diagrams in Excalidraw format - use anytime or call from architecture workflow to add visual documentation",planning_artifacts,"diagram",
|
,,Create Diagram,CED,51,_bmad/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml,bmad_bmm_create-diagram,false,ux-designer,Create Mode,"Create system architecture diagrams ERDs UML diagrams or general technical diagrams in Excalidraw format - use anytime or call from architecture workflow to add visual documentation",planning_artifacts,"diagram",
|
||||||
,,Create Flowchart,CFC,52,_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml,bmad:bmm:create-flowchart,false,ux-designer,Create Mode,"Create a flowchart visualization in Excalidraw format for processes pipelines or logic flows - use anytime or during architecture to add process documentation",planning_artifacts,"flowchart",
|
,,Create Flowchart,CFC,52,_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml,bmad_bmm_create-flowchart,false,ux-designer,Create Mode,"Create a flowchart visualization in Excalidraw format for processes pipelines or logic flows - use anytime or during architecture to add process documentation",planning_artifacts,"flowchart",
|
||||||
,,Create Wireframe,CEW,53,_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml,bmad:bmm:create-wireframe,false,ux-designer,Create Mode,"Create website or app wireframes in Excalidraw format - use anytime standalone or call from UX workflow to add UI mockups",planning_artifacts,"wireframe",
|
,,Create Wireframe,CEW,53,_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml,bmad_bmm_create-wireframe,false,ux-designer,Create Mode,"Create website or app wireframes in Excalidraw format - use anytime standalone or call from UX workflow to add UI mockups",planning_artifacts,"wireframe",
|
||||||
bmm,3-solutioning,Create Architecture,CA,10,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad:bmm:create-architecture,true,architect,Create Mode,"Guided Workflow to document technical decisions",planning_artifacts,architecture,
|
bmm,3-solutioning,Create Architecture,CA,10,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad_bmm_create-architecture,true,architect,Create Mode,"Guided Workflow to document technical decisions",planning_artifacts,architecture,
|
||||||
bmm,3-solutioning,Validate Architecture,VA,20,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad:bmm:validate-architecture,false,architect,Validate Mode,"Validates architecture completeness",planning_artifacts,"architecture validation report",
|
bmm,3-solutioning,Validate Architecture,VA,20,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad_bmm_validate-architecture,false,architect,Validate Mode,"Validates architecture completeness",planning_artifacts,"architecture validation report",
|
||||||
bmm,3-solutioning,Create Epics and Stories,CE,30,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad:bmm:create-epics-and-stories,true,pm,Create Mode,"Create the Epics and Stories Listing",planning_artifacts,"epics and stories",
|
bmm,3-solutioning,Create Epics and Stories,CE,30,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad_bmm_create-epics-and-stories,true,pm,Create Mode,"Create the Epics and Stories Listing",planning_artifacts,"epics and stories",
|
||||||
bmm,3-solutioning,Validate Epics and Stories,VE,40,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad:bmm:validate-epics-and-stories,false,pm,Validate Mode,"Validates epics and stories completeness",planning_artifacts,"epics validation report",
|
bmm,3-solutioning,Validate Epics and Stories,VE,40,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad_bmm_validate-epics-and-stories,false,pm,Validate Mode,"Validates epics and stories completeness",planning_artifacts,"epics validation report",
|
||||||
bmm,3-solutioning,Test Design,TD,50,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad:bmm:test-design,false,tea,Create Mode,"Create comprehensive test scenarios ahead of development, recommended if strict test compliance or assurance is needed. Very critical for distributed applications with separate front ends and backends outside of a monorepo.",planning_artifacts,"test design",
|
bmm,3-solutioning,Test Design,TD,50,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad_bmm_test-design,false,tea,Create Mode,"Create comprehensive test scenarios ahead of development, recommended if strict test compliance or assurance is needed. Very critical for distributed applications with separate front ends and backends outside of a monorepo.",planning_artifacts,"test design",
|
||||||
bmm,3-solutioning,Validate Test Design,VT,60,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad:bmm:validate-test-design,false,tea,Validate Mode,"Validates test design coverage",planning_artifacts,"test design validation report",
|
bmm,3-solutioning,Validate Test Design,VT,60,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad_bmm_validate-test-design,false,tea,Validate Mode,"Validates test design coverage",planning_artifacts,"test design validation report",
|
||||||
bmm,3-solutioning,Implementation Readiness,IR,70,_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md,bmad:bmm:implementation-readiness,true,architect,Validate Mode,"Ensure PRD UX Architecture and Epics Stories are aligned",planning_artifacts,"readiness report",
|
bmm,3-solutioning,Implementation Readiness,IR,70,_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md,bmad_bmm_implementation-readiness,true,architect,Validate Mode,"Ensure PRD UX Architecture and Epics Stories are aligned",planning_artifacts,"readiness report",
|
||||||
bmm,4-implementation,Sprint Planning,SP,10,_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml,bmad:bmm:sprint-planning,true,sm,Create Mode,"Generate sprint plan for development tasks - this kicks off the implementation phase by producing a plan the implementation agents will follow in sequence for every story in the plan.",implementation_artifacts,"sprint status",
|
bmm,4-implementation,Sprint Planning,SP,10,_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml,bmad_bmm_sprint-planning,true,sm,Create Mode,"Generate sprint plan for development tasks - this kicks off the implementation phase by producing a plan the implementation agents will follow in sequence for every story in the plan.",implementation_artifacts,"sprint status",
|
||||||
bmm,4-implementation,Sprint Status,SS,20,_bmad/bmm/workflows/4-implementation/sprint-status/workflow.yaml,bmad:bmm:sprint-status,false,sm,Create Mode,"Anytime: Summarize sprint status and route to next workflow",,,
|
bmm,4-implementation,Sprint Status,SS,20,_bmad/bmm/workflows/4-implementation/sprint-status/workflow.yaml,bmad_bmm_sprint-status,false,sm,Create Mode,"Anytime: Summarize sprint status and route to next workflow",,,
|
||||||
bmm,4-implementation,Create Story,CS,30,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad:bmm:create-story,true,sm,Create Mode,"Story cycle start: Prepare first found story in the sprint plan that is next, or if the command is run with a specific epic and story designation with context. Once complete, then VS then DS then CR then back to DS if needed or next CS or ER",implementation_artifacts,story,
|
bmm,4-implementation,Create Story,CS,30,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad_bmm_create-story,true,sm,Create Mode,"Story cycle start: Prepare first found story in the sprint plan that is next, or if the command is run with a specific epic and story designation with context. Once complete, then VS then DS then CR then back to DS if needed or next CS or ER",implementation_artifacts,story,
|
||||||
bmm,4-implementation,Validate Story,VS,35,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad:bmm:validate-story,false,sm,Validate Mode,"Validates story readiness and completeness before development work begins",implementation_artifacts,"story validation report",
|
bmm,4-implementation,Validate Story,VS,35,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad_bmm_validate-story,false,sm,Validate Mode,"Validates story readiness and completeness before development work begins",implementation_artifacts,"story validation report",
|
||||||
bmm,4-implementation,Dev Story,DS,40,_bmad/bmm/workflows/4-implementation/dev-story/workflow.yaml,bmad:bmm:dev-story,true,dev,Create Mode,"Story cycle: Execute story implementation tasks and tests then CR then back to DS if fixes needed",,,
|
bmm,4-implementation,Dev Story,DS,40,_bmad/bmm/workflows/4-implementation/dev-story/workflow.yaml,bmad_bmm_dev-story,true,dev,Create Mode,"Story cycle: Execute story implementation tasks and tests then CR then back to DS if fixes needed",,,
|
||||||
bmm,4-implementation,Code Review,CR,50,_bmad/bmm/workflows/4-implementation/code-review/workflow.yaml,bmad:bmm:code-review,false,dev,Create Mode,"Story cycle: If issues back to DS if approved then next CS or ER if epic complete",,,
|
bmm,4-implementation,Code Review,CR,50,_bmad/bmm/workflows/4-implementation/code-review/workflow.yaml,bmad_bmm_code-review,false,dev,Create Mode,"Story cycle: If issues back to DS if approved then next CS or ER if epic complete",,,
|
||||||
bmm,4-implementation,Retrospective,ER,60,_bmad/bmm/workflows/4-implementation/retrospective/workflow.yaml,bmad:bmm:retrospective,false,sm,Create Mode,"Optional at epic end: Review completed work lessons learned and next epic or if major issues consider CC",implementation_artifacts,retrospective,
|
bmm,4-implementation,Retrospective,ER,60,_bmad/bmm/workflows/4-implementation/retrospective/workflow.yaml,bmad_bmm_retrospective,false,sm,Create Mode,"Optional at epic end: Review completed work lessons learned and next epic or if major issues consider CC",implementation_artifacts,retrospective,
|
||||||
|
|
|
||||||
|
|
|
@ -80,23 +80,29 @@
|
||||||
- [ ] Owners assigned where applicable
|
- [ ] Owners assigned where applicable
|
||||||
- [ ] No duplicate coverage (same behavior at multiple levels)
|
- [ ] No duplicate coverage (same behavior at multiple levels)
|
||||||
|
|
||||||
### Execution Order
|
### Execution Strategy
|
||||||
|
|
||||||
- [ ] Smoke tests defined (<5 min target)
|
**CRITICAL: Keep execution strategy simple, avoid redundancy**
|
||||||
- [ ] P0 tests listed (<10 min target)
|
|
||||||
- [ ] P1 tests listed (<30 min target)
|
- [ ] **Simple structure**: PR / Nightly / Weekly (NOT complex smoke/P0/P1/P2 tiers)
|
||||||
- [ ] P2/P3 tests listed (<60 min target)
|
- [ ] **PR execution**: All functional tests unless significant infrastructure overhead
|
||||||
- [ ] Order optimizes for fast feedback
|
- [ ] **Nightly/Weekly**: Only performance, chaos, long-running, manual tests
|
||||||
|
- [ ] **No redundancy**: Don't re-list all tests (already in coverage plan)
|
||||||
|
- [ ] **Philosophy stated**: "Run everything in PRs if <15 min, defer only if expensive/long"
|
||||||
|
- [ ] **Playwright parallelization noted**: 100s of tests in 10-15 min
|
||||||
|
|
||||||
### Resource Estimates
|
### Resource Estimates
|
||||||
|
|
||||||
- [ ] P0 hours calculated (count × 2 hours)
|
**CRITICAL: Use intervals/ranges, NOT exact numbers**
|
||||||
- [ ] P1 hours calculated (count × 1 hour)
|
|
||||||
- [ ] P2 hours calculated (count × 0.5 hours)
|
- [ ] P0 effort provided as interval range (e.g., "~25-40 hours" NOT "36 hours")
|
||||||
- [ ] P3 hours calculated (count × 0.25 hours)
|
- [ ] P1 effort provided as interval range (e.g., "~20-35 hours" NOT "27 hours")
|
||||||
- [ ] Total hours summed
|
- [ ] P2 effort provided as interval range (e.g., "~10-30 hours" NOT "15.5 hours")
|
||||||
- [ ] Days estimate provided (hours / 8)
|
- [ ] P3 effort provided as interval range (e.g., "~2-5 hours" NOT "2.5 hours")
|
||||||
- [ ] Estimates include setup time
|
- [ ] Total effort provided as interval range (e.g., "~55-110 hours" NOT "81 hours")
|
||||||
|
- [ ] Timeline provided as week range (e.g., "~1.5-3 weeks" NOT "11 days")
|
||||||
|
- [ ] Estimates include setup time and account for complexity variations
|
||||||
|
- [ ] **No false precision**: Avoid exact calculations like "18 tests × 2 hours = 36 hours"
|
||||||
|
|
||||||
### Quality Gate Criteria
|
### Quality Gate Criteria
|
||||||
|
|
||||||
|
|
@ -126,11 +132,16 @@
|
||||||
|
|
||||||
### Priority Assignment Accuracy
|
### Priority Assignment Accuracy
|
||||||
|
|
||||||
- [ ] P0: Truly blocks core functionality
|
**CRITICAL: Priority classification is separate from execution timing**
|
||||||
- [ ] P0: High-risk (score ≥6)
|
|
||||||
- [ ] P0: No workaround exists
|
- [ ] **Priority sections (P0/P1/P2/P3) do NOT include execution context** (e.g., no "Run on every commit" in headers)
|
||||||
- [ ] P1: Important but not blocking
|
- [ ] **Priority sections have only "Criteria" and "Purpose"** (no "Execution:" field)
|
||||||
- [ ] P2/P3: Nice-to-have or edge cases
|
- [ ] **Execution Strategy section** is separate and handles timing based on infrastructure overhead
|
||||||
|
- [ ] P0: Truly blocks core functionality + High-risk (≥6) + No workaround
|
||||||
|
- [ ] P1: Important features + Medium-risk (3-4) + Common workflows
|
||||||
|
- [ ] P2: Secondary features + Low-risk (1-2) + Edge cases
|
||||||
|
- [ ] P3: Nice-to-have + Exploratory + Benchmarks
|
||||||
|
- [ ] **Note at top of Test Coverage Plan**: Clarifies P0/P1/P2/P3 = priority/risk, NOT execution timing
|
||||||
|
|
||||||
### Test Level Selection
|
### Test Level Selection
|
||||||
|
|
||||||
|
|
@ -176,58 +187,90 @@
|
||||||
- [ ] 🚨 BLOCKERS - Team Must Decide (Sprint 0 critical path items)
|
- [ ] 🚨 BLOCKERS - Team Must Decide (Sprint 0 critical path items)
|
||||||
- [ ] ⚠️ HIGH PRIORITY - Team Should Validate (recommendations for approval)
|
- [ ] ⚠️ HIGH PRIORITY - Team Should Validate (recommendations for approval)
|
||||||
- [ ] 📋 INFO ONLY - Solutions Provided (no decisions needed)
|
- [ ] 📋 INFO ONLY - Solutions Provided (no decisions needed)
|
||||||
- [ ] **Risk Assessment** section
|
- [ ] **Risk Assessment** section - **ACTIONABLE**
|
||||||
- [ ] Total risks identified count
|
- [ ] Total risks identified count
|
||||||
- [ ] High-priority risks table (score ≥6) with all columns: Risk ID, Category, Description, Probability, Impact, Score, Mitigation, Owner, Timeline
|
- [ ] High-priority risks table (score ≥6) with all columns: Risk ID, Category, Description, Probability, Impact, Score, Mitigation, Owner, Timeline
|
||||||
- [ ] Medium and low-priority risks tables
|
- [ ] Medium and low-priority risks tables
|
||||||
- [ ] Risk category legend included
|
- [ ] Risk category legend included
|
||||||
- [ ] **Testability Concerns** section (if system has architectural constraints)
|
- [ ] **Testability Concerns and Architectural Gaps** section - **ACTIONABLE**
|
||||||
- [ ] Blockers to fast feedback table
|
- [ ] **Sub-section: 🚨 ACTIONABLE CONCERNS** at TOP
|
||||||
- [ ] Explanation of why standard CI/CD may not apply (if applicable)
|
- [ ] Blockers to Fast Feedback table (WHAT architecture must provide)
|
||||||
- [ ] Tiered testing strategy table (if forced by architecture)
|
- [ ] Architectural Improvements Needed (WHAT must be changed)
|
||||||
- [ ] Architectural improvements needed (or acknowledgment system supports testing well)
|
- [ ] Each concern has: Owner, Timeline, Impact
|
||||||
|
- [ ] **Sub-section: Testability Assessment Summary** at BOTTOM (FYI)
|
||||||
|
- [ ] What Works Well (passing items)
|
||||||
|
- [ ] Accepted Trade-offs (no action required)
|
||||||
|
- [ ] This section only included if worth mentioning; otherwise omitted
|
||||||
- [ ] **Risk Mitigation Plans** for all high-priority risks (≥6)
|
- [ ] **Risk Mitigation Plans** for all high-priority risks (≥6)
|
||||||
- [ ] Each plan has: Strategy (numbered steps), Owner, Timeline, Status, Verification
|
- [ ] Each plan has: Strategy (numbered steps), Owner, Timeline, Status, Verification
|
||||||
|
- [ ] **Only Backend/DevOps/Arch/Security mitigations** (production code changes)
|
||||||
|
- [ ] QA-owned mitigations belong in QA doc instead
|
||||||
- [ ] **Assumptions and Dependencies** section
|
- [ ] **Assumptions and Dependencies** section
|
||||||
|
- [ ] **Architectural assumptions only** (SLO targets, replication lag, system design)
|
||||||
- [ ] Assumptions list (numbered)
|
- [ ] Assumptions list (numbered)
|
||||||
- [ ] Dependencies list with required dates
|
- [ ] Dependencies list with required dates
|
||||||
- [ ] Risks to plan with impact and contingency
|
- [ ] Risks to plan with impact and contingency
|
||||||
|
- [ ] QA execution assumptions belong in QA doc instead
|
||||||
- [ ] **NO test implementation code** (long examples belong in QA doc)
|
- [ ] **NO test implementation code** (long examples belong in QA doc)
|
||||||
|
- [ ] **NO test scripts** (no Playwright test(...) blocks, no assertions, no test setup code)
|
||||||
|
- [ ] **NO NFR test examples** (NFR sections describe WHAT to test, not HOW to test)
|
||||||
- [ ] **NO test scenario checklists** (belong in QA doc)
|
- [ ] **NO test scenario checklists** (belong in QA doc)
|
||||||
- [ ] **Cross-references to QA doc** where appropriate
|
- [ ] **NO bloat or repetition** (consolidate repeated notes, avoid over-explanation)
|
||||||
|
- [ ] **Cross-references to QA doc** where appropriate (instead of duplication)
|
||||||
|
- [ ] **RECIPE SECTIONS NOT IN ARCHITECTURE DOC:**
|
||||||
|
- [ ] NO "Test Levels Strategy" section (unit/integration/E2E split belongs in QA doc only)
|
||||||
|
- [ ] NO "NFR Testing Approach" section with detailed test procedures (belongs in QA doc only)
|
||||||
|
- [ ] NO "Test Environment Requirements" section (belongs in QA doc only)
|
||||||
|
- [ ] NO "Recommendations for Sprint 0" section with test framework setup (belongs in QA doc only)
|
||||||
|
- [ ] NO "Quality Gate Criteria" section (pass rates, coverage targets belong in QA doc only)
|
||||||
|
- [ ] NO "Tool Selection" section (Playwright, k6, etc. belongs in QA doc only)
|
||||||
|
|
||||||
### test-design-qa.md
|
### test-design-qa.md
|
||||||
|
|
||||||
- [ ] **Purpose statement** at top (execution recipe for QA team)
|
**NEW STRUCTURE (streamlined from 375 to ~287 lines):**
|
||||||
- [ ] **Quick Reference for QA** section
|
|
||||||
- [ ] Before You Start checklist
|
- [ ] **Purpose statement** at top (test execution recipe)
|
||||||
- [ ] Test Execution Order
|
- [ ] **Executive Summary** with risk summary and coverage summary
|
||||||
- [ ] Need Help? guidance
|
- [ ] **Dependencies & Test Blockers** section in POSITION 2 (right after Executive Summary)
|
||||||
- [ ] **System Architecture Summary** (brief overview of services and data flow)
|
- [ ] Backend/Architecture dependencies listed (what QA needs from other teams)
|
||||||
- [ ] **Test Environment Requirements** in early section (section 1-3, NOT buried at end)
|
- [ ] QA infrastructure setup listed (factories, fixtures, environments)
|
||||||
- [ ] Table with Local/Dev/Staging environments
|
- [ ] Code example with playwright-utils if config.tea_use_playwright_utils is true
|
||||||
- [ ] Key principles listed (shared DB, randomization, parallel-safe, self-cleaning, shift-left)
|
- [ ] Test from '@seontechnologies/playwright-utils/api-request/fixtures'
|
||||||
- [ ] Code example provided
|
- [ ] Expect from '@playwright/test' (playwright-utils does not re-export expect)
|
||||||
- [ ] **Testability Assessment** with prerequisites checklist
|
- [ ] Code examples include assertions (no unused imports)
|
||||||
- [ ] References Architecture doc blockers (not duplication)
|
- [ ] **Risk Assessment** section (brief, references Architecture doc)
|
||||||
- [ ] **Test Levels Strategy** with unit/integration/E2E split
|
- [ ] High-priority risks table
|
||||||
- [ ] System type identified
|
- [ ] Medium/low-priority risks table
|
||||||
- [ ] Recommended split percentages with rationale
|
- [ ] Each risk shows "QA Test Coverage" column (how QA validates)
|
||||||
- [ ] Test count summary (P0/P1/P2/P3 totals)
|
|
||||||
- [ ] **Test Coverage Plan** with P0/P1/P2/P3 sections
|
- [ ] **Test Coverage Plan** with P0/P1/P2/P3 sections
|
||||||
- [ ] Each priority has: Execution details, Purpose, Criteria, Test Count
|
- [ ] Priority sections have ONLY "Criteria" (no execution context)
|
||||||
- [ ] Detailed test scenarios WITH CHECKBOXES
|
- [ ] Note at top: "P0/P1/P2/P3 = priority, NOT execution timing"
|
||||||
- [ ] Coverage table with columns: Requirement | Test Level | Risk Link | Test Count | Owner | Notes
|
- [ ] Test tables with columns: Test ID | Requirement | Test Level | Risk Link | Notes
|
||||||
- [ ] **Sprint 0 Setup Requirements**
|
- [ ] **Execution Strategy** section (organized by TOOL TYPE)
|
||||||
- [ ] Architecture/Backend blockers listed with cross-references to Architecture doc
|
- [ ] Every PR: Playwright tests (~10-15 min)
|
||||||
- [ ] QA Test Infrastructure section (factories, fixtures)
|
- [ ] Nightly: k6 performance tests (~30-60 min)
|
||||||
- [ ] Test Environments section (Local, CI/CD, Staging, Production)
|
- [ ] Weekly: Chaos & long-running (~hours)
|
||||||
- [ ] Sprint 0 NFR Gates checklist
|
- [ ] Philosophy: "Run everything in PRs unless expensive/long-running"
|
||||||
- [ ] Sprint 1 Items clearly separated
|
- [ ] **QA Effort Estimate** section (QA effort ONLY)
|
||||||
- [ ] **NFR Readiness Summary** (reference to Architecture doc, not duplication)
|
- [ ] Interval-based estimates (e.g., "~1-2 weeks" NOT "36 hours")
|
||||||
- [ ] Table with NFR categories, status, evidence, blocker, next action
|
- [ ] NO DevOps, Backend, Data Eng, Finance effort
|
||||||
- [ ] **Cross-references to Architecture doc** (not duplication)
|
- [ ] NO Sprint breakdowns (too prescriptive)
|
||||||
- [ ] **NO architectural theory** (just reference Architecture doc)
|
- [ ] **Appendix A: Code Examples & Tagging**
|
||||||
|
- [ ] **Appendix B: Knowledge Base References**
|
||||||
|
|
||||||
|
**REMOVED SECTIONS (bloat):**
|
||||||
|
- [ ] ❌ NO Quick Reference section (bloat)
|
||||||
|
- [ ] ❌ NO System Architecture Summary (bloat)
|
||||||
|
- [ ] ❌ NO Test Environment Requirements as separate section (integrated into Dependencies)
|
||||||
|
- [ ] ❌ NO Testability Assessment section (bloat - covered in Dependencies)
|
||||||
|
- [ ] ❌ NO Test Levels Strategy section (bloat - obvious from test scenarios)
|
||||||
|
- [ ] ❌ NO NFR Readiness Summary (bloat)
|
||||||
|
- [ ] ❌ NO Quality Gate Criteria section (teams decide for themselves)
|
||||||
|
- [ ] ❌ NO Follow-on Workflows section (bloat - BMAD commands self-explanatory)
|
||||||
|
- [ ] ❌ NO Approval section (unnecessary formality)
|
||||||
|
- [ ] ❌ NO Infrastructure/DevOps/Finance effort tables (out of scope)
|
||||||
|
- [ ] ❌ NO Sprint 0/1/2/3 breakdown tables (too prescriptive)
|
||||||
|
- [ ] ❌ NO Next Steps section (bloat)
|
||||||
|
|
||||||
### Cross-Document Consistency
|
### Cross-Document Consistency
|
||||||
|
|
||||||
|
|
@ -238,6 +281,40 @@
|
||||||
- [ ] Dates and authors match across documents
|
- [ ] Dates and authors match across documents
|
||||||
- [ ] ADR and PRD references consistent
|
- [ ] ADR and PRD references consistent
|
||||||
|
|
||||||
|
### Document Quality (Anti-Bloat Check)
|
||||||
|
|
||||||
|
**CRITICAL: Check for bloat and repetition across BOTH documents**
|
||||||
|
|
||||||
|
- [ ] **No repeated notes 10+ times** (e.g., "Timing is pessimistic until R-005 fixed" on every section)
|
||||||
|
- [ ] **Repeated information consolidated** (write once at top, reference briefly if needed)
|
||||||
|
- [ ] **No excessive detail** that doesn't add value (obvious concepts, redundant examples)
|
||||||
|
- [ ] **Focus on unique/critical info** (only document what's different from standard practice)
|
||||||
|
- [ ] **Architecture doc**: Concerns-focused, NOT implementation-focused
|
||||||
|
- [ ] **QA doc**: Implementation-focused, NOT theory-focused
|
||||||
|
- [ ] **Clear separation**: Architecture = WHAT and WHY, QA = HOW
|
||||||
|
- [ ] **Professional tone**: No AI slop markers
|
||||||
|
- [ ] Avoid excessive ✅/❌ emojis (use sparingly, only when adding clarity)
|
||||||
|
- [ ] Avoid "absolutely", "excellent", "fantastic", overly enthusiastic language
|
||||||
|
- [ ] Write professionally and directly
|
||||||
|
- [ ] **Architecture doc length**: Target ~150-200 lines max (focus on actionable concerns only)
|
||||||
|
- [ ] **QA doc length**: Keep concise, remove bloat sections
|
||||||
|
|
||||||
|
### Architecture Doc Structure (Actionable-First Principle)
|
||||||
|
|
||||||
|
**CRITICAL: Validate structure follows actionable-first, FYI-last principle**
|
||||||
|
|
||||||
|
- [ ] **Actionable sections at TOP:**
|
||||||
|
- [ ] Quick Guide (🚨 BLOCKERS first, then ⚠️ HIGH PRIORITY, then 📋 INFO ONLY last)
|
||||||
|
- [ ] Risk Assessment (high-priority risks ≥6 at top)
|
||||||
|
- [ ] Testability Concerns (concerns/blockers at top, passing items at bottom)
|
||||||
|
- [ ] Risk Mitigation Plans (for high-priority risks ≥6)
|
||||||
|
- [ ] **FYI sections at BOTTOM:**
|
||||||
|
- [ ] Testability Assessment Summary (what works well - only if worth mentioning)
|
||||||
|
- [ ] Assumptions and Dependencies
|
||||||
|
- [ ] **ASRs categorized correctly:**
|
||||||
|
- [ ] Actionable ASRs included in 🚨 or ⚠️ sections
|
||||||
|
- [ ] FYI ASRs included in 📋 section or omitted if obvious
|
||||||
|
|
||||||
## Completion Criteria
|
## Completion Criteria
|
||||||
|
|
||||||
**All must be true:**
|
**All must be true:**
|
||||||
|
|
@ -295,9 +372,20 @@ If workflow fails:
|
||||||
|
|
||||||
- **Solution**: Use test pyramid - E2E for critical paths only
|
- **Solution**: Use test pyramid - E2E for critical paths only
|
||||||
|
|
||||||
**Issue**: Resource estimates too high
|
**Issue**: Resource estimates too high or too precise
|
||||||
|
|
||||||
- **Solution**: Invest in fixtures/factories to reduce per-test setup time
|
- **Solution**:
|
||||||
|
- Invest in fixtures/factories to reduce per-test setup time
|
||||||
|
- Use interval ranges (e.g., "~55-110 hours") instead of exact numbers (e.g., "81 hours")
|
||||||
|
- Widen intervals if high uncertainty exists
|
||||||
|
|
||||||
|
**Issue**: Execution order section too complex or redundant
|
||||||
|
|
||||||
|
- **Solution**:
|
||||||
|
- Default: Run everything in PRs (<15 min with Playwright parallelization)
|
||||||
|
- Only defer to nightly/weekly if expensive (k6, chaos, 4+ hour tests)
|
||||||
|
- Don't create smoke/P0/P1/P2/P3 tier structure
|
||||||
|
- Don't re-list all tests (already in coverage plan)
|
||||||
|
|
||||||
### Best Practices
|
### Best Practices
|
||||||
|
|
||||||
|
|
@ -305,7 +393,9 @@ If workflow fails:
|
||||||
- High-priority risks (≥6) require immediate mitigation
|
- High-priority risks (≥6) require immediate mitigation
|
||||||
- P0 tests should cover <10% of total scenarios
|
- P0 tests should cover <10% of total scenarios
|
||||||
- Avoid testing same behavior at multiple levels
|
- Avoid testing same behavior at multiple levels
|
||||||
- Include smoke tests (P0 subset) for fast feedback
|
- **Use interval-based estimates** (e.g., "~25-40 hours") instead of exact numbers to avoid false precision and provide flexibility
|
||||||
|
- **Keep execution strategy simple**: Default to "run everything in PRs" (<15 min with Playwright), only defer if expensive/long-running
|
||||||
|
- **Avoid execution order redundancy**: Don't create complex tier structures or re-list tests
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -157,7 +157,13 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
|
|
||||||
1. **Review Architecture for Testability**
|
1. **Review Architecture for Testability**
|
||||||
|
|
||||||
Evaluate architecture against these criteria:
|
**STRUCTURE PRINCIPLE: CONCERNS FIRST, PASSING ITEMS LAST**
|
||||||
|
|
||||||
|
Evaluate architecture against these criteria and structure output as:
|
||||||
|
1. **Testability Concerns** (ACTIONABLE - what's broken/missing)
|
||||||
|
2. **Testability Assessment Summary** (FYI - what works well)
|
||||||
|
|
||||||
|
**Testability Criteria:**
|
||||||
|
|
||||||
**Controllability:**
|
**Controllability:**
|
||||||
- Can we control system state for testing? (API seeding, factories, database reset)
|
- Can we control system state for testing? (API seeding, factories, database reset)
|
||||||
|
|
@ -174,8 +180,18 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
- Can we reproduce failures? (deterministic waits, HAR capture, seed data)
|
- Can we reproduce failures? (deterministic waits, HAR capture, seed data)
|
||||||
- Are components loosely coupled? (mockable, testable boundaries)
|
- Are components loosely coupled? (mockable, testable boundaries)
|
||||||
|
|
||||||
|
**In Architecture Doc Output:**
|
||||||
|
- **Section A: Testability Concerns** (TOP) - List what's BROKEN or MISSING
|
||||||
|
- Example: "No API for test data seeding → Cannot parallelize tests"
|
||||||
|
- Example: "Hardcoded DB connection → Cannot test in CI"
|
||||||
|
- **Section B: Testability Assessment Summary** (BOTTOM) - List what PASSES
|
||||||
|
- Example: "✅ API-first design supports test isolation"
|
||||||
|
- Only include if worth mentioning; otherwise omit this section entirely
|
||||||
|
|
||||||
2. **Identify Architecturally Significant Requirements (ASRs)**
|
2. **Identify Architecturally Significant Requirements (ASRs)**
|
||||||
|
|
||||||
|
**CRITICAL: ASRs must indicate if ACTIONABLE or FYI**
|
||||||
|
|
||||||
From PRD NFRs and architecture decisions, identify quality requirements that:
|
From PRD NFRs and architecture decisions, identify quality requirements that:
|
||||||
- Drive architecture decisions (e.g., "Must handle 10K concurrent users" → caching architecture)
|
- Drive architecture decisions (e.g., "Must handle 10K concurrent users" → caching architecture)
|
||||||
- Pose testability challenges (e.g., "Sub-second response time" → performance test infrastructure)
|
- Pose testability challenges (e.g., "Sub-second response time" → performance test infrastructure)
|
||||||
|
|
@ -183,21 +199,60 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
|
|
||||||
Score each ASR using risk matrix (probability × impact).
|
Score each ASR using risk matrix (probability × impact).
|
||||||
|
|
||||||
|
**In Architecture Doc, categorize ASRs:**
|
||||||
|
- **ACTIONABLE ASRs** (require architecture changes): Include in "Quick Guide" 🚨 or ⚠️ sections
|
||||||
|
- **FYI ASRs** (already satisfied by architecture): Include in "Quick Guide" 📋 section OR omit if obvious
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
- ASR-001 (Score 9): "Multi-region deployment requires region-specific test infrastructure" → **ACTIONABLE** (goes in 🚨 BLOCKERS)
|
||||||
|
- ASR-002 (Score 4): "OAuth 2.1 authentication already implemented in ADR-5" → **FYI** (goes in 📋 INFO ONLY or omit)
|
||||||
|
|
||||||
|
**Structure Principle:** Actionable ASRs at TOP, FYI ASRs at BOTTOM (or omit)
|
||||||
|
|
||||||
3. **Define Test Levels Strategy**
|
3. **Define Test Levels Strategy**
|
||||||
|
|
||||||
|
**IMPORTANT: This section goes in QA doc ONLY, NOT in Architecture doc**
|
||||||
|
|
||||||
Based on architecture (mobile, web, API, microservices, monolith):
|
Based on architecture (mobile, web, API, microservices, monolith):
|
||||||
- Recommend unit/integration/E2E split (e.g., 70/20/10 for API-heavy, 40/30/30 for UI-heavy)
|
- Recommend unit/integration/E2E split (e.g., 70/20/10 for API-heavy, 40/30/30 for UI-heavy)
|
||||||
- Identify test environment needs (local, staging, ephemeral, production-like)
|
- Identify test environment needs (local, staging, ephemeral, production-like)
|
||||||
- Define testing approach per technology (Playwright for web, Maestro for mobile, k6 for performance)
|
- Define testing approach per technology (Playwright for web, Maestro for mobile, k6 for performance)
|
||||||
|
|
||||||
4. **Assess NFR Testing Approach**
|
**In Architecture doc:** Only mention test level split if it's an ACTIONABLE concern
|
||||||
|
- Example: "API response time <100ms requires load testing infrastructure" (concern)
|
||||||
|
- DO NOT include full test level strategy table in Architecture doc
|
||||||
|
|
||||||
For each NFR category:
|
4. **Assess NFR Requirements (MINIMAL in Architecture Doc)**
|
||||||
- **Security**: Auth/authz tests, OWASP validation, secret handling (Playwright E2E + security tools)
|
|
||||||
- **Performance**: Load/stress/spike testing with k6, SLO/SLA thresholds
|
**CRITICAL: NFR testing approach is a RECIPE - belongs in QA doc ONLY**
|
||||||
- **Reliability**: Error handling, retries, circuit breakers, health checks (Playwright + API tests)
|
|
||||||
|
**In Architecture Doc:**
|
||||||
|
- Only mention NFRs if they create testability CONCERNS
|
||||||
|
- Focus on WHAT architecture must provide, not HOW to test
|
||||||
|
- Keep it brief - 1-2 sentences per NFR category at most
|
||||||
|
|
||||||
|
**Example - Security NFR in Architecture doc (if there's a concern):**
|
||||||
|
✅ CORRECT (concern-focused, brief, WHAT/WHY only):
|
||||||
|
- "System must prevent cross-customer data access (GDPR requirement). Requires test infrastructure for multi-tenant isolation in Sprint 0."
|
||||||
|
- "OAuth tokens must expire after 1 hour (ADR-5). Requires test harness for token expiration validation."
|
||||||
|
|
||||||
|
❌ INCORRECT (too detailed, belongs in QA doc):
|
||||||
|
- Full table of security test scenarios
|
||||||
|
- Test scripts with code examples
|
||||||
|
- Detailed test procedures
|
||||||
|
- Tool selection (e.g., "use Playwright E2E + OWASP ZAP")
|
||||||
|
- Specific test approaches (e.g., "Test approach: Playwright E2E for auth/authz")
|
||||||
|
|
||||||
|
**In QA Doc (full NFR testing approach):**
|
||||||
|
- **Security**: Full test scenarios, tooling (Playwright + OWASP ZAP), test procedures
|
||||||
|
- **Performance**: Load/stress/spike test scenarios, k6 scripts, SLO thresholds
|
||||||
|
- **Reliability**: Error handling tests, retry logic validation, circuit breaker tests
|
||||||
- **Maintainability**: Coverage targets, code quality gates, observability validation
|
- **Maintainability**: Coverage targets, code quality gates, observability validation
|
||||||
|
|
||||||
|
**Rule of Thumb:**
|
||||||
|
- Architecture doc: "What NFRs exist and what concerns they create" (1-2 sentences)
|
||||||
|
- QA doc: "How to test those NFRs" (full sections with tables, code, procedures)
|
||||||
|
|
||||||
5. **Flag Testability Concerns**
|
5. **Flag Testability Concerns**
|
||||||
|
|
||||||
Identify architecture decisions that harm testability:
|
Identify architecture decisions that harm testability:
|
||||||
|
|
@ -228,22 +283,54 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
**Standard Structures (REQUIRED):**
|
**Standard Structures (REQUIRED):**
|
||||||
|
|
||||||
**test-design-architecture.md sections (in this order):**
|
**test-design-architecture.md sections (in this order):**
|
||||||
|
|
||||||
|
**STRUCTURE PRINCIPLE: Actionable items FIRST, FYI items LAST**
|
||||||
|
|
||||||
1. Executive Summary (scope, business context, architecture, risk summary)
|
1. Executive Summary (scope, business context, architecture, risk summary)
|
||||||
2. Quick Guide (🚨 BLOCKERS / ⚠️ HIGH PRIORITY / 📋 INFO ONLY)
|
2. Quick Guide (🚨 BLOCKERS / ⚠️ HIGH PRIORITY / 📋 INFO ONLY)
|
||||||
3. Risk Assessment (high/medium/low-priority risks with scoring)
|
3. Risk Assessment (high/medium/low-priority risks with scoring) - **ACTIONABLE**
|
||||||
4. Testability Concerns and Architectural Gaps (if system has constraints)
|
4. Testability Concerns and Architectural Gaps - **ACTIONABLE** (what arch team must do)
|
||||||
5. Risk Mitigation Plans (detailed for high-priority risks ≥6)
|
- Sub-section: Blockers to Fast Feedback (ACTIONABLE - concerns FIRST)
|
||||||
6. Assumptions and Dependencies
|
- Sub-section: Architectural Improvements Needed (ACTIONABLE)
|
||||||
|
- Sub-section: Testability Assessment Summary (FYI - passing items LAST, only if worth mentioning)
|
||||||
|
5. Risk Mitigation Plans (detailed for high-priority risks ≥6) - **ACTIONABLE**
|
||||||
|
6. Assumptions and Dependencies - **FYI**
|
||||||
|
|
||||||
|
**SECTIONS THAT DO NOT BELONG IN ARCHITECTURE DOC:**
|
||||||
|
- ❌ Test Levels Strategy (unit/integration/E2E split) - This is a RECIPE, belongs in QA doc ONLY
|
||||||
|
- ❌ NFR Testing Approach with test examples - This is a RECIPE, belongs in QA doc ONLY
|
||||||
|
- ❌ Test Environment Requirements - This is a RECIPE, belongs in QA doc ONLY
|
||||||
|
- ❌ Recommendations for Sprint 0 (test framework setup, factories) - This is a RECIPE, belongs in QA doc ONLY
|
||||||
|
- ❌ Quality Gate Criteria (pass rates, coverage targets) - This is a RECIPE, belongs in QA doc ONLY
|
||||||
|
- ❌ Tool Selection (Playwright, k6, etc.) - This is a RECIPE, belongs in QA doc ONLY
|
||||||
|
|
||||||
|
**WHAT BELONGS IN ARCHITECTURE DOC:**
|
||||||
|
- ✅ Testability CONCERNS (what makes it hard to test)
|
||||||
|
- ✅ Architecture GAPS (what's missing for testability)
|
||||||
|
- ✅ What architecture team must DO (blockers, improvements)
|
||||||
|
- ✅ Risks and mitigation plans
|
||||||
|
- ✅ ASRs (Architecturally Significant Requirements) - but clarify if FYI or actionable
|
||||||
|
|
||||||
**test-design-qa.md sections (in this order):**
|
**test-design-qa.md sections (in this order):**
|
||||||
1. Quick Reference for QA (Before You Start, Execution Order, Need Help)
|
1. Executive Summary (risk summary, coverage summary)
|
||||||
2. System Architecture Summary (brief overview)
|
2. **Dependencies & Test Blockers** (CRITICAL: RIGHT AFTER SUMMARY - what QA needs from other teams)
|
||||||
3. Test Environment Requirements (MOVE UP - section 3, NOT buried at end)
|
3. Risk Assessment (scored risks with categories - reference Arch doc, don't duplicate)
|
||||||
4. Testability Assessment (lightweight prerequisites checklist)
|
4. Test Coverage Plan (P0/P1/P2/P3 with detailed scenarios + checkboxes)
|
||||||
5. Test Levels Strategy (unit/integration/E2E split with rationale)
|
5. **Execution Strategy** (SIMPLE: Organized by TOOL TYPE: PR (Playwright) / Nightly (k6) / Weekly (chaos/manual))
|
||||||
6. Test Coverage Plan (P0/P1/P2/P3 with detailed scenarios + checkboxes)
|
6. QA Effort Estimate (QA effort ONLY - no DevOps, Data Eng, Finance, Backend)
|
||||||
7. Sprint 0 Setup Requirements (blockers, infrastructure, environments)
|
7. Appendices (code examples with playwright-utils, tagging strategy, knowledge base refs)
|
||||||
8. NFR Readiness Summary (reference to Architecture doc)
|
|
||||||
|
**SECTIONS TO EXCLUDE FROM QA DOC:**
|
||||||
|
- ❌ Quality Gate Criteria (pass/fail thresholds - teams decide for themselves)
|
||||||
|
- ❌ Follow-on Workflows (bloat - BMAD commands are self-explanatory)
|
||||||
|
- ❌ Approval section (unnecessary formality)
|
||||||
|
- ❌ Test Environment Requirements (remove as separate section - integrate into Dependencies if needed)
|
||||||
|
- ❌ NFR Readiness Summary (bloat - covered in Risk Assessment)
|
||||||
|
- ❌ Testability Assessment (bloat - covered in Dependencies)
|
||||||
|
- ❌ Test Levels Strategy (bloat - obvious from test scenarios)
|
||||||
|
- ❌ Sprint breakdowns (too prescriptive)
|
||||||
|
- ❌ Infrastructure/DevOps/Data Eng effort tables (out of scope)
|
||||||
|
- ❌ Mitigation plans for non-QA work (belongs in Arch doc)
|
||||||
|
|
||||||
**Content Guidelines:**
|
**Content Guidelines:**
|
||||||
|
|
||||||
|
|
@ -252,26 +339,46 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
- ✅ Clear ownership (each blocker/ASR has owner + timeline)
|
- ✅ Clear ownership (each blocker/ASR has owner + timeline)
|
||||||
- ✅ Testability requirements (what architecture must support)
|
- ✅ Testability requirements (what architecture must support)
|
||||||
- ✅ Mitigation plans (for each high-risk item ≥6)
|
- ✅ Mitigation plans (for each high-risk item ≥6)
|
||||||
- ✅ Short code examples (5-10 lines max showing what to support)
|
- ✅ Brief conceptual examples ONLY if needed to clarify architecture concerns (5-10 lines max)
|
||||||
|
- ✅ **Target length**: ~150-200 lines max (focus on actionable concerns only)
|
||||||
|
- ✅ **Professional tone**: Avoid AI slop (excessive ✅/❌ emojis, "absolutely", "excellent", overly enthusiastic language)
|
||||||
|
|
||||||
**Architecture doc (DON'T):**
|
**Architecture doc (DON'T) - CRITICAL:**
|
||||||
- ❌ NO long test code examples (belongs in QA doc)
|
- ❌ NO test scripts or test implementation code AT ALL - This is a communication doc for architects, not a testing guide
|
||||||
- ❌ NO test scenario checklists (belongs in QA doc)
|
- ❌ NO Playwright test examples (e.g., test('...', async ({ request }) => ...))
|
||||||
- ❌ NO implementation details (how QA will test)
|
- ❌ NO assertion logic (e.g., expect(...).toBe(...))
|
||||||
|
- ❌ NO test scenario checklists with checkboxes (belongs in QA doc)
|
||||||
|
- ❌ NO implementation details about HOW QA will test
|
||||||
|
- ❌ Focus on CONCERNS, not IMPLEMENTATION
|
||||||
|
|
||||||
**QA doc (DO):**
|
**QA doc (DO):**
|
||||||
- ✅ Test scenario recipes (clear P0/P1/P2/P3 with checkboxes)
|
- ✅ Test scenario recipes (clear P0/P1/P2/P3 with checkboxes)
|
||||||
- ✅ Environment setup (Sprint 0 checklist with blockers)
|
- ✅ Full test implementation code samples when helpful
|
||||||
- ✅ Tool setup (factories, fixtures, frameworks)
|
- ✅ **IMPORTANT: If config.tea_use_playwright_utils is true, ALL code samples MUST use @seontechnologies/playwright-utils fixtures and utilities**
|
||||||
|
- ✅ Import test fixtures from '@seontechnologies/playwright-utils/api-request/fixtures'
|
||||||
|
- ✅ Import expect from '@playwright/test' (playwright-utils does not re-export expect)
|
||||||
|
- ✅ Use apiRequest fixture with schema validation, retry logic, and structured responses
|
||||||
|
- ✅ Dependencies & Test Blockers section RIGHT AFTER Executive Summary (what QA needs from other teams)
|
||||||
|
- ✅ **QA effort estimates ONLY** (no DevOps, Data Eng, Finance, Backend effort - out of scope)
|
||||||
- ✅ Cross-references to Architecture doc (not duplication)
|
- ✅ Cross-references to Architecture doc (not duplication)
|
||||||
|
- ✅ **Professional tone**: Avoid AI slop (excessive ✅/❌ emojis, "absolutely", "excellent", overly enthusiastic language)
|
||||||
|
|
||||||
**QA doc (DON'T):**
|
**QA doc (DON'T):**
|
||||||
- ❌ NO architectural theory (just reference Architecture doc)
|
- ❌ NO architectural theory (just reference Architecture doc)
|
||||||
- ❌ NO ASR explanations (link to Architecture doc instead)
|
- ❌ NO ASR explanations (link to Architecture doc instead)
|
||||||
- ❌ NO duplicate risk assessments (reference Architecture doc)
|
- ❌ NO duplicate risk assessments (reference Architecture doc)
|
||||||
|
- ❌ NO Quality Gate Criteria section (teams decide pass/fail thresholds for themselves)
|
||||||
|
- ❌ NO Follow-on Workflows section (bloat - BMAD commands are self-explanatory)
|
||||||
|
- ❌ NO Approval section (unnecessary formality)
|
||||||
|
- ❌ NO effort estimates for other teams (DevOps, Backend, Data Eng, Finance - out of scope, QA effort only)
|
||||||
|
- ❌ NO Sprint breakdowns (too prescriptive - e.g., "Sprint 0: 40 hours, Sprint 1: 48 hours")
|
||||||
|
- ❌ NO mitigation plans for Backend/Arch/DevOps work (those belong in Architecture doc)
|
||||||
|
- ❌ NO architectural assumptions or debates (those belong in Architecture doc)
|
||||||
|
|
||||||
**Anti-Patterns to Avoid (Cross-Document Redundancy):**
|
**Anti-Patterns to Avoid (Cross-Document Redundancy):**
|
||||||
|
|
||||||
|
**CRITICAL: NO BLOAT, NO REPETITION, NO OVERINFO**
|
||||||
|
|
||||||
❌ **DON'T duplicate OAuth requirements:**
|
❌ **DON'T duplicate OAuth requirements:**
|
||||||
- Architecture doc: Explain OAuth 2.1 flow in detail
|
- Architecture doc: Explain OAuth 2.1 flow in detail
|
||||||
- QA doc: Re-explain why OAuth 2.1 is required
|
- QA doc: Re-explain why OAuth 2.1 is required
|
||||||
|
|
@ -280,6 +387,24 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
- Architecture doc: "ASR-1: OAuth 2.1 required (see QA doc for 12 test scenarios)"
|
- Architecture doc: "ASR-1: OAuth 2.1 required (see QA doc for 12 test scenarios)"
|
||||||
- QA doc: "OAuth tests: 12 P0 scenarios (see Architecture doc R-001 for risk details)"
|
- QA doc: "OAuth tests: 12 P0 scenarios (see Architecture doc R-001 for risk details)"
|
||||||
|
|
||||||
|
❌ **DON'T repeat the same note 10+ times:**
|
||||||
|
- Example: "Timing is pessimistic until R-005 is fixed" repeated on every P0, P1, P2 section
|
||||||
|
- This creates bloat and makes docs hard to read
|
||||||
|
|
||||||
|
✅ **DO consolidate repeated information:**
|
||||||
|
- Write once at the top: "**Note**: All timing estimates are pessimistic pending R-005 resolution"
|
||||||
|
- Reference briefly if needed: "(pessimistic timing)"
|
||||||
|
|
||||||
|
❌ **DON'T include excessive detail that doesn't add value:**
|
||||||
|
- Long explanations of obvious concepts
|
||||||
|
- Redundant examples showing the same pattern
|
||||||
|
- Over-documentation of standard practices
|
||||||
|
|
||||||
|
✅ **DO focus on what's unique or critical:**
|
||||||
|
- Document only what's different from standard practice
|
||||||
|
- Highlight critical decisions and risks
|
||||||
|
- Keep explanations concise and actionable
|
||||||
|
|
||||||
**Markdown Cross-Reference Syntax Examples:**
|
**Markdown Cross-Reference Syntax Examples:**
|
||||||
|
|
||||||
```markdown
|
```markdown
|
||||||
|
|
@ -330,6 +455,24 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
- Cross-reference between docs (no duplication)
|
- Cross-reference between docs (no duplication)
|
||||||
- Validate against checklist.md (System-Level Mode section)
|
- Validate against checklist.md (System-Level Mode section)
|
||||||
|
|
||||||
|
**Common Over-Engineering to Avoid:**
|
||||||
|
|
||||||
|
**In QA Doc:**
|
||||||
|
1. ❌ Quality gate thresholds ("P0 must be 100%, P1 ≥95%") - Let teams decide for themselves
|
||||||
|
2. ❌ Effort estimates for other teams - QA doc should only estimate QA effort
|
||||||
|
3. ❌ Sprint breakdowns ("Sprint 0: 40 hours, Sprint 1: 48 hours") - Too prescriptive
|
||||||
|
4. ❌ Approval sections - Unnecessary formality
|
||||||
|
5. ❌ Assumptions about architecture (SLO targets, replication lag) - These are architectural concerns, belong in Arch doc
|
||||||
|
6. ❌ Mitigation plans for Backend/Arch/DevOps - Those belong in Arch doc
|
||||||
|
7. ❌ Follow-on workflows section - Bloat, BMAD commands are self-explanatory
|
||||||
|
8. ❌ NFR Readiness Summary - Bloat, covered in Risk Assessment
|
||||||
|
|
||||||
|
**Test Coverage Numbers Reality Check:**
|
||||||
|
- With Playwright parallelization, running ALL Playwright tests is as fast as running just P0
|
||||||
|
- Don't split Playwright tests by priority into different CI gates - it adds no value
|
||||||
|
- Tool type matters, not priority labels
|
||||||
|
- Defer based on infrastructure cost, not importance
|
||||||
|
|
||||||
**After System-Level Mode:** Workflow COMPLETE. System-level outputs (test-design-architecture.md + test-design-qa.md) are written in this step. Steps 2-4 are epic-level only - do NOT execute them in system-level mode.
|
**After System-Level Mode:** Workflow COMPLETE. System-level outputs (test-design-architecture.md + test-design-qa.md) are written in this step. Steps 2-4 are epic-level only - do NOT execute them in system-level mode.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
@ -540,12 +683,51 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
|
|
||||||
8. **Plan Mitigations**
|
8. **Plan Mitigations**
|
||||||
|
|
||||||
|
**CRITICAL: Mitigation placement depends on WHO does the work**
|
||||||
|
|
||||||
For each high-priority risk:
|
For each high-priority risk:
|
||||||
- Define mitigation strategy
|
- Define mitigation strategy
|
||||||
- Assign owner (dev, QA, ops)
|
- Assign owner (dev, QA, ops)
|
||||||
- Set timeline
|
- Set timeline
|
||||||
- Update residual risk expectation
|
- Update residual risk expectation
|
||||||
|
|
||||||
|
**Mitigation Plan Placement:**
|
||||||
|
|
||||||
|
**Architecture Doc:**
|
||||||
|
- Mitigations owned by Backend, DevOps, Architecture, Security, Data Eng
|
||||||
|
- Example: "Add authorization layer for customer-scoped access" (Backend work)
|
||||||
|
- Example: "Configure AWS Fault Injection Simulator" (DevOps work)
|
||||||
|
- Example: "Define CloudWatch log schema for backfill events" (Architecture work)
|
||||||
|
|
||||||
|
**QA Doc:**
|
||||||
|
- Mitigations owned by QA (test development work)
|
||||||
|
- Example: "Create factories for test data with randomization" (QA work)
|
||||||
|
- Example: "Implement polling with retry for async validation" (QA test code)
|
||||||
|
- Brief reference to Architecture doc mitigations (don't duplicate)
|
||||||
|
|
||||||
|
**Rule of Thumb:**
|
||||||
|
- If mitigation requires production code changes → Architecture doc
|
||||||
|
- If mitigation is test infrastructure/code → QA doc
|
||||||
|
- If mitigation involves multiple teams → Architecture doc with QA validation approach
|
||||||
|
|
||||||
|
**Assumptions Placement:**
|
||||||
|
|
||||||
|
**Architecture Doc:**
|
||||||
|
- Architectural assumptions (SLO targets, replication lag, system design assumptions)
|
||||||
|
- Example: "P95 <500ms inferred from <2s timeout (requires Product approval)"
|
||||||
|
- Example: "Multi-region replication lag <1s assumed (ADR doesn't specify SLA)"
|
||||||
|
- Example: "Recent Cache hit ratio >80% assumed (not in PRD/ADR)"
|
||||||
|
|
||||||
|
**QA Doc:**
|
||||||
|
- Test execution assumptions (test infrastructure readiness, test data availability)
|
||||||
|
- Example: "Assumes test factories already created"
|
||||||
|
- Example: "Assumes CI/CD pipeline configured"
|
||||||
|
- Brief reference to Architecture doc for architectural assumptions
|
||||||
|
|
||||||
|
**Rule of Thumb:**
|
||||||
|
- If assumption is about system architecture/design → Architecture doc
|
||||||
|
- If assumption is about test infrastructure/execution → QA doc
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Step 3: Design Test Coverage
|
## Step 3: Design Test Coverage
|
||||||
|
|
@ -594,6 +776,8 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
|
|
||||||
3. **Assign Priority Levels**
|
3. **Assign Priority Levels**
|
||||||
|
|
||||||
|
**CRITICAL: P0/P1/P2/P3 indicates priority and risk level, NOT execution timing**
|
||||||
|
|
||||||
**Knowledge Base Reference**: `test-priorities-matrix.md`
|
**Knowledge Base Reference**: `test-priorities-matrix.md`
|
||||||
|
|
||||||
**P0 (Critical)**:
|
**P0 (Critical)**:
|
||||||
|
|
@ -601,25 +785,28 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
- High-risk areas (score ≥6)
|
- High-risk areas (score ≥6)
|
||||||
- Revenue-impacting
|
- Revenue-impacting
|
||||||
- Security-critical
|
- Security-critical
|
||||||
- **Run on every commit**
|
- No workaround exists
|
||||||
|
- Affects majority of users
|
||||||
|
|
||||||
**P1 (High)**:
|
**P1 (High)**:
|
||||||
- Important user features
|
- Important user features
|
||||||
- Medium-risk areas (score 3-4)
|
- Medium-risk areas (score 3-4)
|
||||||
- Common workflows
|
- Common workflows
|
||||||
- **Run on PR to main**
|
- Workaround exists but difficult
|
||||||
|
|
||||||
**P2 (Medium)**:
|
**P2 (Medium)**:
|
||||||
- Secondary features
|
- Secondary features
|
||||||
- Low-risk areas (score 1-2)
|
- Low-risk areas (score 1-2)
|
||||||
- Edge cases
|
- Edge cases
|
||||||
- **Run nightly or weekly**
|
- Regression prevention
|
||||||
|
|
||||||
**P3 (Low)**:
|
**P3 (Low)**:
|
||||||
- Nice-to-have
|
- Nice-to-have
|
||||||
- Exploratory
|
- Exploratory
|
||||||
- Performance benchmarks
|
- Performance benchmarks
|
||||||
- **Run on-demand**
|
- Documentation validation
|
||||||
|
|
||||||
|
**NOTE:** Priority classification is separate from execution timing. A P1 test might run in PRs if it's fast, or nightly if it requires expensive infrastructure (e.g., k6 performance test). See "Execution Strategy" section for timing guidance.
|
||||||
|
|
||||||
4. **Outline Data and Tooling Prerequisites**
|
4. **Outline Data and Tooling Prerequisites**
|
||||||
|
|
||||||
|
|
@ -629,13 +816,55 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
- Environment setup
|
- Environment setup
|
||||||
- Tools and dependencies
|
- Tools and dependencies
|
||||||
|
|
||||||
5. **Define Execution Order**
|
5. **Define Execution Strategy** (Keep It Simple)
|
||||||
|
|
||||||
Recommend test execution sequence:
|
**IMPORTANT: Avoid over-engineering execution order**
|
||||||
1. **Smoke tests** (P0 subset, <5 min)
|
|
||||||
2. **P0 tests** (critical paths, <10 min)
|
**Default Philosophy:**
|
||||||
3. **P1 tests** (important features, <30 min)
|
- Run **everything** in PRs if total duration <15 minutes
|
||||||
4. **P2/P3 tests** (full regression, <60 min)
|
- Playwright is fast with parallelization (100s of tests in ~10-15 min)
|
||||||
|
- Only defer to nightly/weekly if there's significant overhead:
|
||||||
|
- Performance tests (k6, load testing) - expensive infrastructure
|
||||||
|
- Chaos engineering - requires special setup (AWS FIS)
|
||||||
|
- Long-running tests - endurance (4+ hours), disaster recovery
|
||||||
|
- Manual tests - require human intervention
|
||||||
|
|
||||||
|
**Simple Execution Strategy (Organized by TOOL TYPE):**
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
## Execution Strategy
|
||||||
|
|
||||||
|
**Philosophy**: Run everything in PRs unless significant infrastructure overhead.
|
||||||
|
Playwright with parallelization is extremely fast (100s of tests in ~10-15 min).
|
||||||
|
|
||||||
|
**Organized by TOOL TYPE:**
|
||||||
|
|
||||||
|
### Every PR: Playwright Tests (~10-15 min)
|
||||||
|
All functional tests (from any priority level):
|
||||||
|
- All E2E, API, integration, unit tests using Playwright
|
||||||
|
- Parallelized across {N} shards
|
||||||
|
- Total: ~{N} tests (includes P0, P1, P2, P3)
|
||||||
|
|
||||||
|
### Nightly: k6 Performance Tests (~30-60 min)
|
||||||
|
All performance tests (from any priority level):
|
||||||
|
- Load, stress, spike, endurance
|
||||||
|
- Reason: Expensive infrastructure, long-running (10-40 min per test)
|
||||||
|
|
||||||
|
### Weekly: Chaos & Long-Running (~hours)
|
||||||
|
Special infrastructure tests (from any priority level):
|
||||||
|
- Multi-region failover, disaster recovery, endurance
|
||||||
|
- Reason: Very expensive, very long (4+ hours)
|
||||||
|
```
|
||||||
|
|
||||||
|
**KEY INSIGHT: Organize by TOOL TYPE, not priority**
|
||||||
|
- Playwright (fast, cheap) → PR
|
||||||
|
- k6 (expensive, long) → Nightly
|
||||||
|
- Chaos/Manual (very expensive, very long) → Weekly
|
||||||
|
|
||||||
|
**Avoid:**
|
||||||
|
- ❌ Don't organize by priority (smoke → P0 → P1 → P2 → P3)
|
||||||
|
- ❌ Don't say "P1 runs on PR to main" (some P1 are Playwright/PR, some are k6/Nightly)
|
||||||
|
- ❌ Don't create artificial tiers - organize by tool type and infrastructure overhead
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
@ -661,34 +890,66 @@ TEA test-design workflow supports TWO modes, detected automatically:
|
||||||
| Login flow | E2E | P0 | R-001 | 3 | QA |
|
| Login flow | E2E | P0 | R-001 | 3 | QA |
|
||||||
```
|
```
|
||||||
|
|
||||||
3. **Document Execution Order**
|
3. **Document Execution Strategy** (Simple, Not Redundant)
|
||||||
|
|
||||||
|
**IMPORTANT: Keep execution strategy simple and avoid redundancy**
|
||||||
|
|
||||||
```markdown
|
```markdown
|
||||||
### Smoke Tests (<5 min)
|
## Execution Strategy
|
||||||
|
|
||||||
- Login successful
|
**Default: Run all functional tests in PRs (~10-15 min)**
|
||||||
- Dashboard loads
|
- All Playwright tests (parallelized across 4 shards)
|
||||||
|
- Includes E2E, API, integration, unit tests
|
||||||
|
- Total: ~{N} tests
|
||||||
|
|
||||||
### P0 Tests (<10 min)
|
**Nightly: Performance & Infrastructure tests**
|
||||||
|
- k6 load/stress/spike tests (~30-60 min)
|
||||||
|
- Reason: Expensive infrastructure, long-running
|
||||||
|
|
||||||
- [Full P0 list]
|
**Weekly: Chaos & Disaster Recovery**
|
||||||
|
- Endurance tests (4+ hours)
|
||||||
### P1 Tests (<30 min)
|
- Multi-region failover (requires AWS FIS)
|
||||||
|
- Backup restore validation
|
||||||
- [Full P1 list]
|
- Reason: Special infrastructure, very long-running
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**DO NOT:**
|
||||||
|
- ❌ Create redundant smoke/P0/P1/P2/P3 tier structure
|
||||||
|
- ❌ List all tests again in execution order (already in coverage plan)
|
||||||
|
- ❌ Split tests by priority unless there's infrastructure overhead
|
||||||
|
|
||||||
4. **Include Resource Estimates**
|
4. **Include Resource Estimates**
|
||||||
|
|
||||||
|
**IMPORTANT: Use intervals/ranges, not exact numbers**
|
||||||
|
|
||||||
|
Provide rough estimates with intervals to avoid false precision:
|
||||||
|
|
||||||
```markdown
|
```markdown
|
||||||
### Test Effort Estimates
|
### Test Effort Estimates
|
||||||
|
|
||||||
- P0 scenarios: 15 tests × 2 hours = 30 hours
|
- P0 scenarios: 15 tests (~1.5-2.5 hours each) = **~25-40 hours**
|
||||||
- P1 scenarios: 25 tests × 1 hour = 25 hours
|
- P1 scenarios: 25 tests (~0.75-1.5 hours each) = **~20-35 hours**
|
||||||
- P2 scenarios: 40 tests × 0.5 hour = 20 hours
|
- P2 scenarios: 40 tests (~0.25-0.75 hours each) = **~10-30 hours**
|
||||||
- **Total:** 75 hours (~10 days)
|
- **Total:** **~55-105 hours** (~1.5-3 weeks with 1 QA engineer)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Why intervals:**
|
||||||
|
- Avoids false precision (estimates are never exact)
|
||||||
|
- Provides flexibility for complexity variations
|
||||||
|
- Accounts for unknowns and dependencies
|
||||||
|
- More realistic and less prescriptive
|
||||||
|
|
||||||
|
**Guidelines:**
|
||||||
|
- P0 tests: 1.5-2.5 hours each (complex setup, security, performance)
|
||||||
|
- P1 tests: 0.75-1.5 hours each (standard integration, API tests)
|
||||||
|
- P2 tests: 0.25-0.75 hours each (edge cases, simple validation)
|
||||||
|
- P3 tests: 0.1-0.5 hours each (exploratory, documentation)
|
||||||
|
|
||||||
|
**Express totals as:**
|
||||||
|
- Hour ranges: "~55-105 hours"
|
||||||
|
- Week ranges: "~1.5-3 weeks"
|
||||||
|
- Avoid: Exact numbers like "75 hours" or "11 days"
|
||||||
|
|
||||||
5. **Add Gate Criteria**
|
5. **Add Gate Criteria**
|
||||||
|
|
||||||
```markdown
|
```markdown
|
||||||
|
|
|
||||||
|
|
@ -108,54 +108,51 @@
|
||||||
|
|
||||||
### Testability Concerns and Architectural Gaps
|
### Testability Concerns and Architectural Gaps
|
||||||
|
|
||||||
**IMPORTANT**: {If system has constraints, explain them. If standard CI/CD achievable, state that.}
|
**🚨 ACTIONABLE CONCERNS - Architecture Team Must Address**
|
||||||
|
|
||||||
#### Blockers to Fast Feedback
|
{If system has critical testability concerns, list them here. If architecture supports testing well, state "No critical testability concerns identified" and skip to Testability Assessment Summary}
|
||||||
|
|
||||||
| Blocker | Impact | Current Mitigation | Ideal Solution |
|
#### 1. Blockers to Fast Feedback (WHAT WE NEED FROM ARCHITECTURE)
|
||||||
|---------|--------|-------------------|----------------|
|
|
||||||
| **{Blocker name}** | {Impact description} | {How we're working around it} | {What architecture should provide} |
|
|
||||||
|
|
||||||
#### Why This Matters
|
| Concern | Impact | What Architecture Must Provide | Owner | Timeline |
|
||||||
|
|---------|--------|--------------------------------|-------|----------|
|
||||||
|
| **{Concern name}** | {Impact on testing} | {Specific architectural change needed} | {Team} | {Sprint} |
|
||||||
|
|
||||||
**Standard CI/CD expectations:**
|
**Example:**
|
||||||
- Full test suite on every commit (~5-15 min feedback)
|
- **No API for test data seeding** → Cannot parallelize tests → Provide POST /test/seed endpoint (Backend, Sprint 0)
|
||||||
- Parallel test execution (isolated test data per worker)
|
|
||||||
- Ephemeral test environments (spin up → test → tear down)
|
|
||||||
- Fast feedback loop (devs stay in flow state)
|
|
||||||
|
|
||||||
**Current reality for {Feature}:**
|
#### 2. Architectural Improvements Needed (WHAT SHOULD BE CHANGED)
|
||||||
- {Actual situation - what's different from standard}
|
|
||||||
|
|
||||||
#### Tiered Testing Strategy
|
{List specific improvements that would make the system more testable}
|
||||||
|
|
||||||
{If forced by architecture, explain. If standard approach works, state that.}
|
|
||||||
|
|
||||||
| Tier | When | Duration | Coverage | Why Not Full Suite? |
|
|
||||||
|------|------|----------|----------|---------------------|
|
|
||||||
| **Smoke** | Every commit | <5 min | {N} tests | Fast feedback, catch build-breaking changes |
|
|
||||||
| **P0** | Every commit | ~{X} min | ~{N} tests | Critical paths, security-critical flows |
|
|
||||||
| **P1** | PR to main | ~{X} min | ~{N} tests | Important features, algorithm accuracy |
|
|
||||||
| **P2/P3** | Nightly | ~{X} min | ~{N} tests | Edge cases, performance, NFR |
|
|
||||||
|
|
||||||
**Note**: {Any timing assumptions or constraints}
|
|
||||||
|
|
||||||
#### Architectural Improvements Needed
|
|
||||||
|
|
||||||
{If system has technical debt affecting testing, list improvements. If architecture supports testing well, acknowledge that.}
|
|
||||||
|
|
||||||
1. **{Improvement name}**
|
1. **{Improvement name}**
|
||||||
- {What to change}
|
- **Current problem**: {What's wrong}
|
||||||
- **Impact**: {How it improves testing}
|
- **Required change**: {What architecture must do}
|
||||||
|
- **Impact if not fixed**: {Consequences}
|
||||||
|
- **Owner**: {Team}
|
||||||
|
- **Timeline**: {Sprint}
|
||||||
|
|
||||||
#### Acceptance of Trade-offs
|
---
|
||||||
|
|
||||||
For {Feature} Phase 1, the team accepts:
|
### Testability Assessment Summary
|
||||||
- **{Trade-off 1}** ({Reasoning})
|
|
||||||
- **{Trade-off 2}** ({Reasoning})
|
|
||||||
- ⚠️ **{Known limitation}** ({Why acceptable for now})
|
|
||||||
|
|
||||||
This is {**technical debt** OR **acceptable for Phase 1**} that should be {revisited post-GA OR maintained as-is}.
|
**📊 CURRENT STATE - FYI**
|
||||||
|
|
||||||
|
{Only include this section if there are passing items worth mentioning. Otherwise omit.}
|
||||||
|
|
||||||
|
#### What Works Well
|
||||||
|
|
||||||
|
- ✅ {Passing item 1} (e.g., "API-first design supports parallel test execution")
|
||||||
|
- ✅ {Passing item 2} (e.g., "Feature flags enable test isolation")
|
||||||
|
- ✅ {Passing item 3}
|
||||||
|
|
||||||
|
#### Accepted Trade-offs (No Action Required)
|
||||||
|
|
||||||
|
For {Feature} Phase 1, the following trade-offs are acceptable:
|
||||||
|
- **{Trade-off 1}** - {Why acceptable for now}
|
||||||
|
- **{Trade-off 2}** - {Why acceptable for now}
|
||||||
|
|
||||||
|
{This is technical debt OR acceptable for Phase 1} that {should be revisited post-GA OR maintained as-is}
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,314 +1,286 @@
|
||||||
# Test Design for QA: {Feature Name}
|
# Test Design for QA: {Feature Name}
|
||||||
|
|
||||||
**Purpose:** Test execution recipe for QA team. Defines test scenarios, coverage plan, tooling, and Sprint 0 setup requirements. Use this as your implementation guide after architectural blockers are resolved.
|
**Purpose:** Test execution recipe for QA team. Defines what to test, how to test it, and what QA needs from other teams.
|
||||||
|
|
||||||
**Date:** {date}
|
**Date:** {date}
|
||||||
**Author:** {author}
|
**Author:** {author}
|
||||||
**Status:** Draft / Ready for Implementation
|
**Status:** Draft
|
||||||
**Project:** {project_name}
|
**Project:** {project_name}
|
||||||
**PRD Reference:** {prd_link}
|
|
||||||
**ADR Reference:** {adr_link}
|
**Related:** See Architecture doc (test-design-architecture.md) for testability concerns and architectural blockers.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Quick Reference for QA
|
## Executive Summary
|
||||||
|
|
||||||
**Before You Start:**
|
**Scope:** {Brief description of testing scope}
|
||||||
- [ ] Review Architecture doc (test-design-architecture.md) - understand blockers and risks
|
|
||||||
- [ ] Verify Sprint 0 blockers resolved (see Sprint 0 section below)
|
|
||||||
- [ ] Confirm test infrastructure ready (factories, fixtures, environments)
|
|
||||||
|
|
||||||
**Test Execution Order:**
|
**Risk Summary:**
|
||||||
1. **Smoke tests** (<5 min) - Fast feedback on critical paths
|
- Total Risks: {N} ({X} high-priority score ≥6, {Y} medium, {Z} low)
|
||||||
2. **P0 tests** (~{X} min) - Critical paths, security-critical flows
|
- Critical Categories: {Categories with most high-priority risks}
|
||||||
3. **P1 tests** (~{X} min) - Important features, algorithm accuracy
|
|
||||||
4. **P2/P3 tests** (~{X} min) - Edge cases, performance, NFR
|
|
||||||
|
|
||||||
**Need Help?**
|
**Coverage Summary:**
|
||||||
- Blockers: See Architecture doc "Quick Guide" for mitigation plans
|
- P0 tests: ~{N} (critical paths, security)
|
||||||
- Test scenarios: See "Test Coverage Plan" section below
|
- P1 tests: ~{N} (important features, integration)
|
||||||
- Sprint 0 setup: See "Sprint 0 Setup Requirements" section
|
- P2 tests: ~{N} (edge cases, regression)
|
||||||
|
- P3 tests: ~{N} (exploratory, benchmarks)
|
||||||
|
- **Total**: ~{N} tests (~{X}-{Y} weeks with 1 QA)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## System Architecture Summary
|
## Dependencies & Test Blockers
|
||||||
|
|
||||||
**Data Pipeline:**
|
**CRITICAL:** QA cannot proceed without these items from other teams.
|
||||||
{Brief description of system flow}
|
|
||||||
|
|
||||||
**Key Services:**
|
### Backend/Architecture Dependencies (Sprint 0)
|
||||||
- **{Service 1}**: {Purpose and key responsibilities}
|
|
||||||
- **{Service 2}**: {Purpose and key responsibilities}
|
|
||||||
- **{Service 3}**: {Purpose and key responsibilities}
|
|
||||||
|
|
||||||
**Data Stores:**
|
**Source:** See Architecture doc "Quick Guide" for detailed mitigation plans
|
||||||
- **{Database 1}**: {What it stores}
|
|
||||||
- **{Database 2}**: {What it stores}
|
|
||||||
|
|
||||||
**Expected Scale** (from ADR):
|
1. **{Dependency 1}** - {Team} - {Timeline}
|
||||||
- {Key metrics: RPS, volume, users, etc.}
|
- {What QA needs}
|
||||||
|
- {Why it blocks testing}
|
||||||
|
|
||||||
---
|
2. **{Dependency 2}** - {Team} - {Timeline}
|
||||||
|
- {What QA needs}
|
||||||
|
- {Why it blocks testing}
|
||||||
|
|
||||||
## Test Environment Requirements
|
### QA Infrastructure Setup (Sprint 0)
|
||||||
|
|
||||||
**{Company} Standard:** Shared DB per Environment with Randomization (Shift-Left)
|
1. **Test Data Factories** - QA
|
||||||
|
- {Entity} factory with faker-based randomization
|
||||||
|
- Auto-cleanup fixtures for parallel safety
|
||||||
|
|
||||||
| Environment | Database | Test Data Strategy | Purpose |
|
2. **Test Environments** - QA
|
||||||
|-------------|----------|-------------------|---------|
|
- Local: {Setup details}
|
||||||
| **Local** | {DB} (shared) | Randomized (faker), auto-cleanup | Local development |
|
- CI/CD: {Setup details}
|
||||||
| **Dev (CI)** | {DB} (shared) | Randomized (faker), auto-cleanup | PR validation |
|
- Staging: {Setup details}
|
||||||
| **Staging** | {DB} (shared) | Randomized (faker), auto-cleanup | Pre-production, E2E |
|
|
||||||
|
|
||||||
**Key Principles:**
|
**Example factory pattern:**
|
||||||
- **Shared database per environment** (no ephemeral)
|
|
||||||
- **Randomization for isolation** (faker-based unique IDs)
|
|
||||||
- **Parallel-safe** (concurrent test runs don't conflict)
|
|
||||||
- **Self-cleaning** (tests delete their own data)
|
|
||||||
- **Shift-left** (test against real DBs early)
|
|
||||||
|
|
||||||
**Example:**
|
|
||||||
|
|
||||||
```typescript
|
```typescript
|
||||||
import { faker } from "@faker-js/faker";
|
import { test } from '@seontechnologies/playwright-utils/api-request/fixtures';
|
||||||
|
import { expect } from '@playwright/test';
|
||||||
|
import { faker } from '@faker-js/faker';
|
||||||
|
|
||||||
test("example with randomized test data @p0", async ({ apiRequest }) => {
|
test('example test @p0', async ({ apiRequest }) => {
|
||||||
const testData = {
|
const testData = {
|
||||||
id: `test-${faker.string.uuid()}`,
|
id: `test-${faker.string.uuid()}`,
|
||||||
customerId: `test-customer-${faker.string.alphanumeric(8)}`,
|
email: faker.internet.email(),
|
||||||
// ... unique test data
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Seed, test, cleanup
|
const { status } = await apiRequest({
|
||||||
|
method: 'POST',
|
||||||
|
path: '/api/resource',
|
||||||
|
body: testData,
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(status).toBe(201);
|
||||||
});
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Testability Assessment
|
## Risk Assessment
|
||||||
|
|
||||||
**Prerequisites from Architecture Doc:**
|
**Note:** Full risk details in Architecture doc. This section summarizes risks relevant to QA test planning.
|
||||||
|
|
||||||
Verify these blockers are resolved before test development:
|
### High-Priority Risks (Score ≥6)
|
||||||
- [ ] {Blocker 1} (see Architecture doc Quick Guide → 🚨 BLOCKERS)
|
|
||||||
- [ ] {Blocker 2}
|
|
||||||
- [ ] {Blocker 3}
|
|
||||||
|
|
||||||
**If Prerequisites Not Met:** Coordinate with Architecture team (see Architecture doc for mitigation plans and owner assignments)
|
| Risk ID | Category | Description | Score | QA Test Coverage |
|
||||||
|
|---------|----------|-------------|-------|------------------|
|
||||||
|
| **{R-ID}** | {CAT} | {Brief description} | **{Score}** | {How QA validates this risk} |
|
||||||
|
|
||||||
---
|
### Medium/Low-Priority Risks
|
||||||
|
|
||||||
## Test Levels Strategy
|
| Risk ID | Category | Description | Score | QA Test Coverage |
|
||||||
|
|---------|----------|-------------|-------|------------------|
|
||||||
**System Type:** {API-heavy / UI-heavy / Mixed backend system}
|
| {R-ID} | {CAT} | {Brief description} | {Score} | {How QA validates this risk} |
|
||||||
|
|
||||||
**Recommended Split:**
|
|
||||||
- **Unit Tests: {X}%** - {What to unit test}
|
|
||||||
- **Integration/API Tests: {X}%** - ⭐ **PRIMARY FOCUS** - {What to integration test}
|
|
||||||
- **E2E Tests: {X}%** - {What to E2E test}
|
|
||||||
|
|
||||||
**Rationale:** {Why this split makes sense for this system}
|
|
||||||
|
|
||||||
**Test Count Summary:**
|
|
||||||
- P0: ~{N} tests - Critical paths, run on every commit
|
|
||||||
- P1: ~{N} tests - Important features, run on PR to main
|
|
||||||
- P2: ~{N} tests - Edge cases, run nightly/weekly
|
|
||||||
- P3: ~{N} tests - Exploratory, run on-demand
|
|
||||||
- **Total: ~{N} tests** (~{X} weeks for 1 QA, ~{Y} weeks for 2 QAs)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Test Coverage Plan
|
## Test Coverage Plan
|
||||||
|
|
||||||
**Repository Note:** {Where tests live - backend repo, admin panel repo, etc. - and how CI pipelines are organized}
|
**IMPORTANT:** P0/P1/P2/P3 = **priority and risk level** (what to focus on if time-constrained), NOT execution timing. See "Execution Strategy" for when tests run.
|
||||||
|
|
||||||
### P0 (Critical) - Run on every commit (~{X} min)
|
### P0 (Critical)
|
||||||
|
|
||||||
**Execution:** CI/CD on every commit, parallel workers, smoke tests first (<5 min)
|
**Criteria:** Blocks core functionality + High risk (≥6) + No workaround + Affects majority of users
|
||||||
|
|
||||||
**Purpose:** Critical path validation - catch build-breaking changes and security violations immediately
|
| Test ID | Requirement | Test Level | Risk Link | Notes |
|
||||||
|
|---------|-------------|------------|-----------|-------|
|
||||||
|
| **P0-001** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||||
|
| **P0-002** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||||
|
|
||||||
**Criteria:** Blocks core functionality OR High risk (≥6) OR No workaround
|
**Total P0:** ~{N} tests
|
||||||
|
|
||||||
**Key Smoke Tests** (subset of P0, run first for fast feedback):
|
|
||||||
- {Smoke test 1} - {Duration}
|
|
||||||
- {Smoke test 2} - {Duration}
|
|
||||||
- {Smoke test 3} - {Duration}
|
|
||||||
|
|
||||||
| Requirement | Test Level | Risk Link | Test Count | Owner | Notes |
|
|
||||||
|-------------|------------|-----------|------------|-------|-------|
|
|
||||||
| {Requirement 1} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
|
||||||
| {Requirement 2} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
|
||||||
|
|
||||||
**Total P0:** ~{N} tests (~{X} weeks)
|
|
||||||
|
|
||||||
#### P0 Test Scenarios (Detailed)
|
|
||||||
|
|
||||||
**1. {Test Category} ({N} tests) - {CRITICALITY if applicable}**
|
|
||||||
|
|
||||||
- [ ] {Scenario 1 with checkbox}
|
|
||||||
- [ ] {Scenario 2}
|
|
||||||
- [ ] {Scenario 3}
|
|
||||||
|
|
||||||
**2. {Test Category 2} ({N} tests)**
|
|
||||||
|
|
||||||
- [ ] {Scenario 1}
|
|
||||||
- [ ] {Scenario 2}
|
|
||||||
|
|
||||||
{Continue for all P0 categories}
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### P1 (High) - Run on PR to main (~{X} min additional)
|
### P1 (High)
|
||||||
|
|
||||||
**Execution:** CI/CD on pull requests to main branch, runs after P0 passes, parallel workers
|
**Criteria:** Important features + Medium risk (3-4) + Common workflows + Workaround exists but difficult
|
||||||
|
|
||||||
**Purpose:** Important feature coverage - algorithm accuracy, complex workflows, Admin Panel interactions
|
| Test ID | Requirement | Test Level | Risk Link | Notes |
|
||||||
|
|---------|-------------|------------|-----------|-------|
|
||||||
|
| **P1-001** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||||
|
| **P1-002** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||||
|
|
||||||
**Criteria:** Important features OR Medium risk (3-4) OR Common workflows
|
**Total P1:** ~{N} tests
|
||||||
|
|
||||||
| Requirement | Test Level | Risk Link | Test Count | Owner | Notes |
|
|
||||||
|-------------|------------|-----------|------------|-------|-------|
|
|
||||||
| {Requirement 1} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
|
||||||
| {Requirement 2} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
|
||||||
|
|
||||||
**Total P1:** ~{N} tests (~{X} weeks)
|
|
||||||
|
|
||||||
#### P1 Test Scenarios (Detailed)
|
|
||||||
|
|
||||||
**1. {Test Category} ({N} tests)**
|
|
||||||
|
|
||||||
- [ ] {Scenario 1}
|
|
||||||
- [ ] {Scenario 2}
|
|
||||||
|
|
||||||
{Continue for all P1 categories}
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### P2 (Medium) - Run nightly/weekly (~{X} min)
|
### P2 (Medium)
|
||||||
|
|
||||||
**Execution:** Scheduled nightly run (or weekly for P3), full infrastructure, sequential execution acceptable
|
**Criteria:** Secondary features + Low risk (1-2) + Edge cases + Regression prevention
|
||||||
|
|
||||||
**Purpose:** Edge case coverage, error handling, data integrity validation - slow feedback acceptable
|
| Test ID | Requirement | Test Level | Risk Link | Notes |
|
||||||
|
|---------|-------------|------------|-----------|-------|
|
||||||
|
| **P2-001** | {Requirement} | {Level} | {R-ID} | {Notes} |
|
||||||
|
|
||||||
**Criteria:** Secondary features OR Low risk (1-2) OR Edge cases
|
**Total P2:** ~{N} tests
|
||||||
|
|
||||||
| Requirement | Test Level | Risk Link | Test Count | Owner | Notes |
|
|
||||||
|-------------|------------|-----------|------------|-------|-------|
|
|
||||||
| {Requirement 1} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
|
||||||
| {Requirement 2} | {Level} | {R-ID} | {N} | QA | {Notes} |
|
|
||||||
|
|
||||||
**Total P2:** ~{N} tests (~{X} weeks)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### P3 (Low) - Run on-demand (exploratory)
|
### P3 (Low)
|
||||||
|
|
||||||
**Execution:** Manual trigger or weekly scheduled run, performance testing
|
**Criteria:** Nice-to-have + Exploratory + Performance benchmarks + Documentation validation
|
||||||
|
|
||||||
**Purpose:** Full regression, performance benchmarks, accessibility validation - no time pressure
|
| Test ID | Requirement | Test Level | Notes |
|
||||||
|
|---------|-------------|------------|-------|
|
||||||
|
| **P3-001** | {Requirement} | {Level} | {Notes} |
|
||||||
|
|
||||||
**Criteria:** Nice-to-have OR Exploratory OR Performance benchmarks
|
**Total P3:** ~{N} tests
|
||||||
|
|
||||||
| Requirement | Test Level | Test Count | Owner | Notes |
|
|
||||||
|-------------|------------|------------|-------|-------|
|
|
||||||
| {Requirement 1} | {Level} | {N} | QA | {Notes} |
|
|
||||||
| {Requirement 2} | {Level} | {N} | QA | {Notes} |
|
|
||||||
|
|
||||||
**Total P3:** ~{N} tests (~{X} days)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### Coverage Matrix (Requirements → Tests)
|
## Execution Strategy
|
||||||
|
|
||||||
| Requirement | Test Level | Priority | Risk Link | Test Count | Owner |
|
**Philosophy:** Run everything in PRs unless there's significant infrastructure overhead. Playwright with parallelization is extremely fast (100s of tests in ~10-15 min).
|
||||||
|-------------|------------|----------|-----------|------------|-------|
|
|
||||||
| {Requirement 1} | {Level} | {P0-P3} | {R-ID} | {N} | {Owner} |
|
**Organized by TOOL TYPE:**
|
||||||
| {Requirement 2} | {Level} | {P0-P3} | {R-ID} | {N} | {Owner} |
|
|
||||||
|
### Every PR: Playwright Tests (~10-15 min)
|
||||||
|
|
||||||
|
**All functional tests** (from any priority level):
|
||||||
|
- All E2E, API, integration, unit tests using Playwright
|
||||||
|
- Parallelized across {N} shards
|
||||||
|
- Total: ~{N} Playwright tests (includes P0, P1, P2, P3)
|
||||||
|
|
||||||
|
**Why run in PRs:** Fast feedback, no expensive infrastructure
|
||||||
|
|
||||||
|
### Nightly: k6 Performance Tests (~30-60 min)
|
||||||
|
|
||||||
|
**All performance tests** (from any priority level):
|
||||||
|
- Load, stress, spike, endurance tests
|
||||||
|
- Total: ~{N} k6 tests (may include P0, P1, P2)
|
||||||
|
|
||||||
|
**Why defer to nightly:** Expensive infrastructure (k6 Cloud), long-running (10-40 min per test)
|
||||||
|
|
||||||
|
### Weekly: Chaos & Long-Running (~hours)
|
||||||
|
|
||||||
|
**Special infrastructure tests** (from any priority level):
|
||||||
|
- Multi-region failover (requires AWS Fault Injection Simulator)
|
||||||
|
- Disaster recovery (backup restore, 4+ hours)
|
||||||
|
- Endurance tests (4+ hours runtime)
|
||||||
|
|
||||||
|
**Why defer to weekly:** Very expensive infrastructure, very long-running, infrequent validation sufficient
|
||||||
|
|
||||||
|
**Manual tests** (excluded from automation):
|
||||||
|
- DevOps validation (deployment, monitoring)
|
||||||
|
- Finance validation (cost alerts)
|
||||||
|
- Documentation validation
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Sprint 0 Setup Requirements
|
## QA Effort Estimate
|
||||||
|
|
||||||
**IMPORTANT:** These items **BLOCK test development**. Complete in Sprint 0 before QA can write tests.
|
**QA test development effort only** (excludes DevOps, Backend, Data Eng, Finance work):
|
||||||
|
|
||||||
### Architecture/Backend Blockers (from Architecture doc)
|
| Priority | Count | Effort Range | Notes |
|
||||||
|
|----------|-------|--------------|-------|
|
||||||
|
| P0 | ~{N} | ~{X}-{Y} weeks | Complex setup (security, performance, multi-step) |
|
||||||
|
| P1 | ~{N} | ~{X}-{Y} weeks | Standard coverage (integration, API tests) |
|
||||||
|
| P2 | ~{N} | ~{X}-{Y} days | Edge cases, simple validation |
|
||||||
|
| P3 | ~{N} | ~{X}-{Y} days | Exploratory, benchmarks |
|
||||||
|
| **Total** | ~{N} | **~{X}-{Y} weeks** | **1 QA engineer, full-time** |
|
||||||
|
|
||||||
**Source:** See Architecture doc "Quick Guide" for detailed mitigation plans
|
**Assumptions:**
|
||||||
|
- Includes test design, implementation, debugging, CI integration
|
||||||
|
- Excludes ongoing maintenance (~10% effort)
|
||||||
|
- Assumes test infrastructure (factories, fixtures) ready
|
||||||
|
|
||||||
1. **{Blocker 1}** 🚨 **BLOCKER** - {Owner}
|
**Dependencies from other teams:**
|
||||||
- {What needs to be provided}
|
- See "Dependencies & Test Blockers" section for what QA needs from Backend, DevOps, Data Eng
|
||||||
- **Details:** Architecture doc {Risk-ID} mitigation plan
|
|
||||||
|
|
||||||
2. **{Blocker 2}** 🚨 **BLOCKER** - {Owner}
|
|
||||||
- {What needs to be provided}
|
|
||||||
- **Details:** Architecture doc {Risk-ID} mitigation plan
|
|
||||||
|
|
||||||
### QA Test Infrastructure
|
|
||||||
|
|
||||||
1. **{Factory/Fixture Name}** - QA
|
|
||||||
- Faker-based generator: `{function_signature}`
|
|
||||||
- Auto-cleanup after tests
|
|
||||||
|
|
||||||
2. **{Entity} Fixtures** - QA
|
|
||||||
- Seed scripts for {states/scenarios}
|
|
||||||
- Isolated {id_pattern} per test
|
|
||||||
|
|
||||||
### Test Environments
|
|
||||||
|
|
||||||
**Local:** {Setup details - Docker, LocalStack, etc.}
|
|
||||||
|
|
||||||
**CI/CD:** {Setup details - shared infrastructure, parallel workers, artifacts}
|
|
||||||
|
|
||||||
**Staging:** {Setup details - shared multi-tenant, nightly E2E}
|
|
||||||
|
|
||||||
**Production:** {Setup details - feature flags, canary transactions}
|
|
||||||
|
|
||||||
**Sprint 0 NFR Gates** (MUST complete before integration testing):
|
|
||||||
- [ ] {Gate 1}: {Description} (Owner) 🚨
|
|
||||||
- [ ] {Gate 2}: {Description} (Owner) 🚨
|
|
||||||
- [ ] {Gate 3}: {Description} (Owner) 🚨
|
|
||||||
|
|
||||||
### Sprint 1 Items (Not Sprint 0)
|
|
||||||
|
|
||||||
- **{Item 1}** ({Owner}): {Description}
|
|
||||||
- **{Item 2}** ({Owner}): {Description}
|
|
||||||
|
|
||||||
**Sprint 1 NFR Gates** (MUST complete before GA):
|
|
||||||
- [ ] {Gate 1}: {Description} (Owner)
|
|
||||||
- [ ] {Gate 2}: {Description} (Owner)
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## NFR Readiness Summary
|
## Appendix A: Code Examples & Tagging
|
||||||
|
|
||||||
**Based on Architecture Doc Risk Assessment**
|
**Playwright Tags for Selective Execution:**
|
||||||
|
|
||||||
| NFR Category | Status | Evidence Status | Blocker | Next Action |
|
```typescript
|
||||||
|--------------|--------|-----------------|---------|-------------|
|
import { test } from '@seontechnologies/playwright-utils/api-request/fixtures';
|
||||||
| **Testability & Automation** | {Status} | {Evidence} | {Sprint} | {Action} |
|
import { expect } from '@playwright/test';
|
||||||
| **Test Data Strategy** | {Status} | {Evidence} | {Sprint} | {Action} |
|
|
||||||
| **Scalability & Availability** | {Status} | {Evidence} | {Sprint} | {Action} |
|
|
||||||
| **Disaster Recovery** | {Status} | {Evidence} | {Sprint} | {Action} |
|
|
||||||
| **Security** | {Status} | {Evidence} | {Sprint} | {Action} |
|
|
||||||
| **Monitorability, Debuggability & Manageability** | {Status} | {Evidence} | {Sprint} | {Action} |
|
|
||||||
| **QoS & QoE** | {Status} | {Evidence} | {Sprint} | {Action} |
|
|
||||||
| **Deployability** | {Status} | {Evidence} | {Sprint} | {Action} |
|
|
||||||
|
|
||||||
**Total:** {N} PASS, {N} CONCERNS across {N} categories
|
// P0 critical test
|
||||||
|
test('@P0 @API @Security unauthenticated request returns 401', async ({ apiRequest }) => {
|
||||||
|
const { status, body } = await apiRequest({
|
||||||
|
method: 'POST',
|
||||||
|
path: '/api/endpoint',
|
||||||
|
body: { data: 'test' },
|
||||||
|
skipAuth: true,
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(status).toBe(401);
|
||||||
|
expect(body.error).toContain('unauthorized');
|
||||||
|
});
|
||||||
|
|
||||||
|
// P1 integration test
|
||||||
|
test('@P1 @Integration data syncs correctly', async ({ apiRequest }) => {
|
||||||
|
// Seed data
|
||||||
|
await apiRequest({
|
||||||
|
method: 'POST',
|
||||||
|
path: '/api/seed',
|
||||||
|
body: { /* test data */ },
|
||||||
|
});
|
||||||
|
|
||||||
|
// Validate
|
||||||
|
const { status, body } = await apiRequest({
|
||||||
|
method: 'GET',
|
||||||
|
path: '/api/resource',
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(status).toBe(200);
|
||||||
|
expect(body).toHaveProperty('data');
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Run specific tags:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run only P0 tests
|
||||||
|
npx playwright test --grep @P0
|
||||||
|
|
||||||
|
# Run P0 + P1 tests
|
||||||
|
npx playwright test --grep "@P0|@P1"
|
||||||
|
|
||||||
|
# Run only security tests
|
||||||
|
npx playwright test --grep @Security
|
||||||
|
|
||||||
|
# Run all Playwright tests in PR (default)
|
||||||
|
npx playwright test
|
||||||
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
**End of QA Document**
|
## Appendix B: Knowledge Base References
|
||||||
|
|
||||||
**Next Steps for QA Team:**
|
- **Risk Governance**: `risk-governance.md` - Risk scoring methodology
|
||||||
1. Verify Sprint 0 blockers resolved (coordinate with Architecture team if not)
|
- **Test Priorities Matrix**: `test-priorities-matrix.md` - P0-P3 criteria
|
||||||
2. Set up test infrastructure (factories, fixtures, environments)
|
- **Test Levels Framework**: `test-levels-framework.md` - E2E vs API vs Unit selection
|
||||||
3. Begin test implementation following priority order (P0 → P1 → P2 → P3)
|
- **Test Quality**: `test-quality.md` - Definition of Done (no hard waits, <300 lines, <1.5 min)
|
||||||
4. Run smoke tests first for fast feedback
|
|
||||||
5. Track progress using test scenario checklists above
|
|
||||||
|
|
||||||
**Next Steps for Architecture Team:**
|
---
|
||||||
1. Monitor Sprint 0 blocker resolution
|
|
||||||
2. Provide support for QA infrastructure setup if needed
|
**Generated by:** BMad TEA Agent
|
||||||
3. Review test results and address any newly discovered testability gaps
|
**Workflow:** `_bmad/bmm/testarch/test-design`
|
||||||
|
**Version:** 4.0 (BMad v6)
|
||||||
|
|
|
||||||
|
|
@ -1,11 +1,11 @@
|
||||||
module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs
|
module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs
|
||||||
core,,Advanced Elicitation,AE,10,_bmad/core/workflows/advanced-elicitation/workflow.xml,bmad:advanced-elicitation,false,,,"Apply elicitation methods iteratively to enhance content being generated, presenting options and allowing reshuffle or full method listing for comprehensive content improvement",,
|
core,,Advanced Elicitation,AE,10,_bmad/core/workflows/advanced-elicitation/workflow.xml,bmad_advanced-elicitation,false,,,"Apply elicitation methods iteratively to enhance content being generated, presenting options and allowing reshuffle or full method listing for comprehensive content improvement",,
|
||||||
core,,Brainstorming,BS,20,_bmad/core/workflows/brainstorming/workflow.md,bmad:brainstorming,false,analyst,,Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods,{output_folder}/brainstorming/brainstorming-session-{{date}}.md,,
|
core,,Brainstorming,BS,20,_bmad/core/workflows/brainstorming/workflow.md,bmad_brainstorming,false,analyst,,Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods,{output_folder}/brainstorming/brainstorming-session-{{date}}.md,,
|
||||||
core,,Party Mode,PM,30,_bmad/core/workflows/party-mode/workflow.md,bmad:party-mode,false,party-mode facilitator,,Orchestrates group discussions between all installed BMAD agents enabling natural multi-agent conversations,,
|
core,,Party Mode,PM,30,_bmad/core/workflows/party-mode/workflow.md,bmad_party-mode,false,party-mode facilitator,,Orchestrates group discussions between all installed BMAD agents enabling natural multi-agent conversations,,
|
||||||
core,,bmad-help,BH,40,_bmad/core/tasks/bmad-help.md,bmad:help,false,system,,Get unstuck by showing what workflow steps come next or answering questions about what to do in the BMad Method,,
|
core,,bmad-help,BH,40,_bmad/core/tasks/bmad-help.md,bmad_help,false,system,,Get unstuck by showing what workflow steps come next or answering questions about what to do in the BMad Method,,
|
||||||
core,,Index Docs,ID,50,_bmad/core/tasks/index-docs.xml,bmad:index-docs,false,llm,,Generates or updates an index.md of all documents in the specified directory,,
|
core,,Index Docs,ID,50,_bmad/core/tasks/index-docs.xml,bmad_index-docs,false,llm,,Generates or updates an index.md of all documents in the specified directory,,
|
||||||
core,,Execute Workflow,WF,60,_bmad/core/tasks/workflow.xml,bmad:workflow,false,llm,,Execute given workflow by loading its configuration following instructions and producing output,,
|
core,,Execute Workflow,WF,60,_bmad/core/tasks/workflow.xml,bmad_workflow,false,llm,,Execute given workflow by loading its configuration following instructions and producing output,,
|
||||||
core,,Shard Document,SD,70,_bmad/core/tasks/shard-doc.xml,bmad:shard-doc,false,llm,,Splits large markdown documents into smaller organized files based on level 2 sections,,
|
core,,Shard Document,SD,70,_bmad/core/tasks/shard-doc.xml,bmad_shard-doc,false,llm,,Splits large markdown documents into smaller organized files based on level 2 sections,,
|
||||||
core,,Editorial Review - Prose,EP,80,_bmad/core/tasks/editorial-review-prose.xml,bmad:editorial-review-prose,false,llm,reader_type,Clinical copy-editor that reviews text for communication issues,,"three-column markdown table with suggested fixes",
|
core,,Editorial Review - Prose,EP,80,_bmad/core/tasks/editorial-review-prose.xml,bmad_editorial-review-prose,false,llm,reader_type,Clinical copy-editor that reviews text for communication issues,,"three-column markdown table with suggested fixes",
|
||||||
core,,Editorial Review - Structure,ES,90,_bmad/core/tasks/editorial-review-structure.xml,bmad:editorial-review-structure,false,llm,,Structural editor that proposes cuts reorganization and simplification while preserving comprehension,,
|
core,,Editorial Review - Structure,ES,90,_bmad/core/tasks/editorial-review-structure.xml,bmad_editorial-review-structure,false,llm,,Structural editor that proposes cuts reorganization and simplification while preserving comprehension,,
|
||||||
core,,Adversarial Review (General),AR,100,_bmad/core/tasks/review-adversarial-general.xml,bmad:review-adversarial-general,false,llm,,Cynically review content and produce findings,,
|
core,,Adversarial Review (General),AR,100,_bmad/core/tasks/review-adversarial-general.xml,bmad_review-adversarial-general,false,llm,,Cynically review content and produce findings,,
|
||||||
|
|
|
||||||
|
Can't render this file because it has a wrong number of fields in line 3.
|
|
|
@ -6,6 +6,8 @@
|
||||||
|
|
||||||
<inputs>
|
<inputs>
|
||||||
<input name="content" desc="Content to review - diff, spec, story, doc, or any artifact" />
|
<input name="content" desc="Content to review - diff, spec, story, doc, or any artifact" />
|
||||||
|
<input name="also_consider" required="false"
|
||||||
|
desc="Optional areas to keep in mind during review alongside normal adversarial analysis" />
|
||||||
</inputs>
|
</inputs>
|
||||||
|
|
||||||
<llm critical="true">
|
<llm critical="true">
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,56 @@
|
||||||
|
# Adversarial Review Test Suite
|
||||||
|
|
||||||
|
Tests for the `also_consider` optional input in `review-adversarial-general.xml`.
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
|
||||||
|
Evaluate whether the `also_consider` input gently nudges the reviewer toward specific areas without overriding normal adversarial analysis.
|
||||||
|
|
||||||
|
## Test Content
|
||||||
|
|
||||||
|
All tests use `sample-content.md` - a deliberately imperfect User Authentication API doc with:
|
||||||
|
|
||||||
|
- Vague error handling section
|
||||||
|
- Missing rate limit details
|
||||||
|
- No token expiration info
|
||||||
|
- Password in plain text example
|
||||||
|
- Missing authentication headers
|
||||||
|
- No error response examples
|
||||||
|
|
||||||
|
## Running Tests
|
||||||
|
|
||||||
|
For each test case in `test-cases.yaml`, invoke the adversarial review task.
|
||||||
|
|
||||||
|
### Manual Test Invocation
|
||||||
|
|
||||||
|
```
|
||||||
|
Review this content using the adversarial review task:
|
||||||
|
|
||||||
|
<content>
|
||||||
|
[paste sample-content.md]
|
||||||
|
</content>
|
||||||
|
|
||||||
|
<also_consider>
|
||||||
|
[paste items from test case, or omit for TC01]
|
||||||
|
</also_consider>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Evaluation Criteria
|
||||||
|
|
||||||
|
For each test, note:
|
||||||
|
|
||||||
|
1. **Total findings** - Still hitting ~10 issues?
|
||||||
|
2. **Distribution** - Are findings spread across concerns or clustered?
|
||||||
|
3. **Relevance** - Do findings relate to `also_consider` items when provided?
|
||||||
|
4. **Balance** - Are `also_consider` findings elevated over others, or naturally mixed?
|
||||||
|
5. **Quality** - Are findings actionable regardless of source?
|
||||||
|
|
||||||
|
## Expected Outcomes
|
||||||
|
|
||||||
|
- **TC01 (baseline)**: Generic spread of findings
|
||||||
|
- **TC02-TC05 (domain-focused)**: Some findings align with domain, others still organic
|
||||||
|
- **TC06 (single item)**: Light influence, not dominant
|
||||||
|
- **TC07 (vague items)**: Minimal change from baseline
|
||||||
|
- **TC08 (specific items)**: Direct answers if gaps exist
|
||||||
|
- **TC09 (mixed)**: Balanced across domains
|
||||||
|
- **TC10 (contradictory)**: Graceful handling
|
||||||
|
|
@ -0,0 +1,46 @@
|
||||||
|
# User Authentication API
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This API provides endpoints for user authentication and session management.
|
||||||
|
|
||||||
|
## Endpoints
|
||||||
|
|
||||||
|
### POST /api/auth/login
|
||||||
|
|
||||||
|
Authenticates a user and returns a token.
|
||||||
|
|
||||||
|
**Request Body:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"email": "user@example.com",
|
||||||
|
"password": "password123"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"token": "eyJhbGciOiJIUzI1NiIs...",
|
||||||
|
"user": {
|
||||||
|
"id": 1,
|
||||||
|
"email": "user@example.com"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### POST /api/auth/logout
|
||||||
|
|
||||||
|
Logs out the current user.
|
||||||
|
|
||||||
|
### GET /api/auth/me
|
||||||
|
|
||||||
|
Returns the current user's profile.
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
Errors return appropriate HTTP status codes.
|
||||||
|
|
||||||
|
## Rate Limiting
|
||||||
|
|
||||||
|
Rate limiting is applied to prevent abuse.
|
||||||
|
|
@ -0,0 +1,103 @@
|
||||||
|
# Test Cases for review-adversarial-general.xml with also_consider input
|
||||||
|
#
|
||||||
|
# Purpose: Evaluate how the optional also_consider input influences review findings
|
||||||
|
# Content: All tests use sample-content.md (User Authentication API docs)
|
||||||
|
#
|
||||||
|
# To run: Manually invoke the task with each configuration and compare outputs
|
||||||
|
|
||||||
|
test_cases:
|
||||||
|
# BASELINE - No also_consider
|
||||||
|
- id: TC01
|
||||||
|
name: "Baseline - no also_consider"
|
||||||
|
description: "Control test with no also_consider input"
|
||||||
|
also_consider: null
|
||||||
|
expected_behavior: "Generic adversarial findings across all aspects"
|
||||||
|
|
||||||
|
# DOCUMENTATION-FOCUSED
|
||||||
|
- id: TC02
|
||||||
|
name: "Documentation - reader confusion"
|
||||||
|
description: "Nudge toward documentation UX issues"
|
||||||
|
also_consider:
|
||||||
|
- What would confuse a first-time reader?
|
||||||
|
- What questions are left unanswered?
|
||||||
|
- What could be interpreted multiple ways?
|
||||||
|
- What jargon is unexplained?
|
||||||
|
expected_behavior: "More findings about clarity, completeness, reader experience"
|
||||||
|
|
||||||
|
- id: TC03
|
||||||
|
name: "Documentation - examples and usage"
|
||||||
|
description: "Nudge toward practical usage gaps"
|
||||||
|
also_consider:
|
||||||
|
- Missing code examples
|
||||||
|
- Unclear usage patterns
|
||||||
|
- Edge cases not documented
|
||||||
|
expected_behavior: "More findings about practical application gaps"
|
||||||
|
|
||||||
|
# SECURITY-FOCUSED
|
||||||
|
- id: TC04
|
||||||
|
name: "Security review"
|
||||||
|
description: "Nudge toward security concerns"
|
||||||
|
also_consider:
|
||||||
|
- Authentication vulnerabilities
|
||||||
|
- Token handling issues
|
||||||
|
- Input validation gaps
|
||||||
|
- Information disclosure risks
|
||||||
|
expected_behavior: "More security-related findings"
|
||||||
|
|
||||||
|
# API DESIGN-FOCUSED
|
||||||
|
- id: TC05
|
||||||
|
name: "API design"
|
||||||
|
description: "Nudge toward API design best practices"
|
||||||
|
also_consider:
|
||||||
|
- REST conventions not followed
|
||||||
|
- Inconsistent response formats
|
||||||
|
- Missing pagination or filtering
|
||||||
|
- Versioning concerns
|
||||||
|
expected_behavior: "More API design pattern findings"
|
||||||
|
|
||||||
|
# SINGLE ITEM
|
||||||
|
- id: TC06
|
||||||
|
name: "Single item - error handling"
|
||||||
|
description: "Test with just one also_consider item"
|
||||||
|
also_consider:
|
||||||
|
- Error handling completeness
|
||||||
|
expected_behavior: "Some emphasis on error handling while still covering other areas"
|
||||||
|
|
||||||
|
# BROAD/VAGUE
|
||||||
|
- id: TC07
|
||||||
|
name: "Broad items"
|
||||||
|
description: "Test with vague also_consider items"
|
||||||
|
also_consider:
|
||||||
|
- Quality issues
|
||||||
|
- Things that seem off
|
||||||
|
expected_behavior: "Minimal change from baseline - items too vague to steer"
|
||||||
|
|
||||||
|
# VERY SPECIFIC
|
||||||
|
- id: TC08
|
||||||
|
name: "Very specific items"
|
||||||
|
description: "Test with highly specific also_consider items"
|
||||||
|
also_consider:
|
||||||
|
- Is the JWT token expiration documented?
|
||||||
|
- Are refresh token mechanics explained?
|
||||||
|
- What happens on concurrent sessions?
|
||||||
|
expected_behavior: "Specific findings addressing these exact questions if gaps exist"
|
||||||
|
|
||||||
|
# MIXED DOMAINS
|
||||||
|
- id: TC09
|
||||||
|
name: "Mixed domain concerns"
|
||||||
|
description: "Test with items from different domains"
|
||||||
|
also_consider:
|
||||||
|
- Security vulnerabilities
|
||||||
|
- Reader confusion points
|
||||||
|
- API design inconsistencies
|
||||||
|
- Performance implications
|
||||||
|
expected_behavior: "Balanced findings across multiple domains"
|
||||||
|
|
||||||
|
# CONTRADICTORY/UNUSUAL
|
||||||
|
- id: TC10
|
||||||
|
name: "Contradictory items"
|
||||||
|
description: "Test resilience with odd inputs"
|
||||||
|
also_consider:
|
||||||
|
- Things that are too detailed
|
||||||
|
- Things that are not detailed enough
|
||||||
|
expected_behavior: "Reviewer handles gracefully, finds issues in both directions"
|
||||||
|
|
@ -0,0 +1,65 @@
|
||||||
|
const chalk = require('chalk');
|
||||||
|
const path = require('node:path');
|
||||||
|
const { Installer } = require('../installers/lib/core/installer');
|
||||||
|
const { Manifest } = require('../installers/lib/core/manifest');
|
||||||
|
const { UI } = require('../lib/ui');
|
||||||
|
|
||||||
|
const installer = new Installer();
|
||||||
|
const manifest = new Manifest();
|
||||||
|
const ui = new UI();
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
command: 'status',
|
||||||
|
description: 'Display BMAD installation status and module versions',
|
||||||
|
options: [],
|
||||||
|
action: async (options) => {
|
||||||
|
try {
|
||||||
|
// Find the bmad directory
|
||||||
|
const projectDir = process.cwd();
|
||||||
|
const { bmadDir } = await installer.findBmadDir(projectDir);
|
||||||
|
|
||||||
|
// Check if bmad directory exists
|
||||||
|
const fs = require('fs-extra');
|
||||||
|
if (!(await fs.pathExists(bmadDir))) {
|
||||||
|
console.log(chalk.yellow('No BMAD installation found in the current directory.'));
|
||||||
|
console.log(chalk.dim(`Expected location: ${bmadDir}`));
|
||||||
|
console.log(chalk.dim('\nRun "bmad install" to set up a new installation.'));
|
||||||
|
process.exit(0);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read manifest
|
||||||
|
const manifestData = await manifest._readRaw(bmadDir);
|
||||||
|
|
||||||
|
if (!manifestData) {
|
||||||
|
console.log(chalk.yellow('No BMAD installation manifest found.'));
|
||||||
|
console.log(chalk.dim('\nRun "bmad install" to set up a new installation.'));
|
||||||
|
process.exit(0);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get installation info
|
||||||
|
const installation = manifestData.installation || {};
|
||||||
|
const modules = manifestData.modules || [];
|
||||||
|
|
||||||
|
// Check for available updates (only for external modules)
|
||||||
|
const availableUpdates = await manifest.checkForUpdates(bmadDir);
|
||||||
|
|
||||||
|
// Display status
|
||||||
|
ui.displayStatus({
|
||||||
|
installation,
|
||||||
|
modules,
|
||||||
|
availableUpdates,
|
||||||
|
bmadDir,
|
||||||
|
});
|
||||||
|
|
||||||
|
process.exit(0);
|
||||||
|
} catch (error) {
|
||||||
|
console.error(chalk.red('Status check failed:'), error.message);
|
||||||
|
if (process.env.BMAD_DEBUG) {
|
||||||
|
console.error(chalk.dim(error.stack));
|
||||||
|
}
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
@ -10,6 +10,7 @@ modules:
|
||||||
description: "Agent, Workflow and Module Builder"
|
description: "Agent, Workflow and Module Builder"
|
||||||
defaultSelected: false
|
defaultSelected: false
|
||||||
type: bmad-org
|
type: bmad-org
|
||||||
|
npmPackage: bmad-builder
|
||||||
|
|
||||||
bmad-creative-intelligence-suite:
|
bmad-creative-intelligence-suite:
|
||||||
url: https://github.com/bmad-code-org/bmad-module-creative-intelligence-suite
|
url: https://github.com/bmad-code-org/bmad-module-creative-intelligence-suite
|
||||||
|
|
@ -19,6 +20,7 @@ modules:
|
||||||
description: "Creative tools for writing, brainstorming, and more"
|
description: "Creative tools for writing, brainstorming, and more"
|
||||||
defaultSelected: false
|
defaultSelected: false
|
||||||
type: bmad-org
|
type: bmad-org
|
||||||
|
npmPackage: bmad-creative-intelligence-suite
|
||||||
|
|
||||||
bmad-game-dev-studio:
|
bmad-game-dev-studio:
|
||||||
url: https://github.com/bmad-code-org/bmad-module-game-dev-studio.git
|
url: https://github.com/bmad-code-org/bmad-module-game-dev-studio.git
|
||||||
|
|
@ -28,6 +30,7 @@ modules:
|
||||||
description: "Game development agents and workflows"
|
description: "Game development agents and workflows"
|
||||||
defaultSelected: false
|
defaultSelected: false
|
||||||
type: bmad-org
|
type: bmad-org
|
||||||
|
npmPackage: bmad-game-dev-studio
|
||||||
|
|
||||||
# TODO: Enable once fixes applied:
|
# TODO: Enable once fixes applied:
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -534,18 +534,71 @@ class ManifestGenerator {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Write main manifest as YAML with installation info only
|
* Write main manifest as YAML with installation info only
|
||||||
|
* Fetches fresh version info for all modules
|
||||||
* @returns {string} Path to the manifest file
|
* @returns {string} Path to the manifest file
|
||||||
*/
|
*/
|
||||||
async writeMainManifest(cfgDir) {
|
async writeMainManifest(cfgDir) {
|
||||||
const manifestPath = path.join(cfgDir, 'manifest.yaml');
|
const manifestPath = path.join(cfgDir, 'manifest.yaml');
|
||||||
|
|
||||||
|
// Read existing manifest to preserve install date
|
||||||
|
let existingInstallDate = null;
|
||||||
|
const existingModulesMap = new Map();
|
||||||
|
|
||||||
|
if (await fs.pathExists(manifestPath)) {
|
||||||
|
try {
|
||||||
|
const existingContent = await fs.readFile(manifestPath, 'utf8');
|
||||||
|
const existingManifest = yaml.parse(existingContent);
|
||||||
|
|
||||||
|
// Preserve original install date
|
||||||
|
if (existingManifest.installation?.installDate) {
|
||||||
|
existingInstallDate = existingManifest.installation.installDate;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build map of existing modules for quick lookup
|
||||||
|
if (existingManifest.modules && Array.isArray(existingManifest.modules)) {
|
||||||
|
for (const m of existingManifest.modules) {
|
||||||
|
if (typeof m === 'object' && m.name) {
|
||||||
|
existingModulesMap.set(m.name, m);
|
||||||
|
} else if (typeof m === 'string') {
|
||||||
|
existingModulesMap.set(m, { installDate: existingInstallDate });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// If we can't read existing manifest, continue with defaults
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch fresh version info for all modules
|
||||||
|
const { Manifest } = require('./manifest');
|
||||||
|
const manifestObj = new Manifest();
|
||||||
|
const updatedModules = [];
|
||||||
|
|
||||||
|
for (const moduleName of this.modules) {
|
||||||
|
// Get fresh version info from source
|
||||||
|
const versionInfo = await manifestObj.getModuleVersionInfo(moduleName, this.bmadDir);
|
||||||
|
|
||||||
|
// Get existing install date if available
|
||||||
|
const existing = existingModulesMap.get(moduleName);
|
||||||
|
|
||||||
|
updatedModules.push({
|
||||||
|
name: moduleName,
|
||||||
|
version: versionInfo.version,
|
||||||
|
installDate: existing?.installDate || new Date().toISOString(),
|
||||||
|
lastUpdated: new Date().toISOString(),
|
||||||
|
source: versionInfo.source,
|
||||||
|
npmPackage: versionInfo.npmPackage,
|
||||||
|
repoUrl: versionInfo.repoUrl,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
const manifest = {
|
const manifest = {
|
||||||
installation: {
|
installation: {
|
||||||
version: packageJson.version,
|
version: packageJson.version,
|
||||||
installDate: new Date().toISOString(),
|
installDate: existingInstallDate || new Date().toISOString(),
|
||||||
lastUpdated: new Date().toISOString(),
|
lastUpdated: new Date().toISOString(),
|
||||||
},
|
},
|
||||||
modules: this.modules, // Include ALL modules (standard and custom)
|
modules: updatedModules,
|
||||||
ides: this.selectedIdes,
|
ides: this.selectedIdes,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,7 @@
|
||||||
const path = require('node:path');
|
const path = require('node:path');
|
||||||
const fs = require('fs-extra');
|
const fs = require('fs-extra');
|
||||||
const crypto = require('node:crypto');
|
const crypto = require('node:crypto');
|
||||||
|
const { getProjectRoot } = require('../../../lib/project-root');
|
||||||
|
|
||||||
class Manifest {
|
class Manifest {
|
||||||
/**
|
/**
|
||||||
|
|
@ -16,14 +17,35 @@ class Manifest {
|
||||||
// Ensure _config directory exists
|
// Ensure _config directory exists
|
||||||
await fs.ensureDir(path.dirname(manifestPath));
|
await fs.ensureDir(path.dirname(manifestPath));
|
||||||
|
|
||||||
|
// Get the BMad version from package.json
|
||||||
|
const bmadVersion = data.version || require(path.join(process.cwd(), 'package.json')).version;
|
||||||
|
|
||||||
|
// Convert module list to new detailed format
|
||||||
|
const moduleDetails = [];
|
||||||
|
if (data.modules && Array.isArray(data.modules)) {
|
||||||
|
for (const moduleName of data.modules) {
|
||||||
|
// Core and BMM modules use the BMad version
|
||||||
|
const moduleVersion = moduleName === 'core' || moduleName === 'bmm' ? bmadVersion : null;
|
||||||
|
const now = data.installDate || new Date().toISOString();
|
||||||
|
|
||||||
|
moduleDetails.push({
|
||||||
|
name: moduleName,
|
||||||
|
version: moduleVersion,
|
||||||
|
installDate: now,
|
||||||
|
lastUpdated: now,
|
||||||
|
source: moduleName === 'core' || moduleName === 'bmm' ? 'built-in' : 'unknown',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Structure the manifest data
|
// Structure the manifest data
|
||||||
const manifestData = {
|
const manifestData = {
|
||||||
installation: {
|
installation: {
|
||||||
version: data.version || require(path.join(process.cwd(), 'package.json')).version,
|
version: bmadVersion,
|
||||||
installDate: data.installDate || new Date().toISOString(),
|
installDate: data.installDate || new Date().toISOString(),
|
||||||
lastUpdated: data.lastUpdated || new Date().toISOString(),
|
lastUpdated: data.lastUpdated || new Date().toISOString(),
|
||||||
},
|
},
|
||||||
modules: data.modules || [],
|
modules: moduleDetails,
|
||||||
ides: data.ides || [],
|
ides: data.ides || [],
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -57,12 +79,23 @@ class Manifest {
|
||||||
const content = await fs.readFile(yamlPath, 'utf8');
|
const content = await fs.readFile(yamlPath, 'utf8');
|
||||||
const manifestData = yaml.parse(content);
|
const manifestData = yaml.parse(content);
|
||||||
|
|
||||||
|
// Handle new detailed module format
|
||||||
|
const modules = manifestData.modules || [];
|
||||||
|
|
||||||
|
// For backward compatibility: if modules is an array of strings (old format),
|
||||||
|
// the calling code may need the array of names
|
||||||
|
const moduleNames = modules.map((m) => (typeof m === 'string' ? m : m.name));
|
||||||
|
|
||||||
|
// Check if we have the new detailed format
|
||||||
|
const hasDetailedModules = modules.length > 0 && typeof modules[0] === 'object';
|
||||||
|
|
||||||
// Flatten the structure for compatibility with existing code
|
// Flatten the structure for compatibility with existing code
|
||||||
return {
|
return {
|
||||||
version: manifestData.installation?.version,
|
version: manifestData.installation?.version,
|
||||||
installDate: manifestData.installation?.installDate,
|
installDate: manifestData.installation?.installDate,
|
||||||
lastUpdated: manifestData.installation?.lastUpdated,
|
lastUpdated: manifestData.installation?.lastUpdated,
|
||||||
modules: manifestData.modules || [], // All modules (standard and custom)
|
modules: moduleNames, // Simple array of module names for backward compatibility
|
||||||
|
modulesDetailed: hasDetailedModules ? modules : null, // New detailed format
|
||||||
customModules: manifestData.customModules || [], // Keep for backward compatibility
|
customModules: manifestData.customModules || [], // Keep for backward compatibility
|
||||||
ides: manifestData.ides || [],
|
ides: manifestData.ides || [],
|
||||||
};
|
};
|
||||||
|
|
@ -82,28 +115,92 @@ class Manifest {
|
||||||
*/
|
*/
|
||||||
async update(bmadDir, updates, installedFiles = null) {
|
async update(bmadDir, updates, installedFiles = null) {
|
||||||
const yaml = require('yaml');
|
const yaml = require('yaml');
|
||||||
const manifest = (await this.read(bmadDir)) || {};
|
const manifest = (await this._readRaw(bmadDir)) || {
|
||||||
|
installation: {},
|
||||||
// Merge updates
|
modules: [],
|
||||||
Object.assign(manifest, updates);
|
ides: [],
|
||||||
manifest.lastUpdated = new Date().toISOString();
|
|
||||||
|
|
||||||
// Convert back to structured format for YAML
|
|
||||||
const manifestData = {
|
|
||||||
installation: {
|
|
||||||
version: manifest.version,
|
|
||||||
installDate: manifest.installDate,
|
|
||||||
lastUpdated: manifest.lastUpdated,
|
|
||||||
},
|
|
||||||
modules: manifest.modules || [], // All modules (standard and custom)
|
|
||||||
ides: manifest.ides || [],
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Handle module updates
|
||||||
|
if (updates.modules) {
|
||||||
|
// If modules is being updated, we need to preserve detailed module info
|
||||||
|
const existingDetailed = manifest.modules || [];
|
||||||
|
const incomingNames = updates.modules;
|
||||||
|
|
||||||
|
// Build updated modules array
|
||||||
|
const updatedModules = [];
|
||||||
|
for (const name of incomingNames) {
|
||||||
|
const existing = existingDetailed.find((m) => m.name === name);
|
||||||
|
if (existing) {
|
||||||
|
// Preserve existing details, update lastUpdated if this module is being updated
|
||||||
|
updatedModules.push({
|
||||||
|
...existing,
|
||||||
|
lastUpdated: new Date().toISOString(),
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
// New module - add with minimal details
|
||||||
|
updatedModules.push({
|
||||||
|
name,
|
||||||
|
version: null,
|
||||||
|
installDate: new Date().toISOString(),
|
||||||
|
lastUpdated: new Date().toISOString(),
|
||||||
|
source: 'unknown',
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
manifest.modules = updatedModules;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge other updates
|
||||||
|
if (updates.version) {
|
||||||
|
manifest.installation.version = updates.version;
|
||||||
|
}
|
||||||
|
if (updates.installDate) {
|
||||||
|
manifest.installation.installDate = updates.installDate;
|
||||||
|
}
|
||||||
|
manifest.installation.lastUpdated = new Date().toISOString();
|
||||||
|
|
||||||
|
if (updates.ides) {
|
||||||
|
manifest.ides = updates.ides;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle per-module version updates
|
||||||
|
if (updates.moduleVersions) {
|
||||||
|
for (const [moduleName, versionInfo] of Object.entries(updates.moduleVersions)) {
|
||||||
|
const moduleIndex = manifest.modules.findIndex((m) => m.name === moduleName);
|
||||||
|
if (moduleIndex !== -1) {
|
||||||
|
manifest.modules[moduleIndex] = {
|
||||||
|
...manifest.modules[moduleIndex],
|
||||||
|
...versionInfo,
|
||||||
|
lastUpdated: new Date().toISOString(),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle adding a new module with version info
|
||||||
|
if (updates.addModule) {
|
||||||
|
const { name, version, source, npmPackage, repoUrl } = updates.addModule;
|
||||||
|
const existing = manifest.modules.find((m) => m.name === name);
|
||||||
|
if (!existing) {
|
||||||
|
manifest.modules.push({
|
||||||
|
name,
|
||||||
|
version: version || null,
|
||||||
|
installDate: new Date().toISOString(),
|
||||||
|
lastUpdated: new Date().toISOString(),
|
||||||
|
source: source || 'external',
|
||||||
|
npmPackage: npmPackage || null,
|
||||||
|
repoUrl: repoUrl || null,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const manifestPath = path.join(bmadDir, '_config', 'manifest.yaml');
|
const manifestPath = path.join(bmadDir, '_config', 'manifest.yaml');
|
||||||
await fs.ensureDir(path.dirname(manifestPath));
|
await fs.ensureDir(path.dirname(manifestPath));
|
||||||
|
|
||||||
// Clean the manifest data to remove any non-serializable values
|
// Clean the manifest data to remove any non-serializable values
|
||||||
const cleanManifestData = structuredClone(manifestData);
|
const cleanManifestData = structuredClone(manifest);
|
||||||
|
|
||||||
const yamlContent = yaml.stringify(cleanManifestData, {
|
const yamlContent = yaml.stringify(cleanManifestData, {
|
||||||
indent: 2,
|
indent: 2,
|
||||||
|
|
@ -115,16 +212,61 @@ class Manifest {
|
||||||
const content = yamlContent.endsWith('\n') ? yamlContent : yamlContent + '\n';
|
const content = yamlContent.endsWith('\n') ? yamlContent : yamlContent + '\n';
|
||||||
await fs.writeFile(manifestPath, content, 'utf8');
|
await fs.writeFile(manifestPath, content, 'utf8');
|
||||||
|
|
||||||
return manifest;
|
// Return the flattened format for compatibility
|
||||||
|
return this._flattenManifest(manifest);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Add a module to the manifest
|
* Read raw manifest data without flattening
|
||||||
|
* @param {string} bmadDir - Path to bmad directory
|
||||||
|
* @returns {Object|null} Raw manifest data or null if not found
|
||||||
|
*/
|
||||||
|
async _readRaw(bmadDir) {
|
||||||
|
const yamlPath = path.join(bmadDir, '_config', 'manifest.yaml');
|
||||||
|
const yaml = require('yaml');
|
||||||
|
|
||||||
|
if (await fs.pathExists(yamlPath)) {
|
||||||
|
try {
|
||||||
|
const content = await fs.readFile(yamlPath, 'utf8');
|
||||||
|
return yaml.parse(content);
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to read YAML manifest:', error.message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flatten manifest for backward compatibility
|
||||||
|
* @param {Object} manifest - Raw manifest data
|
||||||
|
* @returns {Object} Flattened manifest
|
||||||
|
*/
|
||||||
|
_flattenManifest(manifest) {
|
||||||
|
const modules = manifest.modules || [];
|
||||||
|
const moduleNames = modules.map((m) => (typeof m === 'string' ? m : m.name));
|
||||||
|
const hasDetailedModules = modules.length > 0 && typeof modules[0] === 'object';
|
||||||
|
|
||||||
|
return {
|
||||||
|
version: manifest.installation?.version,
|
||||||
|
installDate: manifest.installation?.installDate,
|
||||||
|
lastUpdated: manifest.installation?.lastUpdated,
|
||||||
|
modules: moduleNames,
|
||||||
|
modulesDetailed: hasDetailedModules ? modules : null,
|
||||||
|
customModules: manifest.customModules || [],
|
||||||
|
ides: manifest.ides || [],
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a module to the manifest with optional version info
|
||||||
|
* If module already exists, update its version info
|
||||||
* @param {string} bmadDir - Path to bmad directory
|
* @param {string} bmadDir - Path to bmad directory
|
||||||
* @param {string} moduleName - Module name to add
|
* @param {string} moduleName - Module name to add
|
||||||
|
* @param {Object} options - Optional version info
|
||||||
*/
|
*/
|
||||||
async addModule(bmadDir, moduleName) {
|
async addModule(bmadDir, moduleName, options = {}) {
|
||||||
const manifest = await this.read(bmadDir);
|
const manifest = await this._readRaw(bmadDir);
|
||||||
if (!manifest) {
|
if (!manifest) {
|
||||||
throw new Error('No manifest found');
|
throw new Error('No manifest found');
|
||||||
}
|
}
|
||||||
|
|
@ -133,10 +275,33 @@ class Manifest {
|
||||||
manifest.modules = [];
|
manifest.modules = [];
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!manifest.modules.includes(moduleName)) {
|
const existingIndex = manifest.modules.findIndex((m) => m.name === moduleName);
|
||||||
manifest.modules.push(moduleName);
|
|
||||||
await this.update(bmadDir, { modules: manifest.modules });
|
if (existingIndex === -1) {
|
||||||
|
// Module doesn't exist, add it
|
||||||
|
manifest.modules.push({
|
||||||
|
name: moduleName,
|
||||||
|
version: options.version || null,
|
||||||
|
installDate: new Date().toISOString(),
|
||||||
|
lastUpdated: new Date().toISOString(),
|
||||||
|
source: options.source || 'unknown',
|
||||||
|
npmPackage: options.npmPackage || null,
|
||||||
|
repoUrl: options.repoUrl || null,
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
// Module exists, update its version info
|
||||||
|
const existing = manifest.modules[existingIndex];
|
||||||
|
manifest.modules[existingIndex] = {
|
||||||
|
...existing,
|
||||||
|
version: options.version === undefined ? existing.version : options.version,
|
||||||
|
source: options.source || existing.source,
|
||||||
|
npmPackage: options.npmPackage === undefined ? existing.npmPackage : options.npmPackage,
|
||||||
|
repoUrl: options.repoUrl === undefined ? existing.repoUrl : options.repoUrl,
|
||||||
|
lastUpdated: new Date().toISOString(),
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
await this._writeRaw(bmadDir, manifest);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -145,18 +310,93 @@ class Manifest {
|
||||||
* @param {string} moduleName - Module name to remove
|
* @param {string} moduleName - Module name to remove
|
||||||
*/
|
*/
|
||||||
async removeModule(bmadDir, moduleName) {
|
async removeModule(bmadDir, moduleName) {
|
||||||
const manifest = await this.read(bmadDir);
|
const manifest = await this._readRaw(bmadDir);
|
||||||
if (!manifest || !manifest.modules) {
|
if (!manifest || !manifest.modules) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const index = manifest.modules.indexOf(moduleName);
|
const index = manifest.modules.findIndex((m) => m.name === moduleName);
|
||||||
if (index !== -1) {
|
if (index !== -1) {
|
||||||
manifest.modules.splice(index, 1);
|
manifest.modules.splice(index, 1);
|
||||||
await this.update(bmadDir, { modules: manifest.modules });
|
await this._writeRaw(bmadDir, manifest);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update a single module's version info
|
||||||
|
* @param {string} bmadDir - Path to bmad directory
|
||||||
|
* @param {string} moduleName - Module name
|
||||||
|
* @param {Object} versionInfo - Version info to update
|
||||||
|
*/
|
||||||
|
async updateModuleVersion(bmadDir, moduleName, versionInfo) {
|
||||||
|
const manifest = await this._readRaw(bmadDir);
|
||||||
|
if (!manifest || !manifest.modules) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const index = manifest.modules.findIndex((m) => m.name === moduleName);
|
||||||
|
if (index !== -1) {
|
||||||
|
manifest.modules[index] = {
|
||||||
|
...manifest.modules[index],
|
||||||
|
...versionInfo,
|
||||||
|
lastUpdated: new Date().toISOString(),
|
||||||
|
};
|
||||||
|
await this._writeRaw(bmadDir, manifest);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get version info for a specific module
|
||||||
|
* @param {string} bmadDir - Path to bmad directory
|
||||||
|
* @param {string} moduleName - Module name
|
||||||
|
* @returns {Object|null} Module version info or null
|
||||||
|
*/
|
||||||
|
async getModuleVersion(bmadDir, moduleName) {
|
||||||
|
const manifest = await this._readRaw(bmadDir);
|
||||||
|
if (!manifest || !manifest.modules) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return manifest.modules.find((m) => m.name === moduleName) || null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all modules with their version info
|
||||||
|
* @param {string} bmadDir - Path to bmad directory
|
||||||
|
* @returns {Array} Array of module info objects
|
||||||
|
*/
|
||||||
|
async getAllModuleVersions(bmadDir) {
|
||||||
|
const manifest = await this._readRaw(bmadDir);
|
||||||
|
if (!manifest || !manifest.modules) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
return manifest.modules;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Write raw manifest data to file
|
||||||
|
* @param {string} bmadDir - Path to bmad directory
|
||||||
|
* @param {Object} manifestData - Raw manifest data to write
|
||||||
|
*/
|
||||||
|
async _writeRaw(bmadDir, manifestData) {
|
||||||
|
const yaml = require('yaml');
|
||||||
|
const manifestPath = path.join(bmadDir, '_config', 'manifest.yaml');
|
||||||
|
|
||||||
|
await fs.ensureDir(path.dirname(manifestPath));
|
||||||
|
|
||||||
|
const cleanManifestData = structuredClone(manifestData);
|
||||||
|
|
||||||
|
const yamlContent = yaml.stringify(cleanManifestData, {
|
||||||
|
indent: 2,
|
||||||
|
lineWidth: 0,
|
||||||
|
sortKeys: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
const content = yamlContent.endsWith('\n') ? yamlContent : yamlContent + '\n';
|
||||||
|
await fs.writeFile(manifestPath, content, 'utf8');
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Add an IDE configuration to the manifest
|
* Add an IDE configuration to the manifest
|
||||||
* @param {string} bmadDir - Path to bmad directory
|
* @param {string} bmadDir - Path to bmad directory
|
||||||
|
|
@ -585,6 +825,212 @@ class Manifest {
|
||||||
await this.update(bmadDir, { customModules: manifest.customModules });
|
await this.update(bmadDir, { customModules: manifest.customModules });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get module version info from source
|
||||||
|
* @param {string} moduleName - Module name/code
|
||||||
|
* @param {string} bmadDir - Path to bmad directory
|
||||||
|
* @param {string} moduleSourcePath - Optional source path for custom modules
|
||||||
|
* @returns {Object} Version info object with version, source, npmPackage, repoUrl
|
||||||
|
*/
|
||||||
|
async getModuleVersionInfo(moduleName, bmadDir, moduleSourcePath = null) {
|
||||||
|
const os = require('node:os');
|
||||||
|
|
||||||
|
// Built-in modules use BMad version (only core and bmm are in BMAD-METHOD repo)
|
||||||
|
if (['core', 'bmm'].includes(moduleName)) {
|
||||||
|
const bmadVersion = require(path.join(getProjectRoot(), 'package.json')).version;
|
||||||
|
return {
|
||||||
|
version: bmadVersion,
|
||||||
|
source: 'built-in',
|
||||||
|
npmPackage: null,
|
||||||
|
repoUrl: null,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this is an external official module
|
||||||
|
const { ExternalModuleManager } = require('../modules/external-manager');
|
||||||
|
const extMgr = new ExternalModuleManager();
|
||||||
|
const moduleInfo = await extMgr.getModuleByCode(moduleName);
|
||||||
|
|
||||||
|
if (moduleInfo) {
|
||||||
|
// External module - try to get version from npm registry first, then fall back to cache
|
||||||
|
let version = null;
|
||||||
|
|
||||||
|
if (moduleInfo.npmPackage) {
|
||||||
|
// Fetch version from npm registry
|
||||||
|
try {
|
||||||
|
version = await this.fetchNpmVersion(moduleInfo.npmPackage);
|
||||||
|
} catch {
|
||||||
|
// npm fetch failed, try cache as fallback
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If npm didn't work, try reading from cached repo's package.json
|
||||||
|
if (!version) {
|
||||||
|
const cacheDir = path.join(os.homedir(), '.bmad', 'cache', 'external-modules', moduleName);
|
||||||
|
const packageJsonPath = path.join(cacheDir, 'package.json');
|
||||||
|
|
||||||
|
if (await fs.pathExists(packageJsonPath)) {
|
||||||
|
try {
|
||||||
|
const pkg = require(packageJsonPath);
|
||||||
|
version = pkg.version;
|
||||||
|
} catch (error) {
|
||||||
|
console.warn(`Failed to read package.json for ${moduleName}: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
version: version,
|
||||||
|
source: 'external',
|
||||||
|
npmPackage: moduleInfo.npmPackage || null,
|
||||||
|
repoUrl: moduleInfo.url || null,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Custom module - check cache directory
|
||||||
|
const cacheDir = path.join(bmadDir, '_config', 'custom', moduleName);
|
||||||
|
const moduleYamlPath = path.join(cacheDir, 'module.yaml');
|
||||||
|
|
||||||
|
if (await fs.pathExists(moduleYamlPath)) {
|
||||||
|
try {
|
||||||
|
const yamlContent = await fs.readFile(moduleYamlPath, 'utf8');
|
||||||
|
const moduleConfig = yaml.parse(yamlContent);
|
||||||
|
return {
|
||||||
|
version: moduleConfig.version || null,
|
||||||
|
source: 'custom',
|
||||||
|
npmPackage: moduleConfig.npmPackage || null,
|
||||||
|
repoUrl: moduleConfig.repoUrl || null,
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
console.warn(`Failed to read module.yaml for ${moduleName}: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unknown module
|
||||||
|
return {
|
||||||
|
version: null,
|
||||||
|
source: 'unknown',
|
||||||
|
npmPackage: null,
|
||||||
|
repoUrl: null,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Fetch latest version from npm for a package
|
||||||
|
* @param {string} packageName - npm package name
|
||||||
|
* @returns {string|null} Latest version or null
|
||||||
|
*/
|
||||||
|
async fetchNpmVersion(packageName) {
|
||||||
|
try {
|
||||||
|
const https = require('node:https');
|
||||||
|
const { execSync } = require('node:child_process');
|
||||||
|
|
||||||
|
// Try using npm view first (more reliable)
|
||||||
|
try {
|
||||||
|
const result = execSync(`npm view ${packageName} version`, {
|
||||||
|
encoding: 'utf8',
|
||||||
|
stdio: 'pipe',
|
||||||
|
timeout: 10_000,
|
||||||
|
});
|
||||||
|
return result.trim();
|
||||||
|
} catch {
|
||||||
|
// Fallback to npm registry API
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
https
|
||||||
|
.get(`https://registry.npmjs.org/${packageName}`, (res) => {
|
||||||
|
let data = '';
|
||||||
|
res.on('data', (chunk) => (data += chunk));
|
||||||
|
res.on('end', () => {
|
||||||
|
try {
|
||||||
|
const pkg = JSON.parse(data);
|
||||||
|
resolve(pkg['dist-tags']?.latest || pkg.version || null);
|
||||||
|
} catch {
|
||||||
|
resolve(null);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
})
|
||||||
|
.on('error', () => resolve(null));
|
||||||
|
});
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check for available updates for installed modules
|
||||||
|
* @param {string} bmadDir - Path to bmad directory
|
||||||
|
* @returns {Array} Array of update info objects
|
||||||
|
*/
|
||||||
|
async checkForUpdates(bmadDir) {
|
||||||
|
const modules = await this.getAllModuleVersions(bmadDir);
|
||||||
|
const updates = [];
|
||||||
|
|
||||||
|
for (const module of modules) {
|
||||||
|
if (!module.npmPackage) {
|
||||||
|
continue; // Skip modules without npm package (built-in)
|
||||||
|
}
|
||||||
|
|
||||||
|
const latestVersion = await this.fetchNpmVersion(module.npmPackage);
|
||||||
|
if (!latestVersion) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (module.version !== latestVersion) {
|
||||||
|
updates.push({
|
||||||
|
name: module.name,
|
||||||
|
installedVersion: module.version,
|
||||||
|
latestVersion: latestVersion,
|
||||||
|
npmPackage: module.npmPackage,
|
||||||
|
updateAvailable: true,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return updates;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compare two semantic versions
|
||||||
|
* @param {string} v1 - First version
|
||||||
|
* @param {string} v2 - Second version
|
||||||
|
* @returns {number} -1 if v1 < v2, 0 if v1 == v2, 1 if v1 > v2
|
||||||
|
*/
|
||||||
|
compareVersions(v1, v2) {
|
||||||
|
if (!v1 || !v2) return 0;
|
||||||
|
|
||||||
|
const normalize = (v) => {
|
||||||
|
// Remove leading 'v' if present
|
||||||
|
v = v.replace(/^v/, '');
|
||||||
|
// Handle prerelease tags
|
||||||
|
const parts = v.split('-');
|
||||||
|
const main = parts[0].split('.');
|
||||||
|
const prerelease = parts[1];
|
||||||
|
return { main, prerelease };
|
||||||
|
};
|
||||||
|
|
||||||
|
const n1 = normalize(v1);
|
||||||
|
const n2 = normalize(v2);
|
||||||
|
|
||||||
|
// Compare main version parts
|
||||||
|
for (let i = 0; i < 3; i++) {
|
||||||
|
const num1 = parseInt(n1.main[i] || '0', 10);
|
||||||
|
const num2 = parseInt(n2.main[i] || '0', 10);
|
||||||
|
if (num1 !== num2) {
|
||||||
|
return num1 < num2 ? -1 : 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If main versions are equal, compare prerelease
|
||||||
|
if (n1.prerelease && n2.prerelease) {
|
||||||
|
return n1.prerelease < n2.prerelease ? -1 : n1.prerelease > n2.prerelease ? 1 : 0;
|
||||||
|
}
|
||||||
|
if (n1.prerelease) return -1; // Prerelease is older than stable
|
||||||
|
if (n2.prerelease) return 1; // Stable is newer than prerelease
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = { Manifest };
|
module.exports = { Manifest };
|
||||||
|
|
|
||||||
|
|
@ -2,9 +2,9 @@
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
|
||||||
Standardize IDE installers to use **flat file naming** and centralize duplicated code in shared utilities.
|
Standardize IDE installers to use **flat file naming** with **underscores** (Windows-compatible) and centralize duplicated code in shared utilities.
|
||||||
|
|
||||||
**Key Rule: Only folder-based IDEs convert to colon format. IDEs already using dashes keep using dashes.**
|
**Key Rule: All IDEs use underscore format for Windows compatibility (colons don't work on Windows).**
|
||||||
|
|
||||||
## Current State Analysis
|
## Current State Analysis
|
||||||
|
|
||||||
|
|
@ -15,10 +15,10 @@ Standardize IDE installers to use **flat file naming** and centralize duplicated
|
||||||
| **claude-code** | Hierarchical | `.claude/commands/bmad/{module}/agents/{name}.md` |
|
| **claude-code** | Hierarchical | `.claude/commands/bmad/{module}/agents/{name}.md` |
|
||||||
| **cursor** | Hierarchical | `.cursor/commands/bmad/{module}/agents/{name}.md` |
|
| **cursor** | Hierarchical | `.cursor/commands/bmad/{module}/agents/{name}.md` |
|
||||||
| **crush** | Hierarchical | `.crush/commands/bmad/{module}/agents/{name}.md` |
|
| **crush** | Hierarchical | `.crush/commands/bmad/{module}/agents/{name}.md` |
|
||||||
| **antigravity** | Flattened (dashes) | `.agent/workflows/bmad-module-agents-name.md` |
|
| **antigravity** | Flattened (underscores) | `.agent/workflows/bmad_module_agents_name.md` |
|
||||||
| **codex** | Flattened (dashes) | `~/.codex/prompts/bmad-module-agents-name.md` |
|
| **codex** | Flattened (underscores) | `~/.codex/prompts/bmad_module_agents_name.md` |
|
||||||
| **cline** | Flattened (dashes) | `.clinerules/workflows/bmad-module-type-name.md` |
|
| **cline** | Flattened (underscores) | `.clinerules/workflows/bmad_module_type_name.md` |
|
||||||
| **roo** | Flattened (dashes) | `.roo/commands/bmad-{module}-agent-{name}.md` |
|
| **roo** | Flattened (underscores) | `.roo/commands/bmad_module_agent_name.md` |
|
||||||
| **auggie** | Hybrid | `.augment/commands/bmad/agents/{module}-{name}.md` |
|
| **auggie** | Hybrid | `.augment/commands/bmad/agents/{module}-{name}.md` |
|
||||||
| **iflow** | Hybrid | `.iflow/commands/bmad/agents/{module}-{name}.md` |
|
| **iflow** | Hybrid | `.iflow/commands/bmad/agents/{module}-{name}.md` |
|
||||||
| **trae** | Different (rules) | `.trae/rules/bmad-agent-{module}-{name}.md` |
|
| **trae** | Different (rules) | `.trae/rules/bmad-agent-{module}-{name}.md` |
|
||||||
|
|
@ -40,35 +40,24 @@ All currently create artifacts with **nested relative paths** like `{module}/age
|
||||||
|
|
||||||
## Target Standardization
|
## Target Standardization
|
||||||
|
|
||||||
### For Folder-Based IDEs (convert to colon format)
|
### For All IDEs (underscore format - Windows-compatible)
|
||||||
|
|
||||||
**IDEs affected:** claude-code, cursor, crush
|
**IDEs affected:** claude-code, cursor, crush, antigravity, codex, cline, roo
|
||||||
|
|
||||||
```
|
```
|
||||||
Format: bmad:{module}:{type}:{name}.md
|
Format: bmad_{module}_{type}_{name}.md
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
- Agent: bmad:bmm:agents:pm.md
|
- Agent: bmad_bmm_agents_pm.md
|
||||||
- Agent: bmad:core:agents:dev.md
|
- Agent: bmad_core_agents_dev.md
|
||||||
- Workflow: bmad:bmm:workflows:correct-course.md
|
- Workflow: bmad_bmm_workflows_correct-course.md
|
||||||
- Task: bmad:bmm:tasks:bmad-help.md
|
- Task: bmad_bmm_tasks_bmad-help.md
|
||||||
- Tool: bmad:core:tools:code-review.md
|
- Tool: bmad_core_tools_code-review.md
|
||||||
- Custom: bmad:custom:agents:fred-commit-poet.md
|
- Custom: bmad_custom_agents_fred-commit-poet.md
|
||||||
```
|
```
|
||||||
|
|
||||||
### For Already-Flat IDEs (keep using dashes)
|
**Note:** Type segments (agents, workflows, tasks, tools) are filtered out from names:
|
||||||
|
- `bmm/agents/pm.md` → `bmad_bmm_pm.md` (not `bmad_bmm_agents_pm.md`)
|
||||||
**IDEs affected:** antigravity, codex, cline, roo
|
|
||||||
|
|
||||||
```
|
|
||||||
Format: bmad-{module}-{type}-{name}.md
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
- Agent: bmad-bmm-agents-pm.md
|
|
||||||
- Workflow: bmad-bmm-workflows-correct-course.md
|
|
||||||
- Task: bmad-bmm-tasks-bmad-help.md
|
|
||||||
- Custom: bmad-custom-agents-fred-commit-poet.md
|
|
||||||
```
|
|
||||||
|
|
||||||
### For Hybrid IDEs (keep as-is)
|
### For Hybrid IDEs (keep as-is)
|
||||||
|
|
||||||
|
|
@ -88,57 +77,50 @@ These use `{module}-{name}.md` format within subdirectories - keep as-is.
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
/**
|
/**
|
||||||
* Convert hierarchical path to flat colon-separated name (for folder-based IDEs)
|
* Convert hierarchical path to flat underscore-separated name (Windows-compatible)
|
||||||
* @param {string} module - Module name (e.g., 'bmm', 'core')
|
* @param {string} module - Module name (e.g., 'bmm', 'core')
|
||||||
* @param {string} type - Artifact type ('agents', 'workflows', 'tasks', 'tools')
|
* @param {string} type - Artifact type ('agents', 'workflows', 'tasks', 'tools') - filtered out
|
||||||
* @param {string} name - Artifact name (e.g., 'pm', 'correct-course')
|
* @param {string} name - Artifact name (e.g., 'pm', 'correct-course')
|
||||||
* @returns {string} Flat filename like 'bmad:bmm:agents:pm.md'
|
* @returns {string} Flat filename like 'bmad_bmm_pm.md'
|
||||||
*/
|
*/
|
||||||
function toColonName(module, type, name) {
|
function toUnderscoreName(module, type, name) {
|
||||||
return `bmad:${module}:${type}:${name}.md`;
|
return `bmad_${module}_${name}.md`;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Convert relative path to flat colon-separated name (for folder-based IDEs)
|
* Convert relative path to flat underscore-separated name (Windows-compatible)
|
||||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
||||||
* @returns {string} Flat filename like 'bmad:bmm:agents:pm.md'
|
* @returns {string} Flat filename like 'bmad_bmm_pm.md'
|
||||||
*/
|
*/
|
||||||
function toColonPath(relativePath) {
|
function toUnderscorePath(relativePath) {
|
||||||
const withoutExt = relativePath.replace('.md', '');
|
const withoutExt = relativePath.replace('.md', '');
|
||||||
const parts = withoutExt.split(/[\/\\]/);
|
const parts = withoutExt.split(/[\/\\]/);
|
||||||
return `bmad:${parts.join(':')}.md`;
|
// Filter out type segments (agents, workflows, tasks, tools)
|
||||||
|
const filtered = parts.filter((p) => !TYPE_SEGMENTS.includes(p));
|
||||||
|
return `bmad_${filtered.join('_')}.md`;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Convert hierarchical path to flat dash-separated name (for flat IDEs)
|
* Create custom agent underscore name
|
||||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
|
||||||
* @returns {string} Flat filename like 'bmad-bmm-agents-pm.md'
|
|
||||||
*/
|
|
||||||
function toDashPath(relativePath) {
|
|
||||||
const withoutExt = relativePath.replace('.md', '');
|
|
||||||
const parts = withoutExt.split(/[\/\\]/);
|
|
||||||
return `bmad-${parts.join('-')}.md`;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Create custom agent colon name
|
|
||||||
* @param {string} agentName - Custom agent name
|
* @param {string} agentName - Custom agent name
|
||||||
* @returns {string} Flat filename like 'bmad:custom:agents:fred-commit-poet.md'
|
* @returns {string} Flat filename like 'bmad_custom_fred-commit-poet.md'
|
||||||
*/
|
*/
|
||||||
function customAgentColonName(agentName) {
|
function customAgentUnderscoreName(agentName) {
|
||||||
return `bmad:custom:agents:${agentName}.md`;
|
return `bmad_custom_${agentName}.md`;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
// Backward compatibility aliases
|
||||||
* Create custom agent dash name
|
const toColonName = toUnderscoreName;
|
||||||
* @param {string} agentName - Custom agent name
|
const toColonPath = toUnderscorePath;
|
||||||
* @returns {string} Flat filename like 'bmad-custom-agents-fred-commit-poet.md'
|
const toDashPath = toUnderscorePath;
|
||||||
*/
|
const customAgentColonName = customAgentUnderscoreName;
|
||||||
function customAgentDashName(agentName) {
|
const customAgentDashName = customAgentUnderscoreName;
|
||||||
return `bmad-custom-agents-${agentName}.md`;
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
|
toUnderscoreName,
|
||||||
|
toUnderscorePath,
|
||||||
|
customAgentUnderscoreName,
|
||||||
|
// Backward compatibility
|
||||||
toColonName,
|
toColonName,
|
||||||
toColonPath,
|
toColonPath,
|
||||||
toDashPath,
|
toDashPath,
|
||||||
|
|
@ -157,34 +139,26 @@ module.exports = {
|
||||||
**Changes:**
|
**Changes:**
|
||||||
1. Import path utilities
|
1. Import path utilities
|
||||||
2. Change `relativePath` to use flat format
|
2. Change `relativePath` to use flat format
|
||||||
3. Add method `writeColonArtifacts()` for folder-based IDEs
|
3. Add method `writeColonArtifacts()` for folder-based IDEs (uses underscore)
|
||||||
4. Add method `writeDashArtifacts()` for flat IDEs
|
4. Add method `writeDashArtifacts()` for flat IDEs (uses underscore)
|
||||||
|
|
||||||
### Phase 3: Update Folder-Based IDEs
|
### Phase 3: Update All IDEs
|
||||||
|
|
||||||
**Files to modify:**
|
**Files to modify:**
|
||||||
- `claude-code.js`
|
- `claude-code.js`
|
||||||
- `cursor.js`
|
- `cursor.js`
|
||||||
- `crush.js`
|
- `crush.js`
|
||||||
|
|
||||||
**Changes:**
|
|
||||||
1. Import `toColonPath`, `customAgentColonName` from path-utils
|
|
||||||
2. Change from hierarchical to flat colon naming
|
|
||||||
3. Update cleanup to handle flat structure
|
|
||||||
|
|
||||||
### Phase 4: Update Flat IDEs
|
|
||||||
|
|
||||||
**Files to modify:**
|
|
||||||
- `antigravity.js`
|
- `antigravity.js`
|
||||||
- `codex.js`
|
- `codex.js`
|
||||||
- `cline.js`
|
- `cline.js`
|
||||||
- `roo.js`
|
- `roo.js`
|
||||||
|
|
||||||
**Changes:**
|
**Changes:**
|
||||||
1. Import `toDashPath`, `customAgentDashName` from path-utils
|
1. Import utilities from path-utils
|
||||||
2. Replace local `flattenFilename()` with shared `toDashPath()`
|
2. Change from hierarchical to flat underscore naming
|
||||||
|
3. Update cleanup to handle flat structure (`startsWith('bmad')`)
|
||||||
|
|
||||||
### Phase 5: Update Base Class
|
### Phase 4: Update Base Class
|
||||||
|
|
||||||
**File:** `_base-ide.js`
|
**File:** `_base-ide.js`
|
||||||
|
|
||||||
|
|
@ -195,24 +169,23 @@ module.exports = {
|
||||||
## Migration Checklist
|
## Migration Checklist
|
||||||
|
|
||||||
### New Files
|
### New Files
|
||||||
- [ ] Create `shared/path-utils.js`
|
- [x] Create `shared/path-utils.js`
|
||||||
|
|
||||||
### Folder-Based IDEs (convert to colon format)
|
### All IDEs (convert to underscore format)
|
||||||
- [ ] Update `shared/agent-command-generator.js` - add `writeColonArtifacts()`
|
- [x] Update `shared/agent-command-generator.js` - update for underscore
|
||||||
- [ ] Update `shared/task-tool-command-generator.js` - add `writeColonArtifacts()`
|
- [x] Update `shared/task-tool-command-generator.js` - update for underscore
|
||||||
- [ ] Update `shared/workflow-command-generator.js` - add `writeColonArtifacts()`
|
- [x] Update `shared/workflow-command-generator.js` - update for underscore
|
||||||
- [ ] Update `claude-code.js` - convert to colon format
|
- [x] Update `claude-code.js` - convert to underscore format
|
||||||
- [ ] Update `cursor.js` - convert to colon format
|
- [x] Update `cursor.js` - convert to underscore format
|
||||||
- [ ] Update `crush.js` - convert to colon format
|
- [x] Update `crush.js` - convert to underscore format
|
||||||
|
- [ ] Update `antigravity.js` - use underscore format
|
||||||
|
- [ ] Update `codex.js` - use underscore format
|
||||||
|
- [ ] Update `cline.js` - use underscore format
|
||||||
|
- [ ] Update `roo.js` - use underscore format
|
||||||
|
|
||||||
### Flat IDEs (standardize dash format)
|
### CSV Command Files
|
||||||
- [ ] Update `shared/agent-command-generator.js` - add `writeDashArtifacts()`
|
- [x] Update `src/core/module-help.csv` - change colons to underscores
|
||||||
- [ ] Update `shared/task-tool-command-generator.js` - add `writeDashArtifacts()`
|
- [x] Update `src/bmm/module-help.csv` - change colons to underscores
|
||||||
- [ ] Update `shared/workflow-command-generator.js` - add `writeDashArtifacts()`
|
|
||||||
- [ ] Update `antigravity.js` - use shared `toDashPath()`
|
|
||||||
- [ ] Update `codex.js` - use shared `toDashPath()`
|
|
||||||
- [ ] Update `cline.js` - use shared `toDashPath()`
|
|
||||||
- [ ] Update `roo.js` - use shared `toDashPath()`
|
|
||||||
|
|
||||||
### Base Class
|
### Base Class
|
||||||
- [ ] Update `_base-ide.js` - add deprecation notice
|
- [ ] Update `_base-ide.js` - add deprecation notice
|
||||||
|
|
@ -228,7 +201,8 @@ module.exports = {
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
|
|
||||||
1. **Keep segments**: agents, workflows, tasks, tools all become part of the flat name
|
1. **Filter type segments**: agents, workflows, tasks, tools are filtered out from flat names
|
||||||
2. **Colon vs Dash**: Colons for folder-based IDEs converting to flat, dashes for already-flat IDEs
|
2. **Underscore format**: Universal underscore format for Windows compatibility
|
||||||
3. **Custom agents**: Follow the same pattern as regular agents
|
3. **Custom agents**: Follow the same pattern as regular agents
|
||||||
4. **Backward compatibility**: Cleanup will remove old folder structure
|
4. **Backward compatibility**: Old function names kept as aliases
|
||||||
|
5. **Cleanup**: Will remove old `bmad:` format files on next install
|
||||||
|
|
|
||||||
|
|
@ -127,8 +127,8 @@ class AntigravitySetup extends BaseIdeSetup {
|
||||||
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||||
|
|
||||||
// Write agent launcher files with FLATTENED naming using shared utility
|
// Write agent launcher files with FLATTENED naming using shared utility
|
||||||
// Antigravity ignores directory structure, so we flatten to: bmad-module-name.md
|
// Antigravity ignores directory structure, so we flatten to: bmad_module_name.md
|
||||||
// This creates slash commands like /bmad-bmm-dev instead of /dev
|
// This creates slash commands like /bmad_bmm_dev instead of /dev
|
||||||
const agentCount = await agentGen.writeDashArtifacts(bmadWorkflowsDir, agentArtifacts);
|
const agentCount = await agentGen.writeDashArtifacts(bmadWorkflowsDir, agentArtifacts);
|
||||||
|
|
||||||
// Process Antigravity specific injections for installed modules
|
// Process Antigravity specific injections for installed modules
|
||||||
|
|
@ -167,7 +167,7 @@ class AntigravitySetup extends BaseIdeSetup {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
console.log(chalk.dim(` - Workflows directory: ${path.relative(projectDir, bmadWorkflowsDir)}`));
|
console.log(chalk.dim(` - Workflows directory: ${path.relative(projectDir, bmadWorkflowsDir)}`));
|
||||||
console.log(chalk.yellow(`\n Note: Antigravity uses flattened slash commands (e.g., /bmad-module-agents-name)`));
|
console.log(chalk.yellow(`\n Note: Antigravity uses flattened slash commands (e.g., /bmad_module_agents_name)`));
|
||||||
|
|
||||||
return {
|
return {
|
||||||
success: true,
|
success: true,
|
||||||
|
|
@ -455,7 +455,7 @@ usage: |
|
||||||
|
|
||||||
⚠️ **IMPORTANT**: Run @${agentPath} to load the complete agent before using this launcher!`;
|
⚠️ **IMPORTANT**: Run @${agentPath} to load the complete agent before using this launcher!`;
|
||||||
|
|
||||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||||
const fileName = customAgentDashName(agentName);
|
const fileName = customAgentDashName(agentName);
|
||||||
const launcherPath = path.join(bmadWorkflowsDir, fileName);
|
const launcherPath = path.join(bmadWorkflowsDir, fileName);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -92,12 +92,12 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
||||||
async cleanup(projectDir) {
|
async cleanup(projectDir) {
|
||||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||||
|
|
||||||
// Remove any bmad:* files from the commands directory
|
// Remove any bmad* files from the commands directory (cleans up old bmad: and bmad- formats)
|
||||||
if (await fs.pathExists(commandsDir)) {
|
if (await fs.pathExists(commandsDir)) {
|
||||||
const entries = await fs.readdir(commandsDir);
|
const entries = await fs.readdir(commandsDir);
|
||||||
let removedCount = 0;
|
let removedCount = 0;
|
||||||
for (const entry of entries) {
|
for (const entry of entries) {
|
||||||
if (entry.startsWith('bmad:')) {
|
if (entry.startsWith('bmad')) {
|
||||||
await fs.remove(path.join(commandsDir, entry));
|
await fs.remove(path.join(commandsDir, entry));
|
||||||
removedCount++;
|
removedCount++;
|
||||||
}
|
}
|
||||||
|
|
@ -151,16 +151,16 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
||||||
const commandsDir = path.join(claudeDir, this.commandsDir);
|
const commandsDir = path.join(claudeDir, this.commandsDir);
|
||||||
await this.ensureDir(commandsDir);
|
await this.ensureDir(commandsDir);
|
||||||
|
|
||||||
// Use colon format: files written directly to commands dir (no bmad subfolder)
|
// Use underscore format: files written directly to commands dir (no bmad subfolder)
|
||||||
// Creates: .claude/commands/bmad:bmm:pm.md
|
// Creates: .claude/commands/bmad_bmm_pm.md
|
||||||
|
|
||||||
// Generate agent launchers using AgentCommandGenerator
|
// Generate agent launchers using AgentCommandGenerator
|
||||||
// This creates small launcher files that reference the actual agents in _bmad/
|
// This creates small launcher files that reference the actual agents in _bmad/
|
||||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||||
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||||
|
|
||||||
// Write agent launcher files using flat colon naming
|
// Write agent launcher files using flat underscore naming
|
||||||
// Creates files like: bmad:bmm:pm.md
|
// Creates files like: bmad_bmm_pm.md
|
||||||
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
||||||
|
|
||||||
// Process Claude Code specific injections for installed modules
|
// Process Claude Code specific injections for installed modules
|
||||||
|
|
@ -182,8 +182,8 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
||||||
const workflowGen = new WorkflowCommandGenerator(this.bmadFolderName);
|
const workflowGen = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||||
const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
|
const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
|
||||||
|
|
||||||
// Write workflow-command artifacts using flat colon naming
|
// Write workflow-command artifacts using flat underscore naming
|
||||||
// Creates files like: bmad:bmm:correct-course.md
|
// Creates files like: bmad_bmm_correct-course.md
|
||||||
const workflowCommandCount = await workflowGen.writeColonArtifacts(commandsDir, workflowArtifacts);
|
const workflowCommandCount = await workflowGen.writeColonArtifacts(commandsDir, workflowArtifacts);
|
||||||
|
|
||||||
// Generate task and tool commands from manifests (if they exist)
|
// Generate task and tool commands from manifests (if they exist)
|
||||||
|
|
@ -490,7 +490,7 @@ You must fully embody this agent's persona and follow all activation instruction
|
||||||
</agent-activation>
|
</agent-activation>
|
||||||
`;
|
`;
|
||||||
|
|
||||||
// Use colon format: bmad:custom:agents:fred-commit-poet.md
|
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||||
// Written directly to commands dir (no bmad subfolder)
|
// Written directly to commands dir (no bmad subfolder)
|
||||||
const launcherName = customAgentColonName(agentName);
|
const launcherName = customAgentColonName(agentName);
|
||||||
const launcherPath = path.join(commandsDir, launcherName);
|
const launcherPath = path.join(commandsDir, launcherName);
|
||||||
|
|
|
||||||
|
|
@ -57,8 +57,8 @@ class ClineSetup extends BaseIdeSetup {
|
||||||
console.log(chalk.cyan(' BMAD workflows are available as slash commands in Cline'));
|
console.log(chalk.cyan(' BMAD workflows are available as slash commands in Cline'));
|
||||||
console.log(chalk.dim(' Usage:'));
|
console.log(chalk.dim(' Usage:'));
|
||||||
console.log(chalk.dim(' - Type / to see available commands'));
|
console.log(chalk.dim(' - Type / to see available commands'));
|
||||||
console.log(chalk.dim(' - All BMAD items start with "bmad-"'));
|
console.log(chalk.dim(' - All BMAD items start with "bmad_"'));
|
||||||
console.log(chalk.dim(' - Example: /bmad-bmm-pm'));
|
console.log(chalk.dim(' - Example: /bmad_bmm_pm'));
|
||||||
|
|
||||||
return {
|
return {
|
||||||
success: true,
|
success: true,
|
||||||
|
|
@ -81,7 +81,7 @@ class ClineSetup extends BaseIdeSetup {
|
||||||
}
|
}
|
||||||
|
|
||||||
const entries = await fs.readdir(workflowsDir);
|
const entries = await fs.readdir(workflowsDir);
|
||||||
return entries.some((entry) => entry.startsWith('bmad-'));
|
return entries.some((entry) => entry.startsWith('bmad'));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -146,7 +146,7 @@ class ClineSetup extends BaseIdeSetup {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Flatten file path to bmad-module-type-name.md format
|
* Flatten file path to bmad_module_type_name.md format
|
||||||
* Uses shared toDashPath utility
|
* Uses shared toDashPath utility
|
||||||
*/
|
*/
|
||||||
flattenFilename(relativePath) {
|
flattenFilename(relativePath) {
|
||||||
|
|
@ -180,7 +180,7 @@ class ClineSetup extends BaseIdeSetup {
|
||||||
const entries = await fs.readdir(destDir);
|
const entries = await fs.readdir(destDir);
|
||||||
|
|
||||||
for (const entry of entries) {
|
for (const entry of entries) {
|
||||||
if (!entry.startsWith('bmad-')) {
|
if (!entry.startsWith('bmad')) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -246,7 +246,7 @@ The agent will follow the persona and instructions from the main agent file.
|
||||||
|
|
||||||
*Generated by BMAD Method*`;
|
*Generated by BMAD Method*`;
|
||||||
|
|
||||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||||
const fileName = customAgentDashName(agentName);
|
const fileName = customAgentDashName(agentName);
|
||||||
const launcherPath = path.join(workflowsDir, fileName);
|
const launcherPath = path.join(workflowsDir, fileName);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -86,7 +86,7 @@ class CodexSetup extends BaseIdeSetup {
|
||||||
await fs.ensureDir(destDir);
|
await fs.ensureDir(destDir);
|
||||||
await this.clearOldBmadFiles(destDir);
|
await this.clearOldBmadFiles(destDir);
|
||||||
|
|
||||||
// Collect artifacts and write using DASH format
|
// Collect artifacts and write using underscore format
|
||||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||||
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||||
const agentCount = await agentGen.writeDashArtifacts(destDir, agentArtifacts);
|
const agentCount = await agentGen.writeDashArtifacts(destDir, agentArtifacts);
|
||||||
|
|
@ -115,7 +115,7 @@ class CodexSetup extends BaseIdeSetup {
|
||||||
const { artifacts: workflowArtifacts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
|
const { artifacts: workflowArtifacts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
|
||||||
const workflowCount = await workflowGenerator.writeDashArtifacts(destDir, workflowArtifacts);
|
const workflowCount = await workflowGenerator.writeDashArtifacts(destDir, workflowArtifacts);
|
||||||
|
|
||||||
// Also write tasks using dash format
|
// Also write tasks using underscore format
|
||||||
const ttGen = new TaskToolCommandGenerator();
|
const ttGen = new TaskToolCommandGenerator();
|
||||||
const tasksWritten = await ttGen.writeDashArtifacts(destDir, taskArtifacts);
|
const tasksWritten = await ttGen.writeDashArtifacts(destDir, taskArtifacts);
|
||||||
|
|
||||||
|
|
@ -155,7 +155,7 @@ class CodexSetup extends BaseIdeSetup {
|
||||||
// Check global location
|
// Check global location
|
||||||
if (await fs.pathExists(globalDir)) {
|
if (await fs.pathExists(globalDir)) {
|
||||||
const entries = await fs.readdir(globalDir);
|
const entries = await fs.readdir(globalDir);
|
||||||
if (entries.some((entry) => entry.startsWith('bmad-'))) {
|
if (entries.some((entry) => entry.startsWith('bmad'))) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -163,7 +163,7 @@ class CodexSetup extends BaseIdeSetup {
|
||||||
// Check project-specific location
|
// Check project-specific location
|
||||||
if (await fs.pathExists(projectSpecificDir)) {
|
if (await fs.pathExists(projectSpecificDir)) {
|
||||||
const entries = await fs.readdir(projectSpecificDir);
|
const entries = await fs.readdir(projectSpecificDir);
|
||||||
if (entries.some((entry) => entry.startsWith('bmad-'))) {
|
if (entries.some((entry) => entry.startsWith('bmad'))) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -256,7 +256,7 @@ class CodexSetup extends BaseIdeSetup {
|
||||||
const entries = await fs.readdir(destDir);
|
const entries = await fs.readdir(destDir);
|
||||||
|
|
||||||
for (const entry of entries) {
|
for (const entry of entries) {
|
||||||
if (!entry.startsWith('bmad-')) {
|
if (!entry.startsWith('bmad')) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -292,7 +292,7 @@ class CodexSetup extends BaseIdeSetup {
|
||||||
chalk.dim(" To use with other projects, you'd need to copy the _bmad dir"),
|
chalk.dim(" To use with other projects, you'd need to copy the _bmad dir"),
|
||||||
'',
|
'',
|
||||||
chalk.green(' ✓ You can now use /commands in Codex CLI'),
|
chalk.green(' ✓ You can now use /commands in Codex CLI'),
|
||||||
chalk.dim(' Example: /bmad-bmm-pm'),
|
chalk.dim(' Example: /bmad_bmm_pm'),
|
||||||
chalk.dim(' Type / to see all available commands'),
|
chalk.dim(' Type / to see all available commands'),
|
||||||
'',
|
'',
|
||||||
chalk.bold.cyan('═'.repeat(70)),
|
chalk.bold.cyan('═'.repeat(70)),
|
||||||
|
|
@ -397,7 +397,7 @@ You must fully embody this agent's persona and follow all activation instruction
|
||||||
</agent-activation>
|
</agent-activation>
|
||||||
`;
|
`;
|
||||||
|
|
||||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||||
const fileName = customAgentDashName(agentName);
|
const fileName = customAgentDashName(agentName);
|
||||||
const launcherPath = path.join(destDir, fileName);
|
const launcherPath = path.join(destDir, fileName);
|
||||||
await fs.writeFile(launcherPath, launcherContent, 'utf8');
|
await fs.writeFile(launcherPath, launcherContent, 'utf8');
|
||||||
|
|
|
||||||
|
|
@ -35,26 +35,26 @@ class CrushSetup extends BaseIdeSetup {
|
||||||
const commandsDir = path.join(crushDir, this.commandsDir);
|
const commandsDir = path.join(crushDir, this.commandsDir);
|
||||||
await this.ensureDir(commandsDir);
|
await this.ensureDir(commandsDir);
|
||||||
|
|
||||||
// Use colon format: files written directly to commands dir (no bmad subfolder)
|
// Use underscore format: files written directly to commands dir (no bmad subfolder)
|
||||||
// Creates: .crush/commands/bmad:bmm:pm.md
|
// Creates: .crush/commands/bmad_bmm_pm.md
|
||||||
|
|
||||||
// Generate agent launchers
|
// Generate agent launchers
|
||||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||||
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||||
|
|
||||||
// Write agent launcher files using flat colon naming
|
// Write agent launcher files using flat underscore naming
|
||||||
// Creates files like: bmad:bmm:pm.md
|
// Creates files like: bmad_bmm_pm.md
|
||||||
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
||||||
|
|
||||||
// Get ALL workflows using the new workflow command generator
|
// Get ALL workflows using the new workflow command generator
|
||||||
const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
|
const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||||
const { artifacts: workflowArtifacts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
|
const { artifacts: workflowArtifacts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
|
||||||
|
|
||||||
// Write workflow-command artifacts using flat colon naming
|
// Write workflow-command artifacts using flat underscore naming
|
||||||
// Creates files like: bmad:bmm:correct-course.md
|
// Creates files like: bmad_bmm_correct-course.md
|
||||||
const workflowCount = await workflowGenerator.writeColonArtifacts(commandsDir, workflowArtifacts);
|
const workflowCount = await workflowGenerator.writeColonArtifacts(commandsDir, workflowArtifacts);
|
||||||
|
|
||||||
// Generate task and tool commands using flat colon naming
|
// Generate task and tool commands using flat underscore naming
|
||||||
const taskToolGen = new TaskToolCommandGenerator();
|
const taskToolGen = new TaskToolCommandGenerator();
|
||||||
const taskToolResult = await taskToolGen.generateColonTaskToolCommands(projectDir, bmadDir, commandsDir);
|
const taskToolResult = await taskToolGen.generateColonTaskToolCommands(projectDir, bmadDir, commandsDir);
|
||||||
|
|
||||||
|
|
@ -81,11 +81,11 @@ class CrushSetup extends BaseIdeSetup {
|
||||||
async cleanup(projectDir) {
|
async cleanup(projectDir) {
|
||||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||||
|
|
||||||
// Remove any bmad:* files from the commands directory
|
// Remove any bmad* files from the commands directory (cleans up old bmad: and bmad- formats)
|
||||||
if (await fs.pathExists(commandsDir)) {
|
if (await fs.pathExists(commandsDir)) {
|
||||||
const entries = await fs.readdir(commandsDir);
|
const entries = await fs.readdir(commandsDir);
|
||||||
for (const entry of entries) {
|
for (const entry of entries) {
|
||||||
if (entry.startsWith('bmad:')) {
|
if (entry.startsWith('bmad')) {
|
||||||
await fs.remove(path.join(commandsDir, entry));
|
await fs.remove(path.join(commandsDir, entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -129,7 +129,7 @@ The agent will follow the persona and instructions from the main agent file.
|
||||||
|
|
||||||
*Generated by BMAD Method*`;
|
*Generated by BMAD Method*`;
|
||||||
|
|
||||||
// Use colon format: bmad:custom:agents:fred-commit-poet.md
|
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||||
// Written directly to commands dir (no bmad subfolder)
|
// Written directly to commands dir (no bmad subfolder)
|
||||||
const launcherName = customAgentColonName(agentName);
|
const launcherName = customAgentColonName(agentName);
|
||||||
const launcherPath = path.join(commandsDir, launcherName);
|
const launcherPath = path.join(commandsDir, launcherName);
|
||||||
|
|
|
||||||
|
|
@ -25,11 +25,11 @@ class CursorSetup extends BaseIdeSetup {
|
||||||
const fs = require('fs-extra');
|
const fs = require('fs-extra');
|
||||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||||
|
|
||||||
// Remove any bmad:* files from the commands directory
|
// Remove any bmad* files from the commands directory (cleans up old bmad: and bmad- formats)
|
||||||
if (await fs.pathExists(commandsDir)) {
|
if (await fs.pathExists(commandsDir)) {
|
||||||
const entries = await fs.readdir(commandsDir);
|
const entries = await fs.readdir(commandsDir);
|
||||||
for (const entry of entries) {
|
for (const entry of entries) {
|
||||||
if (entry.startsWith('bmad:')) {
|
if (entry.startsWith('bmad')) {
|
||||||
await fs.remove(path.join(commandsDir, entry));
|
await fs.remove(path.join(commandsDir, entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -59,24 +59,24 @@ class CursorSetup extends BaseIdeSetup {
|
||||||
const commandsDir = path.join(cursorDir, this.commandsDir);
|
const commandsDir = path.join(cursorDir, this.commandsDir);
|
||||||
await this.ensureDir(commandsDir);
|
await this.ensureDir(commandsDir);
|
||||||
|
|
||||||
// Use colon format: files written directly to commands dir (no bmad subfolder)
|
// Use underscore format: files written directly to commands dir (no bmad subfolder)
|
||||||
// Creates: .cursor/commands/bmad:bmm:pm.md
|
// Creates: .cursor/commands/bmad_bmm_pm.md
|
||||||
|
|
||||||
// Generate agent launchers using AgentCommandGenerator
|
// Generate agent launchers using AgentCommandGenerator
|
||||||
// This creates small launcher files that reference the actual agents in _bmad/
|
// This creates small launcher files that reference the actual agents in _bmad/
|
||||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||||
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||||
|
|
||||||
// Write agent launcher files using flat colon naming
|
// Write agent launcher files using flat underscore naming
|
||||||
// Creates files like: bmad:bmm:pm.md
|
// Creates files like: bmad_bmm_pm.md
|
||||||
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
||||||
|
|
||||||
// Generate workflow commands from manifest (if it exists)
|
// Generate workflow commands from manifest (if it exists)
|
||||||
const workflowGen = new WorkflowCommandGenerator(this.bmadFolderName);
|
const workflowGen = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||||
const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
|
const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
|
||||||
|
|
||||||
// Write workflow-command artifacts using flat colon naming
|
// Write workflow-command artifacts using flat underscore naming
|
||||||
// Creates files like: bmad:bmm:correct-course.md
|
// Creates files like: bmad_bmm_correct-course.md
|
||||||
const workflowCommandCount = await workflowGen.writeColonArtifacts(commandsDir, workflowArtifacts);
|
const workflowCommandCount = await workflowGen.writeColonArtifacts(commandsDir, workflowArtifacts);
|
||||||
|
|
||||||
// Generate task and tool commands from manifests (if they exist)
|
// Generate task and tool commands from manifests (if they exist)
|
||||||
|
|
@ -144,7 +144,7 @@ description: '${agentName} agent'
|
||||||
${launcherContent}
|
${launcherContent}
|
||||||
`;
|
`;
|
||||||
|
|
||||||
// Use colon format: bmad:custom:agents:fred-commit-poet.md
|
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||||
// Written directly to commands dir (no bmad subfolder)
|
// Written directly to commands dir (no bmad subfolder)
|
||||||
const launcherName = customAgentColonName(agentName);
|
const launcherName = customAgentColonName(agentName);
|
||||||
const launcherPath = path.join(commandsDir, launcherName);
|
const launcherPath = path.join(commandsDir, launcherName);
|
||||||
|
|
|
||||||
|
|
@ -86,7 +86,7 @@ class GeminiSetup extends BaseIdeSetup {
|
||||||
await this.writeFile(tomlPath, tomlContent);
|
await this.writeFile(tomlPath, tomlContent);
|
||||||
agentCount++;
|
agentCount++;
|
||||||
|
|
||||||
console.log(chalk.green(` ✓ Added agent: /bmad:agents:${artifact.module}:${artifact.name}`));
|
console.log(chalk.green(` ✓ Added agent: /bmad_agents_${artifact.module}_${artifact.name}`));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Install tasks as TOML files with bmad- prefix (flat structure)
|
// Install tasks as TOML files with bmad- prefix (flat structure)
|
||||||
|
|
@ -100,7 +100,7 @@ class GeminiSetup extends BaseIdeSetup {
|
||||||
await this.writeFile(tomlPath, tomlContent);
|
await this.writeFile(tomlPath, tomlContent);
|
||||||
taskCount++;
|
taskCount++;
|
||||||
|
|
||||||
console.log(chalk.green(` ✓ Added task: /bmad:tasks:${task.module}:${task.name}`));
|
console.log(chalk.green(` ✓ Added task: /bmad_tasks_${task.module}_${task.name}`));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Install workflows as TOML files with bmad- prefix (flat structure)
|
// Install workflows as TOML files with bmad- prefix (flat structure)
|
||||||
|
|
@ -116,7 +116,7 @@ class GeminiSetup extends BaseIdeSetup {
|
||||||
await this.writeFile(tomlPath, tomlContent);
|
await this.writeFile(tomlPath, tomlContent);
|
||||||
workflowCount++;
|
workflowCount++;
|
||||||
|
|
||||||
console.log(chalk.green(` ✓ Added workflow: /bmad:workflows:${artifact.module}:${workflowName}`));
|
console.log(chalk.green(` ✓ Added workflow: /bmad_workflows_${artifact.module}_${workflowName}`));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -125,9 +125,9 @@ class GeminiSetup extends BaseIdeSetup {
|
||||||
console.log(chalk.dim(` - ${taskCount} tasks configured`));
|
console.log(chalk.dim(` - ${taskCount} tasks configured`));
|
||||||
console.log(chalk.dim(` - ${workflowCount} workflows configured`));
|
console.log(chalk.dim(` - ${workflowCount} workflows configured`));
|
||||||
console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));
|
console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));
|
||||||
console.log(chalk.dim(` - Agent activation: /bmad:agents:{agent-name}`));
|
console.log(chalk.dim(` - Agent activation: /bmad_agents_{agent-name}`));
|
||||||
console.log(chalk.dim(` - Task activation: /bmad:tasks:{task-name}`));
|
console.log(chalk.dim(` - Task activation: /bmad_tasks_{task-name}`));
|
||||||
console.log(chalk.dim(` - Workflow activation: /bmad:workflows:{workflow-name}`));
|
console.log(chalk.dim(` - Workflow activation: /bmad_workflows_{workflow-name}`));
|
||||||
|
|
||||||
return {
|
return {
|
||||||
success: true,
|
success: true,
|
||||||
|
|
@ -233,12 +233,12 @@ ${contentWithoutFrontmatter}
|
||||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||||
|
|
||||||
if (await fs.pathExists(commandsDir)) {
|
if (await fs.pathExists(commandsDir)) {
|
||||||
// Only remove files that start with bmad- prefix
|
// Remove any bmad* files (cleans up old bmad- and bmad: formats)
|
||||||
const files = await fs.readdir(commandsDir);
|
const files = await fs.readdir(commandsDir);
|
||||||
let removed = 0;
|
let removed = 0;
|
||||||
|
|
||||||
for (const file of files) {
|
for (const file of files) {
|
||||||
if (file.startsWith('bmad-') && file.endsWith('.toml')) {
|
if (file.startsWith('bmad') && file.endsWith('.toml')) {
|
||||||
await fs.remove(path.join(commandsDir, file));
|
await fs.remove(path.join(commandsDir, file));
|
||||||
removed++;
|
removed++;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -275,7 +275,7 @@ ${cleanContent}
|
||||||
let removed = 0;
|
let removed = 0;
|
||||||
|
|
||||||
for (const file of files) {
|
for (const file of files) {
|
||||||
if (file.startsWith('bmad-') && file.endsWith('.chatmode.md')) {
|
if (file.startsWith('bmad') && file.endsWith('.chatmode.md')) {
|
||||||
await fs.remove(path.join(chatmodesDir, file));
|
await fs.remove(path.join(chatmodesDir, file));
|
||||||
removed++;
|
removed++;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -25,7 +25,7 @@ class KiroCliSetup extends BaseIdeSetup {
|
||||||
// Remove existing BMad agents
|
// Remove existing BMad agents
|
||||||
const files = await fs.readdir(bmadAgentsDir);
|
const files = await fs.readdir(bmadAgentsDir);
|
||||||
for (const file of files) {
|
for (const file of files) {
|
||||||
if (file.startsWith('bmad-') || file.includes('bmad')) {
|
if (file.startsWith('bmad')) {
|
||||||
await fs.remove(path.join(bmadAgentsDir, file));
|
await fs.remove(path.join(bmadAgentsDir, file));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -185,7 +185,7 @@ class OpenCodeSetup extends BaseIdeSetup {
|
||||||
if (await fs.pathExists(agentsDir)) {
|
if (await fs.pathExists(agentsDir)) {
|
||||||
const files = await fs.readdir(agentsDir);
|
const files = await fs.readdir(agentsDir);
|
||||||
for (const file of files) {
|
for (const file of files) {
|
||||||
if (file.startsWith('bmad-') && file.endsWith('.md')) {
|
if (file.startsWith('bmad') && file.endsWith('.md')) {
|
||||||
await fs.remove(path.join(agentsDir, file));
|
await fs.remove(path.join(agentsDir, file));
|
||||||
removed++;
|
removed++;
|
||||||
}
|
}
|
||||||
|
|
@ -196,7 +196,7 @@ class OpenCodeSetup extends BaseIdeSetup {
|
||||||
if (await fs.pathExists(commandsDir)) {
|
if (await fs.pathExists(commandsDir)) {
|
||||||
const files = await fs.readdir(commandsDir);
|
const files = await fs.readdir(commandsDir);
|
||||||
for (const file of files) {
|
for (const file of files) {
|
||||||
if (file.startsWith('bmad-') && file.endsWith('.md')) {
|
if (file.startsWith('bmad') && file.endsWith('.md')) {
|
||||||
await fs.remove(path.join(commandsDir, file));
|
await fs.remove(path.join(commandsDir, file));
|
||||||
removed++;
|
removed++;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -74,7 +74,7 @@ class QwenSetup extends BaseIdeSetup {
|
||||||
await this.writeFile(targetPath, tomlContent);
|
await this.writeFile(targetPath, tomlContent);
|
||||||
|
|
||||||
agentCount++;
|
agentCount++;
|
||||||
console.log(chalk.green(` ✓ Added agent: /bmad:${artifact.module}:agents:${artifact.name}`));
|
console.log(chalk.green(` ✓ Added agent: /bmad_${artifact.module}_agents_${artifact.name}`));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create TOML files for each task
|
// Create TOML files for each task
|
||||||
|
|
@ -90,7 +90,7 @@ class QwenSetup extends BaseIdeSetup {
|
||||||
await this.writeFile(targetPath, content);
|
await this.writeFile(targetPath, content);
|
||||||
|
|
||||||
taskCount++;
|
taskCount++;
|
||||||
console.log(chalk.green(` ✓ Added task: /bmad:${task.module}:tasks:${task.name}`));
|
console.log(chalk.green(` ✓ Added task: /bmad_${task.module}_tasks_${task.name}`));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create TOML files for each tool
|
// Create TOML files for each tool
|
||||||
|
|
@ -106,7 +106,7 @@ class QwenSetup extends BaseIdeSetup {
|
||||||
await this.writeFile(targetPath, content);
|
await this.writeFile(targetPath, content);
|
||||||
|
|
||||||
toolCount++;
|
toolCount++;
|
||||||
console.log(chalk.green(` ✓ Added tool: /bmad:${tool.module}:tools:${tool.name}`));
|
console.log(chalk.green(` ✓ Added tool: /bmad_${tool.module}_tools_${tool.name}`));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create TOML files for each workflow
|
// Create TOML files for each workflow
|
||||||
|
|
@ -122,7 +122,7 @@ class QwenSetup extends BaseIdeSetup {
|
||||||
await this.writeFile(targetPath, content);
|
await this.writeFile(targetPath, content);
|
||||||
|
|
||||||
workflowCount++;
|
workflowCount++;
|
||||||
console.log(chalk.green(` ✓ Added workflow: /bmad:${workflow.module}:workflows:${workflow.name}`));
|
console.log(chalk.green(` ✓ Added workflow: /bmad_${workflow.module}_workflows_${workflow.name}`));
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log(chalk.green(`✓ ${this.name} configured:`));
|
console.log(chalk.green(`✓ ${this.name} configured:`));
|
||||||
|
|
|
||||||
|
|
@ -36,7 +36,7 @@ class RooSetup extends BaseIdeSetup {
|
||||||
let skippedCount = 0;
|
let skippedCount = 0;
|
||||||
|
|
||||||
for (const artifact of agentArtifacts) {
|
for (const artifact of agentArtifacts) {
|
||||||
// Use shared toDashPath to get consistent naming: bmad-bmm-name.md
|
// Use shared toDashPath to get consistent naming: bmad_bmm_name.md
|
||||||
const commandName = toDashPath(artifact.relativePath).replace('.md', '');
|
const commandName = toDashPath(artifact.relativePath).replace('.md', '');
|
||||||
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
||||||
|
|
||||||
|
|
@ -169,7 +169,7 @@ class RooSetup extends BaseIdeSetup {
|
||||||
let removedCount = 0;
|
let removedCount = 0;
|
||||||
|
|
||||||
for (const file of files) {
|
for (const file of files) {
|
||||||
if (file.startsWith('bmad-') && file.endsWith('.md')) {
|
if (file.startsWith('bmad') && file.endsWith('.md')) {
|
||||||
await fs.remove(path.join(rooCommandsDir, file));
|
await fs.remove(path.join(rooCommandsDir, file));
|
||||||
removedCount++;
|
removedCount++;
|
||||||
}
|
}
|
||||||
|
|
@ -192,7 +192,7 @@ class RooSetup extends BaseIdeSetup {
|
||||||
let removedCount = 0;
|
let removedCount = 0;
|
||||||
|
|
||||||
for (const line of lines) {
|
for (const line of lines) {
|
||||||
if (/^\s*- slug: bmad-/.test(line)) {
|
if (/^\s*- slug: bmad/.test(line)) {
|
||||||
skipMode = true;
|
skipMode = true;
|
||||||
removedCount++;
|
removedCount++;
|
||||||
} else if (skipMode && /^\s*- slug: /.test(line)) {
|
} else if (skipMode && /^\s*- slug: /.test(line)) {
|
||||||
|
|
@ -224,7 +224,7 @@ class RooSetup extends BaseIdeSetup {
|
||||||
const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||||
await this.ensureDir(rooCommandsDir);
|
await this.ensureDir(rooCommandsDir);
|
||||||
|
|
||||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
// Use underscore format: bmad_custom_fred-commit-poet.md
|
||||||
const commandName = customAgentDashName(agentName).replace('.md', '');
|
const commandName = customAgentDashName(agentName).replace('.md', '');
|
||||||
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -37,7 +37,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
||||||
const subagentsDir = path.join(rovoDevDir, this.subagentsDir);
|
const subagentsDir = path.join(rovoDevDir, this.subagentsDir);
|
||||||
if (await fs.pathExists(subagentsDir)) {
|
if (await fs.pathExists(subagentsDir)) {
|
||||||
const entries = await fs.readdir(subagentsDir);
|
const entries = await fs.readdir(subagentsDir);
|
||||||
const bmadFiles = entries.filter((file) => file.startsWith('bmad-') && file.endsWith('.md'));
|
const bmadFiles = entries.filter((file) => file.startsWith('bmad') && file.endsWith('.md'));
|
||||||
|
|
||||||
for (const file of bmadFiles) {
|
for (const file of bmadFiles) {
|
||||||
await fs.remove(path.join(subagentsDir, file));
|
await fs.remove(path.join(subagentsDir, file));
|
||||||
|
|
@ -48,7 +48,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
||||||
const workflowsDir = path.join(rovoDevDir, this.workflowsDir);
|
const workflowsDir = path.join(rovoDevDir, this.workflowsDir);
|
||||||
if (await fs.pathExists(workflowsDir)) {
|
if (await fs.pathExists(workflowsDir)) {
|
||||||
const entries = await fs.readdir(workflowsDir);
|
const entries = await fs.readdir(workflowsDir);
|
||||||
const bmadFiles = entries.filter((file) => file.startsWith('bmad-') && file.endsWith('.md'));
|
const bmadFiles = entries.filter((file) => file.startsWith('bmad') && file.endsWith('.md'));
|
||||||
|
|
||||||
for (const file of bmadFiles) {
|
for (const file of bmadFiles) {
|
||||||
await fs.remove(path.join(workflowsDir, file));
|
await fs.remove(path.join(workflowsDir, file));
|
||||||
|
|
@ -59,7 +59,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
||||||
const referencesDir = path.join(rovoDevDir, this.referencesDir);
|
const referencesDir = path.join(rovoDevDir, this.referencesDir);
|
||||||
if (await fs.pathExists(referencesDir)) {
|
if (await fs.pathExists(referencesDir)) {
|
||||||
const entries = await fs.readdir(referencesDir);
|
const entries = await fs.readdir(referencesDir);
|
||||||
const bmadFiles = entries.filter((file) => file.startsWith('bmad-') && file.endsWith('.md'));
|
const bmadFiles = entries.filter((file) => file.startsWith('bmad') && file.endsWith('.md'));
|
||||||
|
|
||||||
for (const file of bmadFiles) {
|
for (const file of bmadFiles) {
|
||||||
await fs.remove(path.join(referencesDir, file));
|
await fs.remove(path.join(referencesDir, file));
|
||||||
|
|
@ -249,7 +249,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
||||||
if (await fs.pathExists(subagentsDir)) {
|
if (await fs.pathExists(subagentsDir)) {
|
||||||
try {
|
try {
|
||||||
const entries = await fs.readdir(subagentsDir);
|
const entries = await fs.readdir(subagentsDir);
|
||||||
if (entries.some((entry) => entry.startsWith('bmad-') && entry.endsWith('.md'))) {
|
if (entries.some((entry) => entry.startsWith('bmad') && entry.endsWith('.md'))) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
} catch {
|
} catch {
|
||||||
|
|
@ -262,7 +262,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
||||||
if (await fs.pathExists(workflowsDir)) {
|
if (await fs.pathExists(workflowsDir)) {
|
||||||
try {
|
try {
|
||||||
const entries = await fs.readdir(workflowsDir);
|
const entries = await fs.readdir(workflowsDir);
|
||||||
if (entries.some((entry) => entry.startsWith('bmad-') && entry.endsWith('.md'))) {
|
if (entries.some((entry) => entry.startsWith('bmad') && entry.endsWith('.md'))) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
} catch {
|
} catch {
|
||||||
|
|
@ -275,7 +275,7 @@ class RovoDevSetup extends BaseIdeSetup {
|
||||||
if (await fs.pathExists(referencesDir)) {
|
if (await fs.pathExists(referencesDir)) {
|
||||||
try {
|
try {
|
||||||
const entries = await fs.readdir(referencesDir);
|
const entries = await fs.readdir(referencesDir);
|
||||||
if (entries.some((entry) => entry.startsWith('bmad-') && entry.endsWith('.md'))) {
|
if (entries.some((entry) => entry.startsWith('bmad') && entry.endsWith('.md'))) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
} catch {
|
} catch {
|
||||||
|
|
|
||||||
|
|
@ -94,8 +94,8 @@ class AgentCommandGenerator {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Write agent launcher artifacts using COLON format (for folder-based IDEs)
|
* Write agent launcher artifacts using underscore format (Windows-compatible)
|
||||||
* Creates flat files like: bmad:bmm:pm.md
|
* Creates flat files like: bmad_bmm_pm.md
|
||||||
*
|
*
|
||||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||||
* @param {Array} artifacts - Agent launcher artifacts
|
* @param {Array} artifacts - Agent launcher artifacts
|
||||||
|
|
@ -106,7 +106,7 @@ class AgentCommandGenerator {
|
||||||
|
|
||||||
for (const artifact of artifacts) {
|
for (const artifact of artifacts) {
|
||||||
if (artifact.type === 'agent-launcher') {
|
if (artifact.type === 'agent-launcher') {
|
||||||
// Convert relativePath to colon format: bmm/agents/pm.md → bmad:bmm:pm.md
|
// Convert relativePath to underscore format: bmm/agents/pm.md → bmad_bmm_pm.md
|
||||||
const flatName = toColonPath(artifact.relativePath);
|
const flatName = toColonPath(artifact.relativePath);
|
||||||
const launcherPath = path.join(baseCommandsDir, flatName);
|
const launcherPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(launcherPath));
|
await fs.ensureDir(path.dirname(launcherPath));
|
||||||
|
|
@ -119,8 +119,8 @@ class AgentCommandGenerator {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Write agent launcher artifacts using DASH format (for flat IDEs)
|
* Write agent launcher artifacts using underscore format (Windows-compatible)
|
||||||
* Creates flat files like: bmad-bmm-pm.md
|
* Creates flat files like: bmad_bmm_pm.md
|
||||||
*
|
*
|
||||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||||
* @param {Array} artifacts - Agent launcher artifacts
|
* @param {Array} artifacts - Agent launcher artifacts
|
||||||
|
|
@ -131,7 +131,7 @@ class AgentCommandGenerator {
|
||||||
|
|
||||||
for (const artifact of artifacts) {
|
for (const artifact of artifacts) {
|
||||||
if (artifact.type === 'agent-launcher') {
|
if (artifact.type === 'agent-launcher') {
|
||||||
// Convert relativePath to dash format: bmm/agents/pm.md → bmad-bmm-pm.md
|
// Convert relativePath to underscore format: bmm/agents/pm.md → bmad_bmm_pm.md
|
||||||
const flatName = toDashPath(artifact.relativePath);
|
const flatName = toDashPath(artifact.relativePath);
|
||||||
const launcherPath = path.join(baseCommandsDir, flatName);
|
const launcherPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(launcherPath));
|
await fs.ensureDir(path.dirname(launcherPath));
|
||||||
|
|
@ -144,18 +144,18 @@ class AgentCommandGenerator {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get the custom agent name in colon format
|
* Get the custom agent name in underscore format (Windows-compatible)
|
||||||
* @param {string} agentName - Custom agent name
|
* @param {string} agentName - Custom agent name
|
||||||
* @returns {string} Colon-formatted filename
|
* @returns {string} Underscore-formatted filename
|
||||||
*/
|
*/
|
||||||
getCustomAgentColonName(agentName) {
|
getCustomAgentColonName(agentName) {
|
||||||
return customAgentColonName(agentName);
|
return customAgentColonName(agentName);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get the custom agent name in dash format
|
* Get the custom agent name in underscore format (Windows-compatible)
|
||||||
* @param {string} agentName - Custom agent name
|
* @param {string} agentName - Custom agent name
|
||||||
* @returns {string} Dash-formatted filename
|
* @returns {string} Underscore-formatted filename
|
||||||
*/
|
*/
|
||||||
getCustomAgentDashName(agentName) {
|
getCustomAgentDashName(agentName) {
|
||||||
return customAgentDashName(agentName);
|
return customAgentDashName(agentName);
|
||||||
|
|
|
||||||
|
|
@ -2,109 +2,72 @@
|
||||||
* Path transformation utilities for IDE installer standardization
|
* Path transformation utilities for IDE installer standardization
|
||||||
*
|
*
|
||||||
* Provides utilities to convert hierarchical paths to flat naming conventions.
|
* Provides utilities to convert hierarchical paths to flat naming conventions.
|
||||||
* - Colon format (bmad:module:name.md) for folder-based IDEs converting to flat
|
* - Underscore format (bmad_module_name.md) - Windows-compatible universal format
|
||||||
* - Dash format (bmad-module-name.md) for already-flat IDEs
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Type segments to filter out from paths
|
// Type segments to filter out from paths
|
||||||
const TYPE_SEGMENTS = ['agents', 'workflows', 'tasks', 'tools'];
|
const TYPE_SEGMENTS = ['agents', 'workflows', 'tasks', 'tools'];
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Convert hierarchical path to flat colon-separated name (for folder-based IDEs)
|
* Convert hierarchical path to flat underscore-separated name
|
||||||
* Converts: 'bmm/agents/pm.md' → 'bmad:bmm:pm.md'
|
* Converts: 'bmm/agents/pm.md' → 'bmad_bmm_pm.md'
|
||||||
* Converts: 'bmm/workflows/correct-course.md' → 'bmad:bmm:correct-course.md'
|
* Converts: 'bmm/workflows/correct-course.md' → 'bmad_bmm_correct-course.md'
|
||||||
*
|
*
|
||||||
* @param {string} module - Module name (e.g., 'bmm', 'core')
|
* @param {string} module - Module name (e.g., 'bmm', 'core')
|
||||||
* @param {string} type - Artifact type ('agents', 'workflows', 'tasks', 'tools') - filtered out
|
* @param {string} type - Artifact type ('agents', 'workflows', 'tasks', 'tools') - filtered out
|
||||||
* @param {string} name - Artifact name (e.g., 'pm', 'correct-course')
|
* @param {string} name - Artifact name (e.g., 'pm', 'correct-course')
|
||||||
* @returns {string} Flat filename like 'bmad:bmm:pm.md'
|
* @returns {string} Flat filename like 'bmad_bmm_pm.md'
|
||||||
*/
|
*/
|
||||||
function toColonName(module, type, name) {
|
function toUnderscoreName(module, type, name) {
|
||||||
return `bmad:${module}:${name}.md`;
|
return `bmad_${module}_${name}.md`;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Convert relative path to flat colon-separated name (for folder-based IDEs)
|
* Convert relative path to flat underscore-separated name
|
||||||
* Converts: 'bmm/agents/pm.md' → 'bmad:bmm:pm.md'
|
* Converts: 'bmm/agents/pm.md' → 'bmad_bmm_pm.md'
|
||||||
* Converts: 'bmm/workflows/correct-course.md' → 'bmad:bmm:correct-course.md'
|
* Converts: 'bmm/workflows/correct-course.md' → 'bmad_bmm_correct-course.md'
|
||||||
*
|
*
|
||||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
||||||
* @returns {string} Flat filename like 'bmad:bmm:pm.md'
|
* @returns {string} Flat filename like 'bmad_bmm_pm.md'
|
||||||
*/
|
*/
|
||||||
function toColonPath(relativePath) {
|
function toUnderscorePath(relativePath) {
|
||||||
const withoutExt = relativePath.replace('.md', '');
|
const withoutExt = relativePath.replace('.md', '');
|
||||||
const parts = withoutExt.split(/[/\\]/);
|
const parts = withoutExt.split(/[/\\]/);
|
||||||
// Filter out type segments (agents, workflows, tasks, tools)
|
// Filter out type segments (agents, workflows, tasks, tools)
|
||||||
const filtered = parts.filter((p) => !TYPE_SEGMENTS.includes(p));
|
const filtered = parts.filter((p) => !TYPE_SEGMENTS.includes(p));
|
||||||
return `bmad:${filtered.join(':')}.md`;
|
return `bmad_${filtered.join('_')}.md`;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Convert hierarchical path to flat dash-separated name (for flat IDEs)
|
* Create custom agent underscore name
|
||||||
* Converts: 'bmm/agents/pm.md' → 'bmad-bmm-pm.md'
|
* Creates: 'bmad_custom_fred-commit-poet.md'
|
||||||
* Converts: 'bmm/workflows/correct-course.md' → 'bmad-bmm-correct-course.md'
|
|
||||||
*
|
|
||||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
|
||||||
* @returns {string} Flat filename like 'bmad-bmm-pm.md'
|
|
||||||
*/
|
|
||||||
function toDashPath(relativePath) {
|
|
||||||
const withoutExt = relativePath.replace('.md', '');
|
|
||||||
const parts = withoutExt.split(/[/\\]/);
|
|
||||||
// Filter out type segments (agents, workflows, tasks, tools)
|
|
||||||
const filtered = parts.filter((p) => !TYPE_SEGMENTS.includes(p));
|
|
||||||
return `bmad-${filtered.join('-')}.md`;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Create custom agent colon name (for folder-based IDEs)
|
|
||||||
* Creates: 'bmad:custom:fred-commit-poet.md'
|
|
||||||
*
|
*
|
||||||
* @param {string} agentName - Custom agent name
|
* @param {string} agentName - Custom agent name
|
||||||
* @returns {string} Flat filename like 'bmad:custom:fred-commit-poet.md'
|
* @returns {string} Flat filename like 'bmad_custom_fred-commit-poet.md'
|
||||||
*/
|
*/
|
||||||
function customAgentColonName(agentName) {
|
function customAgentUnderscoreName(agentName) {
|
||||||
return `bmad:custom:${agentName}.md`;
|
return `bmad_custom_${agentName}.md`;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create custom agent dash name (for flat IDEs)
|
* Check if a filename uses underscore format
|
||||||
* Creates: 'bmad-custom-fred-commit-poet.md'
|
|
||||||
*
|
|
||||||
* @param {string} agentName - Custom agent name
|
|
||||||
* @returns {string} Flat filename like 'bmad-custom-fred-commit-poet.md'
|
|
||||||
*/
|
|
||||||
function customAgentDashName(agentName) {
|
|
||||||
return `bmad-custom-${agentName}.md`;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Check if a filename uses colon format
|
|
||||||
* @param {string} filename - Filename to check
|
* @param {string} filename - Filename to check
|
||||||
* @returns {boolean} True if filename uses colon format
|
* @returns {boolean} True if filename uses underscore format
|
||||||
*/
|
*/
|
||||||
function isColonFormat(filename) {
|
function isUnderscoreFormat(filename) {
|
||||||
return filename.includes('bmad:') && filename.includes(':');
|
return filename.startsWith('bmad_') && filename.includes('_');
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if a filename uses dash format
|
* Extract parts from an underscore-formatted filename
|
||||||
* @param {string} filename - Filename to check
|
* Parses: 'bmad_bmm_pm.md' → { prefix: 'bmad', module: 'bmm', name: 'pm' }
|
||||||
* @returns {boolean} True if filename uses dash format
|
|
||||||
*/
|
|
||||||
function isDashFormat(filename) {
|
|
||||||
return filename.startsWith('bmad-') && !filename.includes(':');
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extract parts from a colon-formatted filename
|
|
||||||
* Parses: 'bmad:bmm:pm.md' → { prefix: 'bmad', module: 'bmm', name: 'pm' }
|
|
||||||
*
|
*
|
||||||
* @param {string} filename - Colon-formatted filename
|
* @param {string} filename - Underscore-formatted filename
|
||||||
* @returns {Object|null} Parsed parts or null if invalid format
|
* @returns {Object|null} Parsed parts or null if invalid format
|
||||||
*/
|
*/
|
||||||
function parseColonName(filename) {
|
function parseUnderscoreName(filename) {
|
||||||
const withoutExt = filename.replace('.md', '');
|
const withoutExt = filename.replace('.md', '');
|
||||||
const parts = withoutExt.split(':');
|
const parts = withoutExt.split('_');
|
||||||
|
|
||||||
if (parts.length < 3 || parts[0] !== 'bmad') {
|
if (parts.length < 3 || parts[0] !== 'bmad') {
|
||||||
return null;
|
return null;
|
||||||
|
|
@ -113,33 +76,28 @@ function parseColonName(filename) {
|
||||||
return {
|
return {
|
||||||
prefix: parts[0],
|
prefix: parts[0],
|
||||||
module: parts[1],
|
module: parts[1],
|
||||||
name: parts.slice(2).join(':'), // Handle names that might contain colons
|
name: parts.slice(2).join('_'), // Handle names that might contain underscores
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
// Backward compatibility aliases (deprecated)
|
||||||
* Extract parts from a dash-formatted filename
|
const toColonName = toUnderscoreName;
|
||||||
* Parses: 'bmad-bmm-pm.md' → { prefix: 'bmad', module: 'bmm', name: 'pm' }
|
const toColonPath = toUnderscorePath;
|
||||||
*
|
const toDashPath = toUnderscorePath;
|
||||||
* @param {string} filename - Dash-formatted filename
|
const customAgentColonName = customAgentUnderscoreName;
|
||||||
* @returns {Object|null} Parsed parts or null if invalid format
|
const customAgentDashName = customAgentUnderscoreName;
|
||||||
*/
|
const isColonFormat = isUnderscoreFormat;
|
||||||
function parseDashName(filename) {
|
const isDashFormat = isUnderscoreFormat;
|
||||||
const withoutExt = filename.replace('.md', '');
|
const parseColonName = parseUnderscoreName;
|
||||||
const parts = withoutExt.split('-');
|
const parseDashName = parseUnderscoreName;
|
||||||
|
|
||||||
if (parts.length < 3 || parts[0] !== 'bmad') {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
return {
|
|
||||||
prefix: parts[0],
|
|
||||||
module: parts[1],
|
|
||||||
name: parts.slice(2).join('-'), // Handle names that might contain dashes
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
|
toUnderscoreName,
|
||||||
|
toUnderscorePath,
|
||||||
|
customAgentUnderscoreName,
|
||||||
|
isUnderscoreFormat,
|
||||||
|
parseUnderscoreName,
|
||||||
|
// Backward compatibility aliases
|
||||||
toColonName,
|
toColonName,
|
||||||
toColonPath,
|
toColonPath,
|
||||||
toDashPath,
|
toDashPath,
|
||||||
|
|
|
||||||
|
|
@ -117,8 +117,8 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Generate task and tool commands using COLON format (for folder-based IDEs)
|
* Generate task and tool commands using underscore format (Windows-compatible)
|
||||||
* Creates flat files like: bmad:bmm:bmad-help.md
|
* Creates flat files like: bmad_bmm_bmad-help.md
|
||||||
*
|
*
|
||||||
* @param {string} projectDir - Project directory
|
* @param {string} projectDir - Project directory
|
||||||
* @param {string} bmadDir - BMAD installation directory
|
* @param {string} bmadDir - BMAD installation directory
|
||||||
|
|
@ -138,7 +138,7 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
// Generate command files for tasks
|
// Generate command files for tasks
|
||||||
for (const task of standaloneTasks) {
|
for (const task of standaloneTasks) {
|
||||||
const commandContent = this.generateCommandContent(task, 'task');
|
const commandContent = this.generateCommandContent(task, 'task');
|
||||||
// Use colon format: bmad:bmm:name.md
|
// Use underscore format: bmad_bmm_name.md
|
||||||
const flatName = toColonName(task.module, 'tasks', task.name);
|
const flatName = toColonName(task.module, 'tasks', task.name);
|
||||||
const commandPath = path.join(baseCommandsDir, flatName);
|
const commandPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(commandPath));
|
await fs.ensureDir(path.dirname(commandPath));
|
||||||
|
|
@ -149,7 +149,7 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
// Generate command files for tools
|
// Generate command files for tools
|
||||||
for (const tool of standaloneTools) {
|
for (const tool of standaloneTools) {
|
||||||
const commandContent = this.generateCommandContent(tool, 'tool');
|
const commandContent = this.generateCommandContent(tool, 'tool');
|
||||||
// Use colon format: bmad:bmm:name.md
|
// Use underscore format: bmad_bmm_name.md
|
||||||
const flatName = toColonName(tool.module, 'tools', tool.name);
|
const flatName = toColonName(tool.module, 'tools', tool.name);
|
||||||
const commandPath = path.join(baseCommandsDir, flatName);
|
const commandPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(commandPath));
|
await fs.ensureDir(path.dirname(commandPath));
|
||||||
|
|
@ -165,8 +165,8 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Generate task and tool commands using DASH format (for flat IDEs)
|
* Generate task and tool commands using underscore format (Windows-compatible)
|
||||||
* Creates flat files like: bmad-bmm-bmad-help.md
|
* Creates flat files like: bmad_bmm_bmad-help.md
|
||||||
*
|
*
|
||||||
* @param {string} projectDir - Project directory
|
* @param {string} projectDir - Project directory
|
||||||
* @param {string} bmadDir - BMAD installation directory
|
* @param {string} bmadDir - BMAD installation directory
|
||||||
|
|
@ -186,7 +186,7 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
// Generate command files for tasks
|
// Generate command files for tasks
|
||||||
for (const task of standaloneTasks) {
|
for (const task of standaloneTasks) {
|
||||||
const commandContent = this.generateCommandContent(task, 'task');
|
const commandContent = this.generateCommandContent(task, 'task');
|
||||||
// Use dash format: bmad-bmm-name.md
|
// Use underscore format: bmad_bmm_name.md
|
||||||
const flatName = toDashPath(`${task.module}/tasks/${task.name}.md`);
|
const flatName = toDashPath(`${task.module}/tasks/${task.name}.md`);
|
||||||
const commandPath = path.join(baseCommandsDir, flatName);
|
const commandPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(commandPath));
|
await fs.ensureDir(path.dirname(commandPath));
|
||||||
|
|
@ -197,7 +197,7 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
// Generate command files for tools
|
// Generate command files for tools
|
||||||
for (const tool of standaloneTools) {
|
for (const tool of standaloneTools) {
|
||||||
const commandContent = this.generateCommandContent(tool, 'tool');
|
const commandContent = this.generateCommandContent(tool, 'tool');
|
||||||
// Use dash format: bmad-bmm-name.md
|
// Use underscore format: bmad_bmm_name.md
|
||||||
const flatName = toDashPath(`${tool.module}/tools/${tool.name}.md`);
|
const flatName = toDashPath(`${tool.module}/tools/${tool.name}.md`);
|
||||||
const commandPath = path.join(baseCommandsDir, flatName);
|
const commandPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(commandPath));
|
await fs.ensureDir(path.dirname(commandPath));
|
||||||
|
|
@ -213,8 +213,8 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Write task/tool artifacts using COLON format (for folder-based IDEs)
|
* Write task/tool artifacts using underscore format (Windows-compatible)
|
||||||
* Creates flat files like: bmad:bmm:bmad-help.md
|
* Creates flat files like: bmad_bmm_bmad-help.md
|
||||||
*
|
*
|
||||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||||
* @param {Array} artifacts - Task/tool artifacts with relativePath
|
* @param {Array} artifacts - Task/tool artifacts with relativePath
|
||||||
|
|
@ -226,7 +226,7 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
for (const artifact of artifacts) {
|
for (const artifact of artifacts) {
|
||||||
if (artifact.type === 'task' || artifact.type === 'tool') {
|
if (artifact.type === 'task' || artifact.type === 'tool') {
|
||||||
const commandContent = this.generateCommandContent(artifact, artifact.type);
|
const commandContent = this.generateCommandContent(artifact, artifact.type);
|
||||||
// Use colon format: bmad:module:name.md
|
// Use underscore format: bmad_module_name.md
|
||||||
const flatName = toColonPath(artifact.relativePath);
|
const flatName = toColonPath(artifact.relativePath);
|
||||||
const commandPath = path.join(baseCommandsDir, flatName);
|
const commandPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(commandPath));
|
await fs.ensureDir(path.dirname(commandPath));
|
||||||
|
|
@ -239,8 +239,8 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Write task/tool artifacts using DASH format (for flat IDEs)
|
* Write task/tool artifacts using underscore format (Windows-compatible)
|
||||||
* Creates flat files like: bmad-bmm-bmad-help.md
|
* Creates flat files like: bmad_bmm_bmad-help.md
|
||||||
*
|
*
|
||||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||||
* @param {Array} artifacts - Task/tool artifacts with relativePath
|
* @param {Array} artifacts - Task/tool artifacts with relativePath
|
||||||
|
|
@ -252,7 +252,7 @@ Follow all instructions in the ${type} file exactly as written.
|
||||||
for (const artifact of artifacts) {
|
for (const artifact of artifacts) {
|
||||||
if (artifact.type === 'task' || artifact.type === 'tool') {
|
if (artifact.type === 'task' || artifact.type === 'tool') {
|
||||||
const commandContent = this.generateCommandContent(artifact, artifact.type);
|
const commandContent = this.generateCommandContent(artifact, artifact.type);
|
||||||
// Use dash format: bmad-module-name.md
|
// Use underscore format: bmad_module_name.md
|
||||||
const flatName = toDashPath(artifact.relativePath);
|
const flatName = toDashPath(artifact.relativePath);
|
||||||
const commandPath = path.join(baseCommandsDir, flatName);
|
const commandPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(commandPath));
|
await fs.ensureDir(path.dirname(commandPath));
|
||||||
|
|
|
||||||
|
|
@ -240,8 +240,8 @@ When running any workflow:
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Write workflow command artifacts using COLON format (for folder-based IDEs)
|
* Write workflow command artifacts using underscore format (Windows-compatible)
|
||||||
* Creates flat files like: bmad:bmm:correct-course.md
|
* Creates flat files like: bmad_bmm_correct-course.md
|
||||||
*
|
*
|
||||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||||
* @param {Array} artifacts - Workflow artifacts
|
* @param {Array} artifacts - Workflow artifacts
|
||||||
|
|
@ -252,7 +252,7 @@ When running any workflow:
|
||||||
|
|
||||||
for (const artifact of artifacts) {
|
for (const artifact of artifacts) {
|
||||||
if (artifact.type === 'workflow-command') {
|
if (artifact.type === 'workflow-command') {
|
||||||
// Convert relativePath to colon format: bmm/workflows/correct-course.md → bmad:bmm:correct-course.md
|
// Convert relativePath to underscore format: bmm/workflows/correct-course.md → bmad_bmm_correct-course.md
|
||||||
const flatName = toColonPath(artifact.relativePath);
|
const flatName = toColonPath(artifact.relativePath);
|
||||||
const commandPath = path.join(baseCommandsDir, flatName);
|
const commandPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(commandPath));
|
await fs.ensureDir(path.dirname(commandPath));
|
||||||
|
|
@ -265,8 +265,8 @@ When running any workflow:
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Write workflow command artifacts using DASH format (for flat IDEs)
|
* Write workflow command artifacts using underscore format (Windows-compatible)
|
||||||
* Creates flat files like: bmad-bmm-correct-course.md
|
* Creates flat files like: bmad_bmm_correct-course.md
|
||||||
*
|
*
|
||||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||||
* @param {Array} artifacts - Workflow artifacts
|
* @param {Array} artifacts - Workflow artifacts
|
||||||
|
|
@ -277,7 +277,7 @@ When running any workflow:
|
||||||
|
|
||||||
for (const artifact of artifacts) {
|
for (const artifact of artifacts) {
|
||||||
if (artifact.type === 'workflow-command') {
|
if (artifact.type === 'workflow-command') {
|
||||||
// Convert relativePath to dash format: bmm/workflows/correct-course.md → bmad-bmm-correct-course.md
|
// Convert relativePath to underscore format: bmm/workflows/correct-course.md → bmad_bmm_correct-course.md
|
||||||
const flatName = toDashPath(artifact.relativePath);
|
const flatName = toDashPath(artifact.relativePath);
|
||||||
const commandPath = path.join(baseCommandsDir, flatName);
|
const commandPath = path.join(baseCommandsDir, flatName);
|
||||||
await fs.ensureDir(path.dirname(commandPath));
|
await fs.ensureDir(path.dirname(commandPath));
|
||||||
|
|
|
||||||
|
|
@ -246,12 +246,12 @@ Part of the BMAD ${workflow.module.toUpperCase()} module.
|
||||||
const rulesPath = path.join(projectDir, this.configDir, this.rulesDir);
|
const rulesPath = path.join(projectDir, this.configDir, this.rulesDir);
|
||||||
|
|
||||||
if (await fs.pathExists(rulesPath)) {
|
if (await fs.pathExists(rulesPath)) {
|
||||||
// Only remove files that start with bmad- prefix
|
// Remove any bmad* files (cleans up old bmad- and bmad: formats)
|
||||||
const files = await fs.readdir(rulesPath);
|
const files = await fs.readdir(rulesPath);
|
||||||
let removed = 0;
|
let removed = 0;
|
||||||
|
|
||||||
for (const file of files) {
|
for (const file of files) {
|
||||||
if (file.startsWith('bmad-') && file.endsWith('.md')) {
|
if (file.startsWith('bmad') && file.endsWith('.md')) {
|
||||||
await fs.remove(path.join(rulesPath, file));
|
await fs.remove(path.join(rulesPath, file));
|
||||||
removed++;
|
removed++;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -54,6 +54,7 @@ class ExternalModuleManager {
|
||||||
description: moduleConfig.description || '',
|
description: moduleConfig.description || '',
|
||||||
defaultSelected: moduleConfig.defaultSelected === true,
|
defaultSelected: moduleConfig.defaultSelected === true,
|
||||||
type: moduleConfig.type || 'community', // bmad-org or community
|
type: moduleConfig.type || 'community', // bmad-org or community
|
||||||
|
npmPackage: moduleConfig.npmPackage || null, // Include npm package name
|
||||||
isExternal: true,
|
isExternal: true,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
@ -95,6 +96,7 @@ class ExternalModuleManager {
|
||||||
description: moduleConfig.description || '',
|
description: moduleConfig.description || '',
|
||||||
defaultSelected: moduleConfig.defaultSelected === true,
|
defaultSelected: moduleConfig.defaultSelected === true,
|
||||||
type: moduleConfig.type || 'community', // bmad-org or community
|
type: moduleConfig.type || 'community', // bmad-org or community
|
||||||
|
npmPackage: moduleConfig.npmPackage || null, // Include npm package name
|
||||||
isExternal: true,
|
isExternal: true,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -371,9 +371,9 @@ class ModuleManager {
|
||||||
const fetchSpinner = ora(`Fetching ${moduleInfo.name}...`).start();
|
const fetchSpinner = ora(`Fetching ${moduleInfo.name}...`).start();
|
||||||
try {
|
try {
|
||||||
const currentRef = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
|
const currentRef = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
|
||||||
execSync('git fetch --depth 1', { cwd: moduleCacheDir, stdio: 'pipe' });
|
// Fetch and reset to remote - works better with shallow clones than pull
|
||||||
execSync('git checkout -f', { cwd: moduleCacheDir, stdio: 'pipe' });
|
execSync('git fetch origin --depth 1', { cwd: moduleCacheDir, stdio: 'pipe' });
|
||||||
execSync('git pull --ff-only', { cwd: moduleCacheDir, stdio: 'pipe' });
|
execSync('git reset --hard origin/HEAD', { cwd: moduleCacheDir, stdio: 'pipe' });
|
||||||
const newRef = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
|
const newRef = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
|
||||||
|
|
||||||
fetchSpinner.succeed(`Fetched ${moduleInfo.name}`);
|
fetchSpinner.succeed(`Fetched ${moduleInfo.name}`);
|
||||||
|
|
@ -555,10 +555,23 @@ class ModuleManager {
|
||||||
await this.runModuleInstaller(moduleName, bmadDir, options);
|
await this.runModuleInstaller(moduleName, bmadDir, options);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Capture version info for manifest
|
||||||
|
const { Manifest } = require('../core/manifest');
|
||||||
|
const manifestObj = new Manifest();
|
||||||
|
const versionInfo = await manifestObj.getModuleVersionInfo(moduleName, bmadDir, sourcePath);
|
||||||
|
|
||||||
|
await manifestObj.addModule(bmadDir, moduleName, {
|
||||||
|
version: versionInfo.version,
|
||||||
|
source: versionInfo.source,
|
||||||
|
npmPackage: versionInfo.npmPackage,
|
||||||
|
repoUrl: versionInfo.repoUrl,
|
||||||
|
});
|
||||||
|
|
||||||
return {
|
return {
|
||||||
success: true,
|
success: true,
|
||||||
module: moduleName,
|
module: moduleName,
|
||||||
path: targetPath,
|
path: targetPath,
|
||||||
|
versionInfo,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1586,6 +1586,131 @@ class UI {
|
||||||
|
|
||||||
return proceed === 'proceed';
|
return proceed === 'proceed';
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Display module versions with update availability
|
||||||
|
* @param {Array} modules - Array of module info objects with version info
|
||||||
|
* @param {Array} availableUpdates - Array of available updates
|
||||||
|
*/
|
||||||
|
displayModuleVersions(modules, availableUpdates = []) {
|
||||||
|
console.log('');
|
||||||
|
console.log(chalk.cyan.bold('📦 Module Versions'));
|
||||||
|
console.log(chalk.gray('─'.repeat(80)));
|
||||||
|
|
||||||
|
// Group modules by source
|
||||||
|
const builtIn = modules.filter((m) => m.source === 'built-in');
|
||||||
|
const external = modules.filter((m) => m.source === 'external');
|
||||||
|
const custom = modules.filter((m) => m.source === 'custom');
|
||||||
|
const unknown = modules.filter((m) => m.source === 'unknown');
|
||||||
|
|
||||||
|
const displayGroup = (group, title) => {
|
||||||
|
if (group.length === 0) return;
|
||||||
|
|
||||||
|
console.log(chalk.yellow(`\n${title}`));
|
||||||
|
for (const module of group) {
|
||||||
|
const updateInfo = availableUpdates.find((u) => u.name === module.name);
|
||||||
|
const versionDisplay = module.version || chalk.gray('unknown');
|
||||||
|
|
||||||
|
if (updateInfo) {
|
||||||
|
console.log(
|
||||||
|
` ${chalk.cyan(module.name.padEnd(20))} ${versionDisplay} → ${chalk.green(updateInfo.latestVersion)} ${chalk.green('↑')}`,
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
console.log(` ${chalk.cyan(module.name.padEnd(20))} ${versionDisplay} ${chalk.gray('✓')}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
displayGroup(builtIn, 'Built-in Modules');
|
||||||
|
displayGroup(external, 'External Modules (Official)');
|
||||||
|
displayGroup(custom, 'Custom Modules');
|
||||||
|
displayGroup(unknown, 'Other Modules');
|
||||||
|
|
||||||
|
console.log('');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Prompt user to select which modules to update
|
||||||
|
* @param {Array} availableUpdates - Array of available updates
|
||||||
|
* @returns {Array} Selected module names to update
|
||||||
|
*/
|
||||||
|
async promptUpdateSelection(availableUpdates) {
|
||||||
|
if (availableUpdates.length === 0) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log('');
|
||||||
|
console.log(chalk.cyan.bold('🔄 Available Updates'));
|
||||||
|
console.log(chalk.gray('─'.repeat(80)));
|
||||||
|
|
||||||
|
const choices = availableUpdates.map((update) => ({
|
||||||
|
name: `${update.name} ${chalk.dim(`(v${update.installedVersion} → v${update.latestVersion})`)}`,
|
||||||
|
value: update.name,
|
||||||
|
checked: true, // Default to selecting all updates
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Add "Update All" and "Cancel" options
|
||||||
|
const action = await prompts.select({
|
||||||
|
message: 'How would you like to proceed?',
|
||||||
|
choices: [
|
||||||
|
{ name: 'Update all available modules', value: 'all' },
|
||||||
|
{ name: 'Select specific modules to update', value: 'select' },
|
||||||
|
{ name: 'Skip updates for now', value: 'skip' },
|
||||||
|
],
|
||||||
|
default: 'all',
|
||||||
|
});
|
||||||
|
|
||||||
|
if (action === 'all') {
|
||||||
|
return availableUpdates.map((u) => u.name);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (action === 'skip') {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allow specific selection
|
||||||
|
const selected = await prompts.multiselect({
|
||||||
|
message: `Select modules to update ${chalk.dim('(↑/↓ navigates, SPACE toggles, ENTER to confirm)')}:`,
|
||||||
|
choices: choices,
|
||||||
|
required: true,
|
||||||
|
});
|
||||||
|
|
||||||
|
return selected || [];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Display status of all installed modules
|
||||||
|
* @param {Object} statusData - Status data with modules, installation info, and available updates
|
||||||
|
*/
|
||||||
|
displayStatus(statusData) {
|
||||||
|
const { installation, modules, availableUpdates, bmadDir } = statusData;
|
||||||
|
|
||||||
|
console.log('');
|
||||||
|
console.log(chalk.cyan.bold('📋 BMAD Status'));
|
||||||
|
console.log(chalk.gray('─'.repeat(80)));
|
||||||
|
|
||||||
|
// Installation info
|
||||||
|
console.log(chalk.yellow('\nInstallation'));
|
||||||
|
console.log(` ${chalk.gray('Version:'.padEnd(20))} ${installation.version || chalk.gray('unknown')}`);
|
||||||
|
console.log(` ${chalk.gray('Location:'.padEnd(20))} ${bmadDir}`);
|
||||||
|
console.log(` ${chalk.gray('Installed:'.padEnd(20))} ${new Date(installation.installDate).toLocaleDateString()}`);
|
||||||
|
console.log(
|
||||||
|
` ${chalk.gray('Last Updated:'.padEnd(20))} ${installation.lastUpdated ? new Date(installation.lastUpdated).toLocaleDateString() : chalk.gray('unknown')}`,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Module versions
|
||||||
|
this.displayModuleVersions(modules, availableUpdates);
|
||||||
|
|
||||||
|
// Update summary
|
||||||
|
if (availableUpdates.length > 0) {
|
||||||
|
console.log(chalk.yellow.bold(`\n⚠️ ${availableUpdates.length} update(s) available`));
|
||||||
|
console.log(chalk.dim(` Run 'bmad install' and select "Quick Update" to update`));
|
||||||
|
} else {
|
||||||
|
console.log(chalk.green.bold('\n✓ All modules are up to date'));
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log('');
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = { UI };
|
module.exports = { UI };
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue