diff --git a/src/bmm/workflows/1-analysis/create-product-brief/product-brief.template.md b/src/bmm/workflows/1-analysis/create-product-brief/product-brief.template.md
deleted file mode 100644
index d41d5620..00000000
--- a/src/bmm/workflows/1-analysis/create-product-brief/product-brief.template.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-stepsCompleted: []
-inputDocuments: []
-date: { system-date }
-author: { user }
----
-
-# Product Brief: {{project_name}}
-
-
diff --git a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-01-init.md b/src/bmm/workflows/1-analysis/create-product-brief/steps/step-01-init.md
deleted file mode 100644
index 49618093..00000000
--- a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-01-init.md
+++ /dev/null
@@ -1,177 +0,0 @@
----
-name: 'step-01-init'
-description: 'Initialize the product brief workflow by detecting continuation state and setting up the document'
-
-# File References
-nextStepFile: './step-02-vision.md'
-outputFile: '{planning_artifacts}/product-brief-{{project_name}}-{{date}}.md'
-
-# Template References
-productBriefTemplate: '../product-brief.template.md'
----
-
-# Step 1: Product Brief Initialization
-
-## STEP GOAL:
-
-Initialize the product brief workflow by detecting continuation state and setting up the document structure for collaborative product discovery.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused Business Analyst facilitator
-- β If you already have been given a name, communication_style and persona, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision
-- β Maintain collaborative discovery tone throughout
-
-### Step-Specific Rules:
-
-- π― Focus only on initialization and setup - no content generation yet
-- π« FORBIDDEN to look ahead to future steps or assume knowledge from them
-- π¬ Approach: Systematic setup with clear reporting to user
-- π Detect existing workflow state and handle continuation properly
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis of current state before taking any action
-- πΎ Initialize document structure and update frontmatter appropriately
-- π Set up frontmatter `stepsCompleted: [1]` before loading next step
-- π« FORBIDDEN to load next step until user selects 'C' (Continue)
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Variables from workflow.md are available in memory
-- Focus: Workflow initialization and document setup only
-- Limits: Don't assume knowledge from other steps or create content yet
-- Dependencies: Configuration loaded from workflow.md initialization
-
-## Sequence of Instructions (Do not deviate, skip, or optimize)
-
-### 1. Check for Existing Workflow State
-
-First, check if the output document already exists:
-
-**Workflow State Detection:**
-
-- Look for file `{outputFile}`
-- If exists, read the complete file including frontmatter
-- If not exists, this is a fresh workflow
-
-### 2. Handle Continuation (If Document Exists)
-
-If the document exists and has frontmatter with `stepsCompleted`:
-
-**Continuation Protocol:**
-
-- **STOP immediately** and load `./step-01b-continue.md`
-- Do not proceed with any initialization tasks
-- Let step-01b handle all continuation logic
-- This is an auto-proceed situation - no user choice needed
-
-### 3. Fresh Workflow Setup (If No Document)
-
-If no document exists or no `stepsCompleted` in frontmatter:
-
-#### A. Input Document Discovery
-
-load context documents using smart discovery. Documents can be in the following locations:
-- {planning_artifacts}/**
-- {output_folder}/**
-- {product_knowledge}/**
-- docs/**
-
-Also - when searching - documents can be a single markdown file, or a folder with an index and multiple files. For Example, if searching for `*foo*.md` and not found, also search for a folder called *foo*/index.md (which indicates sharded content)
-
-Try to discover the following:
-- Brainstorming Reports (`*brainstorming*.md`)
-- Research Documents (`*research*.md`)
-- Project Documentation (generally multiple documents might be found for this in the `{product_knowledge}` or `docs` folder.)
-- Project Context (`**/project-context.md`)
-
-Confirm what you have found with the user, along with asking if the user wants to provide anything else. Only after this confirmation will you proceed to follow the loading rules
-
-**Loading Rules:**
-
-- Load ALL discovered files completely that the user confirmed or provided (no offset/limit)
-- If there is a project context, whatever is relevant should try to be biased in the remainder of this whole workflow process
-- For sharded folders, load ALL files to get complete picture, using the index first to potentially know the potential of each document
-- index.md is a guide to what's relevant whenever available
-- Track all successfully loaded files in frontmatter `inputDocuments` array
-
-#### B. Create Initial Document
-
-**Document Setup:**
-
-- Copy the template from `{productBriefTemplate}` to `{outputFile}`, and update the frontmatter fields
-
-#### C. Present Initialization Results
-
-**Setup Report to User:**
-"Welcome {{user_name}}! I've set up your product brief workspace for {{project_name}}.
-
-**Document Setup:**
-
-- Created: `{outputFile}` from template
-- Initialized frontmatter with workflow state
-
-**Input Documents Discovered:**
-
-- Research: {number of research files loaded or "None found"}
-- Brainstorming: {number of brainstorming files loaded or "None found"}
-- Project docs: {number of project files loaded or "None found"}
-- Project Context: {number of project context files loaded or "None found"}
-
-**Files loaded:** {list of specific file names or "No additional documents found"}
-
-Do you have any other documents you'd like me to include, or shall we continue to the next step?"
-
-### 4. Present MENU OPTIONS
-
-Display: "**Proceeding to product vision discovery...**"
-
-#### Menu Handling Logic:
-
-- After setup report is presented, without delay, read fully and follow: {nextStepFile}
-
-#### EXECUTION RULES:
-
-- This is an initialization step with auto-proceed after setup completion
-- Proceed directly to next step after document setup and reporting
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [setup completion is achieved and frontmatter properly updated], will you then read fully and follow: `{nextStepFile}` to begin product vision discovery.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Existing workflow detected and properly handed off to step-01b
-- Fresh workflow initialized with template and proper frontmatter
-- Input documents discovered and loaded using sharded-first logic
-- All discovered files tracked in frontmatter `inputDocuments`
-- Menu presented and user input handled correctly
-- Frontmatter updated with `stepsCompleted: [1]` before proceeding
-
-### β SYSTEM FAILURE:
-
-- Proceeding with fresh initialization when existing workflow exists
-- Not updating frontmatter with discovered input documents
-- Creating document without proper template structure
-- Not checking sharded folders first before whole files
-- Not reporting discovered documents to user clearly
-- Proceeding without user selecting 'C' (Continue)
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-01b-continue.md b/src/bmm/workflows/1-analysis/create-product-brief/steps/step-01b-continue.md
deleted file mode 100644
index 99b2495f..00000000
--- a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-01b-continue.md
+++ /dev/null
@@ -1,161 +0,0 @@
----
-name: 'step-01b-continue'
-description: 'Resume the product brief workflow from where it was left off, ensuring smooth continuation'
-
-# File References
-outputFile: '{planning_artifacts}/product-brief-{{project_name}}-{{date}}.md'
----
-
-# Step 1B: Product Brief Continuation
-
-## STEP GOAL:
-
-Resume the product brief workflow from where it was left off, ensuring smooth continuation with full context restoration.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused Business Analyst facilitator
-- β If you already have been given a name, communication_style and persona, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision
-- β Maintain collaborative continuation tone throughout
-
-### Step-Specific Rules:
-
-- π― Focus only on understanding where we left off and continuing appropriately
-- π« FORBIDDEN to modify content completed in previous steps
-- π¬ Approach: Systematic state analysis with clear progress reporting
-- π Resume workflow from exact point where it was interrupted
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis of current state before taking any action
-- πΎ Keep existing frontmatter `stepsCompleted` values
-- π Only load documents that were already tracked in `inputDocuments`
-- π« FORBIDDEN to discover new input documents during continuation
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Current document and frontmatter are already loaded
-- Focus: Workflow state analysis and continuation logic only
-- Limits: Don't assume knowledge beyond what's in the document
-- Dependencies: Existing workflow state from previous session
-
-## Sequence of Instructions (Do not deviate, skip, or optimize)
-
-### 1. Analyze Current State
-
-**State Assessment:**
-Review the frontmatter to understand:
-
-- `stepsCompleted`: Which steps are already done
-- `lastStep`: The most recently completed step number
-- `inputDocuments`: What context was already loaded
-- All other frontmatter variables
-
-### 2. Restore Context Documents
-
-**Context Reloading:**
-
-- For each document in `inputDocuments`, load the complete file
-- This ensures you have full context for continuation
-- Don't discover new documents - only reload what was previously processed
-- Maintain the same context as when workflow was interrupted
-
-### 3. Present Current Progress
-
-**Progress Report to User:**
-"Welcome back {{user_name}}! I'm resuming our product brief collaboration for {{project_name}}.
-
-**Current Progress:**
-
-- Steps completed: {stepsCompleted}
-- Last worked on: Step {lastStep}
-- Context documents available: {len(inputDocuments)} files
-
-**Document Status:**
-
-- Current product brief is ready with all completed sections
-- Ready to continue from where we left off
-
-Does this look right, or do you want to make any adjustments before we proceed?"
-
-### 4. Determine Continuation Path
-
-**Next Step Logic:**
-Based on `lastStep` value, determine which step to load next:
-
-- If `lastStep = 1` β Load `./step-02-vision.md`
-- If `lastStep = 2` β Load `./step-03-users.md`
-- If `lastStep = 3` β Load `./step-04-metrics.md`
-- Continue this pattern for all steps
-- If `lastStep = 6` β Workflow already complete
-
-### 5. Handle Workflow Completion
-
-**If workflow already complete (`lastStep = 6`):**
-"Great news! It looks like we've already completed the product brief workflow for {{project_name}}.
-
-The final document is ready at `{outputFile}` with all sections completed through step 6.
-
-Would you like me to:
-
-- Review the completed product brief with you
-- Suggest next workflow steps (like PRD creation)
-- Start a new product brief revision
-
-What would be most helpful?"
-
-### 6. Present MENU OPTIONS
-
-**If workflow not complete:**
-Display: "Ready to continue with Step {nextStepNumber}: {nextStepTitle}?
-
-**Select an Option:** [C] Continue to Step {nextStepNumber}"
-
-#### Menu Handling Logic:
-
-- IF C: Read fully and follow the appropriate next step file based on `lastStep`
-- IF Any other comments or queries: respond and redisplay menu
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- User can chat or ask questions about current progress
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [current state confirmed], will you then read fully and follow the appropriate next step file to resume the workflow.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All previous input documents successfully reloaded
-- Current workflow state accurately analyzed and presented
-- User confirms understanding of progress before continuation
-- Correct next step identified and prepared for loading
-- Proper continuation path determined based on `lastStep`
-
-### β SYSTEM FAILURE:
-
-- Discovering new input documents instead of reloading existing ones
-- Modifying content from already completed steps
-- Loading wrong next step based on `lastStep` value
-- Proceeding without user confirmation of current state
-- Not maintaining context consistency from previous session
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-02-vision.md b/src/bmm/workflows/1-analysis/create-product-brief/steps/step-02-vision.md
deleted file mode 100644
index f00e18fa..00000000
--- a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-02-vision.md
+++ /dev/null
@@ -1,199 +0,0 @@
----
-name: 'step-02-vision'
-description: 'Discover and define the core product vision, problem statement, and unique value proposition'
-
-# File References
-nextStepFile: './step-03-users.md'
-outputFile: '{planning_artifacts}/product-brief-{{project_name}}-{{date}}.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 2: Product Vision Discovery
-
-## STEP GOAL:
-
-Conduct comprehensive product vision discovery to define the core problem, solution, and unique value proposition through collaborative analysis.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused Business Analyst facilitator
-- β If you already have been given a name, communication_style and persona, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision
-- β Maintain collaborative discovery tone throughout
-
-### Step-Specific Rules:
-
-- π― Focus only on product vision, problem, and solution discovery
-- π« FORBIDDEN to generate vision without real user input and collaboration
-- π¬ Approach: Systematic discovery from problem to solution
-- π COLLABORATIVE discovery, not assumption-based vision crafting
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΎ Generate vision content collaboratively with user
-- π Update frontmatter `stepsCompleted: [1, 2]` before loading next step
-- π« FORBIDDEN to proceed without user confirmation through menu
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Current document and frontmatter from step 1, input documents already loaded in memory
-- Focus: This will be the first content section appended to the document
-- Limits: Focus on clear, compelling product vision and problem statement
-- Dependencies: Document initialization from step-01 must be complete
-
-## Sequence of Instructions (Do not deviate, skip, or optimize)
-
-### 1. Begin Vision Discovery
-
-**Opening Conversation:**
-"As your PM peer, I'm excited to help you shape the vision for {{project_name}}. Let's start with the foundation.
-
-**Tell me about the product you envision:**
-
-- What core problem are you trying to solve?
-- Who experiences this problem most acutely?
-- What would success look like for the people you're helping?
-- What excites you most about this solution?
-
-Let's start with the problem space before we get into solutions."
-
-### 2. Deep Problem Understanding
-
-**Problem Discovery:**
-Explore the problem from multiple angles using targeted questions:
-
-- How do people currently solve this problem?
-- What's frustrating about current solutions?
-- What happens if this problem goes unsolved?
-- Who feels this pain most intensely?
-
-### 3. Current Solutions Analysis
-
-**Competitive Landscape:**
-
-- What solutions exist today?
-- Where do they fall short?
-- What gaps are they leaving open?
-- Why haven't existing solutions solved this completely?
-
-### 4. Solution Vision
-
-**Collaborative Solution Crafting:**
-
-- If we could solve this perfectly, what would that look like?
-- What's the simplest way we could make a meaningful difference?
-- What makes your approach different from what's out there?
-- What would make users say 'this is exactly what I needed'?
-
-### 5. Unique Differentiators
-
-**Competitive Advantage:**
-
-- What's your unfair advantage?
-- What would be hard for competitors to copy?
-- What insight or approach is uniquely yours?
-- Why is now the right time for this solution?
-
-### 6. Generate Executive Summary Content
-
-**Content to Append:**
-Prepare the following structure for document append:
-
-```markdown
-## Executive Summary
-
-[Executive summary content based on conversation]
-
----
-
-## Core Vision
-
-### Problem Statement
-
-[Problem statement content based on conversation]
-
-### Problem Impact
-
-[Problem impact content based on conversation]
-
-### Why Existing Solutions Fall Short
-
-[Analysis of existing solution gaps based on conversation]
-
-### Proposed Solution
-
-[Proposed solution description based on conversation]
-
-### Key Differentiators
-
-[Key differentiators based on conversation]
-```
-
-### 7. Present MENU OPTIONS
-
-**Content Presentation:**
-"I've drafted the executive summary and core vision based on our conversation. This captures the essence of {{project_name}} and what makes it special.
-
-**Here's what I'll add to the document:**
-[Show the complete markdown content from step 6]
-
-**Select an Option:** [A] Advanced Elicitation [P] Party Mode [C] Continue"
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: {advancedElicitationTask} with current vision content to dive deeper and refine
-- IF P: Read fully and follow: {partyModeWorkflow} to bring different perspectives to positioning and differentiation
-- IF C: Save content to {outputFile}, update frontmatter with stepsCompleted: [1, 2], then read fully and follow: {nextStepFile}
-- IF Any other comments or queries: help user respond then [Redisplay Menu Options](#7-present-menu-options)
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu with updated content
-- User can chat or ask questions - always respond and then end with display again of the menu options
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [vision content finalized and saved to document with frontmatter updated], will you then read fully and follow: `{nextStepFile}` to begin target user discovery.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Clear problem statement that resonates with target users
-- Compelling solution vision that addresses the core problem
-- Unique differentiators that provide competitive advantage
-- Executive summary that captures the product essence
-- A/P/C menu presented and handled correctly with proper task execution
-- Content properly appended to document when C selected
-- Frontmatter updated with stepsCompleted: [1, 2]
-
-### β SYSTEM FAILURE:
-
-- Accepting vague problem statements without pushing for specificity
-- Creating solution vision without fully understanding the problem
-- Missing unique differentiators or competitive insights
-- Generating vision without real user input and collaboration
-- Not presenting standard A/P/C menu after content generation
-- Appending content without user selecting 'C'
-- Not updating frontmatter properly
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-03-users.md b/src/bmm/workflows/1-analysis/create-product-brief/steps/step-03-users.md
deleted file mode 100644
index cba26641..00000000
--- a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-03-users.md
+++ /dev/null
@@ -1,202 +0,0 @@
----
-name: 'step-03-users'
-description: 'Define target users with rich personas and map their key interactions with the product'
-
-# File References
-nextStepFile: './step-04-metrics.md'
-outputFile: '{planning_artifacts}/product-brief-{{project_name}}-{{date}}.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 3: Target Users Discovery
-
-## STEP GOAL:
-
-Define target users with rich personas and map their key interactions with the product through collaborative user research and journey mapping.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused Business Analyst facilitator
-- β If you already have been given a name, communication_style and persona, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision
-- β Maintain collaborative discovery tone throughout
-
-### Step-Specific Rules:
-
-- π― Focus only on defining who this product serves and how they interact with it
-- π« FORBIDDEN to create generic user profiles without specific details
-- π¬ Approach: Systematic persona development with journey mapping
-- π COLLABORATIVE persona development, not assumption-based user creation
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΎ Generate user personas and journeys collaboratively with user
-- π Update frontmatter `stepsCompleted: [1, 2, 3]` before loading next step
-- π« FORBIDDEN to proceed without user confirmation through menu
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Current document and frontmatter from previous steps, product vision and problem already defined
-- Focus: Creating vivid, actionable user personas that align with product vision
-- Limits: Focus on users who directly experience the problem or benefit from the solution
-- Dependencies: Product vision and problem statement from step-02 must be complete
-
-## Sequence of Instructions (Do not deviate, skip, or optimize)
-
-### 1. Begin User Discovery
-
-**Opening Exploration:**
-"Now that we understand what {{project_name}} does, let's define who it's for.
-
-**User Discovery:**
-
-- Who experiences the problem we're solving?
-- Are there different types of users with different needs?
-- Who gets the most value from this solution?
-- Are there primary users and secondary users we should consider?
-
-Let's start by identifying the main user groups."
-
-### 2. Primary User Segment Development
-
-**Persona Development Process:**
-For each primary user segment, create rich personas:
-
-**Name & Context:**
-
-- Give them a realistic name and brief backstory
-- Define their role, environment, and context
-- What motivates them? What are their goals?
-
-**Problem Experience:**
-
-- How do they currently experience the problem?
-- What workarounds are they using?
-- What are the emotional and practical impacts?
-
-**Success Vision:**
-
-- What would success look like for them?
-- What would make them say "this is exactly what I needed"?
-
-**Primary User Questions:**
-
-- "Tell me about a typical person who would use {{project_name}}"
-- "What's their day like? Where does our product fit in?"
-- "What are they trying to accomplish that's hard right now?"
-
-### 3. Secondary User Segment Exploration
-
-**Secondary User Considerations:**
-
-- "Who else benefits from this solution, even if they're not the primary user?"
-- "Are there admin, support, or oversight roles we should consider?"
-- "Who influences the decision to adopt or purchase this product?"
-- "Are there partner or stakeholder users who matter?"
-
-### 4. User Journey Mapping
-
-**Journey Elements:**
-Map key interactions for each user segment:
-
-- **Discovery:** How do they find out about the solution?
-- **Onboarding:** What's their first experience like?
-- **Core Usage:** How do they use the product day-to-day?
-- **Success Moment:** When do they realize the value?
-- **Long-term:** How does it become part of their routine?
-
-**Journey Questions:**
-
-- "Walk me through how [Persona Name] would discover and start using {{project_name}}"
-- "What's their 'aha!' moment?"
-- "How does this product change how they work or live?"
-
-### 5. Generate Target Users Content
-
-**Content to Append:**
-Prepare the following structure for document append:
-
-```markdown
-## Target Users
-
-### Primary Users
-
-[Primary user segment content based on conversation]
-
-### Secondary Users
-
-[Secondary user segment content based on conversation, or N/A if not discussed]
-
-### User Journey
-
-[User journey content based on conversation, or N/A if not discussed]
-```
-
-### 6. Present MENU OPTIONS
-
-**Content Presentation:**
-"I've mapped out who {{project_name}} serves and how they'll interact with it. This helps us ensure we're building something that real people will love to use.
-
-**Here's what I'll add to the document:**
-[Show the complete markdown content from step 5]
-
-**Select an Option:** [A] Advanced Elicitation [P] Party Mode [C] Continue"
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: {advancedElicitationTask} with current user content to dive deeper into personas and journeys
-- IF P: Read fully and follow: {partyModeWorkflow} to bring different perspectives to validate user understanding
-- IF C: Save content to {outputFile}, update frontmatter with stepsCompleted: [1, 2, 3], then read fully and follow: {nextStepFile}
-- IF Any other comments or queries: help user respond then [Redisplay Menu Options](#6-present-menu-options)
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu with updated content
-- User can chat or ask questions - always respond and then end with display again of the menu options
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [user personas finalized and saved to document with frontmatter updated], will you then read fully and follow: `{nextStepFile}` to begin success metrics definition.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Rich, believable user personas with clear motivations
-- Clear distinction between primary and secondary users
-- User journeys that show key interaction points and value creation
-- User segments that align with product vision and problem statement
-- A/P/C menu presented and handled correctly with proper task execution
-- Content properly appended to document when C selected
-- Frontmatter updated with stepsCompleted: [1, 2, 3]
-
-### β SYSTEM FAILURE:
-
-- Creating generic user profiles without specific details
-- Missing key user segments that are important to success
-- User journeys that don't show how the product creates value
-- Not connecting user needs back to the problem statement
-- Not presenting standard A/P/C menu after content generation
-- Appending content without user selecting 'C'
-- Not updating frontmatter properly
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-04-metrics.md b/src/bmm/workflows/1-analysis/create-product-brief/steps/step-04-metrics.md
deleted file mode 100644
index e6b297c3..00000000
--- a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-04-metrics.md
+++ /dev/null
@@ -1,205 +0,0 @@
----
-name: 'step-04-metrics'
-description: 'Define comprehensive success metrics that include user success, business objectives, and key performance indicators'
-
-# File References
-nextStepFile: './step-05-scope.md'
-outputFile: '{planning_artifacts}/product-brief-{{project_name}}-{{date}}.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 4: Success Metrics Definition
-
-## STEP GOAL:
-
-Define comprehensive success metrics that include user success, business objectives, and key performance indicators through collaborative metric definition aligned with product vision and user value.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused Business Analyst facilitator
-- β If you already have been given a name, communication_style and persona, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision
-- β Maintain collaborative discovery tone throughout
-
-### Step-Specific Rules:
-
-- π― Focus only on defining measurable success criteria and business objectives
-- π« FORBIDDEN to create vague metrics that can't be measured or tracked
-- π¬ Approach: Systematic metric definition that connects user value to business success
-- π COLLABORATIVE metric definition that drives actionable decisions
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΎ Generate success metrics collaboratively with user
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4]` before loading next step
-- π« FORBIDDEN to proceed without user confirmation through menu
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Current document and frontmatter from previous steps, product vision and target users already defined
-- Focus: Creating measurable, actionable success criteria that align with product strategy
-- Limits: Focus on metrics that drive decisions and demonstrate real value creation
-- Dependencies: Product vision and user personas from previous steps must be complete
-
-## Sequence of Instructions (Do not deviate, skip, or optimize)
-
-### 1. Begin Success Metrics Discovery
-
-**Opening Exploration:**
-"Now that we know who {{project_name}} serves and what problem it solves, let's define what success looks like.
-
-**Success Discovery:**
-
-- How will we know we're succeeding for our users?
-- What would make users say 'this was worth it'?
-- What metrics show we're creating real value?
-
-Let's start with the user perspective."
-
-### 2. User Success Metrics
-
-**User Success Questions:**
-Define success from the user's perspective:
-
-- "What outcome are users trying to achieve?"
-- "How will they know the product is working for them?"
-- "What's the moment where they realize this is solving their problem?"
-- "What behaviors indicate users are getting value?"
-
-**User Success Exploration:**
-Guide from vague to specific metrics:
-
-- "Users are happy" β "Users complete [key action] within [timeframe]"
-- "Product is useful" β "Users return [frequency] and use [core feature]"
-- Focus on outcomes and behaviors, not just satisfaction scores
-
-### 3. Business Objectives
-
-**Business Success Questions:**
-Define business success metrics:
-
-- "What does success look like for the business at 3 months? 12 months?"
-- "Are we measuring revenue, user growth, engagement, something else?"
-- "What business metrics would make you say 'this is working'?"
-- "How does this product contribute to broader company goals?"
-
-**Business Success Categories:**
-
-- **Growth Metrics:** User acquisition, market penetration
-- **Engagement Metrics:** Usage patterns, retention, satisfaction
-- **Financial Metrics:** Revenue, profitability, cost efficiency
-- **Strategic Metrics:** Market position, competitive advantage
-
-### 4. Key Performance Indicators
-
-**KPI Development Process:**
-Define specific, measurable KPIs:
-
-- Transform objectives into measurable indicators
-- Ensure each KPI has a clear measurement method
-- Define targets and timeframes where appropriate
-- Include leading indicators that predict success
-
-**KPI Examples:**
-
-- User acquisition: "X new users per month"
-- Engagement: "Y% of users complete core journey weekly"
-- Business impact: "$Z in cost savings or revenue generation"
-
-### 5. Connect Metrics to Strategy
-
-**Strategic Alignment:**
-Ensure metrics align with product vision and user needs:
-
-- Connect each metric back to the product vision
-- Ensure user success metrics drive business success
-- Validate that metrics measure what truly matters
-- Avoid vanity metrics that don't drive decisions
-
-### 6. Generate Success Metrics Content
-
-**Content to Append:**
-Prepare the following structure for document append:
-
-```markdown
-## Success Metrics
-
-[Success metrics content based on conversation]
-
-### Business Objectives
-
-[Business objectives content based on conversation, or N/A if not discussed]
-
-### Key Performance Indicators
-
-[Key performance indicators content based on conversation, or N/A if not discussed]
-```
-
-### 7. Present MENU OPTIONS
-
-**Content Presentation:**
-"I've defined success metrics that will help us track whether {{project_name}} is creating real value for users and achieving business objectives.
-
-**Here's what I'll add to the document:**
-[Show the complete markdown content from step 6]
-
-**Select an Option:** [A] Advanced Elicitation [P] Party Mode [C] Continue"
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: {advancedElicitationTask} with current metrics content to dive deeper into success metric insights
-- IF P: Read fully and follow: {partyModeWorkflow} to bring different perspectives to validate comprehensive metrics
-- IF C: Save content to {outputFile}, update frontmatter with stepsCompleted: [1, 2, 3, 4], then read fully and follow: {nextStepFile}
-- IF Any other comments or queries: help user respond then [Redisplay Menu Options](#7-present-menu-options)
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu with updated content
-- User can chat or ask questions - always respond and then end with display again of the menu options
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [success metrics finalized and saved to document with frontmatter updated], will you then read fully and follow: `{nextStepFile}` to begin MVP scope definition.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- User success metrics that focus on outcomes and behaviors
-- Clear business objectives aligned with product strategy
-- Specific, measurable KPIs with defined targets and timeframes
-- Metrics that connect user value to business success
-- A/P/C menu presented and handled correctly with proper task execution
-- Content properly appended to document when C selected
-- Frontmatter updated with stepsCompleted: [1, 2, 3, 4]
-
-### β SYSTEM FAILURE:
-
-- Vague success metrics that can't be measured or tracked
-- Business objectives disconnected from user success
-- Too many metrics or missing critical success indicators
-- Metrics that don't drive actionable decisions
-- Not presenting standard A/P/C menu after content generation
-- Appending content without user selecting 'C'
-- Not updating frontmatter properly
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-05-scope.md b/src/bmm/workflows/1-analysis/create-product-brief/steps/step-05-scope.md
deleted file mode 100644
index 0914b835..00000000
--- a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-05-scope.md
+++ /dev/null
@@ -1,219 +0,0 @@
----
-name: 'step-05-scope'
-description: 'Define MVP scope with clear boundaries and outline future vision while managing scope creep'
-
-# File References
-nextStepFile: './step-06-complete.md'
-outputFile: '{planning_artifacts}/product-brief-{{project_name}}-{{date}}.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 5: MVP Scope Definition
-
-## STEP GOAL:
-
-Define MVP scope with clear boundaries and outline future vision through collaborative scope negotiation that balances ambition with realism.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused Business Analyst facilitator
-- β If you already have been given a name, communication_style and persona, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision
-- β Maintain collaborative discovery tone throughout
-
-### Step-Specific Rules:
-
-- π― Focus only on defining minimum viable scope and future vision
-- π« FORBIDDEN to create MVP scope that's too large or includes non-essential features
-- π¬ Approach: Systematic scope negotiation with clear boundary setting
-- π COLLABORATIVE scope definition that prevents scope creep
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΎ Generate MVP scope collaboratively with user
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4, 5]` before loading next step
-- π« FORBIDDEN to proceed without user confirmation through menu
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Current document and frontmatter from previous steps, product vision, users, and success metrics already defined
-- Focus: Defining what's essential for MVP vs. future enhancements
-- Limits: Balance user needs with implementation feasibility
-- Dependencies: Product vision, user personas, and success metrics from previous steps must be complete
-
-## Sequence of Instructions (Do not deviate, skip, or optimize)
-
-### 1. Begin Scope Definition
-
-**Opening Exploration:**
-"Now that we understand what {{project_name}} does, who it serves, and how we'll measure success, let's define what we need to build first.
-
-**Scope Discovery:**
-
-- What's the absolute minimum we need to deliver to solve the core problem?
-- What features would make users say 'this solves my problem'?
-- How do we balance ambition with getting something valuable to users quickly?
-
-Let's start with the MVP mindset: what's the smallest version that creates real value?"
-
-### 2. MVP Core Features Definition
-
-**MVP Feature Questions:**
-Define essential features for minimum viable product:
-
-- "What's the core functionality that must work?"
-- "Which features directly address the main problem we're solving?"
-- "What would users consider 'incomplete' if it was missing?"
-- "What features create the 'aha!' moment we discussed earlier?"
-
-**MVP Criteria:**
-
-- **Solves Core Problem:** Addresses the main pain point effectively
-- **User Value:** Creates meaningful outcome for target users
-- **Feasible:** Achievable with available resources and timeline
-- **Testable:** Allows learning and iteration based on user feedback
-
-### 3. Out of Scope Boundaries
-
-**Out of Scope Exploration:**
-Define what explicitly won't be in MVP:
-
-- "What features would be nice to have but aren't essential?"
-- "What functionality could wait for version 2.0?"
-- "What are we intentionally saying 'no' to for now?"
-- "How do we communicate these boundaries to stakeholders?"
-
-**Boundary Setting:**
-
-- Clear communication about what's not included
-- Rationale for deferring certain features
-- Timeline considerations for future additions
-- Trade-off explanations for stakeholders
-
-### 4. MVP Success Criteria
-
-**Success Validation:**
-Define what makes the MVP successful:
-
-- "How will we know the MVP is successful?"
-- "What metrics will indicate we should proceed beyond MVP?"
-- "What user feedback signals validate our approach?"
-- "What's the decision point for scaling beyond MVP?"
-
-**Success Gates:**
-
-- User adoption metrics
-- Problem validation evidence
-- Technical feasibility confirmation
-- Business model validation
-
-### 5. Future Vision Exploration
-
-**Vision Questions:**
-Define the longer-term product vision:
-
-- "If this is wildly successful, what does it become in 2-3 years?"
-- "What capabilities would we add with more resources?"
-- "How does the MVP evolve into the full product vision?"
-- "What markets or user segments could we expand to?"
-
-**Future Features:**
-
-- Post-MVP enhancements that build on core functionality
-- Scale considerations and growth capabilities
-- Platform or ecosystem expansion opportunities
-- Advanced features that differentiate in the long term
-
-### 6. Generate MVP Scope Content
-
-**Content to Append:**
-Prepare the following structure for document append:
-
-```markdown
-## MVP Scope
-
-### Core Features
-
-[Core features content based on conversation]
-
-### Out of Scope for MVP
-
-[Out of scope content based on conversation, or N/A if not discussed]
-
-### MVP Success Criteria
-
-[MVP success criteria content based on conversation, or N/A if not discussed]
-
-### Future Vision
-
-[Future vision content based on conversation, or N/A if not discussed]
-```
-
-### 7. Present MENU OPTIONS
-
-**Content Presentation:**
-"I've defined the MVP scope for {{project_name}} that balances delivering real value with realistic boundaries. This gives us a clear path forward while keeping our options open for future growth.
-
-**Here's what I'll add to the document:**
-[Show the complete markdown content from step 6]
-
-**Select an Option:** [A] Advanced Elicitation [P] Party Mode [C] Continue"
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: {advancedElicitationTask} with current scope content to optimize scope definition
-- IF P: Read fully and follow: {partyModeWorkflow} to bring different perspectives to validate MVP scope
-- IF C: Save content to {outputFile}, update frontmatter with stepsCompleted: [1, 2, 3, 4, 5], then read fully and follow: {nextStepFile}
-- IF Any other comments or queries: help user respond then [Redisplay Menu Options](#7-present-menu-options)
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu with updated content
-- User can chat or ask questions - always respond and then end with display again of the menu options
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [MVP scope finalized and saved to document with frontmatter updated], will you then read fully and follow: `{nextStepFile}` to complete the product brief workflow.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- MVP features that solve the core problem effectively
-- Clear out-of-scope boundaries that prevent scope creep
-- Success criteria that validate MVP approach and inform go/no-go decisions
-- Future vision that inspires while maintaining focus on MVP
-- A/P/C menu presented and handled correctly with proper task execution
-- Content properly appended to document when C selected
-- Frontmatter updated with stepsCompleted: [1, 2, 3, 4, 5]
-
-### β SYSTEM FAILURE:
-
-- MVP scope too large or includes non-essential features
-- Missing clear boundaries leading to scope creep
-- No success criteria to validate MVP approach
-- Future vision disconnected from MVP foundation
-- Not presenting standard A/P/C menu after content generation
-- Appending content without user selecting 'C'
-- Not updating frontmatter properly
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-06-complete.md b/src/bmm/workflows/1-analysis/create-product-brief/steps/step-06-complete.md
deleted file mode 100644
index 91c1ba66..00000000
--- a/src/bmm/workflows/1-analysis/create-product-brief/steps/step-06-complete.md
+++ /dev/null
@@ -1,162 +0,0 @@
----
-name: 'step-06-complete'
-description: 'Complete the product brief workflow, update status files, and suggest next steps for the project'
-
-# File References
-outputFile: '{planning_artifacts}/product-brief-{{project_name}}-{{date}}.md'
----
-
-# Step 6: Product Brief Completion
-
-## STEP GOAL:
-
-Complete the product brief workflow, update status files, and provide guidance on logical next steps for continued product development.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused Business Analyst facilitator
-- β If you already have been given a name, communication_style and persona, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision
-- β Maintain collaborative completion tone throughout
-
-### Step-Specific Rules:
-
-- π― Focus only on completion, next steps, and project guidance
-- π« FORBIDDEN to generate new content for the product brief
-- π¬ Approach: Systematic completion with quality validation and next step recommendations
-- π FINALIZE document and update workflow status appropriately
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΎ Update the main workflow status file with completion information
-- π Suggest potential next workflow steps for the user
-- π« DO NOT load additional steps after this one (this is final)
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Complete product brief document from all previous steps, workflow frontmatter shows all completed steps
-- Focus: Completion validation, status updates, and next step guidance
-- Limits: No new content generation, only completion and wrap-up activities
-- Dependencies: All previous steps must be completed with content saved to document
-
-## Sequence of Instructions (Do not deviate, skip, or optimize)
-
-### 1. Announce Workflow Completion
-
-**Completion Announcement:**
-"π **Product Brief Complete, {{user_name}}!**
-
-I've successfully collaborated with you to create a comprehensive Product Brief for {{project_name}}.
-
-**What we've accomplished:**
-
-- β Executive Summary with clear vision and problem statement
-- β Core Vision with solution definition and unique differentiators
-- β Target Users with rich personas and user journeys
-- β Success Metrics with measurable outcomes and business objectives
-- β MVP Scope with focused feature set and clear boundaries
-- β Future Vision that inspires while maintaining current focus
-
-**The complete Product Brief is now available at:** `{outputFile}`
-
-This brief serves as the foundation for all subsequent product development activities and strategic decisions."
-
-### 2. Document Quality Check
-
-**Completeness Validation:**
-Perform final validation of the product brief:
-
-- Does the executive summary clearly communicate the vision and problem?
-- Are target users well-defined with compelling personas?
-- Do success metrics connect user value to business objectives?
-- Is MVP scope focused and realistic?
-- Does the brief provide clear direction for next steps?
-
-**Consistency Validation:**
-
-- Do all sections align with the core problem statement?
-- Is user value consistently emphasized throughout?
-- Are success criteria traceable to user needs and business goals?
-- Does MVP scope align with the problem and solution?
-
-### 3. Suggest Next Steps
-
-**Recommended Next Workflow:**
-Provide guidance on logical next workflows:
-
-1. `create-prd` - Create detailed Product Requirements Document
- - Brief provides foundation for detailed requirements
- - User personas inform journey mapping
- - Success metrics become specific acceptance criteria
- - MVP scope becomes detailed feature specifications
-
-**Other Potential Next Steps:**
-
-1. `create-ux-design` - UX research and design (can run parallel with PRD)
-2. `domain-research` - Deep market or domain research (if needed)
-
-**Strategic Considerations:**
-
-- The PRD workflow builds directly on this brief for detailed planning
-- Consider team capacity and immediate priorities
-- Use brief to validate concept before committing to detailed work
-- Brief can guide early technical feasibility discussions
-
-### 4. Congrats to the user
-
-"**Your Product Brief for {{project_name}} is now complete and ready for the next phase!**"
-
-Recap that the brief captures everything needed to guide subsequent product development:
-
-- Clear vision and problem definition
-- Deep understanding of target users
-- Measurable success criteria
-- Focused MVP scope with realistic boundaries
-- Inspiring long-term vision
-
-### 5. Suggest next steps
-
-Product Brief complete. Read fully and follow: `_bmad/core/tasks/bmad-help.md` with argument `Validate PRD`.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Product brief contains all essential sections with collaborative content
-- All collaborative content properly saved to document with proper frontmatter
-- Workflow status file updated with completion information and timestamp
-- Clear next step guidance provided to user with specific workflow recommendations
-- Document quality validation completed with completeness and consistency checks
-- User acknowledges completion and understands next available options
-- Workflow properly marked as complete in status tracking
-
-### β SYSTEM FAILURE:
-
-- Not updating workflow status file with completion information
-- Missing clear next step guidance for user
-- Not confirming document completeness with user
-- Workflow not properly marked as complete in status tracking
-- User unclear about what happens next or available options
-- Document quality issues not identified or addressed
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
-
-## FINAL WORKFLOW COMPLETION
-
-This product brief is now complete and serves as the strategic foundation for the entire product lifecycle. All subsequent design, architecture, and development work should trace back to the vision, user needs, and success criteria documented in this brief.
-
-**Congratulations on completing the Product Brief for {{project_name}}!** π
diff --git a/src/bmm/workflows/1-analysis/create-product-brief/workflow.md b/src/bmm/workflows/1-analysis/create-product-brief/workflow.md
deleted file mode 100644
index c17b1821..00000000
--- a/src/bmm/workflows/1-analysis/create-product-brief/workflow.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-name: create-product-brief
-description: Create comprehensive product briefs through collaborative step-by-step discovery as creative Business Analyst working with the user as peers.
-web_bundle: true
----
-
-# Product Brief Workflow
-
-**Goal:** Create comprehensive product briefs through collaborative step-by-step discovery as creative Business Analyst working with the user as peers.
-
-**Your Role:** In addition to your name, communication_style, and persona, you are also a product-focused Business Analyst collaborating with an expert peer. This is a partnership, not a client-vendor relationship. You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision. Work together as equals.
-
----
-
-## WORKFLOW ARCHITECTURE
-
-This uses **step-file architecture** for disciplined execution:
-
-### Core Principles
-
-- **Micro-file Design**: Each step is a self contained instruction file that is a part of an overall workflow that must be followed exactly
-- **Just-In-Time Loading**: Only the current step file is in memory - never load future step files until told to do so
-- **Sequential Enforcement**: Sequence within the step files must be completed in order, no skipping or optimization allowed
-- **State Tracking**: Document progress in output file frontmatter using `stepsCompleted` array when a workflow produces a document
-- **Append-Only Building**: Build documents by appending content as directed to the output file
-
-### Step Processing Rules
-
-1. **READ COMPLETELY**: Always read the entire step file before taking any action
-2. **FOLLOW SEQUENCE**: Execute all numbered sections in order, never deviate
-3. **WAIT FOR INPUT**: If a menu is presented, halt and wait for user selection
-4. **CHECK CONTINUATION**: If the step has a menu with Continue as an option, only proceed to next step when user selects 'C' (Continue)
-5. **SAVE STATE**: Update `stepsCompleted` in frontmatter before loading next step
-6. **LOAD NEXT**: When directed, read fully and follow the next step file
-
-### Critical Rules (NO EXCEPTIONS)
-
-- π **NEVER** load multiple step files simultaneously
-- π **ALWAYS** read entire step file before execution
-- π« **NEVER** skip steps or optimize the sequence
-- πΎ **ALWAYS** update frontmatter of output files when writing the final output for a specific step
-- π― **ALWAYS** follow the exact instructions in the step file
-- βΈοΈ **ALWAYS** halt at menus and wait for user input
-- π **NEVER** create mental todo lists from future steps
-
----
-
-## INITIALIZATION SEQUENCE
-
-### 1. Configuration Loading
-
-Load and read full config from {project-root}/_bmad/bmm/config.yaml and resolve:
-
-- `project_name`, `output_folder`, `planning_artifacts`, `user_name`, `communication_language`, `document_output_language`, `user_skill_level`
-
-### 2. First Step EXECUTION
-
-Read fully and follow: `{project-root}/_bmad/bmm/workflows/1-analysis/create-product-brief/steps/step-01-init.md` to begin the workflow.
diff --git a/src/bmm/workflows/1-analysis/research/domain-steps/step-01-init.md b/src/bmm/workflows/1-analysis/research/domain-steps/step-01-init.md
deleted file mode 100644
index 27d056b1..00000000
--- a/src/bmm/workflows/1-analysis/research/domain-steps/step-01-init.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# Domain Research Step 1: Domain Research Scope Confirmation
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user confirmation
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β FOCUS EXCLUSIVELY on confirming domain research scope and approach
-- π YOU ARE A DOMAIN RESEARCH PLANNER, not content generator
-- π¬ ACKNOWLEDGE and CONFIRM understanding of domain research goals
-- π This is SCOPE CONFIRMATION ONLY - no web research yet
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present [C] continue option after scope confirmation
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Research type = "domain" is already set
-- **Research topic = "{{research_topic}}"** - discovered from initial discussion
-- **Research goals = "{{research_goals}}"** - captured from initial discussion
-- Focus on industry/domain analysis with web research
-- Web search is required to verify and supplement your knowledge with current facts
-
-## YOUR TASK:
-
-Confirm domain research scope and approach for **{{research_topic}}** with the user's goals in mind.
-
-## DOMAIN SCOPE CONFIRMATION:
-
-### 1. Begin Scope Confirmation
-
-Start with domain scope understanding:
-"I understand you want to conduct **domain research** for **{{research_topic}}** with these goals: {{research_goals}}
-
-**Domain Research Scope:**
-
-- **Industry Analysis**: Industry structure, market dynamics, and competitive landscape
-- **Regulatory Environment**: Compliance requirements, regulations, and standards
-- **Technology Patterns**: Innovation trends, technology adoption, and digital transformation
-- **Economic Factors**: Market size, growth trends, and economic impact
-- **Supply Chain**: Value chain analysis and ecosystem relationships
-
-**Research Approach:**
-
-- All claims verified against current public sources
-- Multi-source validation for critical domain claims
-- Confidence levels for uncertain domain information
-- Comprehensive domain coverage with industry-specific insights
-
-### 2. Scope Confirmation
-
-Present clear scope confirmation:
-"**Domain Research Scope Confirmation:**
-
-For **{{research_topic}}**, I will research:
-
-β **Industry Analysis** - market structure, key players, competitive dynamics
-β **Regulatory Requirements** - compliance standards, legal frameworks
-β **Technology Trends** - innovation patterns, digital transformation
-β **Economic Factors** - market size, growth projections, economic impact
-β **Supply Chain Analysis** - value chain, ecosystem, partnerships
-
-**All claims verified against current public sources.**
-
-**Does this domain research scope and approach align with your goals?**
-[C] Continue - Begin domain research with this scope
-
-### 3. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- Document scope confirmation in research file
-- Update frontmatter: `stepsCompleted: [1]`
-- Load: `./step-02-domain-analysis.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append scope confirmation:
-
-```markdown
-## Domain Research Scope Confirmation
-
-**Research Topic:** {{research_topic}}
-**Research Goals:** {{research_goals}}
-
-**Domain Research Scope:**
-
-- Industry Analysis - market structure, competitive landscape
-- Regulatory Environment - compliance requirements, legal frameworks
-- Technology Trends - innovation patterns, digital transformation
-- Economic Factors - market size, growth projections
-- Supply Chain Analysis - value chain, ecosystem relationships
-
-**Research Methodology:**
-
-- All claims verified against current public sources
-- Multi-source validation for critical domain claims
-- Confidence level framework for uncertain information
-- Comprehensive domain coverage with industry-specific insights
-
-**Scope Confirmed:** {{date}}
-```
-
-## SUCCESS METRICS:
-
-β Domain research scope clearly confirmed with user
-β All domain analysis areas identified and explained
-β Research methodology emphasized
-β [C] continue option presented and handled correctly
-β Scope confirmation documented when user proceeds
-β Proper routing to next domain research step
-
-## FAILURE MODES:
-
-β Not clearly confirming domain research scope with user
-β Missing critical domain analysis areas
-β Not explaining that web search is required for current facts
-β Not presenting [C] continue option
-β Proceeding without user scope confirmation
-β Not routing to next domain research step
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C', load `./step-02-domain-analysis.md` to begin industry analysis.
-
-Remember: This is SCOPE CONFIRMATION ONLY - no actual domain research yet, just confirming the research approach and scope!
diff --git a/src/bmm/workflows/1-analysis/research/domain-steps/step-02-domain-analysis.md b/src/bmm/workflows/1-analysis/research/domain-steps/step-02-domain-analysis.md
deleted file mode 100644
index bb4cbb63..00000000
--- a/src/bmm/workflows/1-analysis/research/domain-steps/step-02-domain-analysis.md
+++ /dev/null
@@ -1,229 +0,0 @@
-# Domain Research Step 2: Industry Analysis
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE AN INDUSTRY ANALYST, not content generator
-- π¬ FOCUS on market size, growth, and industry dynamics
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after industry analysis content generation
-- π WRITE INDUSTRY ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from step-01 are available
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-- Focus on market size, growth, and industry dynamics
-- Web search capabilities with source verification are enabled
-
-## YOUR TASK:
-
-Conduct industry analysis focusing on market size, growth, and industry dynamics. Search the web to verify and supplement current facts.
-
-## INDUSTRY ANALYSIS SEQUENCE:
-
-### 1. Begin Industry Analysis
-
-**UTILIZE SUBPROCESSES AND SUBAGENTS**: Use research subagents, subprocesses or parallel processing if available to thoroughly analyze different industry areas simultaneously and thoroughly.
-
-Start with industry research approach:
-"Now I'll conduct **industry analysis** for **{{research_topic}}** to understand market dynamics.
-
-**Industry Analysis Focus:**
-
-- Market size and valuation metrics
-- Growth rates and market dynamics
-- Market segmentation and structure
-- Industry trends and evolution patterns
-- Economic impact and value creation
-
-**Let me search for current industry insights.**"
-
-### 2. Parallel Industry Research Execution
-
-**Execute multiple web searches simultaneously:**
-
-Search the web: "{{research_topic}} market size value"
-Search the web: "{{research_topic}} market growth rate dynamics"
-Search the web: "{{research_topic}} market segmentation structure"
-Search the web: "{{research_topic}} industry trends evolution"
-
-**Analysis approach:**
-
-- Look for recent market research reports and industry analyses
-- Search for authoritative sources (market research firms, industry associations)
-- Identify market size, growth rates, and segmentation data
-- Research industry trends and evolution patterns
-- Analyze economic impact and value creation metrics
-
-### 3. Analyze and Aggregate Results
-
-**Collect and analyze findings from all parallel searches:**
-
-"After executing comprehensive parallel web searches, let me analyze and aggregate industry findings:
-
-**Research Coverage:**
-
-- Market size and valuation analysis
-- Growth rates and market dynamics
-- Market segmentation and structure
-- Industry trends and evolution patterns
-
-**Cross-Industry Analysis:**
-[Identify patterns connecting market dynamics, segmentation, and trends]
-
-**Quality Assessment:**
-[Overall confidence levels and research gaps identified]"
-
-### 4. Generate Industry Analysis Content
-
-**WRITE IMMEDIATELY TO DOCUMENT**
-
-Prepare industry analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Industry Analysis
-
-### Market Size and Valuation
-
-[Market size analysis with source citations]
-_Total Market Size: [Current market valuation]_
-_Growth Rate: [CAGR and market growth projections]_
-_Market Segments: [Size and value of key market segments]_
-_Economic Impact: [Economic contribution and value creation]_
-_Source: [URL]_
-
-### Market Dynamics and Growth
-
-[Market dynamics analysis with source citations]
-_Growth Drivers: [Key factors driving market growth]_
-_Growth Barriers: [Factors limiting market expansion]_
-_Cyclical Patterns: [Industry seasonality and cycles]_
-_Market Maturity: [Life cycle stage and development phase]_
-_Source: [URL]_
-
-### Market Structure and Segmentation
-
-[Market structure analysis with source citations]
-_Primary Segments: [Key market segments and their characteristics]_
-_Sub-segment Analysis: [Detailed breakdown of market sub-segments]_
-_Geographic Distribution: [Regional market variations and concentrations]_
-_Vertical Integration: [Supply chain and value chain structure]_
-_Source: [URL]_
-
-### Industry Trends and Evolution
-
-[Industry trends analysis with source citations]
-_Emerging Trends: [Current industry developments and transformations]_
-_Historical Evolution: [Industry development over recent years]_
-_Technology Integration: [How technology is changing the industry]_
-_Future Outlook: [Projected industry developments and changes]_
-_Source: [URL]_
-
-### Competitive Dynamics
-
-[Competitive dynamics analysis with source citations]
-_Market Concentration: [Level of market consolidation and competition]_
-_Competitive Intensity: [Degree of competition and rivalry]_
-_Barriers to Entry: [Obstacles for new market entrants]_
-_Innovation Pressure: [Rate of innovation and change]_
-_Source: [URL]_
-```
-
-### 5. Present Analysis and Continue Option
-
-**Show analysis and present continue option:**
-
-"I've completed **industry analysis** for {{research_topic}}.
-
-**Key Industry Findings:**
-
-- Market size and valuation thoroughly analyzed
-- Growth dynamics and market structure documented
-- Industry trends and evolution patterns identified
-- Competitive dynamics clearly mapped
-- Multiple sources verified for critical insights
-
-**Ready to proceed to competitive landscape analysis?**
-[C] Continue - Save this to document and proceed to competitive landscape
-
-### 6. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- **CONTENT ALREADY WRITTEN TO DOCUMENT**
-- Update frontmatter: `stepsCompleted: [1, 2]`
-- Load: `./step-03-competitive-landscape.md`
-
-## APPEND TO DOCUMENT:
-
-Content is already written to document when generated in step 4. No additional append needed.
-
-## SUCCESS METRICS:
-
-β Market size and valuation thoroughly analyzed
-β Growth dynamics and market structure documented
-β Industry trends and evolution patterns identified
-β Competitive dynamics clearly mapped
-β Multiple sources verified for critical insights
-β Content written immediately to document
-β [C] continue option presented and handled correctly
-β Proper routing to next step (competitive landscape)
-β Research goals alignment maintained
-
-## FAILURE MODES:
-
-β Relying on training data instead of web search for current facts
-β Missing critical market size or growth data
-β Incomplete market structure analysis
-β Not identifying key industry trends
-β Not writing content immediately to document
-β Not presenting [C] continue option after content generation
-β Not routing to competitive landscape step
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## INDUSTRY RESEARCH PROTOCOLS:
-
-- Research market research reports and industry analyses
-- Use authoritative sources (market research firms, industry associations)
-- Analyze market size, growth rates, and segmentation data
-- Study industry trends and evolution patterns
-- Search the web to verify facts
-- Present conflicting information when sources disagree
-- Apply confidence levels appropriately
-
-## INDUSTRY ANALYSIS STANDARDS:
-
-- Always cite URLs for web search results
-- Use authoritative industry research sources
-- Note data currency and potential limitations
-- Present multiple perspectives when sources conflict
-- Apply confidence levels to uncertain data
-- Focus on actionable industry insights
-
-## NEXT STEP:
-
-After user selects 'C', load `./step-03-competitive-landscape.md` to analyze competitive landscape, key players, and ecosystem analysis for {{research_topic}}.
-
-Remember: Always write research content to document immediately and search the web to verify facts!
diff --git a/src/bmm/workflows/1-analysis/research/domain-steps/step-03-competitive-landscape.md b/src/bmm/workflows/1-analysis/research/domain-steps/step-03-competitive-landscape.md
deleted file mode 100644
index 0dc2de6e..00000000
--- a/src/bmm/workflows/1-analysis/research/domain-steps/step-03-competitive-landscape.md
+++ /dev/null
@@ -1,238 +0,0 @@
-# Domain Research Step 3: Competitive Landscape
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A COMPETITIVE ANALYST, not content generator
-- π¬ FOCUS on key players, market share, and competitive dynamics
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after competitive analysis content generation
-- π WRITE COMPETITIVE ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-- Focus on key players, market share, and competitive dynamics
-- Web search capabilities with source verification are enabled
-
-## YOUR TASK:
-
-Conduct competitive landscape analysis focusing on key players, market share, and competitive dynamics. Search the web to verify and supplement current facts.
-
-## COMPETITIVE LANDSCAPE ANALYSIS SEQUENCE:
-
-### 1. Begin Competitive Landscape Analysis
-
-**UTILIZE SUBPROCESSES AND SUBAGENTS**: Use research subagents, subprocesses or parallel processing if available to thoroughly analyze different competitive areas simultaneously and thoroughly.
-
-Start with competitive research approach:
-"Now I'll conduct **competitive landscape analysis** for **{{research_topic}}** to understand the competitive ecosystem.
-
-**Competitive Landscape Focus:**
-
-- Key players and market leaders
-- Market share and competitive positioning
-- Competitive strategies and differentiation
-- Business models and value propositions
-- Entry barriers and competitive dynamics
-
-**Let me search for current competitive insights.**"
-
-### 2. Parallel Competitive Research Execution
-
-**Execute multiple web searches simultaneously:**
-
-Search the web: "{{research_topic}} key players market leaders"
-Search the web: "{{research_topic}} market share competitive landscape"
-Search the web: "{{research_topic}} competitive strategies differentiation"
-Search the web: "{{research_topic}} entry barriers competitive dynamics"
-
-**Analysis approach:**
-
-- Look for recent competitive intelligence reports and market analyses
-- Search for company websites, annual reports, and investor presentations
-- Research market share data and competitive positioning
-- Analyze competitive strategies and differentiation approaches
-- Study entry barriers and competitive dynamics
-
-### 3. Analyze and Aggregate Results
-
-**Collect and analyze findings from all parallel searches:**
-
-"After executing comprehensive parallel web searches, let me analyze and aggregate competitive findings:
-
-**Research Coverage:**
-
-- Key players and market leaders analysis
-- Market share and competitive positioning assessment
-- Competitive strategies and differentiation mapping
-- Entry barriers and competitive dynamics evaluation
-
-**Cross-Competitive Analysis:**
-[Identify patterns connecting players, strategies, and market dynamics]
-
-**Quality Assessment:**
-[Overall confidence levels and research gaps identified]"
-
-### 4. Generate Competitive Landscape Content
-
-**WRITE IMMEDIATELY TO DOCUMENT**
-
-Prepare competitive landscape analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Competitive Landscape
-
-### Key Players and Market Leaders
-
-[Key players analysis with source citations]
-_Market Leaders: [Dominant players and their market positions]_
-_Major Competitors: [Significant competitors and their specialties]_
-_Emerging Players: [New entrants and innovative companies]_
-_Global vs Regional: [Geographic distribution of key players]_
-_Source: [URL]_
-
-### Market Share and Competitive Positioning
-
-[Market share analysis with source citations]
-_Market Share Distribution: [Current market share breakdown]_
-_Competitive Positioning: [How players position themselves in the market]_
-_Value Proposition Mapping: [Different value propositions across players]_
-_Customer Segments Served: [Different customer bases by competitor]_
-_Source: [URL]_
-
-### Competitive Strategies and Differentiation
-
-[Competitive strategies analysis with source citations]
-_Cost Leadership Strategies: [Players competing on price and efficiency]_
-_Differentiation Strategies: [Players competing on unique value]_
-_Focus/Niche Strategies: [Players targeting specific segments]_
-_Innovation Approaches: [How different players innovate]_
-_Source: [URL]_
-
-### Business Models and Value Propositions
-
-[Business models analysis with source citations]
-_Primary Business Models: [How competitors make money]_
-_Revenue Streams: [Different approaches to monetization]_
-_Value Chain Integration: [Vertical integration vs partnership models]_
-_Customer Relationship Models: [How competitors build customer loyalty]_
-_Source: [URL]_
-
-### Competitive Dynamics and Entry Barriers
-
-[Competitive dynamics analysis with source citations]
-_Barriers to Entry: [Obstacles facing new market entrants]_
-_Competitive Intensity: [Level of rivalry and competitive pressure]_
-_Market Consolidation Trends: [M&A activity and market concentration]_
-_Switching Costs: [Costs for customers to switch between providers]_
-_Source: [URL]_
-
-### Ecosystem and Partnership Analysis
-
-[Ecosystem analysis with source citations]
-_Supplier Relationships: [Key supplier partnerships and dependencies]_
-_Distribution Channels: [How competitors reach customers]_
-_Technology Partnerships: [Strategic technology alliances]_
-_Ecosystem Control: [Who controls key parts of the value chain]_
-_Source: [URL]_
-```
-
-### 5. Present Analysis and Continue Option
-
-**Show analysis and present continue option:**
-
-"I've completed **competitive landscape analysis** for {{research_topic}}.
-
-**Key Competitive Findings:**
-
-- Key players and market leaders thoroughly identified
-- Market share and competitive positioning clearly mapped
-- Competitive strategies and differentiation analyzed
-- Business models and value propositions documented
-- Competitive dynamics and entry barriers evaluated
-
-**Ready to proceed to regulatory focus analysis?**
-[C] Continue - Save this to document and proceed to regulatory focus
-
-### 6. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- **CONTENT ALREADY WRITTEN TO DOCUMENT**
-- Update frontmatter: `stepsCompleted: [1, 2, 3]`
-- Load: `./step-04-regulatory-focus.md`
-
-## APPEND TO DOCUMENT:
-
-Content is already written to document when generated in step 4. No additional append needed.
-
-## SUCCESS METRICS:
-
-β Key players and market leaders thoroughly identified
-β Market share and competitive positioning clearly mapped
-β Competitive strategies and differentiation analyzed
-β Business models and value propositions documented
-β Competitive dynamics and entry barriers evaluated
-β Content written immediately to document
-β [C] continue option presented and handled correctly
-β Proper routing to next step (regulatory focus)
-β Research goals alignment maintained
-
-## FAILURE MODES:
-
-β Relying on training data instead of web search for current facts
-β Missing critical key players or market leaders
-β Incomplete market share or positioning analysis
-β Not identifying competitive strategies
-β Not writing content immediately to document
-β Not presenting [C] continue option after content generation
-β Not routing to regulatory focus step
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## COMPETITIVE RESEARCH PROTOCOLS:
-
-- Research competitive intelligence reports and market analyses
-- Use company websites, annual reports, and investor presentations
-- Analyze market share data and competitive positioning
-- Study competitive strategies and differentiation approaches
-- Search the web to verify facts
-- Present conflicting information when sources disagree
-- Apply confidence levels appropriately
-
-## COMPETITIVE ANALYSIS STANDARDS:
-
-- Always cite URLs for web search results
-- Use authoritative competitive intelligence sources
-- Note data currency and potential limitations
-- Present multiple perspectives when sources conflict
-- Apply confidence levels to uncertain data
-- Focus on actionable competitive insights
-
-## NEXT STEP:
-
-After user selects 'C', load `./step-04-regulatory-focus.md` to analyze regulatory requirements, compliance frameworks, and legal considerations for {{research_topic}}.
-
-Remember: Always write research content to document immediately and search the web to verify facts!
diff --git a/src/bmm/workflows/1-analysis/research/domain-steps/step-04-regulatory-focus.md b/src/bmm/workflows/1-analysis/research/domain-steps/step-04-regulatory-focus.md
deleted file mode 100644
index e98010c7..00000000
--- a/src/bmm/workflows/1-analysis/research/domain-steps/step-04-regulatory-focus.md
+++ /dev/null
@@ -1,206 +0,0 @@
-# Domain Research Step 4: Regulatory Focus
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A REGULATORY ANALYST, not content generator
-- π¬ FOCUS on compliance requirements and regulatory landscape
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after regulatory content generation
-- π WRITE REGULATORY ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY save when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-- Focus on regulatory and compliance requirements for the domain
-- Web search capabilities with source verification are enabled
-
-## YOUR TASK:
-
-Conduct focused regulatory and compliance analysis with emphasis on requirements that impact {{research_topic}}. Search the web to verify and supplement current facts.
-
-## REGULATORY FOCUS SEQUENCE:
-
-### 1. Begin Regulatory Analysis
-
-Start with regulatory research approach:
-"Now I'll focus on **regulatory and compliance requirements** that impact **{{research_topic}}**.
-
-**Regulatory Focus Areas:**
-
-- Specific regulations and compliance frameworks
-- Industry standards and best practices
-- Licensing and certification requirements
-- Data protection and privacy regulations
-- Environmental and safety requirements
-
-**Let me search for current regulatory requirements.**"
-
-### 2. Web Search for Specific Regulations
-
-Search for current regulatory information:
-Search the web: "{{research_topic}} regulations compliance requirements"
-
-**Regulatory focus:**
-
-- Specific regulations applicable to the domain
-- Compliance frameworks and standards
-- Recent regulatory changes or updates
-- Enforcement agencies and oversight bodies
-
-### 3. Web Search for Industry Standards
-
-Search for current industry standards:
-Search the web: "{{research_topic}} standards best practices"
-
-**Standards focus:**
-
-- Industry-specific technical standards
-- Best practices and guidelines
-- Certification requirements
-- Quality assurance frameworks
-
-### 4. Web Search for Data Privacy Requirements
-
-Search for current privacy regulations:
-Search the web: "data privacy regulations {{research_topic}}"
-
-**Privacy focus:**
-
-- GDPR, CCPA, and other data protection laws
-- Industry-specific privacy requirements
-- Data governance and security standards
-- User consent and data handling requirements
-
-### 5. Generate Regulatory Analysis Content
-
-Prepare regulatory content with source citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Regulatory Requirements
-
-### Applicable Regulations
-
-[Specific regulations analysis with source citations]
-_Source: [URL]_
-
-### Industry Standards and Best Practices
-
-[Industry standards analysis with source citations]
-_Source: [URL]_
-
-### Compliance Frameworks
-
-[Compliance frameworks analysis with source citations]
-_Source: [URL]_
-
-### Data Protection and Privacy
-
-[Privacy requirements analysis with source citations]
-_Source: [URL]_
-
-### Licensing and Certification
-
-[Licensing requirements analysis with source citations]
-_Source: [URL]_
-
-### Implementation Considerations
-
-[Practical implementation considerations with source citations]
-_Source: [URL]_
-
-### Risk Assessment
-
-[Regulatory and compliance risk assessment]
-```
-
-### 6. Present Analysis and Continue Option
-
-Show the generated regulatory analysis and present continue option:
-"I've completed **regulatory requirements analysis** for {{research_topic}}.
-
-**Key Regulatory Findings:**
-
-- Specific regulations and frameworks identified
-- Industry standards and best practices mapped
-- Compliance requirements clearly documented
-- Implementation considerations provided
-- Risk assessment completed
-
-**Ready to proceed to technical trends?**
-[C] Continue - Save this to the document and move to technical trends
-
-### 7. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- **CONTENT ALREADY WRITTEN TO DOCUMENT**
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4]`
-- Load: `./step-05-technical-trends.md`
-
-## APPEND TO DOCUMENT:
-
-Content is already written to document when generated in step 5. No additional append needed.
-
-## SUCCESS METRICS:
-
-β Applicable regulations identified with current citations
-β Industry standards and best practices documented
-β Compliance frameworks clearly mapped
-β Data protection requirements analyzed
-β Implementation considerations provided
-β [C] continue option presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Relying on training data instead of web search for current facts
-β Missing critical regulatory requirements for the domain
-β Not providing implementation considerations for compliance
-β Not completing risk assessment for regulatory compliance
-β Not presenting [C] continue option after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## REGULATORY RESEARCH PROTOCOLS:
-
-- Search for specific regulations by name and number
-- Identify regulatory bodies and enforcement agencies
-- Research recent regulatory changes and updates
-- Map industry standards to regulatory requirements
-- Consider regional and jurisdictional differences
-
-## SOURCE VERIFICATION:
-
-- Always cite regulatory agency websites
-- Use official government and industry association sources
-- Note effective dates and implementation timelines
-- Present compliance requirement levels and obligations
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-05-technical-trends.md` to analyze technical trends and innovations in the domain.
-
-Remember: Search the web to verify regulatory facts and provide practical implementation considerations!
diff --git a/src/bmm/workflows/1-analysis/research/domain-steps/step-05-technical-trends.md b/src/bmm/workflows/1-analysis/research/domain-steps/step-05-technical-trends.md
deleted file mode 100644
index 55e834cd..00000000
--- a/src/bmm/workflows/1-analysis/research/domain-steps/step-05-technical-trends.md
+++ /dev/null
@@ -1,234 +0,0 @@
-# Domain Research Step 5: Technical Trends
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A TECHNOLOGY ANALYST, not content generator
-- π¬ FOCUS on emerging technologies and innovation patterns
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after technical trends content generation
-- π WRITE TECHNICAL TRENDS ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4, 5]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-- Focus on emerging technologies and innovation patterns in the domain
-- Web search capabilities with source verification are enabled
-
-## YOUR TASK:
-
-Conduct comprehensive technical trends analysis using current web data with emphasis on innovations and emerging technologies impacting {{research_topic}}.
-
-## TECHNICAL TRENDS SEQUENCE:
-
-### 1. Begin Technical Trends Analysis
-
-Start with technology research approach:
-"Now I'll conduct **technical trends and emerging technologies** analysis for **{{research_topic}}** using current data.
-
-**Technical Trends Focus:**
-
-- Emerging technologies and innovations
-- Digital transformation impacts
-- Automation and efficiency improvements
-- New business models enabled by technology
-- Future technology projections and roadmaps
-
-**Let me search for current technology developments.**"
-
-### 2. Web Search for Emerging Technologies
-
-Search for current technology information:
-Search the web: "{{research_topic}} emerging technologies innovations"
-
-**Technology focus:**
-
-- AI, machine learning, and automation impacts
-- Digital transformation trends
-- New technologies disrupting the industry
-- Innovation patterns and breakthrough developments
-
-### 3. Web Search for Digital Transformation
-
-Search for current transformation trends:
-Search the web: "{{research_topic}} digital transformation trends"
-
-**Transformation focus:**
-
-- Digital adoption trends and rates
-- Business model evolution
-- Customer experience innovations
-- Operational efficiency improvements
-
-### 4. Web Search for Future Outlook
-
-Search for future projections:
-Search the web: "{{research_topic}} future outlook trends"
-
-**Future focus:**
-
-- Technology roadmaps and projections
-- Market evolution predictions
-- Innovation pipelines and R&D trends
-- Long-term industry transformation
-
-### 5. Generate Technical Trends Content
-
-**WRITE IMMEDIATELY TO DOCUMENT**
-
-Prepare technical analysis with source citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Technical Trends and Innovation
-
-### Emerging Technologies
-
-[Emerging technologies analysis with source citations]
-_Source: [URL]_
-
-### Digital Transformation
-
-[Digital transformation analysis with source citations]
-_Source: [URL]_
-
-### Innovation Patterns
-
-[Innovation patterns analysis with source citations]
-_Source: [URL]_
-
-### Future Outlook
-
-[Future outlook and projections with source citations]
-_Source: [URL]_
-
-### Implementation Opportunities
-
-[Implementation opportunity analysis with source citations]
-_Source: [URL]_
-
-### Challenges and Risks
-
-[Challenges and risks assessment with source citations]
-_Source: [URL]_
-
-## Recommendations
-
-### Technology Adoption Strategy
-
-[Technology adoption recommendations]
-
-### Innovation Roadmap
-
-[Innovation roadmap suggestions]
-
-### Risk Mitigation
-
-[Risk mitigation strategies]
-```
-
-### 6. Present Analysis and Complete Option
-
-Show the generated technical analysis and present complete option:
-"I've completed **technical trends and innovation analysis** for {{research_topic}}.
-
-**Technical Highlights:**
-
-- Emerging technologies and innovations identified
-- Digital transformation trends mapped
-- Future outlook and projections analyzed
-- Implementation opportunities and challenges documented
-- Practical recommendations provided
-
-**Technical Trends Research Completed:**
-
-- Emerging technologies and innovations identified
-- Digital transformation trends mapped
-- Future outlook and projections analyzed
-- Implementation opportunities and challenges documented
-
-**Ready to proceed to research synthesis and recommendations?**
-[C] Continue - Save this to document and proceed to synthesis
-
-### 7. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- **CONTENT ALREADY WRITTEN TO DOCUMENT**
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4, 5]`
-- Load: `./step-06-research-synthesis.md`
-
-## APPEND TO DOCUMENT:
-
-Content is already written to document when generated in step 5. No additional append needed.
-
-## SUCCESS METRICS:
-
-β Emerging technologies identified with current data
-β Digital transformation trends clearly documented
-β Future outlook and projections analyzed
-β Implementation opportunities and challenges mapped
-β Strategic recommendations provided
-β Content written immediately to document
-β [C] continue option presented and handled correctly
-β Proper routing to next step (research synthesis)
-β Research goals alignment maintained
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-β Missing critical emerging technologies in the domain
-β Not providing practical implementation recommendations
-β Not completing strategic recommendations
-β Not presenting completion option for research workflow
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## TECHNICAL RESEARCH PROTOCOLS:
-
-- Search for cutting-edge technologies and innovations
-- Identify disruption patterns and game-changers
-- Research technology adoption timelines and barriers
-- Consider regional technology variations
-- Analyze competitive technological advantages
-
-## RESEARCH WORKFLOW COMPLETION:
-
-When 'C' is selected:
-
-- All domain research steps completed
-- Comprehensive research document generated
-- All sections appended with source citations
-- Research workflow status updated
-- Final recommendations provided to user
-
-## NEXT STEPS:
-
-Research workflow complete. User may:
-
-- Use the domain research to inform other workflows (PRD, architecture, etc.)
-- Conduct additional research on specific topics if needed
-- Move forward with product development based on research insights
-
-Congratulations on completing comprehensive domain research! π
diff --git a/src/bmm/workflows/1-analysis/research/domain-steps/step-06-research-synthesis.md b/src/bmm/workflows/1-analysis/research/domain-steps/step-06-research-synthesis.md
deleted file mode 100644
index 1c7db8c0..00000000
--- a/src/bmm/workflows/1-analysis/research/domain-steps/step-06-research-synthesis.md
+++ /dev/null
@@ -1,443 +0,0 @@
-# Domain Research Step 6: Research Synthesis and Completion
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A DOMAIN RESEARCH STRATEGIST, not content generator
-- π¬ FOCUS on comprehensive synthesis and authoritative conclusions
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π PRODUCE COMPREHENSIVE DOCUMENT with narrative intro, TOC, and summary
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] complete option after synthesis content generation
-- πΎ ONLY save when user chooses C (Complete)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4, 5, 6]` before completing workflow
-- π« FORBIDDEN to complete workflow until C is selected
-- π GENERATE COMPLETE DOCUMENT STRUCTURE with intro, TOC, and summary
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- **Research topic = "{{research_topic}}"** - comprehensive domain analysis
-- **Research goals = "{{research_goals}}"** - achieved through exhaustive research
-- All domain research sections have been completed (analysis, regulatory, technical)
-- Web search capabilities with source verification are enabled
-- This is the final synthesis step producing the complete research document
-
-## YOUR TASK:
-
-Produce a comprehensive, authoritative research document on **{{research_topic}}** with compelling narrative introduction, detailed TOC, and executive summary based on exhaustive domain research.
-
-## COMPREHENSIVE DOCUMENT SYNTHESIS:
-
-### 1. Document Structure Planning
-
-**Complete Research Document Structure:**
-
-```markdown
-# [Compelling Title]: Comprehensive {{research_topic}} Research
-
-## Executive Summary
-
-[Brief compelling overview of key findings and implications]
-
-## Table of Contents
-
-- Research Introduction and Methodology
-- Industry Overview and Market Dynamics
-- Technology Trends and Innovation Landscape
-- Regulatory Framework and Compliance Requirements
-- Competitive Landscape and Key Players
-- Strategic Insights and Recommendations
-- Implementation Considerations and Risk Assessment
-- Future Outlook and Strategic Opportunities
-- Research Methodology and Source Documentation
-- Appendices and Additional Resources
-```
-
-### 2. Generate Compelling Narrative Introduction
-
-**Introduction Requirements:**
-
-- Hook reader with compelling opening about {{research_topic}}
-- Establish research significance and timeliness
-- Outline comprehensive research methodology
-- Preview key findings and strategic implications
-- Set professional, authoritative tone
-
-**Web Search for Introduction Context:**
-Search the web: "{{research_topic}} significance importance"
-
-### 3. Synthesize All Research Sections
-
-**Section-by-Section Integration:**
-
-- Combine industry analysis from step-02
-- Integrate regulatory focus from step-03
-- Incorporate technical trends from step-04
-- Add cross-sectional insights and connections
-- Ensure comprehensive coverage with no gaps
-
-### 4. Generate Complete Document Content
-
-#### Final Document Structure:
-
-```markdown
-# [Compelling Title]: Comprehensive {{research_topic}} Domain Research
-
-## Executive Summary
-
-[2-3 paragraph compelling summary of the most critical findings and strategic implications for {{research_topic}} based on comprehensive current research]
-
-**Key Findings:**
-
-- [Most significant market dynamics]
-- [Critical regulatory considerations]
-- [Important technology trends]
-- [Strategic implications]
-
-**Strategic Recommendations:**
-
-- [Top 3-5 actionable recommendations based on research]
-
-## Table of Contents
-
-1. Research Introduction and Methodology
-2. {{research_topic}} Industry Overview and Market Dynamics
-3. Technology Landscape and Innovation Trends
-4. Regulatory Framework and Compliance Requirements
-5. Competitive Landscape and Ecosystem Analysis
-6. Strategic Insights and Domain Opportunities
-7. Implementation Considerations and Risk Assessment
-8. Future Outlook and Strategic Planning
-9. Research Methodology and Source Verification
-10. Appendices and Additional Resources
-
-## 1. Research Introduction and Methodology
-
-### Research Significance
-
-[Compelling narrative about why {{research_topic}} research is critical right now]
-_Why this research matters now: [Strategic importance with current context]_
-_Source: [URL]_
-
-### Research Methodology
-
-[Comprehensive description of research approach including:]
-
-- **Research Scope**: [Comprehensive coverage areas]
-- **Data Sources**: [Authoritative sources and verification approach]
-- **Analysis Framework**: [Structured analysis methodology]
-- **Time Period**: [current focus and historical context]
-- **Geographic Coverage**: [Regional/global scope]
-
-### Research Goals and Objectives
-
-**Original Goals:** {{research_goals}}
-
-**Achieved Objectives:**
-
-- [Goal 1 achievement with supporting evidence]
-- [Goal 2 achievement with supporting evidence]
-- [Additional insights discovered during research]
-
-## 2. {{research_topic}} Industry Overview and Market Dynamics
-
-### Market Size and Growth Projections
-
-[Comprehensive market analysis synthesized from step-02 with current data]
-_Market Size: [Current market valuation]_
-_Growth Rate: [CAGR and projections]_
-_Market Drivers: [Key growth factors]_
-_Source: [URL]_
-
-### Industry Structure and Value Chain
-
-[Complete industry structure analysis]
-_Value Chain Components: [Detailed breakdown]_
-_Industry Segments: [Market segmentation analysis]_
-_Economic Impact: [Industry economic significance]_
-_Source: [URL]_
-
-## 3. Technology Landscape and Innovation Trends
-
-### Current Technology Adoption
-
-[Technology trends analysis from step-04 with current context]
-_Emerging Technologies: [Key technologies affecting {{research_topic}}]_
-_Adoption Patterns: [Technology adoption rates and patterns]_
-_Innovation Drivers: [Factors driving technology change]_
-_Source: [URL]_
-
-### Digital Transformation Impact
-
-[Comprehensive analysis of technology's impact on {{research_topic}}]
-_Transformation Trends: [Major digital transformation patterns]_
-_Disruption Opportunities: [Technology-driven opportunities]_
-_Future Technology Outlook: [Emerging technologies and timelines]_
-_Source: [URL]_
-
-## 4. Regulatory Framework and Compliance Requirements
-
-### Current Regulatory Landscape
-
-[Regulatory analysis from step-03 with current updates]
-_Key Regulations: [Critical regulatory requirements]_
-_Compliance Standards: [Industry standards and best practices]_
-_Recent Changes: [current regulatory updates and implications]_
-_Source: [URL]_
-
-### Risk and Compliance Considerations
-
-[Comprehensive risk assessment]
-_Compliance Risks: [Major regulatory and compliance risks]_
-_Risk Mitigation Strategies: [Approaches to manage regulatory risks]_
-_Future Regulatory Trends: [Anticipated regulatory developments]_
-_Source: [URL]_
-
-## 5. Competitive Landscape and Ecosystem Analysis
-
-### Market Positioning and Key Players
-
-[Competitive analysis with current market positioning]
-_Market Leaders: [Dominant players and strategies]_
-_Emerging Competitors: [New entrants and innovative approaches]_
-_Competitive Dynamics: [Market competition patterns and trends]_
-_Source: [URL]_
-
-### Ecosystem and Partnership Landscape
-
-[Complete ecosystem analysis]
-_Ecosystem Players: [Key stakeholders and relationships]_
-_Partnership Opportunities: [Strategic collaboration potential]_
-_Supply Chain Dynamics: [Supply chain structure and risks]_
-_Source: [URL]_
-
-## 6. Strategic Insights and Domain Opportunities
-
-### Cross-Domain Synthesis
-
-[Strategic insights from integrating all research sections]
-_Market-Technology Convergence: [How technology and market forces interact]_
-_Regulatory-Strategic Alignment: [How regulatory environment shapes strategy]_
-_Competitive Positioning Opportunities: [Strategic advantages based on research]_
-_Source: [URL]_
-
-### Strategic Opportunities
-
-[High-value opportunities identified through comprehensive research]
-_Market Opportunities: [Specific market entry or expansion opportunities]_
-_Technology Opportunities: [Technology adoption or innovation opportunities]_
-_Partnership Opportunities: [Strategic collaboration and partnership potential]_
-_Source: [URL]_
-
-## 7. Implementation Considerations and Risk Assessment
-
-### Implementation Framework
-
-[Practical implementation guidance based on research findings]
-_Implementation Timeline: [Recommended phased approach]_
-_Resource Requirements: [Key resources and capabilities needed]_
-_Success Factors: [Critical success factors for implementation]_
-_Source: [URL]_
-
-### Risk Management and Mitigation
-
-[Comprehensive risk assessment and mitigation strategies]
-_Implementation Risks: [Major risks and mitigation approaches]_
-_Market Risks: [Market-related risks and contingency plans]_
-_Technology Risks: [Technology adoption and implementation risks]_
-_Source: [URL]_
-
-## 8. Future Outlook and Strategic Planning
-
-### Future Trends and Projections
-
-[Forward-looking analysis based on comprehensive research]
-_Near-term Outlook: [1-2 year projections and implications]_
-_Medium-term Trends: [3-5 year expected developments]_
-_Long-term Vision: [5+ year strategic outlook for {{research_topic}}]_
-_Source: [URL]_
-
-### Strategic Recommendations
-
-[Comprehensive strategic recommendations]
-_Immediate Actions: [Priority actions for next 6 months]_
-_Strategic Initiatives: [Key strategic initiatives for 1-2 years]_
-_Long-term Strategy: [Strategic positioning for 3+ years]_
-_Source: [URL]_
-
-## 9. Research Methodology and Source Verification
-
-### Comprehensive Source Documentation
-
-[Complete documentation of all research sources]
-_Primary Sources: [Key authoritative sources used]_
-_Secondary Sources: [Supporting research and analysis]_
-_Web Search Queries: [Complete list of search queries used]_
-
-### Research Quality Assurance
-
-[Quality assurance and validation approach]
-_Source Verification: [All factual claims verified with multiple sources]_
-_Confidence Levels: [Confidence assessments for uncertain data]_
-_Limitations: [Research limitations and areas for further investigation]_
-_Methodology Transparency: [Complete transparency about research approach]_
-
-## 10. Appendices and Additional Resources
-
-### Detailed Data Tables
-
-[Comprehensive data tables supporting research findings]
-_Market Data Tables: [Detailed market size, growth, and segmentation data]_
-_Technology Adoption Data: [Detailed technology adoption and trend data]_
-_Regulatory Reference Tables: [Complete regulatory requirements and compliance data]_
-
-### Additional Resources
-
-[Valuable resources for continued research and implementation]
-_Industry Associations: [Key industry organizations and resources]_
-_Research Organizations: [Authoritative research institutions and reports]_
-_Government Resources: [Regulatory agencies and official resources]_
-_Professional Networks: [Industry communities and knowledge sources]_
-
----
-
-## Research Conclusion
-
-### Summary of Key Findings
-
-[Comprehensive summary of the most important research findings]
-
-### Strategic Impact Assessment
-
-[Assessment of strategic implications for {{research_topic}}]
-
-### Next Steps Recommendations
-
-[Specific next steps for leveraging this research]
-
----
-
-**Research Completion Date:** {{date}}
-**Research Period:** Comprehensive analysis
-**Document Length:** As needed for comprehensive coverage
-**Source Verification:** All facts cited with sources
-**Confidence Level:** High - based on multiple authoritative sources
-
-_This comprehensive research document serves as an authoritative reference on {{research_topic}} and provides strategic insights for informed decision-making._
-```
-
-### 5. Present Complete Document and Final Option
-
-**Document Completion Presentation:**
-
-"I've completed the **comprehensive research document synthesis** for **{{research_topic}}**, producing an authoritative research document with:
-
-**Document Features:**
-
-- **Compelling Narrative Introduction**: Engaging opening that establishes research significance
-- **Comprehensive Table of Contents**: Complete navigation structure for easy reference
-- **Exhaustive Research Coverage**: All aspects of {{research_topic}} thoroughly analyzed
-- **Executive Summary**: Key findings and strategic implications highlighted
-- **Strategic Recommendations**: Actionable insights based on comprehensive research
-- **Complete Source Citations**: Every factual claim verified with sources
-
-**Research Completeness:**
-
-- Industry analysis and market dynamics fully documented
-- Technology trends and innovation landscape comprehensively covered
-- Regulatory framework and compliance requirements detailed
-- Competitive landscape and ecosystem analysis complete
-- Strategic insights and implementation guidance provided
-
-**Document Standards Met:**
-
-- Exhaustive research with no critical gaps
-- Professional structure and compelling narrative
-- As long as needed for comprehensive coverage
-- Multiple independent sources for all claims
-- Proper citations throughout
-
-**Ready to complete this comprehensive research document?**
-[C] Complete Research - Save final comprehensive document
-
-### 6. Handle Final Completion
-
-#### If 'C' (Complete Research):
-
-- Append the complete document to the research file
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4, 5]`
-- Complete the domain research workflow
-- Provide final document delivery confirmation
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the complete comprehensive research document using the full structure above.
-
-## SUCCESS METRICS:
-
-β Compelling narrative introduction with research significance
-β Comprehensive table of contents with complete document structure
-β Exhaustive research coverage across all domain aspects
-β Executive summary with key findings and strategic implications
-β Strategic recommendations grounded in comprehensive research
-β Complete source verification with citations
-β Professional document structure and compelling narrative
-β [C] complete option presented and handled correctly
-β Domain research workflow completed with comprehensive document
-
-## FAILURE MODES:
-
-β Not producing compelling narrative introduction
-β Missing comprehensive table of contents
-β Incomplete research coverage across domain aspects
-β Not providing executive summary with key findings
-β Missing strategic recommendations based on research
-β Relying solely on training data without web verification for current facts
-β Producing document without professional structure
-β Not presenting completion option for final document
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## COMPREHENSIVE DOCUMENT STANDARDS:
-
-This step ensures the final research document:
-
-- Serves as an authoritative reference on {{research_topic}}
-- Provides compelling narrative and professional structure
-- Includes comprehensive coverage with no gaps
-- Maintains rigorous source verification standards
-- Delivers strategic insights and actionable recommendations
-- Meets professional research document quality standards
-
-## DOMAIN RESEARCH WORKFLOW COMPLETION:
-
-When 'C' is selected:
-
-- All domain research steps completed (1-5)
-- Comprehensive domain research document generated
-- Professional document structure with intro, TOC, and summary
-- All sections appended with source citations
-- Domain research workflow status updated to complete
-- Final comprehensive research document delivered to user
-
-## FINAL DELIVERABLE:
-
-Complete authoritative research document on {{research_topic}} that:
-
-- Establishes professional credibility through comprehensive research
-- Provides strategic insights for informed decision-making
-- Serves as reference document for continued use
-- Maintains highest research quality standards
-
-Congratulations on completing comprehensive domain research! π
diff --git a/src/bmm/workflows/1-analysis/research/market-steps/step-01-init.md b/src/bmm/workflows/1-analysis/research/market-steps/step-01-init.md
deleted file mode 100644
index a3772a9b..00000000
--- a/src/bmm/workflows/1-analysis/research/market-steps/step-01-init.md
+++ /dev/null
@@ -1,182 +0,0 @@
-# Market Research Step 1: Market Research Initialization
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate research content in init step
-- β ALWAYS confirm understanding of user's research goals
-- π YOU ARE A MARKET RESEARCH FACILITATOR, not content generator
-- π¬ FOCUS on clarifying scope and approach
-- π NO WEB RESEARCH in init - that's for later steps
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete research
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Confirm research understanding before proceeding
-- β οΈ Present [C] continue option after scope clarification
-- πΎ Write initial scope document immediately
-- π Update frontmatter `stepsCompleted: [1]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from main workflow discovery are available
-- Research type = "market" is already set
-- **Research topic = "{{research_topic}}"** - discovered from initial discussion
-- **Research goals = "{{research_goals}}"** - captured from initial discussion
-- Focus on market research scope clarification
-- Web search capabilities are enabled for later steps
-
-## YOUR TASK:
-
-Initialize market research by confirming understanding of {{research_topic}} and establishing clear research scope.
-
-## MARKET RESEARCH INITIALIZATION:
-
-### 1. Confirm Research Understanding
-
-**INITIALIZE - DO NOT RESEARCH YET**
-
-Start with research confirmation:
-"I understand you want to conduct **market research** for **{{research_topic}}** with these goals: {{research_goals}}
-
-**My Understanding of Your Research Needs:**
-
-- **Research Topic**: {{research_topic}}
-- **Research Goals**: {{research_goals}}
-- **Research Type**: Market Research
-- **Approach**: Comprehensive market analysis with source verification
-
-**Market Research Areas We'll Cover:**
-
-- Market size, growth dynamics, and trends
-- Customer insights and behavior analysis
-- Competitive landscape and positioning
-- Strategic recommendations and implementation guidance
-
-**Does this accurately capture what you're looking for?**"
-
-### 2. Refine Research Scope
-
-Gather any clarifications needed:
-
-#### Scope Clarification Questions:
-
-- "Are there specific customer segments or aspects of {{research_topic}} we should prioritize?"
-- "Should we focus on specific geographic regions or global market?"
-- "Is this for market entry, expansion, product development, or other business purpose?"
-- "Any competitors or market segments you specifically want us to analyze?"
-
-### 3. Document Initial Scope
-
-**WRITE IMMEDIATELY TO DOCUMENT**
-
-Write initial research scope to document:
-
-```markdown
-# Market Research: {{research_topic}}
-
-## Research Initialization
-
-### Research Understanding Confirmed
-
-**Topic**: {{research_topic}}
-**Goals**: {{research_goals}}
-**Research Type**: Market Research
-**Date**: {{date}}
-
-### Research Scope
-
-**Market Analysis Focus Areas:**
-
-- Market size, growth projections, and dynamics
-- Customer segments, behavior patterns, and insights
-- Competitive landscape and positioning analysis
-- Strategic recommendations and implementation guidance
-
-**Research Methodology:**
-
-- Current web data with source verification
-- Multiple independent sources for critical claims
-- Confidence level assessment for uncertain data
-- Comprehensive coverage with no critical gaps
-
-### Next Steps
-
-**Research Workflow:**
-
-1. β Initialization and scope setting (current step)
-2. Customer Insights and Behavior Analysis
-3. Competitive Landscape Analysis
-4. Strategic Synthesis and Recommendations
-
-**Research Status**: Scope confirmed, ready to proceed with detailed market analysis
-```
-
-### 4. Present Confirmation and Continue Option
-
-Show initial scope document and present continue option:
-"I've documented our understanding and initial scope for **{{research_topic}}** market research.
-
-**What I've established:**
-
-- Research topic and goals confirmed
-- Market analysis focus areas defined
-- Research methodology verification
-- Clear workflow progression
-
-**Document Status:** Initial scope written to research file for your review
-
-**Ready to begin detailed market research?**
-[C] Continue - Confirm scope and proceed to customer insights analysis
-[Modify] Suggest changes to research scope before proceeding
-
-### 5. Handle User Response
-
-#### If 'C' (Continue):
-
-- Update frontmatter: `stepsCompleted: [1]`
-- Add confirmation note to document: "Scope confirmed by user on {{date}}"
-- Load: `./step-02-customer-insights.md`
-
-#### If 'Modify':
-
-- Gather user changes to scope
-- Update document with modifications
-- Re-present updated scope for confirmation
-
-## SUCCESS METRICS:
-
-β Research topic and goals accurately understood
-β Market research scope clearly defined
-β Initial scope document written immediately
-β User opportunity to review and modify scope
-β [C] continue option presented and handled correctly
-β Document properly updated with scope confirmation
-
-## FAILURE MODES:
-
-β Not confirming understanding of research topic and goals
-β Generating research content instead of just scope clarification
-β Not writing initial scope document to file
-β Not providing opportunity for user to modify scope
-β Proceeding to next step without user confirmation
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor research decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## INITIALIZATION PRINCIPLES:
-
-This step ensures:
-
-- Clear mutual understanding of research objectives
-- Well-defined research scope and approach
-- Immediate documentation for user review
-- User control over research direction before detailed work begins
-
-## NEXT STEP:
-
-After user confirmation and scope finalization, load `./step-02-customer-insights.md` to begin detailed market research with customer insights analysis.
-
-Remember: Init steps confirm understanding and scope, not generate research content!
diff --git a/src/bmm/workflows/1-analysis/research/market-steps/step-02-customer-behavior.md b/src/bmm/workflows/1-analysis/research/market-steps/step-02-customer-behavior.md
deleted file mode 100644
index f707a0a3..00000000
--- a/src/bmm/workflows/1-analysis/research/market-steps/step-02-customer-behavior.md
+++ /dev/null
@@ -1,237 +0,0 @@
-# Market Research Step 2: Customer Behavior and Segments
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A CUSTOMER BEHAVIOR ANALYST, not content generator
-- π¬ FOCUS on customer behavior patterns and demographic analysis
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete research
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after customer behavior content generation
-- π WRITE CUSTOMER BEHAVIOR ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from step-01 are available
-- Focus on customer behavior patterns and demographic analysis
-- Web search capabilities with source verification are enabled
-- Previous step confirmed research scope and goals
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-
-## YOUR TASK:
-
-Conduct customer behavior and segment analysis with emphasis on patterns and demographics.
-
-## CUSTOMER BEHAVIOR ANALYSIS SEQUENCE:
-
-### 1. Begin Customer Behavior Analysis
-
-**UTILIZE SUBPROCESSES AND SUBAGENTS**: Use research subagents, subprocesses or parallel processing if available to thoroughly analyze different customer behavior areas simultaneously and thoroughly.
-
-Start with customer behavior research approach:
-"Now I'll conduct **customer behavior analysis** for **{{research_topic}}** to understand customer patterns.
-
-**Customer Behavior Focus:**
-
-- Customer behavior patterns and preferences
-- Demographic profiles and segmentation
-- Psychographic characteristics and values
-- Behavior drivers and influences
-- Customer interaction patterns and engagement
-
-**Let me search for current customer behavior insights.**"
-
-### 2. Parallel Customer Behavior Research Execution
-
-**Execute multiple web searches simultaneously:**
-
-Search the web: "{{research_topic}} customer behavior patterns"
-Search the web: "{{research_topic}} customer demographics"
-Search the web: "{{research_topic}} psychographic profiles"
-Search the web: "{{research_topic}} customer behavior drivers"
-
-**Analysis approach:**
-
-- Look for customer behavior studies and research reports
-- Search for demographic segmentation and analysis
-- Research psychographic profiling and value systems
-- Analyze behavior drivers and influencing factors
-- Study customer interaction and engagement patterns
-
-### 3. Analyze and Aggregate Results
-
-**Collect and analyze findings from all parallel searches:**
-
-"After executing comprehensive parallel web searches, let me analyze and aggregate customer behavior findings:
-
-**Research Coverage:**
-
-- Customer behavior patterns and preferences
-- Demographic profiles and segmentation
-- Psychographic characteristics and values
-- Behavior drivers and influences
-- Customer interaction patterns and engagement
-
-**Cross-Behavior Analysis:**
-[Identify patterns connecting demographics, psychographics, and behaviors]
-
-**Quality Assessment:**
-[Overall confidence levels and research gaps identified]"
-
-### 4. Generate Customer Behavior Content
-
-**WRITE IMMEDIATELY TO DOCUMENT**
-
-Prepare customer behavior analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Customer Behavior and Segments
-
-### Customer Behavior Patterns
-
-[Customer behavior patterns analysis with source citations]
-_Behavior Drivers: [Key motivations and patterns from web search]_
-_Interaction Preferences: [Customer engagement and interaction patterns]_
-_Decision Habits: [How customers typically make decisions]_
-_Source: [URL]_
-
-### Demographic Segmentation
-
-[Demographic analysis with source citations]
-_Age Demographics: [Age groups and preferences]_
-_Income Levels: [Income segments and purchasing behavior]_
-_Geographic Distribution: [Regional/city differences]_
-_Education Levels: [Education impact on behavior]_
-_Source: [URL]_
-
-### Psychographic Profiles
-
-[Psychographic analysis with source citations]
-_Values and Beliefs: [Core values driving customer behavior]_
-_Lifestyle Preferences: [Lifestyle choices and behaviors]_
-_Attitudes and Opinions: [Customer attitudes toward products/services]_
-_Personality Traits: [Personality influences on behavior]_
-_Source: [URL]_
-
-### Customer Segment Profiles
-
-[Detailed customer segment profiles with source citations]
-_Segment 1: [Detailed profile including demographics, psychographics, behavior]_
-_Segment 2: [Detailed profile including demographics, psychographics, behavior]_
-_Segment 3: [Detailed profile including demographics, psychographics, behavior]_
-_Source: [URL]_
-
-### Behavior Drivers and Influences
-
-[Behavior drivers analysis with source citations]
-_Emotional Drivers: [Emotional factors influencing behavior]_
-_Rational Drivers: [Logical decision factors]_
-_Social Influences: [Social and peer influences]_
-_Economic Influences: [Economic factors affecting behavior]_
-_Source: [URL]_
-
-### Customer Interaction Patterns
-
-[Customer interaction analysis with source citations]
-_Research and Discovery: [How customers find and research options]_
-_Purchase Decision Process: [Steps in purchase decision making]_
-_Post-Purchase Behavior: [After-purchase engagement patterns]_
-_Loyalty and Retention: [Factors driving customer loyalty]_
-_Source: [URL]_
-```
-
-### 5. Present Analysis and Continue Option
-
-**Show analysis and present continue option:**
-
-"I've completed **customer behavior analysis** for {{research_topic}}, focusing on customer patterns.
-
-**Key Customer Behavior Findings:**
-
-- Customer behavior patterns clearly identified with drivers
-- Demographic segmentation thoroughly analyzed
-- Psychographic profiles mapped and documented
-- Customer interaction patterns captured
-- Multiple sources verified for critical insights
-
-**Ready to proceed to customer pain points?**
-[C] Continue - Save this to document and proceed to pain points analysis
-
-### 6. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- **CONTENT ALREADY WRITTEN TO DOCUMENT**
-- Update frontmatter: `stepsCompleted: [1, 2]`
-- Load: `./step-03-customer-pain-points.md`
-
-## APPEND TO DOCUMENT:
-
-Content is already written to document when generated in step 4. No additional append needed.
-
-## SUCCESS METRICS:
-
-β Customer behavior patterns identified with current citations
-β Demographic segmentation thoroughly analyzed
-β Psychographic profiles clearly documented
-β Customer interaction patterns captured
-β Multiple sources verified for critical insights
-β Content written immediately to document
-β [C] continue option presented and handled correctly
-β Proper routing to next step (customer pain points)
-β Research goals alignment maintained
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-
-β Missing critical customer behavior patterns
-β Incomplete demographic segmentation analysis
-β Missing psychographic profile documentation
-β Not writing content immediately to document
-β Not presenting [C] continue option after content generation
-β Not routing to customer pain points analysis step
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor research decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## CUSTOMER BEHAVIOR RESEARCH PROTOCOLS:
-
-- Research customer behavior studies and market research
-- Use demographic data from authoritative sources
-- Research psychographic profiling and value systems
-- Analyze customer interaction and engagement patterns
-- Focus on current behavior data and trends
-- Present conflicting information when sources disagree
-- Apply confidence levels appropriately
-
-## BEHAVIOR ANALYSIS STANDARDS:
-
-- Always cite URLs for web search results
-- Use authoritative customer research sources
-- Note data currency and potential limitations
-- Present multiple perspectives when sources conflict
-- Apply confidence levels to uncertain data
-- Focus on actionable customer insights
-
-## NEXT STEP:
-
-After user selects 'C', load `./step-03-customer-pain-points.md` to analyze customer pain points, challenges, and unmet needs for {{research_topic}}.
-
-Remember: Always write research content to document immediately and emphasize current customer data with rigorous source verification!
diff --git a/src/bmm/workflows/1-analysis/research/market-steps/step-02-customer-insights.md b/src/bmm/workflows/1-analysis/research/market-steps/step-02-customer-insights.md
deleted file mode 100644
index c6d7ea32..00000000
--- a/src/bmm/workflows/1-analysis/research/market-steps/step-02-customer-insights.md
+++ /dev/null
@@ -1,200 +0,0 @@
-# Market Research Step 2: Customer Insights
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A CUSTOMER INSIGHTS ANALYST, not content generator
-- π¬ FOCUS on customer behavior and needs analysis
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after customer insights content generation
-- πΎ ONLY save when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from step-01 are available
-- Focus on customer behavior and needs analysis
-- Web search capabilities with source verification are enabled
-- May need to search for current customer behavior trends
-
-## YOUR TASK:
-
-Conduct comprehensive customer insights analysis with emphasis on behavior patterns and needs.
-
-## CUSTOMER INSIGHTS SEQUENCE:
-
-### 1. Begin Customer Insights Analysis
-
-**UTILIZE SUBPROCESSES AND SUBAGENTS**: Use research subagents, subprocesses or parallel processing if available to thoroughly analyze different customer areas simultaneously and thoroughly
-
-Start with customer research approach:
-"Now I'll conduct **customer insights analysis** to understand customer behavior and needs.
-
-**Customer Insights Focus:**
-
-- Customer behavior patterns and preferences
-- Pain points and challenges
-- Decision-making processes
-- Customer journey mapping
-- Customer satisfaction drivers
-- Demographic and psychographic profiles
-
-**Let me search for current customer insights using parallel web searches for comprehensive coverage.**"
-
-### 2. Parallel Customer Research Execution
-
-**Execute multiple web searches simultaneously:**
-
-Search the web: "[product/service/market] customer behavior patterns"
-Search the web: "[product/service/market] customer pain points challenges"
-Search the web: "[product/service/market] customer decision process"
-
-**Analysis approach:**
-
-- Look for customer behavior studies and surveys
-- Search for customer experience and interaction patterns
-- Research customer satisfaction methodologies
-- Note generational and cultural customer variations
-- Research customer pain points and frustrations
-- Analyze decision-making processes and criteria
-
-### 3. Analyze and Aggregate Results
-
-**Collect and analyze findings from all parallel searches:**
-
-"After executing comprehensive parallel web searches, let me analyze and aggregate the customer insights:
-
-**Research Coverage:**
-
-- Customer behavior patterns and preferences
-- Pain points and challenges
-- Decision-making processes and journey mapping
-
-**Cross-Customer Analysis:**
-[Identify patterns connecting behavior, pain points, and decisions]
-
-**Quality Assessment:**
-[Overall confidence levels and research gaps identified]"
-
-### 4. Generate Customer Insights Content
-
-Prepare customer analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Customer Insights
-
-### Customer Behavior Patterns
-
-[Customer behavior analysis with source citations]
-_Source: [URL]_
-
-### Pain Points and Challenges
-
-[Pain points analysis with source citations]
-_Source: [URL]_
-
-### Decision-Making Processes
-
-[Decision-making analysis with source citations]
-_Source: [URL]_
-
-### Customer Journey Mapping
-
-[Customer journey analysis with source citations]
-_Source: [URL]_
-
-### Customer Satisfaction Drivers
-
-[Satisfaction drivers analysis with source citations]
-_Source: [URL]_
-
-### Demographic Profiles
-
-[Demographic profiles analysis with source citations]
-_Source: [URL]_
-
-### Psychographic Profiles
-
-[Psychographic profiles analysis with source citations]
-_Source: [URL]_
-```
-
-### 5. Present Analysis and Continue Option
-
-Show the generated customer insights and present continue option:
-"I've completed the **customer insights analysis** for customer behavior and needs.
-
-**Key Customer Findings:**
-
-- Customer behavior patterns clearly identified
-- Pain points and challenges thoroughly documented
-- Decision-making processes mapped
-- Customer journey insights captured
-- Satisfaction and profile data analyzed
-
-**Ready to proceed to competitive analysis?**
-[C] Continue - Save this to the document and proceed to competitive analysis
-
-### 6. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- Append the final content to the research document
-- Update frontmatter: `stepsCompleted: [1, 2]`
-- Load: `./step-05-competitive-analysis.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the research document using the structure from step 4.
-
-## SUCCESS METRICS:
-
-β Customer behavior patterns identified with current citations
-β Pain points and challenges clearly documented
-β Decision-making processes thoroughly analyzed
-β Customer journey insights captured and mapped
-β Customer satisfaction drivers identified
-β [C] continue option presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-
-β Missing critical customer behavior patterns
-β Not identifying key pain points and challenges
-β Incomplete customer journey mapping
-β Not presenting [C] continue option after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## CUSTOMER RESEARCH PROTOCOLS:
-
-- Search for customer behavior studies and surveys
-- Use market research firm and industry association sources
-- Research customer experience and interaction patterns
-- Note generational and cultural customer variations
-- Research customer satisfaction methodologies
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-05-competitive-analysis.md` to focus on competitive landscape analysis.
-
-Remember: Always emphasize current customer data and rigorous source verification!
diff --git a/src/bmm/workflows/1-analysis/research/market-steps/step-03-customer-pain-points.md b/src/bmm/workflows/1-analysis/research/market-steps/step-03-customer-pain-points.md
deleted file mode 100644
index f4d2ae6d..00000000
--- a/src/bmm/workflows/1-analysis/research/market-steps/step-03-customer-pain-points.md
+++ /dev/null
@@ -1,249 +0,0 @@
-# Market Research Step 3: Customer Pain Points and Needs
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A CUSTOMER NEEDS ANALYST, not content generator
-- π¬ FOCUS on customer pain points, challenges, and unmet needs
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after pain points content generation
-- π WRITE CUSTOMER PAIN POINTS ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Customer behavior analysis completed in previous step
-- Focus on customer pain points, challenges, and unmet needs
-- Web search capabilities with source verification are enabled
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-
-## YOUR TASK:
-
-Conduct customer pain points and needs analysis with emphasis on challenges and frustrations.
-
-## CUSTOMER PAIN POINTS ANALYSIS SEQUENCE:
-
-### 1. Begin Customer Pain Points Analysis
-
-**UTILIZE SUBPROCESSES AND SUBAGENTS**: Use research subagents, subprocesses or parallel processing if available to thoroughly analyze different customer pain point areas simultaneously and thoroughly.
-
-Start with customer pain points research approach:
-"Now I'll conduct **customer pain points analysis** for **{{research_topic}}** to understand customer challenges.
-
-**Customer Pain Points Focus:**
-
-- Customer challenges and frustrations
-- Unmet needs and unaddressed problems
-- Barriers to adoption or usage
-- Service and support pain points
-- Customer satisfaction gaps
-
-**Let me search for current customer pain points insights.**"
-
-### 2. Parallel Pain Points Research Execution
-
-**Execute multiple web searches simultaneously:**
-
-Search the web: "{{research_topic}} customer pain points challenges"
-Search the web: "{{research_topic}} customer frustrations"
-Search the web: "{{research_topic}} unmet customer needs"
-Search the web: "{{research_topic}} customer barriers to adoption"
-
-**Analysis approach:**
-
-- Look for customer satisfaction surveys and reports
-- Search for customer complaints and reviews
-- Research customer support and service issues
-- Analyze barriers to customer adoption
-- Study unmet needs and market gaps
-
-### 3. Analyze and Aggregate Results
-
-**Collect and analyze findings from all parallel searches:**
-
-"After executing comprehensive parallel web searches, let me analyze and aggregate customer pain points findings:
-
-**Research Coverage:**
-
-- Customer challenges and frustrations
-- Unmet needs and unaddressed problems
-- Barriers to adoption or usage
-- Service and support pain points
-
-**Cross-Pain Points Analysis:**
-[Identify patterns connecting different types of pain points]
-
-**Quality Assessment:**
-[Overall confidence levels and research gaps identified]"
-
-### 4. Generate Customer Pain Points Content
-
-**WRITE IMMEDIATELY TO DOCUMENT**
-
-Prepare customer pain points analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Customer Pain Points and Needs
-
-### Customer Challenges and Frustrations
-
-[Customer challenges analysis with source citations]
-_Primary Frustrations: [Major customer frustrations identified]_
-_Usage Barriers: [Barriers preventing effective usage]_
-_Service Pain Points: [Customer service and support issues]_
-_Frequency Analysis: [How often these challenges occur]_
-_Source: [URL]_
-
-### Unmet Customer Needs
-
-[Unmet needs analysis with source citations]
-_Critical Unmet Needs: [Most important unaddressed needs]_
-_Solution Gaps: [Opportunities to address unmet needs]_
-_Market Gaps: [Market opportunities from unmet needs]_
-_Priority Analysis: [Which needs are most critical]_
-_Source: [URL]_
-
-### Barriers to Adoption
-
-[Adoption barriers analysis with source citations]
-_Price Barriers: [Cost-related barriers to adoption]_
-_Technical Barriers: [Complexity or technical barriers]_
-_Trust Barriers: [Trust and credibility issues]_
-_Convenience Barriers: [Ease of use or accessibility issues]_
-_Source: [URL]_
-
-### Service and Support Pain Points
-
-[Service pain points analysis with source citations]
-_Customer Service Issues: [Common customer service problems]_
-_Support Gaps: [Areas where customer support is lacking]_
-_Communication Issues: [Communication breakdowns and frustrations]_
-_Response Time Issues: [Slow response and resolution problems]_
-_Source: [URL]_
-
-### Customer Satisfaction Gaps
-
-[Satisfaction gap analysis with source citations]
-_Expectation Gaps: [Differences between expectations and reality]_
-_Quality Gaps: [Areas where quality expectations aren't met]_
-_Value Perception Gaps: [Perceived value vs actual value]_
-_Trust and Credibility Gaps: [Trust issues affecting satisfaction]_
-_Source: [URL]_
-
-### Emotional Impact Assessment
-
-[Emotional impact analysis with source citations]
-_Frustration Levels: [Customer frustration severity assessment]_
-_Loyalty Risks: [How pain points affect customer loyalty]_
-_Reputation Impact: [Impact on brand or product reputation]_
-_Customer Retention Risks: [Risk of customer loss from pain points]_
-_Source: [URL]_
-
-### Pain Point Prioritization
-
-[Pain point prioritization with source citations]
-_High Priority Pain Points: [Most critical pain points to address]_
-_Medium Priority Pain Points: [Important but less critical pain points]_
-_Low Priority Pain Points: [Minor pain points with lower impact]_
-_Opportunity Mapping: [Pain points with highest solution opportunity]_
-_Source: [URL]_
-```
-
-### 5. Present Analysis and Continue Option
-
-**Show analysis and present continue option:**
-
-"I've completed **customer pain points analysis** for {{research_topic}}, focusing on customer challenges.
-
-**Key Pain Points Findings:**
-
-- Customer challenges and frustrations thoroughly documented
-- Unmet needs and solution gaps clearly identified
-- Adoption barriers and service pain points analyzed
-- Customer satisfaction gaps assessed
-- Pain points prioritized by impact and opportunity
-
-**Ready to proceed to customer decision processes?**
-[C] Continue - Save this to document and proceed to decision processes analysis
-
-### 6. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- **CONTENT ALREADY WRITTEN TO DOCUMENT**
-- Update frontmatter: `stepsCompleted: [1, 2, 3]`
-- Load: `./step-04-customer-decisions.md`
-
-## APPEND TO DOCUMENT:
-
-Content is already written to document when generated in step 4. No additional append needed.
-
-## SUCCESS METRICS:
-
-β Customer challenges and frustrations clearly documented
-β Unmet needs and solution gaps identified
-β Adoption barriers and service pain points analyzed
-β Customer satisfaction gaps assessed
-β Pain points prioritized by impact and opportunity
-β Content written immediately to document
-β [C] continue option presented and handled correctly
-β Proper routing to next step (customer decisions)
-β Research goals alignment maintained
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-
-β Missing critical customer challenges or frustrations
-β Not identifying unmet needs or solution gaps
-β Incomplete adoption barriers analysis
-β Not writing content immediately to document
-β Not presenting [C] continue option after content generation
-β Not routing to customer decisions analysis step
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## CUSTOMER PAIN POINTS RESEARCH PROTOCOLS:
-
-- Research customer satisfaction surveys and reviews
-- Use customer feedback and complaint data
-- Analyze customer support and service issues
-- Study barriers to customer adoption
-- Focus on current pain point data
-- Present conflicting information when sources disagree
-- Apply confidence levels appropriately
-
-## PAIN POINTS ANALYSIS STANDARDS:
-
-- Always cite URLs for web search results
-- Use authoritative customer research sources
-- Note data currency and potential limitations
-- Present multiple perspectives when sources conflict
-- Apply confidence levels to uncertain data
-- Focus on actionable pain point insights
-
-## NEXT STEP:
-
-After user selects 'C', load `./step-04-customer-decisions.md` to analyze customer decision processes, journey mapping, and decision factors for {{research_topic}}.
-
-Remember: Always write research content to document immediately and emphasize current customer pain points data with rigorous source verification!
diff --git a/src/bmm/workflows/1-analysis/research/market-steps/step-04-customer-decisions.md b/src/bmm/workflows/1-analysis/research/market-steps/step-04-customer-decisions.md
deleted file mode 100644
index 21544335..00000000
--- a/src/bmm/workflows/1-analysis/research/market-steps/step-04-customer-decisions.md
+++ /dev/null
@@ -1,259 +0,0 @@
-# Market Research Step 4: Customer Decisions and Journey
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A CUSTOMER DECISION ANALYST, not content generator
-- π¬ FOCUS on customer decision processes and journey mapping
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after decision processes content generation
-- π WRITE CUSTOMER DECISIONS ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Customer behavior and pain points analysis completed in previous steps
-- Focus on customer decision processes and journey mapping
-- Web search capabilities with source verification are enabled
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-
-## YOUR TASK:
-
-Conduct customer decision processes and journey analysis with emphasis on decision factors and journey mapping.
-
-## CUSTOMER DECISIONS ANALYSIS SEQUENCE:
-
-### 1. Begin Customer Decisions Analysis
-
-**UTILIZE SUBPROCESSES AND SUBAGENTS**: Use research subagents, subprocesses or parallel processing if available to thoroughly analyze different customer decision areas simultaneously and thoroughly.
-
-Start with customer decisions research approach:
-"Now I'll conduct **customer decision processes analysis** for **{{research_topic}}** to understand customer decision-making.
-
-**Customer Decisions Focus:**
-
-- Customer decision-making processes
-- Decision factors and criteria
-- Customer journey mapping
-- Purchase decision influencers
-- Information gathering patterns
-
-**Let me search for current customer decision insights.**"
-
-### 2. Parallel Decisions Research Execution
-
-**Execute multiple web searches simultaneously:**
-
-Search the web: "{{research_topic}} customer decision process"
-Search the web: "{{research_topic}} buying criteria factors"
-Search the web: "{{research_topic}} customer journey mapping"
-Search the web: "{{research_topic}} decision influencing factors"
-
-**Analysis approach:**
-
-- Look for customer decision research studies
-- Search for buying criteria and factor analysis
-- Research customer journey mapping methodologies
-- Analyze decision influence factors and channels
-- Study information gathering and evaluation patterns
-
-### 3. Analyze and Aggregate Results
-
-**Collect and analyze findings from all parallel searches:**
-
-"After executing comprehensive parallel web searches, let me analyze and aggregate customer decision findings:
-
-**Research Coverage:**
-
-- Customer decision-making processes
-- Decision factors and criteria
-- Customer journey mapping
-- Decision influence factors
-
-**Cross-Decisions Analysis:**
-[Identify patterns connecting decision factors and journey stages]
-
-**Quality Assessment:**
-[Overall confidence levels and research gaps identified]"
-
-### 4. Generate Customer Decisions Content
-
-**WRITE IMMEDIATELY TO DOCUMENT**
-
-Prepare customer decisions analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Customer Decision Processes and Journey
-
-### Customer Decision-Making Processes
-
-[Decision processes analysis with source citations]
-_Decision Stages: [Key stages in customer decision making]_
-_Decision Timelines: [Timeframes for different decisions]_
-_Complexity Levels: [Decision complexity assessment]_
-_Evaluation Methods: [How customers evaluate options]_
-_Source: [URL]_
-
-### Decision Factors and Criteria
-
-[Decision factors analysis with source citations]
-_Primary Decision Factors: [Most important factors in decisions]_
-_Secondary Decision Factors: [Supporting factors influencing decisions]_
-_Weighing Analysis: [How different factors are weighed]_
-_Evoluton Patterns: [How factors change over time]_
-_Source: [URL]_
-
-### Customer Journey Mapping
-
-[Journey mapping analysis with source citations]
-_Awareness Stage: [How customers become aware of {{research_topic}}]_
-_Consideration Stage: [Evaluation and comparison process]_
-_Decision Stage: [Final decision-making process]_
-_Purchase Stage: [Purchase execution and completion]_
-_Post-Purchase Stage: [Post-decision evaluation and behavior]_
-_Source: [URL]_
-
-### Touchpoint Analysis
-
-[Touchpoint analysis with source citations]
-_Digital Touchpoints: [Online and digital interaction points]_
-_Offline Touchpoints: [Physical and in-person interaction points]_
-_Information Sources: [Where customers get information]_
-_Influence Channels: [What influences customer decisions]_
-_Source: [URL]_
-
-### Information Gathering Patterns
-
-[Information patterns analysis with source citations]
-_Research Methods: [How customers research options]_
-_Information Sources Trusted: [Most trusted information sources]_
-_Research Duration: [Time spent gathering information]_
-_Evaluation Criteria: [How customers evaluate information]_
-_Source: [URL]_
-
-### Decision Influencers
-
-[Decision influencer analysis with source citations]
-_Peer Influence: [How friends and family influence decisions]_
-_Expert Influence: [How expert opinions affect decisions]_
-_Media Influence: [How media and marketing affect decisions]_
-_Social Proof Influence: [How reviews and testimonials affect decisions]_
-_Source: [URL]_
-
-### Purchase Decision Factors
-
-[Purchase decision factors analysis with source citations]
-_Immediate Purchase Drivers: [Factors triggering immediate purchase]_
-_Delayed Purchase Drivers: [Factors causing purchase delays]_
-_Brand Loyalty Factors: [Factors driving repeat purchases]_
-_Price Sensitivity: [How price affects purchase decisions]_
-_Source: [URL]_
-
-### Customer Decision Optimizations
-
-[Decision optimization analysis with source citations]
-_Friction Reduction: [Ways to make decisions easier]_
-_Trust Building: [Building customer trust in decisions]_
-_Conversion Optimization: [Optimizing decision-to-purchase rates]_
-_Loyalty Building: [Building long-term customer relationships]_
-_Source: [URL]_
-```
-
-### 5. Present Analysis and Continue Option
-
-**Show analysis and present continue option:**
-
-"I've completed **customer decision processes analysis** for {{research_topic}}, focusing on customer decision-making.
-
-**Key Decision Findings:**
-
-- Customer decision-making processes clearly mapped
-- Decision factors and criteria thoroughly analyzed
-- Customer journey mapping completed across all stages
-- Decision influencers and touchpoints identified
-- Information gathering patterns documented
-
-**Ready to proceed to competitive analysis?**
-[C] Continue - Save this to document and proceed to competitive analysis
-
-### 6. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- **CONTENT ALREADY WRITTEN TO DOCUMENT**
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4]`
-- Load: `./step-05-competitive-analysis.md`
-
-## APPEND TO DOCUMENT:
-
-Content is already written to document when generated in step 4. No additional append needed.
-
-## SUCCESS METRICS:
-
-β Customer decision-making processes clearly mapped
-β Decision factors and criteria thoroughly analyzed
-β Customer journey mapping completed across all stages
-β Decision influencers and touchpoints identified
-β Information gathering patterns documented
-β Content written immediately to document
-β [C] continue option presented and handled correctly
-β Proper routing to next step (competitive analysis)
-β Research goals alignment maintained
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-
-β Missing critical decision-making process stages
-β Not identifying key decision factors
-β Incomplete customer journey mapping
-β Not writing content immediately to document
-β Not presenting [C] continue option after content generation
-β Not routing to competitive analysis step
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## CUSTOMER DECISIONS RESEARCH PROTOCOLS:
-
-- Research customer decision studies and psychology
-- Use customer journey mapping methodologies
-- Analyze buying criteria and decision factors
-- Study decision influence and touchpoint analysis
-- Focus on current decision data
-- Present conflicting information when sources disagree
-- Apply confidence levels appropriately
-
-## DECISION ANALYSIS STANDARDS:
-
-- Always cite URLs for web search results
-- Use authoritative customer decision research sources
-- Note data currency and potential limitations
-- Present multiple perspectives when sources conflict
-- Apply confidence levels to uncertain data
-- Focus on actionable decision insights
-
-## NEXT STEP:
-
-After user selects 'C', load `./step-05-competitive-analysis.md` to analyze competitive landscape, market positioning, and competitive strategies for {{research_topic}}.
-
-Remember: Always write research content to document immediately and emphasize current customer decision data with rigorous source verification!
diff --git a/src/bmm/workflows/1-analysis/research/market-steps/step-05-competitive-analysis.md b/src/bmm/workflows/1-analysis/research/market-steps/step-05-competitive-analysis.md
deleted file mode 100644
index d7387a4f..00000000
--- a/src/bmm/workflows/1-analysis/research/market-steps/step-05-competitive-analysis.md
+++ /dev/null
@@ -1,177 +0,0 @@
-# Market Research Step 5: Competitive Analysis
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A COMPETITIVE ANALYST, not content generator
-- π¬ FOCUS on competitive landscape and market positioning
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] complete option after competitive analysis content generation
-- πΎ ONLY save when user chooses C (Complete)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4, 5]` before completing workflow
-- π« FORBIDDEN to complete workflow until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Focus on competitive landscape and market positioning analysis
-- Web search capabilities with source verification are enabled
-- May need to search for specific competitor information
-
-## YOUR TASK:
-
-Conduct comprehensive competitive analysis with emphasis on market positioning.
-
-## COMPETITIVE ANALYSIS SEQUENCE:
-
-### 1. Begin Competitive Analysis
-
-Start with competitive research approach:
-"Now I'll conduct **competitive analysis** to understand the competitive landscape.
-
-**Competitive Analysis Focus:**
-
-- Key players and market share
-- Competitive positioning strategies
-- Strengths and weaknesses analysis
-- Market differentiation opportunities
-- Competitive threats and challenges
-
-**Let me search for current competitive information.**"
-
-### 2. Generate Competitive Analysis Content
-
-Prepare competitive analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Competitive Landscape
-
-### Key Market Players
-
-[Key players analysis with market share data]
-_Source: [URL]_
-
-### Market Share Analysis
-
-[Market share analysis with source citations]
-_Source: [URL]_
-
-### Competitive Positioning
-
-[Positioning analysis with source citations]
-_Source: [URL]_
-
-### Strengths and Weaknesses
-
-[SWOT analysis with source citations]
-_Source: [URL]_
-
-### Market Differentiation
-
-[Differentiation analysis with source citations]
-_Source: [URL]_
-
-### Competitive Threats
-
-[Threats analysis with source citations]
-_Source: [URL]_
-
-### Opportunities
-
-[Competitive opportunities analysis with source citations]
-_Source: [URL]_
-```
-
-### 3. Present Analysis and Complete Option
-
-Show the generated competitive analysis and present complete option:
-"I've completed the **competitive analysis** for the competitive landscape.
-
-**Key Competitive Findings:**
-
-- Key market players and market share identified
-- Competitive positioning strategies mapped
-- Strengths and weaknesses thoroughly analyzed
-- Market differentiation opportunities identified
-- Competitive threats and challenges documented
-
-**Ready to complete the market research?**
-[C] Complete Research - Save final document and conclude
-
-### 4. Handle Complete Selection
-
-#### If 'C' (Complete Research):
-
-- Append the final content to the research document
-- Update frontmatter: `stepsCompleted: [1, 2, 3]`
-- Complete the market research workflow
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the research document using the structure from step 2.
-
-## SUCCESS METRICS:
-
-β Key market players identified
-β Market share analysis completed with source verification
-β Competitive positioning strategies clearly mapped
-β Strengths and weaknesses thoroughly analyzed
-β Market differentiation opportunities identified
-β [C] complete option presented and handled correctly
-β Content properly appended to document when C selected
-β Market research workflow completed successfully
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-
-β Missing key market players or market share data
-β Incomplete competitive positioning analysis
-β Not identifying market differentiation opportunities
-β Not presenting completion option for research workflow
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## COMPETITIVE RESEARCH PROTOCOLS:
-
-- Search for industry reports and competitive intelligence
-- Use competitor company websites and annual reports
-- Research market research firm competitive analyses
-- Note competitive advantages and disadvantages
-- Search for recent market developments and disruptions
-
-## MARKET RESEARCH COMPLETION:
-
-When 'C' is selected:
-
-- All market research steps completed
-- Comprehensive market research document generated
-- All sections appended with source citations
-- Market research workflow status updated
-- Final recommendations provided to user
-
-## NEXT STEPS:
-
-Market research workflow complete. User may:
-
-- Use market research to inform product development strategies
-- Conduct additional competitive research on specific companies
-- Combine market research with other research types for comprehensive insights
-
-Congratulations on completing comprehensive market research! π
diff --git a/src/bmm/workflows/1-analysis/research/market-steps/step-06-research-completion.md b/src/bmm/workflows/1-analysis/research/market-steps/step-06-research-completion.md
deleted file mode 100644
index 42d7d7d9..00000000
--- a/src/bmm/workflows/1-analysis/research/market-steps/step-06-research-completion.md
+++ /dev/null
@@ -1,475 +0,0 @@
-# Market Research Step 6: Research Completion
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A MARKET RESEARCH STRATEGIST, not content generator
-- π¬ FOCUS on strategic recommendations and actionable insights
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] complete option after completion content generation
-- πΎ ONLY save when user chooses C (Complete)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4, 5, 6]` before completing workflow
-- π« FORBIDDEN to complete workflow until C is selected
-- π GENERATE COMPLETE DOCUMENT STRUCTURE with intro, TOC, and summary
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- **Research topic = "{{research_topic}}"** - comprehensive market analysis
-- **Research goals = "{{research_goals}}"** - achieved through exhaustive market research
-- All market research sections have been completed (customer behavior, pain points, decisions, competitive analysis)
-- Web search capabilities with source verification are enabled
-- This is the final synthesis step producing the complete market research document
-
-## YOUR TASK:
-
-Produce a comprehensive, authoritative market research document on **{{research_topic}}** with compelling narrative introduction, detailed TOC, and executive summary based on exhaustive market research.
-
-## MARKET RESEARCH COMPLETION SEQUENCE:
-
-### 1. Begin Strategic Synthesis
-
-Start with strategic synthesis approach:
-"Now I'll complete our market research with **strategic synthesis and recommendations** .
-
-**Strategic Synthesis Focus:**
-
-- Integrated insights from market, customer, and competitive analysis
-- Strategic recommendations based on research findings
-- Market entry or expansion strategies
-- Risk assessment and mitigation approaches
-- Actionable next steps and implementation guidance
-
-**Let me search for current strategic insights and best practices.**"
-
-### 2. Web Search for Market Entry Strategies
-
-Search for current market strategies:
-Search the web: "market entry strategies best practices"
-
-**Strategy focus:**
-
-- Market entry timing and approaches
-- Go-to-market strategies and frameworks
-- Market positioning and differentiation tactics
-- Customer acquisition and growth strategies
-
-### 3. Web Search for Risk Assessment
-
-Search for current risk approaches:
-Search the web: "market research risk assessment frameworks"
-
-**Risk focus:**
-
-- Market risks and uncertainty management
-- Competitive threats and mitigation strategies
-- Regulatory and compliance risks
-- Economic and market volatility considerations
-
-### 4. Generate Complete Market Research Document
-
-Prepare comprehensive market research document with full structure:
-
-#### Complete Document Structure:
-
-```markdown
-# [Compelling Title]: Comprehensive {{research_topic}} Market Research
-
-## Executive Summary
-
-[Brief compelling overview of key market findings and strategic implications]
-
-## Table of Contents
-
-- Market Research Introduction and Methodology
-- {{research_topic}} Market Analysis and Dynamics
-- Customer Insights and Behavior Analysis
-- Competitive Landscape and Positioning
-- Strategic Market Recommendations
-- Market Entry and Growth Strategies
-- Risk Assessment and Mitigation
-- Implementation Roadmap and Success Metrics
-- Future Market Outlook and Opportunities
-- Market Research Methodology and Source Documentation
-- Market Research Appendices and Additional Resources
-
-## 1. Market Research Introduction and Methodology
-
-### Market Research Significance
-
-**Compelling market narrative about why {{research_topic}} research is critical now**
-_Market Importance: [Strategic market significance with up-to-date context]_
-_Business Impact: [Business implications of market research]_
-_Source: [URL]_
-
-### Market Research Methodology
-
-[Comprehensive description of market research approach including:]
-
-- **Market Scope**: [Comprehensive market coverage areas]
-- **Data Sources**: [Authoritative market sources and verification approach]
-- **Analysis Framework**: [Structured market analysis methodology]
-- **Time Period**: [current focus and market evolution context]
-- **Geographic Coverage**: [Regional/global market scope]
-
-### Market Research Goals and Objectives
-
-**Original Market Goals:** {{research_goals}}
-
-**Achieved Market Objectives:**
-
-- [Market Goal 1 achievement with supporting evidence]
-- [Market Goal 2 achievement with supporting evidence]
-- [Additional market insights discovered during research]
-
-## 2. {{research_topic}} Market Analysis and Dynamics
-
-### Market Size and Growth Projections
-
-_[Comprehensive market analysis]_
-_Market Size: [Current market valuation and size]_
-_Growth Rate: [CAGR and market growth projections]_
-_Market Drivers: [Key factors driving market growth]_
-_Market Segments: [Detailed market segmentation analysis]_
-_Source: [URL]_
-
-### Market Trends and Dynamics
-
-[Current market trends analysis]
-_Emerging Trends: [Key market trends and their implications]_
-_Market Dynamics: [Forces shaping market evolution]_
-_Consumer Behavior Shifts: [Changes in customer behavior and preferences]_
-_Source: [URL]_
-
-### Pricing and Business Model Analysis
-
-[Comprehensive pricing and business model analysis]
-_Pricing Strategies: [Current pricing approaches and models]_
-_Business Model Evolution: [Emerging and successful business models]_
-_Value Proposition Analysis: [Customer value proposition assessment]_
-_Source: [URL]_
-
-## 3. Customer Insights and Behavior Analysis
-
-### Customer Behavior Patterns
-
-[Customer insights analysis with current context]
-_Behavior Patterns: [Key customer behavior trends and patterns]_
-_Customer Journey: [Complete customer journey mapping]_
-_Decision Factors: [Factors influencing customer decisions]_
-_Source: [URL]_
-
-### Customer Pain Points and Needs
-
-[Comprehensive customer pain point analysis]
-_Pain Points: [Key customer challenges and frustrations]_
-_Unmet Needs: [Unsolved customer needs and opportunities]_
-_Customer Expectations: [Current customer expectations and requirements]_
-_Source: [URL]_
-
-### Customer Segmentation and Targeting
-
-[Detailed customer segmentation analysis]
-_Customer Segments: [Detailed customer segment profiles]_
-_Target Market Analysis: [Most attractive customer segments]_
-_Segment-specific Strategies: [Tailored approaches for key segments]_
-_Source: [URL]_
-
-## 4. Competitive Landscape and Positioning
-
-### Competitive Analysis
-
-[Comprehensive competitive analysis]
-_Market Leaders: [Dominant competitors and their strategies]_
-_Emerging Competitors: [New entrants and innovative approaches]_
-_Competitive Advantages: [Key differentiators and competitive advantages]_
-_Source: [URL]_
-
-### Market Positioning Strategies
-
-[Strategic positioning analysis]
-_Positioning Opportunities: [Opportunities for market differentiation]_
-_Competitive Gaps: [Unserved market needs and opportunities]_
-_Positioning Framework: [Recommended positioning approach]_
-_Source: [URL]_
-
-## 5. Strategic Market Recommendations
-
-### Market Opportunity Assessment
-
-[Strategic market opportunities analysis]
-_High-Value Opportunities: [Most attractive market opportunities]_
-_Market Entry Timing: [Optimal timing for market entry or expansion]_
-_Growth Strategies: [Recommended approaches for market growth]_
-_Source: [URL]_
-
-### Strategic Recommendations
-
-[Comprehensive strategic recommendations]
-_Market Entry Strategy: [Recommended approach for market entry/expansion]_
-_Competitive Strategy: [Recommended competitive positioning and approach]_
-_Customer Acquisition Strategy: [Recommended customer acquisition approach]_
-_Source: [URL]_
-
-## 6. Market Entry and Growth Strategies
-
-### Go-to-Market Strategy
-
-[Comprehensive go-to-market approach]
-_Market Entry Approach: [Recommended market entry strategy and tactics]_
-_Channel Strategy: [Optimal channels for market reach and customer acquisition]_
-_Partnership Strategy: [Strategic partnership and collaboration opportunities]_
-_Source: [URL]_
-
-### Growth and Scaling Strategy
-
-[Market growth and scaling analysis]
-_Growth Phases: [Recommended phased approach to market growth]_
-_Scaling Considerations: [Key factors for successful market scaling]_
-_Expansion Opportunities: [Opportunities for geographic or segment expansion]_
-_Source: [URL]_
-
-## 7. Risk Assessment and Mitigation
-
-### Market Risk Analysis
-
-[Comprehensive market risk assessment]
-_Market Risks: [Key market-related risks and uncertainties]_
-_Competitive Risks: [Competitive threats and mitigation strategies]_
-_Regulatory Risks: [Regulatory and compliance considerations]_
-_Source: [URL]_
-
-### Mitigation Strategies
-
-[Risk mitigation and contingency planning]
-_Risk Mitigation Approaches: [Strategies for managing identified risks]_
-_Contingency Planning: [Backup plans and alternative approaches]_
-_Market Sensitivity Analysis: [Impact of market changes on strategy]_
-_Source: [URL]_
-
-## 8. Implementation Roadmap and Success Metrics
-
-### Implementation Framework
-
-[Comprehensive implementation guidance]
-_Implementation Timeline: [Recommended phased implementation approach]_
-_Required Resources: [Key resources and capabilities needed]_
-_Implementation Milestones: [Key milestones and success criteria]_
-_Source: [URL]_
-
-### Success Metrics and KPIs
-
-[Comprehensive success measurement framework]
-_Key Performance Indicators: [Critical metrics for measuring success]_
-_Monitoring and Reporting: [Approach for tracking and reporting progress]_
-_Success Criteria: [Clear criteria for determining success]_
-_Source: [URL]_
-
-## 9. Future Market Outlook and Opportunities
-
-### Future Market Trends
-
-[Forward-looking market analysis]
-_Near-term Market Evolution: [1-2 year market development expectations]_
-_Medium-term Market Trends: [3-5 year expected market developments]_
-_Long-term Market Vision: [5+ year market outlook for {{research_topic}}]_
-_Source: [URL]_
-
-### Strategic Opportunities
-
-[Market opportunity analysis and recommendations]
-_Emerging Opportunities: [New market opportunities and their potential]_
-_Innovation Opportunities: [Areas for market innovation and differentiation]_
-_Strategic Market Investments: [Recommended market investments and priorities]_
-_Source: [URL]_
-
-## 10. Market Research Methodology and Source Verification
-
-### Comprehensive Market Source Documentation
-
-[Complete documentation of all market research sources]
-_Primary Market Sources: [Key authoritative market sources used]_
-_Secondary Market Sources: [Supporting market research and analysis]_
-_Market Web Search Queries: [Complete list of market search queries used]_
-
-### Market Research Quality Assurance
-
-[Market research quality assurance and validation approach]
-_Market Source Verification: [All market claims verified with multiple sources]_
-_Market Confidence Levels: [Confidence assessments for uncertain market data]_
-_Market Research Limitations: [Market research limitations and areas for further investigation]_
-_Methodology Transparency: [Complete transparency about market research approach]_
-
-## 11. Market Research Appendices and Additional Resources
-
-### Detailed Market Data Tables
-
-[Comprehensive market data tables supporting research findings]
-_Market Size Data: [Detailed market size and growth data tables]_
-_Customer Analysis Data: [Detailed customer behavior and segmentation data]_
-_Competitive Analysis Data: [Detailed competitor comparison and positioning data]_
-
-### Market Resources and References
-
-[Valuable market resources for continued research and implementation]
-_Market Research Reports: [Authoritative market research reports and publications]_
-_Industry Associations: [Key industry organizations and market resources]_
-_Market Analysis Tools: [Tools and resources for ongoing market analysis]_
-
----
-
-## Market Research Conclusion
-
-### Summary of Key Market Findings
-
-[Comprehensive summary of the most important market research findings]
-
-### Strategic Market Impact Assessment
-
-[Assessment of market implications for {{research_topic}}]
-
-### Next Steps Market Recommendations
-
-[Specific next steps for leveraging this market research]
-
----
-
-**Market Research Completion Date:** {{date}}
-**Research Period:** current comprehensive market analysis
-**Document Length:** As needed for comprehensive market coverage
-**Source Verification:** All market facts cited with current sources
-**Market Confidence Level:** High - based on multiple authoritative market sources
-
-_This comprehensive market research document serves as an authoritative market reference on {{research_topic}} and provides strategic market insights for informed decision-making._
-```
-
-### 5. Present Complete Market Research Document and Final Option
-
-**Market Research Document Completion Presentation:**
-
-"I've completed the **comprehensive market research document synthesis** for **{{research_topic}}**, producing an authoritative market research document with:
-
-**Document Features:**
-
-- **Compelling Market Introduction**: Engaging opening that establishes market research significance
-- **Comprehensive Market TOC**: Complete navigation structure for market reference
-- **Exhaustive Market Research Coverage**: All market aspects of {{research_topic}} thoroughly analyzed
-- **Executive Market Summary**: Key market findings and strategic implications highlighted
-- **Strategic Market Recommendations**: Actionable market insights based on comprehensive research
-- **Complete Market Source Citations**: Every market claim verified with current sources
-
-**Market Research Completeness:**
-
-- Market analysis and dynamics fully documented
-- Customer insights and behavior analysis comprehensively covered
-- Competitive landscape and positioning detailed
-- Strategic market recommendations and implementation guidance provided
-
-**Document Standards Met:**
-
-- Exhaustive market research with no critical gaps
-- Professional market structure and compelling narrative
-- As long as needed for comprehensive market coverage
-- Multiple independent sources for all market claims
-- current market data throughout with proper citations
-
-**Ready to complete this comprehensive market research document?**
-[C] Complete Research - Save final comprehensive market research document
-
-### 6. Handle Complete Selection
-
-#### If 'C' (Complete Research):
-
-- Append the final content to the research document
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4]`
-- Complete the market research workflow
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the research document using the structure from step 4.
-
-## SUCCESS METRICS:
-
-β Compelling market introduction with research significance
-β Comprehensive market table of contents with complete document structure
-β Exhaustive market research coverage across all market aspects
-β Executive market summary with key findings and strategic implications
-β Strategic market recommendations grounded in comprehensive research
-β Complete market source verification with current citations
-β Professional market document structure and compelling narrative
-β [C] complete option presented and handled correctly
-β Market research workflow completed with comprehensive document
-
-## FAILURE MODES:
-
-β Not producing compelling market introduction
-β Missing comprehensive market table of contents
-β Incomplete market research coverage across market aspects
-β Not providing executive market summary with key findings
-β Missing strategic market recommendations based on research
-β Relying solely on training data without web verification for current facts
-β Producing market document without professional structure
-β Not presenting completion option for final market document
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## STRATEGIC RESEARCH PROTOCOLS:
-
-- Search for current market strategy frameworks and best practices
-- Research successful market entry cases and approaches
-- Identify risk management methodologies and frameworks
-- Research implementation planning and execution strategies
-- Consider market timing and readiness factors
-
-## COMPREHENSIVE MARKET DOCUMENT STANDARDS:
-
-This step ensures the final market research document:
-
-- Serves as an authoritative market reference on {{research_topic}}
-- Provides strategic market insights for informed decision-making
-- Includes comprehensive market coverage with no gaps
-- Maintains rigorous market source verification standards
-- Delivers strategic market insights and actionable recommendations
-- Meets professional market research document quality standards
-
-## MARKET RESEARCH WORKFLOW COMPLETION:
-
-When 'C' is selected:
-
-- All market research steps completed (1-4)
-- Comprehensive market research document generated
-- Professional market document structure with intro, TOC, and summary
-- All market sections appended with source citations
-- Market research workflow status updated to complete
-- Final comprehensive market research document delivered to user
-
-## FINAL MARKET DELIVERABLE:
-
-Complete authoritative market research document on {{research_topic}} that:
-
-- Establishes professional market credibility through comprehensive research
-- Provides strategic market insights for informed decision-making
-- Serves as market reference document for continued use
-- Maintains highest market research quality standards with current verification
-
-## NEXT STEPS:
-
-Comprehensive market research workflow complete. User may:
-
-- Use market research document to inform business strategies and decisions
-- Conduct additional market research on specific segments or opportunities
-- Combine market research with other research types for comprehensive insights
-- Move forward with implementation based on strategic market recommendations
-
-Congratulations on completing comprehensive market research with professional documentation! π
diff --git a/src/bmm/workflows/1-analysis/research/research.template.md b/src/bmm/workflows/1-analysis/research/research.template.md
deleted file mode 100644
index 1d995247..00000000
--- a/src/bmm/workflows/1-analysis/research/research.template.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-stepsCompleted: []
-inputDocuments: []
-workflowType: 'research'
-lastStep: 1
-research_type: '{{research_type}}'
-research_topic: '{{research_topic}}'
-research_goals: '{{research_goals}}'
-user_name: '{{user_name}}'
-date: '{{date}}'
-web_research_enabled: true
-source_verification: true
----
-
-# Research Report: {{research_type}}
-
-**Date:** {{date}}
-**Author:** {{user_name}}
-**Research Type:** {{research_type}}
-
----
-
-## Research Overview
-
-[Research overview and methodology will be appended here]
-
----
-
-
diff --git a/src/bmm/workflows/1-analysis/research/technical-steps/step-01-init.md b/src/bmm/workflows/1-analysis/research/technical-steps/step-01-init.md
deleted file mode 100644
index b286822d..00000000
--- a/src/bmm/workflows/1-analysis/research/technical-steps/step-01-init.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# Technical Research Step 1: Technical Research Scope Confirmation
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user confirmation
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β FOCUS EXCLUSIVELY on confirming technical research scope and approach
-- π YOU ARE A TECHNICAL RESEARCH PLANNER, not content generator
-- π¬ ACKNOWLEDGE and CONFIRM understanding of technical research goals
-- π This is SCOPE CONFIRMATION ONLY - no web research yet
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present [C] continue option after scope confirmation
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Research type = "technical" is already set
-- **Research topic = "{{research_topic}}"** - discovered from initial discussion
-- **Research goals = "{{research_goals}}"** - captured from initial discussion
-- Focus on technical architecture and implementation research
-- Web search is required to verify and supplement your knowledge with current facts
-
-## YOUR TASK:
-
-Confirm technical research scope and approach for **{{research_topic}}** with the user's goals in mind.
-
-## TECHNICAL SCOPE CONFIRMATION:
-
-### 1. Begin Scope Confirmation
-
-Start with technical scope understanding:
-"I understand you want to conduct **technical research** for **{{research_topic}}** with these goals: {{research_goals}}
-
-**Technical Research Scope:**
-
-- **Architecture Analysis**: System design patterns, frameworks, and architectural decisions
-- **Implementation Approaches**: Development methodologies, coding patterns, and best practices
-- **Technology Stack**: Languages, frameworks, tools, and platforms relevant to {{research_topic}}
-- **Integration Patterns**: APIs, communication protocols, and system interoperability
-- **Performance Considerations**: Scalability, optimization, and performance patterns
-
-**Research Approach:**
-
-- Current web data with rigorous source verification
-- Multi-source validation for critical technical claims
-- Confidence levels for uncertain technical information
-- Comprehensive technical coverage with architecture-specific insights
-
-### 2. Scope Confirmation
-
-Present clear scope confirmation:
-"**Technical Research Scope Confirmation:**
-
-For **{{research_topic}}**, I will research:
-
-β **Architecture Analysis** - design patterns, frameworks, system architecture
-β **Implementation Approaches** - development methodologies, coding patterns
-β **Technology Stack** - languages, frameworks, tools, platforms
-β **Integration Patterns** - APIs, protocols, interoperability
-β **Performance Considerations** - scalability, optimization, patterns
-
-**All claims verified against current public sources.**
-
-**Does this technical research scope and approach align with your goals?**
-[C] Continue - Begin technical research with this scope
-
-### 3. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- Document scope confirmation in research file
-- Update frontmatter: `stepsCompleted: [1]`
-- Load: `./step-02-technical-overview.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append scope confirmation:
-
-```markdown
-## Technical Research Scope Confirmation
-
-**Research Topic:** {{research_topic}}
-**Research Goals:** {{research_goals}}
-
-**Technical Research Scope:**
-
-- Architecture Analysis - design patterns, frameworks, system architecture
-- Implementation Approaches - development methodologies, coding patterns
-- Technology Stack - languages, frameworks, tools, platforms
-- Integration Patterns - APIs, protocols, interoperability
-- Performance Considerations - scalability, optimization, patterns
-
-**Research Methodology:**
-
-- Current web data with rigorous source verification
-- Multi-source validation for critical technical claims
-- Confidence level framework for uncertain information
-- Comprehensive technical coverage with architecture-specific insights
-
-**Scope Confirmed:** {{date}}
-```
-
-## SUCCESS METRICS:
-
-β Technical research scope clearly confirmed with user
-β All technical analysis areas identified and explained
-β Research methodology emphasized
-β [C] continue option presented and handled correctly
-β Scope confirmation documented when user proceeds
-β Proper routing to next technical research step
-
-## FAILURE MODES:
-
-β Not clearly confirming technical research scope with user
-β Missing critical technical analysis areas
-β Not explaining that web search is required for current facts
-β Not presenting [C] continue option
-β Proceeding without user scope confirmation
-β Not routing to next technical research step
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C', load `./step-02-technical-overview.md` to begin technology stack analysis.
-
-Remember: This is SCOPE CONFIRMATION ONLY - no actual technical research yet, just confirming the research approach and scope!
diff --git a/src/bmm/workflows/1-analysis/research/technical-steps/step-02-technical-overview.md b/src/bmm/workflows/1-analysis/research/technical-steps/step-02-technical-overview.md
deleted file mode 100644
index 78151eb0..00000000
--- a/src/bmm/workflows/1-analysis/research/technical-steps/step-02-technical-overview.md
+++ /dev/null
@@ -1,239 +0,0 @@
-# Technical Research Step 2: Technology Stack Analysis
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A TECHNOLOGY STACK ANALYST, not content generator
-- π¬ FOCUS on languages, frameworks, tools, and platforms
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after technology stack content generation
-- π WRITE TECHNOLOGY STACK ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from step-01 are available
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-- Focus on languages, frameworks, tools, and platforms
-- Web search capabilities with source verification are enabled
-
-## YOUR TASK:
-
-Conduct technology stack analysis focusing on languages, frameworks, tools, and platforms. Search the web to verify and supplement current facts.
-
-## TECHNOLOGY STACK ANALYSIS SEQUENCE:
-
-### 1. Begin Technology Stack Analysis
-
-**UTILIZE SUBPROCESSES AND SUBAGENTS**: Use research subagents, subprocesses or parallel processing if available to thoroughly analyze different technology stack areas simultaneously and thoroughly.
-
-Start with technology stack research approach:
-"Now I'll conduct **technology stack analysis** for **{{research_topic}}** to understand the technology landscape.
-
-**Technology Stack Focus:**
-
-- Programming languages and their evolution
-- Development frameworks and libraries
-- Database and storage technologies
-- Development tools and platforms
-- Cloud infrastructure and deployment platforms
-
-**Let me search for current technology stack insights.**"
-
-### 2. Parallel Technology Stack Research Execution
-
-**Execute multiple web searches simultaneously:**
-
-Search the web: "{{research_topic}} programming languages frameworks"
-Search the web: "{{research_topic}} development tools platforms"
-Search the web: "{{research_topic}} database storage technologies"
-Search the web: "{{research_topic}} cloud infrastructure platforms"
-
-**Analysis approach:**
-
-- Look for recent technology trend reports and developer surveys
-- Search for technology documentation and best practices
-- Research open-source projects and their technology choices
-- Analyze technology adoption patterns and migration trends
-- Study platform and tool evolution in the domain
-
-### 3. Analyze and Aggregate Results
-
-**Collect and analyze findings from all parallel searches:**
-
-"After executing comprehensive parallel web searches, let me analyze and aggregate technology stack findings:
-
-**Research Coverage:**
-
-- Programming languages and frameworks analysis
-- Development tools and platforms evaluation
-- Database and storage technologies assessment
-- Cloud infrastructure and deployment platform analysis
-
-**Cross-Technology Analysis:**
-[Identify patterns connecting language choices, frameworks, and platform decisions]
-
-**Quality Assessment:**
-[Overall confidence levels and research gaps identified]"
-
-### 4. Generate Technology Stack Content
-
-**WRITE IMMEDIATELY TO DOCUMENT**
-
-Prepare technology stack analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Technology Stack Analysis
-
-### Programming Languages
-
-[Programming languages analysis with source citations]
-_Popular Languages: [Most widely used languages for {{research_topic}}]_
-_Emerging Languages: [Growing languages gaining adoption]_
-_Language Evolution: [How language preferences are changing]_
-_Performance Characteristics: [Language performance and suitability]_
-_Source: [URL]_
-
-### Development Frameworks and Libraries
-
-[Frameworks analysis with source citations]
-_Major Frameworks: [Dominant frameworks and their use cases]_
-_Micro-frameworks: [Lightweight options and specialized libraries]_
-_Evolution Trends: [How frameworks are evolving and changing]_
-_Ecosystem Maturity: [Library availability and community support]_
-_Source: [URL]_
-
-### Database and Storage Technologies
-
-[Database analysis with source citations]
-_Relational Databases: [Traditional SQL databases and their evolution]_
-_NoSQL Databases: [Document, key-value, graph, and other NoSQL options]_
-_In-Memory Databases: [Redis, Memcached, and performance-focused solutions]_
-_Data Warehousing: [Analytics and big data storage solutions]_
-_Source: [URL]_
-
-### Development Tools and Platforms
-
-[Tools and platforms analysis with source citations]
-_IDE and Editors: [Development environments and their evolution]_
-_Version Control: [Git and related development tools]_
-_Build Systems: [Compilation, packaging, and automation tools]_
-_Testing Frameworks: [Unit testing, integration testing, and QA tools]_
-_Source: [URL]_
-
-### Cloud Infrastructure and Deployment
-
-[Cloud platforms analysis with source citations]
-_Major Cloud Providers: [AWS, Azure, GCP and their services]_
-_Container Technologies: [Docker, Kubernetes, and orchestration]_
-_Serverless Platforms: [FaaS and event-driven computing]_
-_CDN and Edge Computing: [Content delivery and distributed computing]_
-_Source: [URL]_
-
-### Technology Adoption Trends
-
-[Adoption trends analysis with source citations]
-_Migration Patterns: [How technology choices are evolving]_
-_Emerging Technologies: [New technologies gaining traction]_
-_Legacy Technology: [Older technologies being phased out]_
-_Community Trends: [Developer preferences and open-source adoption]_
-_Source: [URL]_
-```
-
-### 5. Present Analysis and Continue Option
-
-**Show analysis and present continue option:**
-
-"I've completed **technology stack analysis** of the technology landscape for {{research_topic}}.
-
-**Key Technology Stack Findings:**
-
-- Programming languages and frameworks thoroughly analyzed
-- Database and storage technologies evaluated
-- Development tools and platforms documented
-- Cloud infrastructure and deployment options mapped
-- Technology adoption trends identified
-
-**Ready to proceed to integration patterns analysis?**
-[C] Continue - Save this to document and proceed to integration patterns
-
-### 6. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- **CONTENT ALREADY WRITTEN TO DOCUMENT**
-- Update frontmatter: `stepsCompleted: [1, 2]`
-- Load: `./step-03-integration-patterns.md`
-
-## APPEND TO DOCUMENT:
-
-Content is already written to document when generated in step 4. No additional append needed.
-
-## SUCCESS METRICS:
-
-β Programming languages and frameworks thoroughly analyzed
-β Database and storage technologies evaluated
-β Development tools and platforms documented
-β Cloud infrastructure and deployment options mapped
-β Technology adoption trends identified
-β Content written immediately to document
-β [C] continue option presented and handled correctly
-β Proper routing to next step (integration patterns)
-β Research goals alignment maintained
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-
-β Missing critical programming languages or frameworks
-β Incomplete database and storage technology analysis
-β Not identifying development tools and platforms
-β Not writing content immediately to document
-β Not presenting [C] continue option after content generation
-β Not routing to integration patterns step
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## TECHNOLOGY STACK RESEARCH PROTOCOLS:
-
-- Research technology trend reports and developer surveys
-- Use technology documentation and best practices guides
-- Analyze open-source projects and their technology choices
-- Study technology adoption patterns and migration trends
-- Focus on current technology data
-- Present conflicting information when sources disagree
-- Apply confidence levels appropriately
-
-## TECHNOLOGY STACK ANALYSIS STANDARDS:
-
-- Always cite URLs for web search results
-- Use authoritative technology research sources
-- Note data currency and potential limitations
-- Present multiple perspectives when sources conflict
-- Apply confidence levels to uncertain data
-- Focus on actionable technology insights
-
-## NEXT STEP:
-
-After user selects 'C', load `./step-03-integration-patterns.md` to analyze APIs, communication protocols, and system interoperability for {{research_topic}}.
-
-Remember: Always write research content to document immediately and emphasize current technology data with rigorous source verification!
diff --git a/src/bmm/workflows/1-analysis/research/technical-steps/step-03-integration-patterns.md b/src/bmm/workflows/1-analysis/research/technical-steps/step-03-integration-patterns.md
deleted file mode 100644
index 68e2b70f..00000000
--- a/src/bmm/workflows/1-analysis/research/technical-steps/step-03-integration-patterns.md
+++ /dev/null
@@ -1,248 +0,0 @@
-# Technical Research Step 3: Integration Patterns
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE AN INTEGRATION ANALYST, not content generator
-- π¬ FOCUS on APIs, protocols, and system interoperability
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after integration patterns content generation
-- π WRITE INTEGRATION PATTERNS ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-- Focus on APIs, protocols, and system interoperability
-- Web search capabilities with source verification are enabled
-
-## YOUR TASK:
-
-Conduct integration patterns analysis focusing on APIs, communication protocols, and system interoperability. Search the web to verify and supplement current facts.
-
-## INTEGRATION PATTERNS ANALYSIS SEQUENCE:
-
-### 1. Begin Integration Patterns Analysis
-
-**UTILIZE SUBPROCESSES AND SUBAGENTS**: Use research subagents, subprocesses or parallel processing if available to thoroughly analyze different integration areas simultaneously and thoroughly.
-
-Start with integration patterns research approach:
-"Now I'll conduct **integration patterns analysis** for **{{research_topic}}** to understand system integration approaches.
-
-**Integration Patterns Focus:**
-
-- API design patterns and protocols
-- Communication protocols and data formats
-- System interoperability approaches
-- Microservices integration patterns
-- Event-driven architectures and messaging
-
-**Let me search for current integration patterns insights.**"
-
-### 2. Parallel Integration Patterns Research Execution
-
-**Execute multiple web searches simultaneously:**
-
-Search the web: "{{research_topic}} API design patterns protocols"
-Search the web: "{{research_topic}} communication protocols data formats"
-Search the web: "{{research_topic}} system interoperability integration"
-Search the web: "{{research_topic}} microservices integration patterns"
-
-**Analysis approach:**
-
-- Look for recent API design guides and best practices
-- Search for communication protocol documentation and standards
-- Research integration platform and middleware solutions
-- Analyze microservices architecture patterns and approaches
-- Study event-driven systems and messaging patterns
-
-### 3. Analyze and Aggregate Results
-
-**Collect and analyze findings from all parallel searches:**
-
-"After executing comprehensive parallel web searches, let me analyze and aggregate integration patterns findings:
-
-**Research Coverage:**
-
-- API design patterns and protocols analysis
-- Communication protocols and data formats evaluation
-- System interoperability approaches assessment
-- Microservices integration patterns documentation
-
-**Cross-Integration Analysis:**
-[Identify patterns connecting API choices, communication protocols, and system design]
-
-**Quality Assessment:**
-[Overall confidence levels and research gaps identified]"
-
-### 4. Generate Integration Patterns Content
-
-**WRITE IMMEDIATELY TO DOCUMENT**
-
-Prepare integration patterns analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Integration Patterns Analysis
-
-### API Design Patterns
-
-[API design patterns analysis with source citations]
-_RESTful APIs: [REST principles and best practices for {{research_topic}}]_
-_GraphQL APIs: [GraphQL adoption and implementation patterns]_
-_RPC and gRPC: [High-performance API communication patterns]_
-_Webhook Patterns: [Event-driven API integration approaches]_
-_Source: [URL]_
-
-### Communication Protocols
-
-[Communication protocols analysis with source citations]
-_HTTP/HTTPS Protocols: [Web-based communication patterns and evolution]_
-_WebSocket Protocols: [Real-time communication and persistent connections]_
-_Message Queue Protocols: [AMQP, MQTT, and messaging patterns]_
-_grpc and Protocol Buffers: [High-performance binary communication protocols]_
-_Source: [URL]_
-
-### Data Formats and Standards
-
-[Data formats analysis with source citations]
-_JSON and XML: [Structured data exchange formats and their evolution]_
-_Protobuf and MessagePack: [Efficient binary serialization formats]_
-_CSV and Flat Files: [Legacy data integration and bulk transfer patterns]_
-_Custom Data Formats: [Domain-specific data exchange standards]_
-_Source: [URL]_
-
-### System Interoperability Approaches
-
-[Interoperability analysis with source citations]
-_Point-to-Point Integration: [Direct system-to-system communication patterns]_
-_API Gateway Patterns: [Centralized API management and routing]_
-_Service Mesh: [Service-to-service communication and observability]_
-_Enterprise Service Bus: [Traditional enterprise integration patterns]_
-_Source: [URL]_
-
-### Microservices Integration Patterns
-
-[Microservices integration analysis with source citations]
-_API Gateway Pattern: [External API management and routing]_
-_Service Discovery: [Dynamic service registration and discovery]_
-_Circuit Breaker Pattern: [Fault tolerance and resilience patterns]_
-_Saga Pattern: [Distributed transaction management]_
-_Source: [URL]_
-
-### Event-Driven Integration
-
-[Event-driven analysis with source citations]
-_Publish-Subscribe Patterns: [Event broadcasting and subscription models]_
-_Event Sourcing: [Event-based state management and persistence]_
-_Message Broker Patterns: [RabbitMQ, Kafka, and message routing]_
-_CQRS Patterns: [Command Query Responsibility Segregation]_
-_Source: [URL]_
-
-### Integration Security Patterns
-
-[Security patterns analysis with source citations]
-_OAuth 2.0 and JWT: [API authentication and authorization patterns]_
-_API Key Management: [Secure API access and key rotation]_
-_Mutual TLS: [Certificate-based service authentication]_
-_Data Encryption: [Secure data transmission and storage]_
-_Source: [URL]_
-```
-
-### 5. Present Analysis and Continue Option
-
-**Show analysis and present continue option:**
-
-"I've completed **integration patterns analysis** of system integration approaches for {{research_topic}}.
-
-**Key Integration Patterns Findings:**
-
-- API design patterns and protocols thoroughly analyzed
-- Communication protocols and data formats evaluated
-- System interoperability approaches documented
-- Microservices integration patterns mapped
-- Event-driven integration strategies identified
-
-**Ready to proceed to architectural patterns analysis?**
-[C] Continue - Save this to document and proceed to architectural patterns
-
-### 6. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- **CONTENT ALREADY WRITTEN TO DOCUMENT**
-- Update frontmatter: `stepsCompleted: [1, 2, 3]`
-- Load: `./step-04-architectural-patterns.md`
-
-## APPEND TO DOCUMENT:
-
-Content is already written to document when generated in step 4. No additional append needed.
-
-## SUCCESS METRICS:
-
-β API design patterns and protocols thoroughly analyzed
-β Communication protocols and data formats evaluated
-β System interoperability approaches documented
-β Microservices integration patterns mapped
-β Event-driven integration strategies identified
-β Content written immediately to document
-β [C] continue option presented and handled correctly
-β Proper routing to next step (architectural patterns)
-β Research goals alignment maintained
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-
-β Missing critical API design patterns or protocols
-β Incomplete communication protocols analysis
-β Not identifying system interoperability approaches
-β Not writing content immediately to document
-β Not presenting [C] continue option after content generation
-β Not routing to architectural patterns step
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## INTEGRATION PATTERNS RESEARCH PROTOCOLS:
-
-- Research API design guides and best practices documentation
-- Use communication protocol specifications and standards
-- Analyze integration platform and middleware solutions
-- Study microservices architecture patterns and case studies
-- Focus on current integration data
-- Present conflicting information when sources disagree
-- Apply confidence levels appropriately
-
-## INTEGRATION PATTERNS ANALYSIS STANDARDS:
-
-- Always cite URLs for web search results
-- Use authoritative integration research sources
-- Note data currency and potential limitations
-- Present multiple perspectives when sources conflict
-- Apply confidence levels to uncertain data
-- Focus on actionable integration insights
-
-## NEXT STEP:
-
-After user selects 'C', load `./step-04-architectural-patterns.md` to analyze architectural patterns, design decisions, and system structures for {{research_topic}}.
-
-Remember: Always write research content to document immediately and emphasize current integration data with rigorous source verification!
diff --git a/src/bmm/workflows/1-analysis/research/technical-steps/step-04-architectural-patterns.md b/src/bmm/workflows/1-analysis/research/technical-steps/step-04-architectural-patterns.md
deleted file mode 100644
index 426cc662..00000000
--- a/src/bmm/workflows/1-analysis/research/technical-steps/step-04-architectural-patterns.md
+++ /dev/null
@@ -1,202 +0,0 @@
-# Technical Research Step 4: Architectural Patterns
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A SYSTEMS ARCHITECT, not content generator
-- π¬ FOCUS on architectural patterns and design decisions
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π WRITE CONTENT IMMEDIATELY TO DOCUMENT
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] continue option after architectural patterns content generation
-- π WRITE ARCHITECTURAL PATTERNS ANALYSIS TO DOCUMENT IMMEDIATELY
-- πΎ ONLY proceed when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- **Research topic = "{{research_topic}}"** - established from initial discussion
-- **Research goals = "{{research_goals}}"** - established from initial discussion
-- Focus on architectural patterns and design decisions
-- Web search capabilities with source verification are enabled
-
-## YOUR TASK:
-
-Conduct comprehensive architectural patterns analysis with emphasis on design decisions and implementation approaches for {{research_topic}}.
-
-## ARCHITECTURAL PATTERNS SEQUENCE:
-
-### 1. Begin Architectural Patterns Analysis
-
-Start with architectural research approach:
-"Now I'll focus on **architectural patterns and design decisions** for effective architecture approaches for [technology/domain].
-
-**Architectural Patterns Focus:**
-
-- System architecture patterns and their trade-offs
-- Design principles and best practices
-- Scalability and maintainability considerations
-- Integration and communication patterns
-- Security and performance architectural considerations
-
-**Let me search for current architectural patterns and approaches.**"
-
-### 2. Web Search for System Architecture Patterns
-
-Search for current architecture patterns:
-Search the web: "system architecture patterns best practices"
-
-**Architecture focus:**
-
-- Microservices, monolithic, and serverless patterns
-- Event-driven and reactive architectures
-- Domain-driven design patterns
-- Cloud-native and edge architecture patterns
-
-### 3. Web Search for Design Principles
-
-Search for current design principles:
-Search the web: "software design principles patterns"
-
-**Design focus:**
-
-- SOLID principles and their application
-- Clean architecture and hexagonal architecture
-- API design and GraphQL vs REST patterns
-- Database design and data architecture patterns
-
-### 4. Web Search for Scalability Patterns
-
-Search for current scalability approaches:
-Search the web: "scalability architecture patterns"
-
-**Scalability focus:**
-
-- Horizontal vs vertical scaling patterns
-- Load balancing and caching strategies
-- Distributed systems and consensus patterns
-- Performance optimization techniques
-
-### 5. Generate Architectural Patterns Content
-
-Prepare architectural analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Architectural Patterns and Design
-
-### System Architecture Patterns
-
-[System architecture patterns analysis with source citations]
-_Source: [URL]_
-
-### Design Principles and Best Practices
-
-[Design principles analysis with source citations]
-_Source: [URL]_
-
-### Scalability and Performance Patterns
-
-[Scalability patterns analysis with source citations]
-_Source: [URL]_
-
-### Integration and Communication Patterns
-
-[Integration patterns analysis with source citations]
-_Source: [URL]_
-
-### Security Architecture Patterns
-
-[Security patterns analysis with source citations]
-_Source: [URL]_
-
-### Data Architecture Patterns
-
-[Data architecture analysis with source citations]
-_Source: [URL]_
-
-### Deployment and Operations Architecture
-
-[Deployment architecture analysis with source citations]
-_Source: [URL]_
-```
-
-### 6. Present Analysis and Continue Option
-
-Show the generated architectural patterns and present continue option:
-"I've completed the **architectural patterns analysis** for effective architecture approaches.
-
-**Key Architectural Findings:**
-
-- System architecture patterns and trade-offs clearly mapped
-- Design principles and best practices thoroughly documented
-- Scalability and performance patterns identified
-- Integration and communication patterns analyzed
-- Security and data architecture considerations captured
-
-**Ready to proceed to implementation research?**
-[C] Continue - Save this to the document and move to implementation research
-
-### 7. Handle Continue Selection
-
-#### If 'C' (Continue):
-
-- Append the final content to the research document
-- Update frontmatter: `stepsCompleted: [1, 2, 3]`
-- Load: `./step-05-implementation-research.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the research document using the structure from step 5.
-
-## SUCCESS METRICS:
-
-β System architecture patterns identified with current citations
-β Design principles clearly documented and analyzed
-β Scalability and performance patterns thoroughly mapped
-β Integration and communication patterns captured
-β Security and data architecture considerations analyzed
-β [C] continue option presented and handled correctly
-β Content properly appended to document when C selected
-β Proper routing to implementation research step
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-
-β Missing critical system architecture patterns
-β Not analyzing design trade-offs and considerations
-β Incomplete scalability or performance patterns analysis
-β Not presenting [C] continue option after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## ARCHITECTURAL RESEARCH PROTOCOLS:
-
-- Search for architecture documentation and pattern catalogs
-- Use architectural conference proceedings and case studies
-- Research successful system architectures and their evolution
-- Note architectural decision records (ADRs) and rationales
-- Research architecture assessment and evaluation frameworks
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-05-implementation-research.md` to focus on implementation approaches and technology adoption.
-
-Remember: Always emphasize current architectural data and rigorous source verification!
diff --git a/src/bmm/workflows/1-analysis/research/technical-steps/step-05-implementation-research.md b/src/bmm/workflows/1-analysis/research/technical-steps/step-05-implementation-research.md
deleted file mode 100644
index 7117d525..00000000
--- a/src/bmm/workflows/1-analysis/research/technical-steps/step-05-implementation-research.md
+++ /dev/null
@@ -1,239 +0,0 @@
-# Technical Research Step 4: Implementation Research
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE AN IMPLEMENTATION ENGINEER, not content generator
-- π¬ FOCUS on implementation approaches and technology adoption
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] complete option after implementation research content generation
-- πΎ ONLY save when user chooses C (Complete)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4]` before completing workflow
-- π« FORBIDDEN to complete workflow until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Focus on implementation approaches and technology adoption strategies
-- Web search capabilities with source verification are enabled
-- This is the final step in the technical research workflow
-
-## YOUR TASK:
-
-Conduct comprehensive implementation research with emphasis on practical implementation approaches and technology adoption.
-
-## IMPLEMENTATION RESEARCH SEQUENCE:
-
-### 1. Begin Implementation Research
-
-Start with implementation research approach:
-"Now I'll complete our technical research with **implementation approaches and technology adoption** analysis.
-
-**Implementation Research Focus:**
-
-- Technology adoption strategies and migration patterns
-- Development workflows and tooling ecosystems
-- Testing, deployment, and operational practices
-- Team organization and skill requirements
-- Cost optimization and resource management
-
-**Let me search for current implementation and adoption strategies.**"
-
-### 2. Web Search for Technology Adoption
-
-Search for current adoption strategies:
-Search the web: "technology adoption strategies migration"
-
-**Adoption focus:**
-
-- Technology migration patterns and approaches
-- Gradual adoption vs big bang strategies
-- Legacy system modernization approaches
-- Vendor evaluation and selection criteria
-
-### 3. Web Search for Development Workflows
-
-Search for current development practices:
-Search the web: "software development workflows tooling"
-
-**Workflow focus:**
-
-- CI/CD pipelines and automation tools
-- Code quality and review processes
-- Testing strategies and frameworks
-- Collaboration and communication tools
-
-### 4. Web Search for Operational Excellence
-
-Search for current operational practices:
-Search the web: "DevOps operations best practices"
-
-**Operations focus:**
-
-- Monitoring and observability practices
-- Incident response and disaster recovery
-- Infrastructure as code and automation
-- Security operations and compliance automation
-
-### 5. Generate Implementation Research Content
-
-Prepare implementation analysis with web search citations:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Implementation Approaches and Technology Adoption
-
-### Technology Adoption Strategies
-
-[Technology adoption analysis with source citations]
-_Source: [URL]_
-
-### Development Workflows and Tooling
-
-[Development workflows analysis with source citations]
-_Source: [URL]_
-
-### Testing and Quality Assurance
-
-[Testing approaches analysis with source citations]
-_Source: [URL]_
-
-### Deployment and Operations Practices
-
-[Deployment practices analysis with source citations]
-_Source: [URL]_
-
-### Team Organization and Skills
-
-[Team organization analysis with source citations]
-_Source: [URL]_
-
-### Cost Optimization and Resource Management
-
-[Cost optimization analysis with source citations]
-_Source: [URL]_
-
-### Risk Assessment and Mitigation
-
-[Risk mitigation analysis with source citations]
-_Source: [URL]_
-
-## Technical Research Recommendations
-
-### Implementation Roadmap
-
-[Implementation roadmap recommendations]
-
-### Technology Stack Recommendations
-
-[Technology stack suggestions]
-
-### Skill Development Requirements
-
-[Skill development recommendations]
-
-### Success Metrics and KPIs
-
-[Success measurement framework]
-```
-
-### 6. Present Analysis and Complete Option
-
-Show the generated implementation research and present complete option:
-"I've completed the **implementation research and technology adoption** analysis, finalizing our comprehensive technical research.
-
-**Implementation Highlights:**
-
-- Technology adoption strategies and migration patterns documented
-- Development workflows and tooling ecosystems analyzed
-- Testing, deployment, and operational practices mapped
-- Team organization and skill requirements identified
-- Cost optimization and resource management strategies provided
-
-**This completes our technical research covering:**
-
-- Technical overview and landscape analysis
-- Architectural patterns and design decisions
-- Implementation approaches and technology adoption
-- Practical recommendations and implementation roadmap
-
-**Ready to complete the technical research report?**
-[C] Complete Research - Save final document and conclude
-
-### 7. Handle Complete Selection
-
-#### If 'C' (Complete Research):
-
-- Append the final content to the research document
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4]`
-- Complete the technical research workflow
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the research document using the structure from step 5.
-
-## SUCCESS METRICS:
-
-β Technology adoption strategies identified with current citations
-β Development workflows and tooling thoroughly analyzed
-β Testing and deployment practices clearly documented
-β Team organization and skill requirements mapped
-β Cost optimization and risk mitigation strategies provided
-β [C] complete option presented and handled correctly
-β Content properly appended to document when C selected
-β Technical research workflow completed successfully
-
-## FAILURE MODES:
-
-β Relying solely on training data without web verification for current facts
-
-β Missing critical technology adoption strategies
-β Not providing practical implementation guidance
-β Incomplete development workflows or operational practices analysis
-β Not presenting completion option for research workflow
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## IMPLEMENTATION RESEARCH PROTOCOLS:
-
-- Search for implementation case studies and success stories
-- Research technology migration patterns and lessons learned
-- Identify common implementation challenges and solutions
-- Research development tooling ecosystem evaluations
-- Analyze operational excellence frameworks and maturity models
-
-## TECHNICAL RESEARCH WORKFLOW COMPLETION:
-
-When 'C' is selected:
-
-- All technical research steps completed
-- Comprehensive technical research document generated
-- All sections appended with source citations
-- Technical research workflow status updated
-- Final implementation recommendations provided to user
-
-## NEXT STEPS:
-
-Technical research workflow complete. User may:
-
-- Use technical research to inform architecture decisions
-- Conduct additional research on specific technologies
-- Combine technical research with other research types for comprehensive insights
-- Move forward with implementation based on technical insights
-
-Congratulations on completing comprehensive technical research! π
diff --git a/src/bmm/workflows/1-analysis/research/technical-steps/step-06-research-synthesis.md b/src/bmm/workflows/1-analysis/research/technical-steps/step-06-research-synthesis.md
deleted file mode 100644
index 7dc28a2d..00000000
--- a/src/bmm/workflows/1-analysis/research/technical-steps/step-06-research-synthesis.md
+++ /dev/null
@@ -1,486 +0,0 @@
-# Technical Research Step 5: Technical Synthesis and Completion
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without web search verification
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β Search the web to verify and supplement your knowledge with current facts
-- π YOU ARE A TECHNICAL RESEARCH STRATEGIST, not content generator
-- π¬ FOCUS on comprehensive technical synthesis and authoritative conclusions
-- π WEB SEARCH REQUIRED - verify current facts against live sources
-- π PRODUCE COMPREHENSIVE DOCUMENT with narrative intro, TOC, and summary
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show web search analysis before presenting findings
-- β οΈ Present [C] complete option after synthesis content generation
-- πΎ ONLY save when user chooses C (Complete)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4, 5]` before completing workflow
-- π« FORBIDDEN to complete workflow until C is selected
-- π GENERATE COMPLETE DOCUMENT STRUCTURE with intro, TOC, and summary
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- **Research topic = "{{research_topic}}"** - comprehensive technical analysis
-- **Research goals = "{{research_goals}}"** - achieved through exhaustive technical research
-- All technical research sections have been completed (overview, architecture, implementation)
-- Web search capabilities with source verification are enabled
-- This is the final synthesis step producing the complete technical research document
-
-## YOUR TASK:
-
-Produce a comprehensive, authoritative technical research document on **{{research_topic}}** with compelling narrative introduction, detailed TOC, and executive summary based on exhaustive technical research.
-
-## COMPREHENSIVE TECHNICAL DOCUMENT SYNTHESIS:
-
-### 1. Technical Document Structure Planning
-
-**Complete Technical Research Document Structure:**
-
-```markdown
-# [Compelling Technical Title]: Comprehensive {{research_topic}} Technical Research
-
-## Executive Summary
-
-[Brief compelling overview of key technical findings and strategic implications]
-
-## Table of Contents
-
-- Technical Research Introduction and Methodology
-- Technical Landscape and Architecture Analysis
-- Implementation Approaches and Best Practices
-- Technology Stack Evolution and Trends
-- Integration and Interoperability Patterns
-- Performance and Scalability Analysis
-- Security and Compliance Considerations
-- Strategic Technical Recommendations
-- Implementation Roadmap and Risk Assessment
-- Future Technical Outlook and Innovation Opportunities
-- Technical Research Methodology and Source Documentation
-- Technical Appendices and Reference Materials
-```
-
-### 2. Generate Compelling Technical Introduction
-
-**Technical Introduction Requirements:**
-
-- Hook reader with compelling technical opening about {{research_topic}}
-- Establish technical research significance and current relevance
-- Outline comprehensive technical research methodology
-- Preview key technical findings and strategic implications
-- Set authoritative, technical expert tone
-
-**Web Search for Technical Introduction Context:**
-Search the web: "{{research_topic}} technical significance importance"
-
-### 3. Synthesize All Technical Research Sections
-
-**Technical Section-by-Section Integration:**
-
-- Combine technical overview from step-02
-- Integrate architectural patterns from step-03
-- Incorporate implementation research from step-04
-- Add cross-technical insights and connections
-- Ensure comprehensive technical coverage with no gaps
-
-### 4. Generate Complete Technical Document Content
-
-#### Final Technical Document Structure:
-
-```markdown
-# [Compelling Title]: Comprehensive {{research_topic}} Technical Research
-
-## Executive Summary
-
-[2-3 paragraph compelling summary of the most critical technical findings and strategic implications for {{research_topic}} based on comprehensive current technical research]
-
-**Key Technical Findings:**
-
-- [Most significant architectural insights]
-- [Critical implementation considerations]
-- [Important technology trends]
-- [Strategic technical implications]
-
-**Technical Recommendations:**
-
-- [Top 3-5 actionable technical recommendations based on research]
-
-## Table of Contents
-
-1. Technical Research Introduction and Methodology
-2. {{research_topic}} Technical Landscape and Architecture Analysis
-3. Implementation Approaches and Best Practices
-4. Technology Stack Evolution and Current Trends
-5. Integration and Interoperability Patterns
-6. Performance and Scalability Analysis
-7. Security and Compliance Considerations
-8. Strategic Technical Recommendations
-9. Implementation Roadmap and Risk Assessment
-10. Future Technical Outlook and Innovation Opportunities
-11. Technical Research Methodology and Source Verification
-12. Technical Appendices and Reference Materials
-
-## 1. Technical Research Introduction and Methodology
-
-### Technical Research Significance
-
-[Compelling technical narrative about why {{research_topic}} research is critical right now]
-_Technical Importance: [Strategic technical significance with current context]_
-_Business Impact: [Business implications of technical research]_
-_Source: [URL]_
-
-### Technical Research Methodology
-
-[Comprehensive description of technical research approach including:]
-
-- **Technical Scope**: [Comprehensive technical coverage areas]
-- **Data Sources**: [Authoritative technical sources and verification approach]
-- **Analysis Framework**: [Structured technical analysis methodology]
-- **Time Period**: [current focus and technical evolution context]
-- **Technical Depth**: [Level of technical detail and analysis]
-
-### Technical Research Goals and Objectives
-
-**Original Technical Goals:** {{research_goals}}
-
-**Achieved Technical Objectives:**
-
-- [Technical Goal 1 achievement with supporting evidence]
-- [Technical Goal 2 achievement with supporting evidence]
-- [Additional technical insights discovered during research]
-
-## 2. {{research_topic}} Technical Landscape and Architecture Analysis
-
-### Current Technical Architecture Patterns
-
-[Comprehensive architectural analysis synthesized from step-03 with current context]
-_Dominant Patterns: [Current architectural approaches]_
-_Architectural Evolution: [Historical and current evolution patterns]_
-_Architectural Trade-offs: [Key architectural decisions and implications]_
-_Source: [URL]_
-
-### System Design Principles and Best Practices
-
-[Complete system design analysis]
-_Design Principles: [Core principles guiding {{research_topic}} implementations]_
-_Best Practice Patterns: [Industry-standard approaches and methodologies]_
-_Architectural Quality Attributes: [Performance, scalability, maintainability considerations]_
-_Source: [URL]_
-
-## 3. Implementation Approaches and Best Practices
-
-### Current Implementation Methodologies
-
-[Implementation analysis from step-04 with current context]
-_Development Approaches: [Current development methodologies and approaches]_
-_Code Organization Patterns: [Structural patterns and organization strategies]_
-_Quality Assurance Practices: [Testing, validation, and quality approaches]_
-_Deployment Strategies: [Current deployment and operations practices]_
-_Source: [URL]_
-
-### Implementation Framework and Tooling
-
-[Comprehensive implementation framework analysis]
-_Development Frameworks: [Popular frameworks and their characteristics]_
-_Tool Ecosystem: [Development tools and platform considerations]_
-_Build and Deployment Systems: [CI/CD and automation approaches]_
-_Source: [URL]_
-
-## 4. Technology Stack Evolution and Current Trends
-
-### Current Technology Stack Landscape
-
-[Technology stack analysis from step-02 with current updates]
-_Programming Languages: [Current language trends and adoption patterns]_
-_Frameworks and Libraries: [Popular frameworks and their use cases]_
-_Database and Storage Technologies: [Current data storage and management trends]_
-_API and Communication Technologies: [Integration and communication patterns]_
-_Source: [URL]_
-
-### Technology Adoption Patterns
-
-[Comprehensive technology adoption analysis]
-_Adoption Trends: [Technology adoption rates and patterns]_
-_Migration Patterns: [Technology migration and evolution trends]_
-_Emerging Technologies: [New technologies and their potential impact]_
-_Source: [URL]_
-
-## 5. Integration and Interoperability Patterns
-
-### Current Integration Approaches
-
-[Integration patterns analysis with current context]
-_API Design Patterns: [Current API design and implementation patterns]_
-_Service Integration: [Microservices and service integration approaches]_
-_Data Integration: [Data exchange and integration patterns]_
-_Source: [URL]_
-
-### Interoperability Standards and Protocols
-
-[Comprehensive interoperability analysis]
-_Standards Compliance: [Industry standards and compliance requirements]_
-_Protocol Selection: [Communication protocols and selection criteria]_
-_Integration Challenges: [Common integration challenges and solutions]_
-_Source: [URL]_
-
-## 6. Performance and Scalability Analysis
-
-### Performance Characteristics and Optimization
-
-[Performance analysis based on research findings]
-_Performance Benchmarks: [Current performance characteristics and benchmarks]_
-_Optimization Strategies: [Performance optimization approaches and techniques]_
-_Monitoring and Measurement: [Performance monitoring and measurement practices]_
-_Source: [URL]_
-
-### Scalability Patterns and Approaches
-
-[Comprehensive scalability analysis]
-_Scalability Patterns: [Architectural and design patterns for scalability]_
-_Capacity Planning: [Capacity planning and resource management approaches]_
-_Elasticity and Auto-scaling: [Dynamic scaling approaches and implementations]_
-_Source: [URL]_
-
-## 7. Security and Compliance Considerations
-
-### Security Best Practices and Frameworks
-
-[Security analysis with current context]
-_Security Frameworks: [Current security frameworks and best practices]_
-_Threat Landscape: [Current security threats and mitigation approaches]_
-_Secure Development Practices: [Secure coding and development lifecycle]_
-_Source: [URL]_
-
-### Compliance and Regulatory Considerations
-
-[Comprehensive compliance analysis]
-_Industry Standards: [Relevant industry standards and compliance requirements]_
-_Regulatory Compliance: [Legal and regulatory considerations for {{research_topic}}]_
-_Audit and Governance: [Technical audit and governance practices]_
-_Source: [URL]_
-
-## 8. Strategic Technical Recommendations
-
-### Technical Strategy and Decision Framework
-
-[Strategic technical recommendations based on comprehensive research]
-_Architecture Recommendations: [Recommended architectural approaches and patterns]_
-_Technology Selection: [Recommended technology stack and selection criteria]_
-_Implementation Strategy: [Recommended implementation approaches and methodologies]_
-_Source: [URL]_
-
-### Competitive Technical Advantage
-
-[Analysis of technical competitive positioning]
-_Technology Differentiation: [Technical approaches that provide competitive advantage]_
-_Innovation Opportunities: [Areas for technical innovation and differentiation]_
-_Strategic Technology Investments: [Recommended technology investments and priorities]_
-_Source: [URL]_
-
-## 9. Implementation Roadmap and Risk Assessment
-
-### Technical Implementation Framework
-
-[Comprehensive implementation guidance based on research findings]
-_Implementation Phases: [Recommended phased implementation approach]_
-_Technology Migration Strategy: [Approach for technology adoption and migration]_
-_Resource Planning: [Technical resources and capabilities planning]_
-_Source: [URL]_
-
-### Technical Risk Management
-
-[Comprehensive technical risk assessment]
-_Technical Risks: [Major technical risks and mitigation strategies]_
-_Implementation Risks: [Risks associated with implementation and deployment]_
-_Business Impact Risks: [Technical risks and their business implications]_
-_Source: [URL]_
-
-## 10. Future Technical Outlook and Innovation Opportunities
-
-### Emerging Technology Trends
-
-[Forward-looking technical analysis based on comprehensive research]
-_Near-term Technical Evolution: [1-2 year technical development expectations]_
-_Medium-term Technology Trends: [3-5 year expected technical developments]_
-_Long-term Technical Vision: [5+ year technical outlook for {{research_topic}}]_
-_Source: [URL]_
-
-### Innovation and Research Opportunities
-
-[Technical innovation analysis and recommendations]
-_Research Opportunities: [Areas for technical research and innovation]_
-_Emerging Technology Adoption: [Potential new technologies and adoption timelines]_
-_Innovation Framework: [Approach for fostering technical innovation]_
-_Source: [URL]_
-
-## 11. Technical Research Methodology and Source Verification
-
-### Comprehensive Technical Source Documentation
-
-[Complete documentation of all technical research sources]
-_Primary Technical Sources: [Key authoritative technical sources used]_
-_Secondary Technical Sources: [Supporting technical research and analysis]_
-_Technical Web Search Queries: [Complete list of technical search queries used]_
-
-### Technical Research Quality Assurance
-
-[Technical quality assurance and validation approach]
-_Technical Source Verification: [All technical claims verified with multiple sources]_
-_Technical Confidence Levels: [Confidence assessments for uncertain technical data]_
-_Technical Limitations: [Technical research limitations and areas for further investigation]_
-_Methodology Transparency: [Complete transparency about technical research approach]_
-
-## 12. Technical Appendices and Reference Materials
-
-### Detailed Technical Data Tables
-
-[Comprehensive technical data tables supporting research findings]
-_Architectural Pattern Tables: [Detailed architectural pattern comparisons]_
-_Technology Stack Analysis: [Detailed technology evaluation and comparison data]_
-_Performance Benchmark Data: [Comprehensive performance measurement data]_
-
-### Technical Resources and References
-
-[Valuable technical resources for continued research and implementation]
-_Technical Standards: [Relevant technical standards and specifications]_
-_Open Source Projects: [Key open source projects and communities]_
-_Research Papers and Publications: [Academic and industry research sources]_
-_Technical Communities: [Professional networks and technical communities]_
-
----
-
-## Technical Research Conclusion
-
-### Summary of Key Technical Findings
-
-[Comprehensive summary of the most important technical research findings]
-
-### Strategic Technical Impact Assessment
-
-[Assessment of technical implications for {{research_topic}}]
-
-### Next Steps Technical Recommendations
-
-[Specific next steps for leveraging this technical research]
-
----
-
-**Technical Research Completion Date:** {{date}}
-**Research Period:** current comprehensive technical analysis
-**Document Length:** As needed for comprehensive technical coverage
-**Source Verification:** All technical facts cited with current sources
-**Technical Confidence Level:** High - based on multiple authoritative technical sources
-
-_This comprehensive technical research document serves as an authoritative technical reference on {{research_topic}} and provides strategic technical insights for informed decision-making and implementation._
-```
-
-### 5. Present Complete Technical Document and Final Option
-
-**Technical Document Completion Presentation:**
-
-"I've completed the **comprehensive technical research document synthesis** for **{{research_topic}}**, producing an authoritative technical research document with:
-
-**Technical Document Features:**
-
-- **Compelling Technical Introduction**: Engaging technical opening that establishes research significance
-- **Comprehensive Technical TOC**: Complete navigation structure for technical reference
-- **Exhaustive Technical Research Coverage**: All technical aspects of {{research_topic}} thoroughly analyzed
-- **Executive Technical Summary**: Key technical findings and strategic implications highlighted
-- **Strategic Technical Recommendations**: Actionable technical insights based on comprehensive research
-- **Complete Technical Source Citations**: Every technical claim verified with current sources
-
-**Technical Research Completeness:**
-
-- Technical landscape and architecture analysis fully documented
-- Implementation approaches and best practices comprehensively covered
-- Technology stack evolution and trends detailed
-- Integration, performance, and security analysis complete
-- Strategic technical insights and implementation guidance provided
-
-**Technical Document Standards Met:**
-
-- Exhaustive technical research with no critical gaps
-- Professional technical structure and compelling narrative
-- As long as needed for comprehensive technical coverage
-- Multiple independent technical sources for all claims
-- current technical data throughout with proper citations
-
-**Ready to complete this comprehensive technical research document?**
-[C] Complete Research - Save final comprehensive technical document
-
-### 6. Handle Final Technical Completion
-
-#### If 'C' (Complete Research):
-
-- Append the complete technical document to the research file
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4, 5]`
-- Complete the technical research workflow
-- Provide final technical document delivery confirmation
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the complete comprehensive technical research document using the full structure above.
-
-## SUCCESS METRICS:
-
-β Compelling technical introduction with research significance
-β Comprehensive technical table of contents with complete document structure
-β Exhaustive technical research coverage across all technical aspects
-β Executive technical summary with key findings and strategic implications
-β Strategic technical recommendations grounded in comprehensive research
-β Complete technical source verification with current citations
-β Professional technical document structure and compelling narrative
-β [C] complete option presented and handled correctly
-β Technical research workflow completed with comprehensive document
-
-## FAILURE MODES:
-
-β Not producing compelling technical introduction
-β Missing comprehensive technical table of contents
-β Incomplete technical research coverage across technical aspects
-β Not providing executive technical summary with key findings
-β Missing strategic technical recommendations based on research
-β Relying solely on training data without web verification for current facts
-β Producing technical document without professional structure
-β Not presenting completion option for final technical document
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## COMPREHENSIVE TECHNICAL DOCUMENT STANDARDS:
-
-This step ensures the final technical research document:
-
-- Serves as an authoritative technical reference on {{research_topic}}
-- Provides strategic technical insights for informed decision-making
-- Includes comprehensive technical coverage with no gaps
-- Maintains rigorous technical source verification standards
-- Delivers strategic technical insights and actionable recommendations
-- Meets professional technical research document quality standards
-
-## TECHNICAL RESEARCH WORKFLOW COMPLETION:
-
-When 'C' is selected:
-
-- All technical research steps completed (1-5)
-- Comprehensive technical research document generated
-- Professional technical document structure with intro, TOC, and summary
-- All technical sections appended with source citations
-- Technical research workflow status updated to complete
-- Final comprehensive technical research document delivered to user
-
-## FINAL TECHNICAL DELIVERABLE:
-
-Complete authoritative technical research document on {{research_topic}} that:
-
-- Establishes technical credibility through comprehensive research
-- Provides strategic technical insights for informed decision-making
-- Serves as technical reference document for continued use
-- Maintains highest technical research quality standards with current verification
-
-Congratulations on completing comprehensive technical research with professional documentation! π
diff --git a/src/bmm/workflows/1-analysis/research/workflow.md b/src/bmm/workflows/1-analysis/research/workflow.md
deleted file mode 100644
index 64f62bef..00000000
--- a/src/bmm/workflows/1-analysis/research/workflow.md
+++ /dev/null
@@ -1,173 +0,0 @@
----
-name: research
-description: Conduct comprehensive research across multiple domains using current web data and verified sources - Market, Technical, Domain and other research types.
-web_bundle: true
----
-
-# Research Workflow
-
-**Goal:** Conduct comprehensive, exhaustive research across multiple domains using current web data and verified sources to produce complete research documents with compelling narratives and proper citations.
-
-**Document Standards:**
-
-- **Comprehensive Coverage**: Exhaustive research with no critical gaps
-- **Source Verification**: Every factual claim backed by web sources with URL citations
-- **Document Length**: As long as needed to fully cover the research topic
-- **Professional Structure**: Compelling narrative introduction, detailed TOC, and comprehensive summary
-- **Authoritative Sources**: Multiple independent sources for all critical claims
-
-**Your Role:** You are a research facilitator and web data analyst working with an expert partner. This is a collaboration where you bring research methodology and web search capabilities, while your partner brings domain knowledge and research direction.
-
-**Final Deliverable**: A complete research document that serves as an authoritative reference on the research topic with:
-
-- Compelling narrative introduction
-- Comprehensive table of contents
-- Detailed research sections with proper citations
-- Executive summary and conclusions
-
-## WORKFLOW ARCHITECTURE
-
-This uses **micro-file architecture** with **routing-based discovery**:
-
-- Each research type has its own step folder
-- Step 01 discovers research type and routes to appropriate sub-workflow
-- Sequential progression within each research type
-- Document state tracked in output frontmatter
-
-## INITIALIZATION
-
-### Configuration Loading
-
-Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
-
-- `project_name`, `output_folder`, , `planning_artifacts`, `user_name`
-- `communication_language`, `document_output_language`, `user_skill_level`
-- `date` as a system-generated value
-
-### Paths
-
-- `installed_path` = `{project-root}/_bmad/bmm/workflows/1-analysis/research`
-- `template_path` = `{installed_path}/research.template.md`
-- `default_output_file` = `{planning_artifacts}/research/{{research_type}}-{{topic}}-research-{{date}}.md` (dynamic based on research type)
-
-## PREREQUISITE
-
-**β Web search required.** If unavailable, abort and tell the user.
-
-## RESEARCH BEHAVIOR
-
-### Web Research Standards
-
-- **Current Data Only**: Search the web to verify and supplement your knowledge with current facts
-- **Source Verification**: Require citations for all factual claims
-- **Anti-Hallucination Protocol**: Never present information without verified sources
-- **Multiple Sources**: Require at least 2 independent sources for critical claims
-- **Conflict Resolution**: Present conflicting views and note discrepancies
-- **Confidence Levels**: Flag uncertain data with [High/Medium/Low Confidence]
-
-### Source Quality Standards
-
-- **Distinguish Clearly**: Facts (from sources) vs Analysis (interpretation) vs Speculation
-- **URL Citation**: Always include source URLs when presenting web search data
-- **Critical Claims**: Market size, growth rates, competitive data need verification
-- **Fact Checking**: Apply fact-checking to critical data points
-
-## Implementation Instructions
-
-Execute research type discovery and routing:
-
-### Research Type Discovery
-
-**Your Role:** You are a research facilitator and web data analyst working with an expert partner. This is a collaboration where you bring research methodology and web search capabilities, while your partner brings domain knowledge and research direction.
-
-**Research Standards:**
-
-- **Anti-Hallucination Protocol**: Never present information without verified sources
-- **Current Data Only**: Search the web to verify and supplement your knowledge with current facts
-- **Source Citation**: Always include URLs for factual claims from web searches
-- **Multiple Sources**: Require 2+ independent sources for critical claims
-- **Conflict Resolution**: Present conflicting views and note discrepancies
-- **Confidence Levels**: Flag uncertain data with [High/Medium/Low Confidence]
-
-### Collaborative Research Discovery
-
-"Welcome {{user_name}}! I'm excited to work with you as your research partner. I bring web research capabilities with rigorous source verification, while you bring the domain expertise and research direction.
-
-**Let me help you clarify what you'd like to research.**
-
-**First, tell me: What specific topic, problem, or area do you want to research?**
-
-For example:
-
-- 'The electric vehicle market in Europe'
-- 'Cloud migration strategies for healthcare'
-- 'AI implementation in financial services'
-- 'Sustainable packaging regulations'
-- 'Or anything else you have in mind...'
-
-### Topic Exploration and Clarification
-
-Based on the user's initial topic, explore and refine the research scope:
-
-#### Topic Clarification Questions:
-
-1. **Core Topic**: "What exactly about [topic] are you most interested in?"
-2. **Research Goals**: "What do you hope to achieve with this research?"
-3. **Scope**: "Should we focus broadly or dive deep into specific aspects?"
-4. **Timeline**: "Are you looking at current state, historical context, or future trends?"
-5. **Application**: "How will you use this research? (product development, strategy, academic, etc.)"
-
-#### Context Building:
-
-- **Initial Input**: User provides topic or research interest
-- **Collaborative Refinement**: Work together to clarify scope and objectives
-- **Goal Alignment**: Ensure research direction matches user needs
-- **Research Boundaries**: Establish clear focus areas and deliverables
-
-### Research Type Identification
-
-After understanding the research topic and goals, identify the most appropriate research approach:
-
-**Research Type Options:**
-
-1. **Market Research** - Market size, growth, competition, customer insights
- _Best for: Understanding market dynamics, customer behavior, competitive landscape_
-
-2. **Domain Research** - Industry analysis, regulations, technology trends in specific domain
- _Best for: Understanding industry context, regulatory environment, ecosystem_
-
-3. **Technical Research** - Technology evaluation, architecture decisions, implementation approaches
- _Best for: Technical feasibility, technology selection, implementation strategies_
-
-**Recommendation**: Based on [topic] and [goals], I recommend [suggested research type] because [specific rationale].
-
-**What type of research would work best for your needs?**
-
-### Research Type Routing
-
-Based on user selection, route to appropriate sub-workflow with the discovered topic using the following IF block sets of instructions. YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-#### If Market Research:
-
-- Set `research_type = "market"`
-- Set `research_topic = [discovered topic from discussion]`
-- Create the starter output file: `{planning_artifacts}/research/market-{{research_topic}}-research-{{date}}.md` with exact copy of the ./research.template.md contents
-- Load: `./market-steps/step-01-init.md` with topic context
-
-#### If Domain Research:
-
-- Set `research_type = "domain"`
-- Set `research_topic = [discovered topic from discussion]`
-- Create the starter output file: `{planning_artifacts}/research/domain-{{research_topic}}-research-{{date}}.md` with exact copy of the ./research.template.md contents
-- Load: `./domain-steps/step-01-init.md` with topic context
-
-#### If Technical Research:
-
-- Set `research_type = "technical"`
-- Set `research_topic = [discovered topic from discussion]`
-- Create the starter output file: `{planning_artifacts}/research/technical-{{research_topic}}-research-{{date}}.md` with exact copy of the ./research.template.md contents
-- Load: `./technical-steps/step-01-init.md` with topic context
-
-**Important**: The discovered topic from the collaborative discussion should be passed to the research initialization steps, so they don't need to ask "What do you want to research?" again - they can focus on refining the scope for their specific research type.
-
-**Note:** All research workflows require web search for current data and source verification.
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-01-init.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-01-init.md
deleted file mode 100644
index 62969baf..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-01-init.md
+++ /dev/null
@@ -1,135 +0,0 @@
-# Step 1: UX Design Workflow Initialization
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on initialization and setup only - don't look ahead to future steps
-- πͺ DETECT existing workflow state and handle continuation properly
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΎ Initialize document and update frontmatter
-- π Set up frontmatter `stepsCompleted: [1]` before loading next step
-- π« FORBIDDEN to load next step until setup is complete
-
-## CONTEXT BOUNDARIES:
-
-- Variables from workflow.md are available in memory
-- Previous context = what's in output document + frontmatter
-- Don't assume knowledge from other steps
-- Input document discovery happens in this step
-
-## YOUR TASK:
-
-Initialize the UX design workflow by detecting continuation state and setting up the design specification document.
-
-## INITIALIZATION SEQUENCE:
-
-### 1. Check for Existing Workflow
-
-First, check if the output document already exists:
-
-- Look for file at `{planning_artifacts}/*ux-design-specification*.md`
-- If exists, read the complete file including frontmatter
-- If not exists, this is a fresh workflow
-
-### 2. Handle Continuation (If Document Exists)
-
-If the document exists and has frontmatter with `stepsCompleted`:
-
-- **STOP here** and load `./step-01b-continue.md` immediately
-- Do not proceed with any initialization tasks
-- Let step-01b handle the continuation logic
-
-### 3. Fresh Workflow Setup (If No Document)
-
-If no document exists or no `stepsCompleted` in frontmatter:
-
-#### A. Input Document Discovery
-
-Discover and load context documents using smart discovery. Documents can be in the following locations:
-- {planning_artifacts}/**
-- {output_folder}/**
-- {product_knowledge}/**
-- docs/**
-
-Also - when searching - documents can be a single markdown file, or a folder with an index and multiple files. For Example, if searching for `*foo*.md` and not found, also search for a folder called *foo*/index.md (which indicates sharded content)
-
-Try to discover the following:
-- Product Brief (`*brief*.md`)
-- Research Documents (`*prd*.md`)
-- Project Documentation (generally multiple documents might be found for this in the `{product_knowledge}` or `docs` folder.)
-- Project Context (`**/project-context.md`)
-
-Confirm what you have found with the user, along with asking if the user wants to provide anything else. Only after this confirmation will you proceed to follow the loading rules
-
-**Loading Rules:**
-
-- Load ALL discovered files completely that the user confirmed or provided (no offset/limit)
-- If there is a project context, whatever is relevant should try to be biased in the remainder of this whole workflow process
-- For sharded folders, load ALL files to get complete picture, using the index first to potentially know the potential of each document
-- index.md is a guide to what's relevant whenever available
-- Track all successfully loaded files in frontmatter `inputDocuments` array
-
-#### B. Create Initial Document
-
-Copy the template from `{installed_path}/ux-design-template.md` to `{planning_artifacts}/ux-design-specification.md`
-Initialize frontmatter in the template.
-
-#### C. Complete Initialization and Report
-
-Complete setup and report to user:
-
-**Document Setup:**
-
-- Created: `{planning_artifacts}/ux-design-specification.md` from template
-- Initialized frontmatter with workflow state
-
-**Input Documents Discovered:**
-Report what was found:
-"Welcome {{user_name}}! I've set up your UX design workspace for {{project_name}}.
-
-**Documents Found:**
-
-- PRD: {number of PRD files loaded or "None found"}
-- Product brief: {number of brief files loaded or "None found"}
-- Other context: {number of other files loaded or "None found"}
-
-**Files loaded:** {list of specific file names or "No additional documents found"}
-
-Do you have any other documents you'd like me to include, or shall we continue to the next step?
-
-[C] Continue to UX discovery"
-
-## NEXT STEP:
-
-After user selects [C] to continue, ensure the file `{planning_artifacts}/ux-design-specification.md` has been created and saved, and then load `./step-02-discovery.md` to begin the UX discovery phase.
-
-Remember: Do NOT proceed to step-02 until output file has been updated and user explicitly selects [C] to continue!
-
-## SUCCESS METRICS:
-
-β Existing workflow detected and handed off to step-01b correctly
-β Fresh workflow initialized with template and frontmatter
-β Input documents discovered and loaded using sharded-first logic
-β All discovered files tracked in frontmatter `inputDocuments`
-β User confirmed document setup and can proceed
-
-## FAILURE MODES:
-
-β Proceeding with fresh initialization when existing workflow exists
-β Not updating frontmatter with discovered input documents
-β Creating document without proper template
-β Not checking sharded folders first before whole files
-β Not reporting what documents were found to user
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-01b-continue.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-01b-continue.md
deleted file mode 100644
index 3d0f647e..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-01b-continue.md
+++ /dev/null
@@ -1,127 +0,0 @@
-# Step 1B: UX Design Workflow Continuation
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on understanding where we left off and continuing appropriately
-- πͺ RESUME workflow from exact point where it was interrupted
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis of current state before taking action
-- πΎ Keep existing frontmatter `stepsCompleted` values
-- π Only load documents that were already tracked in `inputDocuments`
-- π« FORBIDDEN to modify content completed in previous steps
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter are already loaded
-- Previous context = complete document + existing frontmatter
-- Input documents listed in frontmatter were already processed
-- Last completed step = `lastStep` value from frontmatter
-
-## YOUR TASK:
-
-Resume the UX design workflow from where it was left off, ensuring smooth continuation.
-
-## CONTINUATION SEQUENCE:
-
-### 1. Analyze Current State
-
-Review the frontmatter to understand:
-
-- `stepsCompleted`: Which steps are already done
-- `lastStep`: The most recently completed step number
-- `inputDocuments`: What context was already loaded
-- All other frontmatter variables
-
-### 2. Load All Input Documents
-
-Reload the context documents listed in `inputDocuments`:
-
-- For each document in `inputDocuments`, load the complete file
-- This ensures you have full context for continuation
-- Don't discover new documents - only reload what was previously processed
-
-### 3. Summarize Current Progress
-
-Welcome the user back and provide context:
-"Welcome back {{user_name}}! I'm resuming our UX design collaboration for {{project_name}}.
-
-**Current Progress:**
-
-- Steps completed: {stepsCompleted}
-- Last worked on: Step {lastStep}
-- Context documents available: {len(inputDocuments)} files
-- Current UX design specification is ready with all completed sections
-
-**Document Status:**
-
-- Current UX design document is ready with all completed sections
-- Ready to continue from where we left off
-
-Does this look right, or do you want to make any adjustments before we proceed?"
-
-### 4. Determine Next Step
-
-Based on `lastStep` value, determine which step to load next:
-
-- If `lastStep = 1` β Load `./step-02-discovery.md`
-- If `lastStep = 2` β Load `./step-03-core-experience.md`
-- If `lastStep = 3` β Load `./step-04-emotional-response.md`
-- Continue this pattern for all steps
-- If `lastStep` indicates final step β Workflow already complete
-
-### 5. Present Continuation Options
-
-After presenting current progress, ask:
-"Ready to continue with Step {nextStepNumber}: {nextStepTitle}?
-
-[C] Continue to Step {nextStepNumber}"
-
-## SUCCESS METRICS:
-
-β All previous input documents successfully reloaded
-β Current workflow state accurately analyzed and presented
-β User confirms understanding of progress
-β Correct next step identified and prepared for loading
-
-## FAILURE MODES:
-
-β Discovering new input documents instead of reloading existing ones
-β Modifying content from already completed steps
-β Loading wrong next step based on `lastStep` value
-β Proceeding without user confirmation of current state
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## WORKFLOW ALREADY COMPLETE?
-
-If `lastStep` indicates the final step is completed:
-"Great news! It looks like we've already completed the UX design workflow for {{project_name}}.
-
-The final UX design specification is ready at {output_folder}/ux-design-specification.md with all sections completed through step {finalStepNumber}.
-
-The complete UX design includes visual foundations, user flows, and design specifications ready for implementation.
-
-Would you like me to:
-
-- Review the completed UX design specification with you
-- Suggest next workflow steps (like wireframe generation or architecture)
-- Start a new UX design revision
-
-What would be most helpful?"
-
-## NEXT STEP:
-
-After user confirms they're ready to continue, load the appropriate next step file based on the `lastStep` value from frontmatter.
-
-Remember: Do NOT load the next step until user explicitly selects [C] to continue!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-02-discovery.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-02-discovery.md
deleted file mode 100644
index 7ab275a8..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-02-discovery.md
+++ /dev/null
@@ -1,190 +0,0 @@
-# Step 2: Project Understanding
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on understanding project context and user needs
-- π― COLLABORATIVE discovery, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating project understanding content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper project insights
-- **P (Party Mode)**: Bring multiple perspectives to understand project context
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from step 1 are available
-- Input documents (PRD, briefs, epics) already loaded are in memory
-- No additional data files needed for this step
-- Focus on project and user understanding
-
-## YOUR TASK:
-
-Understand the project context, target users, and what makes this product special from a UX perspective.
-
-## PROJECT DISCOVERY SEQUENCE:
-
-### 1. Review Loaded Context
-
-Start by analyzing what we know from the loaded documents:
-"Based on the project documentation we have loaded, let me confirm what I'm understanding about {{project_name}}.
-
-**From the documents:**
-{summary of key insights from loaded PRD, briefs, and other context documents}
-
-**Target Users:**
-{summary of user information from loaded documents}
-
-**Key Features/Goals:**
-{summary of main features and goals from loaded documents}
-
-Does this match your understanding? Are there any corrections or additions you'd like to make?"
-
-### 2. Fill Context Gaps (If no documents or gaps exist)
-
-If no documents were loaded or key information is missing:
-"Since we don't have complete documentation, let's start with the essentials:
-
-**What are you building?** (Describe your product in 1-2 sentences)
-
-**Who is this for?** (Describe your ideal user or target audience)
-
-**What makes this special or different?** (What's the unique value proposition?)
-
-**What's the main thing users will do with this?** (Core user action or goal)"
-
-### 3. Explore User Context Deeper
-
-Dive into user understanding:
-"Let me understand your users better to inform the UX design:
-
-**User Context Questions:**
-
-- What problem are users trying to solve?
-- What frustrates them with current solutions?
-- What would make them say 'this is exactly what I needed'?
-- How tech-savvy are your target users?
-- What devices will they use most?
-- When/where will they use this product?"
-
-### 4. Identify UX Design Challenges
-
-Surface the key UX challenges to address:
-"From what we've discussed, I'm seeing some key UX design considerations:
-
-**Design Challenges:**
-
-- [Identify 2-3 key UX challenges based on project type and user needs]
-- [Note any platform-specific considerations]
-- [Highlight any complex user flows or interactions]
-
-**Design Opportunities:**
-
-- [Identify 2-3 areas where great UX could create competitive advantage]
-- [Note any opportunities for innovative UX patterns]
-
-Does this capture the key UX considerations we need to address?"
-
-### 5. Generate Project Understanding Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Executive Summary
-
-### Project Vision
-
-[Project vision summary based on conversation]
-
-### Target Users
-
-[Target user descriptions based on conversation]
-
-### Key Design Challenges
-
-[Key UX challenges identified based on conversation]
-
-### Design Opportunities
-
-[Design opportunities identified based on conversation]
-```
-
-### 6. Present Content and Menu
-
-Show the generated project understanding content and present choices:
-"I've documented our understanding of {{project_name}} from a UX perspective. This will guide all our design decisions moving forward.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 5]
-
-**What would you like to do?**
-[C] Continue - Save this to the document and move to core experience definition"
-
-### 7. Handle Menu Selection
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: `stepsCompleted: [1, 2]`
-- Load `./step-03-core-experience.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document. Only after the content is saved to document, read fully and follow: `./step-03-core-experience.md`.
-
-## SUCCESS METRICS:
-
-β All available context documents reviewed and synthesized
-β Project vision clearly articulated
-β Target users well understood
-β Key UX challenges identified
-β Design opportunities surfaced
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not reviewing loaded context documents thoroughly
-β Making assumptions about users without asking
-β Missing key UX challenges that will impact design
-β Not identifying design opportunities
-β Generating generic content without real project insight
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-Remember: Do NOT proceed to step-03 until user explicitly selects 'C' from the menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-03-core-experience.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-03-core-experience.md
deleted file mode 100644
index c64c8423..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-03-core-experience.md
+++ /dev/null
@@ -1,216 +0,0 @@
-# Step 3: Core Experience Definition
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on defining the core user experience and platform
-- π― COLLABORATIVE discovery, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating core experience content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper experience insights
-- **P (Party Mode)**: Bring multiple perspectives to define optimal user experience
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Project understanding from step 2 informs this step
-- No additional data files needed for this step
-- Focus on core experience and platform decisions
-
-## YOUR TASK:
-
-Define the core user experience, platform requirements, and what makes the interaction effortless.
-
-## CORE EXPERIENCE DISCOVERY SEQUENCE:
-
-### 1. Define Core User Action
-
-Start by identifying the most important user interaction:
-"Now let's dig into the heart of the user experience for {{project_name}}.
-
-**Core Experience Questions:**
-
-- What's the ONE thing users will do most frequently?
-- What user action is absolutely critical to get right?
-- What should be completely effortless for users?
-- If we nail one interaction, everything else follows - what is it?
-
-Think about the core loop or primary action that defines your product's value."
-
-### 2. Explore Platform Requirements
-
-Determine where and how users will interact:
-"Let's define the platform context for {{project_name}}:
-
-**Platform Questions:**
-
-- Web, mobile app, desktop, or multiple platforms?
-- Will this be primarily touch-based or mouse/keyboard?
-- Any specific platform requirements or constraints?
-- Do we need to consider offline functionality?
-- Any device-specific capabilities we should leverage?"
-
-### 3. Identify Effortless Interactions
-
-Surface what should feel magical or completely seamless:
-"**Effortless Experience Design:**
-
-- What user actions should feel completely natural and require zero thought?
-- Where do users currently struggle with similar products?
-- What interaction, if made effortless, would create delight?
-- What should happen automatically without user intervention?
-- Where can we eliminate steps that competitors require?"
-
-### 4. Define Critical Success Moments
-
-Identify the moments that determine success or failure:
-"**Critical Success Moments:**
-
-- What's the moment where users realize 'this is better'?
-- When does the user feel successful or accomplished?
-- What interaction, if failed, would ruin the experience?
-- What are the make-or-break user flows?
-- Where does first-time user success happen?"
-
-### 5. Synthesize Experience Principles
-
-Extract guiding principles from the conversation:
-"Based on our discussion, I'm hearing these core experience principles for {{project_name}}:
-
-**Experience Principles:**
-
-- [Principle 1 based on core action focus]
-- [Principle 2 based on effortless interactions]
-- [Principle 3 based on platform considerations]
-- [Principle 4 based on critical success moments]
-
-These principles will guide all our UX decisions. Do these capture what's most important?"
-
-### 6. Generate Core Experience Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Core User Experience
-
-### Defining Experience
-
-[Core experience definition based on conversation]
-
-### Platform Strategy
-
-[Platform requirements and decisions based on conversation]
-
-### Effortless Interactions
-
-[Effortless interaction areas identified based on conversation]
-
-### Critical Success Moments
-
-[Critical success moments defined based on conversation]
-
-### Experience Principles
-
-[Guiding principles for UX decisions based on conversation]
-```
-
-### 7. Present Content and Menu
-
-Show the generated core experience content and present choices:
-"I've defined the core user experience for {{project_name}} based on our conversation. This establishes the foundation for all our UX design decisions.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine the core experience definition
-[P] Party Mode - Bring different perspectives on the user experience
-[C] Continue - Save this to the document and move to emotional response definition"
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current core experience content
-- Process the enhanced experience insights that come back
-- Ask user: "Accept these improvements to the core experience definition? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current core experience definition
-- Process the collaborative experience improvements that come back
-- Ask user: "Accept these changes to the core experience definition? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-04-emotional-response.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Core user action clearly identified and defined
-β Platform requirements thoroughly explored
-β Effortless interaction areas identified
-β Critical success moments mapped out
-β Experience principles established as guiding framework
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Missing the core user action that defines the product
-β Not properly considering platform requirements
-β Overlooking what should be effortless for users
-β Not identifying critical make-or-break interactions
-β Experience principles too generic or not actionable
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-04-emotional-response.md` to define desired emotional responses.
-
-Remember: Do NOT proceed to step-04 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-04-emotional-response.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-04-emotional-response.md
deleted file mode 100644
index 247a61e2..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-04-emotional-response.md
+++ /dev/null
@@ -1,219 +0,0 @@
-# Step 4: Desired Emotional Response
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on defining desired emotional responses and user feelings
-- π― COLLABORATIVE discovery, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating emotional response content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper emotional insights
-- **P (Party Mode)**: Bring multiple perspectives to define optimal emotional responses
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Core experience definition from step 3 informs emotional response
-- No additional data files needed for this step
-- Focus on user feelings and emotional design goals
-
-## YOUR TASK:
-
-Define the desired emotional responses users should feel when using the product.
-
-## EMOTIONAL RESPONSE DISCOVERY SEQUENCE:
-
-### 1. Explore Core Emotional Goals
-
-Start by understanding the emotional objectives:
-"Now let's think about how {{project_name}} should make users feel.
-
-**Emotional Response Questions:**
-
-- What should users FEEL when using this product?
-- What emotion would make them tell a friend about this?
-- How should users feel after accomplishing their primary goal?
-- What feeling differentiates this from competitors?
-
-Common emotional goals: Empowered and in control? Delighted and surprised? Efficient and productive? Creative and inspired? Calm and focused? Connected and engaged?"
-
-### 2. Identify Emotional Journey Mapping
-
-Explore feelings at different stages:
-"**Emotional Journey Considerations:**
-
-- How should users feel when they first discover the product?
-- What emotion during the core experience/action?
-- How should they feel after completing their task?
-- What if something goes wrong - what emotional response do we want?
-- How should they feel when returning to use it again?"
-
-### 3. Define Micro-Emotions
-
-Surface subtle but important emotional states:
-"**Micro-Emotions to Consider:**
-
-- Confidence vs. Confusion
-- Trust vs. Skepticism
-- Excitement vs. Anxiety
-- Accomplishment vs. Frustration
-- Delight vs. Satisfaction
-- Belonging vs. Isolation
-
-Which of these emotional states are most critical for your product's success?"
-
-### 4. Connect Emotions to UX Decisions
-
-Link feelings to design implications:
-"**Design Implications:**
-
-- If we want users to feel [emotional state], what UX choices support this?
-- What interactions might create negative emotions we want to avoid?
-- Where can we add moments of delight or surprise?
-- How do we build trust and confidence through design?
-
-**Emotion-Design Connections:**
-
-- [Emotion 1] β [UX design approach]
-- [Emotion 2] β [UX design approach]
-- [Emotion 3] β [UX design approach]"
-
-### 5. Validate Emotional Goals
-
-Check if emotional goals align with product vision:
-"Let me make sure I understand the emotional vision for {{project_name}}:
-
-**Primary Emotional Goal:** [Summarize main emotional response]
-**Secondary Feelings:** [List supporting emotional states]
-**Emotions to Avoid:** [List negative emotions to prevent]
-
-Does this capture the emotional experience you want to create? Any adjustments needed?"
-
-### 6. Generate Emotional Response Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Desired Emotional Response
-
-### Primary Emotional Goals
-
-[Primary emotional goals based on conversation]
-
-### Emotional Journey Mapping
-
-[Emotional journey mapping based on conversation]
-
-### Micro-Emotions
-
-[Micro-emotions identified based on conversation]
-
-### Design Implications
-
-[UX design implications for emotional responses based on conversation]
-
-### Emotional Design Principles
-
-[Guiding principles for emotional design based on conversation]
-```
-
-### 7. Present Content and Menu
-
-Show the generated emotional response content and present choices:
-"I've defined the desired emotional responses for {{project_name}}. These emotional goals will guide our design decisions to create the right user experience.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine the emotional response definition
-[P] Party Mode - Bring different perspectives on user emotional needs
-[C] Continue - Save this to the document and move to inspiration analysis"
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current emotional response content
-- Process the enhanced emotional insights that come back
-- Ask user: "Accept these improvements to the emotional response definition? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current emotional response definition
-- Process the collaborative emotional insights that come back
-- Ask user: "Accept these changes to the emotional response definition? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-05-inspiration.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Primary emotional goals clearly defined
-β Emotional journey mapped across user experience
-β Micro-emotions identified and addressed
-β Design implications connected to emotional responses
-β Emotional design principles established
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Missing core emotional goals or being too generic
-β Not considering emotional journey across different stages
-β Overlooking micro-emotions that impact user satisfaction
-β Not connecting emotional goals to specific UX design choices
-β Emotional principles too vague or not actionable
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-05-inspiration.md` to analyze UX patterns from inspiring products.
-
-Remember: Do NOT proceed to step-05 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-05-inspiration.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-05-inspiration.md
deleted file mode 100644
index 87fe5603..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-05-inspiration.md
+++ /dev/null
@@ -1,234 +0,0 @@
-# Step 5: UX Pattern Analysis & Inspiration
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on analyzing existing UX patterns and extracting inspiration
-- π― COLLABORATIVE discovery, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating inspiration analysis content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper pattern insights
-- **P ( Party Mode)**: Bring multiple perspectives to analyze UX patterns
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Emotional response goals from step 4 inform pattern analysis
-- No additional data files needed for this step
-- Focus on analyzing existing UX patterns and extracting lessons
-
-## YOUR TASK:
-
-Analyze inspiring products and UX patterns to inform design decisions for the current project.
-
-## INSPIRATION ANALYSIS SEQUENCE:
-
-### 1. Identify User's Favorite Apps
-
-Start by gathering inspiration sources:
-"Let's learn from products your users already love and use regularly.
-
-**Inspiration Questions:**
-
-- Name 2-3 apps your target users already love and USE frequently
-- For each one, what do they do well from a UX perspective?
-- What makes the experience compelling or delightful?
-- What keeps users coming back to these apps?
-
-Think about apps in your category or even unrelated products that have great UX."
-
-### 2. Analyze UX Patterns and Principles
-
-Break down what makes these apps successful:
-"For each inspiring app, let's analyze their UX success:
-
-**For [App Name]:**
-
-- What core problem does it solve elegantly?
-- What makes the onboarding experience effective?
-- How do they handle navigation and information hierarchy?
-- What are their most innovative or delightful interactions?
-- What visual design choices support the user experience?
-- How do they handle errors or edge cases?"
-
-### 3. Extract Transferable Patterns
-
-Identify patterns that could apply to your project:
-"**Transferable UX Patterns:**
-Looking across these inspiring apps, I see patterns we could adapt:
-
-**Navigation Patterns:**
-
-- [Pattern 1] - could work for your [specific use case]
-- [Pattern 2] - might solve your [specific challenge]
-
-**Interaction Patterns:**
-
-- [Pattern 1] - excellent for [your user goal]
-- [Pattern 2] - addresses [your user pain point]
-
-**Visual Patterns:**
-
-- [Pattern 1] - supports your [emotional goal]
-- [Pattern 2] - aligns with your [platform requirements]
-
-Which of these patterns resonate most for your product?"
-
-### 4. Identify Anti-Patterns to Avoid
-
-Surface what not to do based on analysis:
-"**UX Anti-Patterns to Avoid:**
-From analyzing both successes and failures in your space, here are patterns to avoid:
-
-- [Anti-pattern 1] - users find this confusing/frustrating
-- [Anti-pattern 2] - this creates unnecessary friction
-- [Anti-pattern 3] - doesn't align with your [emotional goals]
-
-Learning from others' mistakes is as important as learning from their successes."
-
-### 5. Define Design Inspiration Strategy
-
-Create a clear strategy for using this inspiration:
-"**Design Inspiration Strategy:**
-
-**What to Adopt:**
-
-- [Specific pattern] - because it supports [your core experience]
-- [Specific pattern] - because it aligns with [user needs]
-
-**What to Adapt:**
-
-- [Specific pattern] - modify for [your unique requirements]
-- [Specific pattern] - simplify for [your user skill level]
-
-**What to Avoid:**
-
-- [Specific anti-pattern] - conflicts with [your goals]
-- [Specific anti-pattern] - doesn't fit [your platform]
-
-This strategy will guide our design decisions while keeping {{project_name}} unique."
-
-### 6. Generate Inspiration Analysis Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## UX Pattern Analysis & Inspiration
-
-### Inspiring Products Analysis
-
-[Analysis of inspiring products based on conversation]
-
-### Transferable UX Patterns
-
-[Transferable patterns identified based on conversation]
-
-### Anti-Patterns to Avoid
-
-[Anti-patterns to avoid based on conversation]
-
-### Design Inspiration Strategy
-
-[Strategy for using inspiration based on conversation]
-```
-
-### 7. Present Content and Menu
-
-Show the generated inspiration analysis content and present choices:
-"I've analyzed inspiring UX patterns and products to inform our design strategy for {{project_name}}. This gives us a solid foundation of proven patterns to build upon.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's deepen our UX pattern analysis
-[P] Party Mode - Bring different perspectives on inspiration sources
-[C] Continue - Save this to the document and move to design system choice"
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current inspiration analysis content
-- Process the enhanced pattern insights that come back
-- Ask user: "Accept these improvements to the inspiration analysis? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current inspiration analysis
-- Process the collaborative pattern insights that come back
-- Ask user: "Accept these changes to the inspiration analysis? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Read fully and follow: `./step-06-design-system.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Inspiring products identified and analyzed thoroughly
-β UX patterns extracted and categorized effectively
-β Transferable patterns identified for current project
-β Anti-patterns identified to avoid common mistakes
-β Clear design inspiration strategy established
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not getting specific examples of inspiring products
-β Surface-level analysis without deep pattern extraction
-β Missing opportunities for pattern adaptation
-β Not identifying relevant anti-patterns to avoid
-β Strategy too generic or not actionable
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-06-design-system.md` to choose the appropriate design system approach.
-
-Remember: Do NOT proceed to step-06 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-06-design-system.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-06-design-system.md
deleted file mode 100644
index 70d566ad..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-06-design-system.md
+++ /dev/null
@@ -1,252 +0,0 @@
-# Step 6: Design System Choice
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on choosing appropriate design system approach
-- π― COLLABORATIVE decision-making, not recommendation-only
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating design system decision content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper design system insights
-- **P (Party Mode)**: Bring multiple perspectives to evaluate design system options
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Platform requirements from step 3 inform design system choice
-- Inspiration patterns from step 5 guide design system selection
-- Focus on choosing foundation for consistent design
-
-## YOUR TASK:
-
-Choose appropriate design system approach based on project requirements and constraints.
-
-## DESIGN SYSTEM CHOICE SEQUENCE:
-
-### 1. Present Design System Options
-
-Educate about design system approaches:
-"For {{project_name}}, we need to choose a design system foundation. Think of design systems like LEGO blocks for UI - they provide proven components and patterns, ensuring consistency and speeding development.
-
-**Design System Approaches:**
-
-**1. Custom Design System**
-
-- Complete visual uniqueness
-- Full control over every component
-- Higher initial investment
-- Perfect for established brands with unique needs
-
-**2. Established System (Material Design, Ant Design, etc.)**
-
-- Fast development with proven patterns
-- Great defaults and accessibility built-in
-- Less visual differentiation
-- Ideal for startups or internal tools
-
-**3. Themeable System (MUI, Chakra UI, Tailwind UI)**
-
-- Customizable with strong foundation
-- Brand flexibility with proven components
-- Moderate learning curve
-- Good balance of speed and uniqueness
-
-Which direction feels right for your project?"
-
-### 2. Analyze Project Requirements
-
-Guide decision based on project context:
-"**Let's consider your specific needs:**
-
-**Based on our previous conversations:**
-
-- Platform: [platform from step 3]
-- Timeline: [inferred from user conversation]
-- Team Size: [inferred from user conversation]
-- Brand Requirements: [inferred from user conversation]
-- Technical Constraints: [inferred from user conversation]
-
-**Decision Factors:**
-
-- Need for speed vs. need for uniqueness
-- Brand guidelines or existing visual identity
-- Team's design expertise
-- Long-term maintenance considerations
-- Integration requirements with existing systems"
-
-### 3. Explore Specific Design System Options
-
-Dive deeper into relevant options:
-"**Recommended Options Based on Your Needs:**
-
-**For [Your Platform Type]:**
-
-- [Option 1] - [Key benefit] - [Best for scenario]
-- [Option 2] - [Key benefit] - [Best for scenario]
-- [Option 3] - [Key benefit] - [Best for scenario]
-
-**Considerations:**
-
-- Component library size and quality
-- Documentation and community support
-- Customization capabilities
-- Accessibility compliance
-- Performance characteristics
-- Learning curve for your team"
-
-### 4. Facilitate Decision Process
-
-Help user make informed choice:
-"**Decision Framework:**
-
-1. What's most important: Speed, uniqueness, or balance?
-2. How much design expertise does your team have?
-3. Are there existing brand guidelines to follow?
-4. What's your timeline and budget?
-5. Long-term maintenance needs?
-
-Let's evaluate options based on your answers to these questions."
-
-### 5. Finalize Design System Choice
-
-Confirm and document the decision:
-"Based on our analysis, I recommend [Design System Choice] for {{project_name}}.
-
-**Rationale:**
-
-- [Reason 1 based on project needs]
-- [Reason 2 based on constraints]
-- [Reason 3 based on team considerations]
-
-**Next Steps:**
-
-- We'll customize this system to match your brand and needs
-- Define component strategy for custom components needed
-- Establish design tokens and patterns
-
-Does this design system choice feel right to you?"
-
-### 6. Generate Design System Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Design System Foundation
-
-### 1.1 Design System Choice
-
-[Design system choice based on conversation]
-
-### Rationale for Selection
-
-[Rationale for design system selection based on conversation]
-
-### Implementation Approach
-
-[Implementation approach based on chosen system]
-
-### Customization Strategy
-
-[Customization strategy based on project needs]
-```
-
-### 7. Present Content and Menu
-
-Show the generated design system content and present choices:
-"I've documented our design system choice for {{project_name}}. This foundation will ensure consistency and speed up development.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine our design system decision
-[P] Party Mode - Bring technical perspectives on design systems
-[C] Continue - Save this to the document and move to defining experience
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current design system content
-- Process the enhanced design system insights that come back
-- Ask user: "Accept these improvements to the design system decision? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current design system choice
-- Process the collaborative design system insights that come back
-- Ask user: "Accept these changes to the design system decision? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-07-defining-experience.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Design system options clearly presented and explained
-β Decision framework applied to project requirements
-β Specific design system chosen with clear rationale
-β Implementation approach planned
-β Customization strategy defined
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not explaining design system concepts clearly
-β Rushing to recommendation without understanding requirements
-β Not considering technical constraints or team capabilities
-β Choosing design system without clear rationale
-β Not planning implementation approach
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-07-defining-experience.md` to define the core user interaction.
-
-Remember: Do NOT proceed to step-07 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-07-defining-experience.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-07-defining-experience.md
deleted file mode 100644
index 7e904b94..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-07-defining-experience.md
+++ /dev/null
@@ -1,254 +0,0 @@
-# Step 7: Defining Core Experience
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on defining the core interaction that defines the product
-- π― COLLABORATIVE discovery, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating defining experience content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper experience insights
-- **P (Party Mode)**: Bring multiple perspectives to define optimal core experience
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Core experience from step 3 provides foundation
-- Design system choice from step 6 informs implementation
-- Focus on the defining interaction that makes the product special
-
-## YOUR TASK:
-
-Define the core interaction that, if nailed, makes everything else follow in the user experience.
-
-## DEFINING EXPERIENCE SEQUENCE:
-
-### 1. Identify the Defining Experience
-
-Focus on the core interaction:
-"Every successful product has a defining experience - the core interaction that, if we nail it, everything else follows.
-
-**Think about these famous examples:**
-
-- Tinder: "Swipe to match with people"
-- Snapchat: "Share photos that disappear"
-- Instagram: "Share perfect moments with filters"
-- Spotify: "Discover and play any song instantly"
-
-**For {{project_name}}:**
-What's the core action that users will describe to their friends?
-What's the interaction that makes users feel successful?
-If we get ONE thing perfectly right, what should it be?"
-
-### 2. Explore the User's Mental Model
-
-Understand how users think about the core task:
-"**User Mental Model Questions:**
-
-- How do users currently solve this problem?
-- What mental model do they bring to this task?
-- What's their expectation for how this should work?
-- Where are they likely to get confused or frustrated?
-
-**Current Solutions:**
-
-- What do users love/hate about existing approaches?
-- What shortcuts or workarounds do they use?
-- What makes existing solutions feel magical or terrible?"
-
-### 3. Define Success Criteria for Core Experience
-
-Establish what makes the core interaction successful:
-"**Core Experience Success Criteria:**
-
-- What makes users say 'this just works'?
-- When do they feel smart or accomplished?
-- What feedback tells them they're doing it right?
-- How fast should it feel?
-- What should happen automatically?
-
-**Success Indicators:**
-
-- [Success indicator 1]
-- [Success indicator 2]
-- [Success indicator 3]"
-
-### 4. Identify Novel vs. Established Patterns
-
-Determine if we need to innovate or can use proven patterns:
-"**Pattern Analysis:**
-Looking at your core experience, does this:
-
-- Use established UX patterns that users already understand?
-- Require novel interaction design that needs user education?
-- Combine familiar patterns in innovative ways?
-
-**If Novel:**
-
-- What makes this different from existing approaches?
-- How will we teach users this new pattern?
-- What familiar metaphors can we use?
-
-**If Established:**
-
-- Which proven patterns should we adopt?
-- How can we innovate within familiar patterns?
-- What's our unique twist on established interactions?"
-
-### 5. Define Experience Mechanics
-
-Break down the core interaction into details:
-"**Core Experience Mechanics:**
-Let's design the step-by-step flow for [defining experience]:
-
-**1. Initiation:**
-
-- How does the user start this action?
-- What triggers or invites them to begin?
-
-**2. Interaction:**
-
-- What does the user actually do?
-- What controls or inputs do they use?
-- How does the system respond?
-
-**3. Feedback:**
-
-- What tells users they're succeeding?
-- How do they know when it's working?
-- What happens if they make a mistake?
-
-**4. Completion:**
-
-- How do users know they're done?
-- What's the successful outcome?
-- What's next?"
-
-### 6. Generate Defining Experience Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## 2. Core User Experience
-
-### 2.1 Defining Experience
-
-[Defining experience description based on conversation]
-
-### 2.2 User Mental Model
-
-[User mental model analysis based on conversation]
-
-### 2.3 Success Criteria
-
-[Success criteria for core experience based on conversation]
-
-### 2.4 Novel UX Patterns
-
-[Novel UX patterns analysis based on conversation]
-
-### 2.5 Experience Mechanics
-
-[Detailed mechanics for core experience based on conversation]
-```
-
-### 7. Present Content and Menu
-
-Show the generated defining experience content and present choices:
-"I've defined the core experience for {{project_name}} - the interaction that will make users love this product.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine the core experience definition
-[P] Party Mode - Bring different perspectives on the defining interaction
-[C] Continue - Save this to the document and move to visual foundation
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current defining experience content
-- Process the enhanced experience insights that come back
-- Ask user: "Accept these improvements to the defining experience? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current defining experience
-- Process the collaborative experience insights that come back
-- Ask user: "Accept these changes to the defining experience? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-08-visual-foundation.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Defining experience clearly articulated
-β User mental model thoroughly analyzed
-β Success criteria established for core interaction
-β Novel vs. established patterns properly evaluated
-β Experience mechanics designed in detail
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not identifying the true core interaction
-β Missing user's mental model and expectations
-β Not establishing clear success criteria
-β Not properly evaluating novel vs. established patterns
-β Experience mechanics too vague or incomplete
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-08-visual-foundation.md` to establish visual design foundation.
-
-Remember: Do NOT proceed to step-08 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-08-visual-foundation.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-08-visual-foundation.md
deleted file mode 100644
index bd764a60..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-08-visual-foundation.md
+++ /dev/null
@@ -1,224 +0,0 @@
-# Step 8: Visual Foundation
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on establishing visual design foundation (colors, typography, spacing)
-- π― COLLABORATIVE discovery, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating visual foundation content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper visual insights
-- **P (Party Mode)**: Bring multiple perspectives to define visual foundation
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Design system choice from step 6 provides component foundation
-- Emotional response goals from step 4 inform visual decisions
-- Focus on colors, typography, spacing, and layout foundation
-
-## YOUR TASK:
-
-Establish the visual design foundation including color themes, typography, and spacing systems.
-
-## VISUAL FOUNDATION SEQUENCE:
-
-### 1. Brand Guidelines Assessment
-
-Check for existing brand requirements:
-"Do you have existing brand guidelines or a specific color palette I should follow? (y/n)
-
-If yes, I'll extract and document your brand colors and create semantic color mappings.
-If no, I'll generate theme options based on your project's personality and emotional goals from our earlier discussion."
-
-### 2. Generate Color Theme Options (If no brand guidelines)
-
-Create visual exploration opportunities:
-"If no existing brand guidelines, I'll create a color theme visualizer to help you explore options.
-
-π¨ I can generate comprehensive HTML color theme visualizers with multiple theme options, complete UI examples, and the ability to see how colors work in real interface contexts.
-
-This will help you make an informed decision about the visual direction for {{project_name}}."
-
-### 3. Define Typography System
-
-Establish the typographic foundation:
-"**Typography Questions:**
-
-- What should the overall tone feel like? (Professional, friendly, modern, classic?)
-- How much text content will users read? (Headings only? Long-form content?)
-- Any accessibility requirements for font sizes or contrast?
-- Any brand fonts we must use?
-
-**Typography Strategy:**
-
-- Choose primary and secondary typefaces
-- Establish type scale (h1, h2, h3, body, etc.)
-- Define line heights and spacing relationships
-- Consider readability and accessibility"
-
-### 4. Establish Spacing and Layout Foundation
-
-Define the structural foundation:
-"**Spacing and Layout Foundation:**
-
-- How should the overall layout feel? (Dense and efficient? Airy and spacious?)
-- What spacing unit should we use? (4px, 8px, 12px base?)
-- How much white space should be between elements?
-- Should we use a grid system? If so, what column structure?
-
-**Layout Principles:**
-
-- [Layout principle 1 based on product type]
-- [Layout principle 2 based on user needs]
-- [Layout principle 3 based on platform requirements]"
-
-### 5. Create Visual Foundation Strategy
-
-Synthesize all visual decisions:
-"**Visual Foundation Strategy:**
-
-**Color System:**
-
-- [Color strategy based on brand guidelines or generated themes]
-- Semantic color mapping (primary, secondary, success, warning, error, etc.)
-- Accessibility compliance (contrast ratios)
-
-**Typography System:**
-
-- [Typography strategy based on content needs and tone]
-- Type scale and hierarchy
-- Font pairing rationale
-
-**Spacing & Layout:**
-
-- [Spacing strategy based on content density and platform]
-- Grid system approach
-- Component spacing relationships
-
-This foundation will ensure consistency across all our design decisions."
-
-### 6. Generate Visual Foundation Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Visual Design Foundation
-
-### Color System
-
-[Color system strategy based on conversation]
-
-### Typography System
-
-[Typography system strategy based on conversation]
-
-### Spacing & Layout Foundation
-
-[Spacing and layout foundation based on conversation]
-
-### Accessibility Considerations
-
-[Accessibility considerations based on conversation]
-```
-
-### 7. Present Content and Menu
-
-Show the generated visual foundation content and present choices:
-"I've established the visual design foundation for {{project_name}}. This provides the building blocks for consistent, beautiful design.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine our visual foundation
-[P] Party Mode - Bring design perspectives on visual choices
-[C] Continue - Save this to the document and move to design directions
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current visual foundation content
-- Process the enhanced visual insights that come back
-- Ask user: "Accept these improvements to the visual foundation? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current visual foundation
-- Process the collaborative visual insights that come back
-- Ask user: "Accept these changes to the visual foundation? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-09-design-directions.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Brand guidelines assessed and incorporated if available
-β Color system established with accessibility consideration
-β Typography system defined with appropriate hierarchy
-β Spacing and layout foundation created
-β Visual foundation strategy documented
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not checking for existing brand guidelines first
-β Color palette not aligned with emotional goals
-β Typography not suitable for content type or readability needs
-β Spacing system not appropriate for content density
-β Missing accessibility considerations
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-09-design-directions.md` to generate design direction mockups.
-
-Remember: Do NOT proceed to step-09 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-09-design-directions.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-09-design-directions.md
deleted file mode 100644
index a50ed503..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-09-design-directions.md
+++ /dev/null
@@ -1,224 +0,0 @@
-# Step 9: Design Direction Mockups
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on generating and evaluating design direction variations
-- π― COLLABORATIVE exploration, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating design direction content
-- πΎ Generate HTML visualizer for design directions
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper design insights
-- **P (Party Mode)**: Bring multiple perspectives to evaluate design directions
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Visual foundation from step 8 provides design tokens
-- Core experience from step 7 informs layout and interaction design
-- Focus on exploring different visual design directions
-
-## YOUR TASK:
-
-Generate comprehensive design direction mockups showing different visual approaches for the product.
-
-## DESIGN DIRECTIONS SEQUENCE:
-
-### 1. Generate Design Direction Variations
-
-Create diverse visual explorations:
-"I'll generate 6-8 different design direction variations exploring:
-
-- Different layout approaches and information hierarchy
-- Various interaction patterns and visual weights
-- Alternative color applications from our foundation
-- Different density and spacing approaches
-- Various navigation and component arrangements
-
-Each mockup will show a complete vision for {{project_name}} with all our design decisions applied."
-
-### 2. Create HTML Design Direction Showcase
-
-Generate interactive visual exploration:
-"π¨ Design Direction Mockups Generated!
-
-I'm creating a comprehensive HTML design direction showcase at `{planning_artifacts}/ux-design-directions.html`
-
-**What you'll see:**
-
-- 6-8 full-screen mockup variations
-- Interactive states and hover effects
-- Side-by-side comparison tools
-- Complete UI examples with real content
-- Responsive behavior demonstrations
-
-Each mockup represents a complete visual direction for your app's look and feel."
-
-### 3. Present Design Exploration Framework
-
-Guide evaluation criteria:
-"As you explore the design directions, look for:
-
-β **Layout Intuitiveness** - Which information hierarchy matches your priorities?
-β **Interaction Style** - Which interaction style fits your core experience?
-β **Visual Weight** - Which visual density feels right for your brand?
-β **Navigation Approach** - Which navigation pattern matches user expectations?
-β **Component Usage** - How well do the components support your user journeys?
-β **Brand Alignment** - Which direction best supports your emotional goals?
-
-Take your time exploring - this is a crucial decision that will guide all our design work!"
-
-### 4. Facilitate Design Direction Selection
-
-Help user choose or combine elements:
-"After exploring all the design directions:
-
-**Which approach resonates most with you?**
-
-- Pick a favorite direction as-is
-- Combine elements from multiple directions
-- Request modifications to any direction
-- Use one direction as a base and iterate
-
-**Tell me:**
-
-- Which layout feels most intuitive for your users?
-- Which visual weight matches your brand personality?
-- Which interaction style supports your core experience?
-- Are there elements from different directions you'd like to combine?"
-
-### 5. Document Design Direction Decision
-
-Capture the chosen approach:
-"Based on your exploration, I'm understanding your design direction preference:
-
-**Chosen Direction:** [Direction number or combination]
-**Key Elements:** [Specific elements you liked]
-**Modifications Needed:** [Any changes requested]
-**Rationale:** [Why this direction works for your product]
-
-This will become our design foundation moving forward. Are we ready to lock this in, or do you want to explore variations?"
-
-### 6. Generate Design Direction Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Design Direction Decision
-
-### Design Directions Explored
-
-[Summary of design directions explored based on conversation]
-
-### Chosen Direction
-
-[Chosen design direction based on conversation]
-
-### Design Rationale
-
-[Rationale for design direction choice based on conversation]
-
-### Implementation Approach
-
-[Implementation approach based on chosen direction]
-```
-
-### 7. Present Content and Menu
-
-Show the generated design direction content and present choices:
-"I've documented our design direction decision for {{project_name}}. This visual approach will guide all our detailed design work.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine our design direction
-[P] Party Mode - Bring different perspectives on visual choices
-[C] Continue - Save this to the document and move to user journey flows
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current design direction content
-- Process the enhanced design insights that come back
-- Ask user: "Accept these improvements to the design direction? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current design direction
-- Process the collaborative design insights that come back
-- Ask user: "Accept these changes to the design direction? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-10-user-journeys.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Multiple design direction variations generated
-β HTML showcase created with interactive elements
-β Design evaluation criteria clearly established
-β User able to explore and compare directions effectively
-β Design direction decision made with clear rationale
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not creating enough variation in design directions
-β Design directions not aligned with established foundation
-β Missing interactive elements in HTML showcase
-β Not providing clear evaluation criteria
-β Rushing decision without thorough exploration
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-10-user-journeys.md` to design user journey flows.
-
-Remember: Do NOT proceed to step-10 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-10-user-journeys.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-10-user-journeys.md
deleted file mode 100644
index 985577f0..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-10-user-journeys.md
+++ /dev/null
@@ -1,241 +0,0 @@
-# Step 10: User Journey Flows
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on designing user flows and journey interactions
-- π― COLLABORATIVE flow design, not assumption-based layouts
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating user journey content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper journey insights
-- **P (Party Mode)**: Bring multiple perspectives to design user flows
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Design direction from step 9 informs flow layout and visual design
-- Core experience from step 7 defines key journey interactions
-- Focus on designing detailed user flows with Mermaid diagrams
-
-## YOUR TASK:
-
-Design detailed user journey flows for critical user interactions.
-
-## USER JOURNEY FLOWS SEQUENCE:
-
-### 1. Load PRD User Journeys as Foundation
-
-Start with user journeys already defined in the PRD:
-"Great! Since we have the PRD available, let's build on the user journeys already documented there.
-
-**Existing User Journeys from PRD:**
-I've already loaded these user journeys from your PRD:
-[Journey narratives from PRD input documents]
-
-These journeys tell us **who** users are and **why** they take certain actions. Now we need to design **how** those journeys work in detail.
-
-**Critical Journeys to Design Flows For:**
-Looking at the PRD journeys, I need to design detailed interaction flows for:
-
-- [Critical journey 1 identified from PRD narratives]
-- [Critical journey 2 identified from PRD narratives]
-- [Critical journey 3 identified from PRD narratives]
-
-The PRD gave us the stories - now we design the mechanics!"
-
-### 2. Design Each Journey Flow
-
-For each critical journey, design detailed flow:
-
-**For [Journey Name]:**
-"Let's design the flow for users accomplishing [journey goal].
-
-**Flow Design Questions:**
-
-- How do users start this journey? (entry point)
-- What information do they need at each step?
-- What decisions do they need to make?
-- How do they know they're progressing successfully?
-- What does success look like for this journey?
-- Where might they get confused or stuck?
-- How do they recover from errors?"
-
-### 3. Create Flow Diagrams
-
-Visualize each journey with Mermaid diagrams:
-"I'll create detailed flow diagrams for each journey showing:
-
-**[Journey Name] Flow:**
-
-- Entry points and triggers
-- Decision points and branches
-- Success and failure paths
-- Error recovery mechanisms
-- Progressive disclosure of information
-
-Each diagram will map the complete user experience from start to finish."
-
-### 4. Optimize for Efficiency and Delight
-
-Refine flows for optimal user experience:
-"**Flow Optimization:**
-For each journey, let's ensure we're:
-
-- Minimizing steps to value (getting users to success quickly)
-- Reducing cognitive load at each decision point
-- Providing clear feedback and progress indicators
-- Creating moments of delight or accomplishment
-- Handling edge cases and error recovery gracefully
-
-**Specific Optimizations:**
-
-- [Optimization 1 for journey efficiency]
-- [Optimization 2 for user delight]
-- [Optimization 3 for error handling]"
-
-### 5. Document Journey Patterns
-
-Extract reusable patterns across journeys:
-"**Journey Patterns:**
-Across these flows, I'm seeing some common patterns we can standardize:
-
-**Navigation Patterns:**
-
-- [Navigation pattern 1]
-- [Navigation pattern 2]
-
-**Decision Patterns:**
-
-- [Decision pattern 1]
-- [Decision pattern 2]
-
-**Feedback Patterns:**
-
-- [Feedback pattern 1]
-- [Feedback pattern 2]
-
-These patterns will ensure consistency across all user experiences."
-
-### 6. Generate User Journey Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## User Journey Flows
-
-### [Journey 1 Name]
-
-[Journey 1 description and Mermaid diagram]
-
-### [Journey 2 Name]
-
-[Journey 2 description and Mermaid diagram]
-
-### Journey Patterns
-
-[Journey patterns identified based on conversation]
-
-### Flow Optimization Principles
-
-[Flow optimization principles based on conversation]
-```
-
-### 7. Present Content and Menu
-
-Show the generated user journey content and present choices:
-"I've designed detailed user journey flows for {{project_name}}. These flows will guide the detailed design of each user interaction.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine our user journey designs
-[P] Party Mode - Bring different perspectives on user flows
-[C] Continue - Save this to the document and move to component strategy
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current user journey content
-- Process the enhanced journey insights that come back
-- Ask user: "Accept these improvements to the user journeys? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current user journeys
-- Process the collaborative journey insights that come back
-- Ask user: "Accept these changes to the user journeys? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-11-component-strategy.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Critical user journeys identified and designed
-β Detailed flow diagrams created for each journey
-β Flows optimized for efficiency and user delight
-β Common journey patterns extracted and documented
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not identifying all critical user journeys
-β Flows too complex or not optimized for user success
-β Missing error recovery paths
-β Not extracting reusable patterns across journeys
-β Flow diagrams unclear or incomplete
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-11-component-strategy.md` to define component library strategy.
-
-Remember: Do NOT proceed to step-11 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-11-component-strategy.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-11-component-strategy.md
deleted file mode 100644
index deef19b7..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-11-component-strategy.md
+++ /dev/null
@@ -1,248 +0,0 @@
-# Step 11: Component Strategy
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on defining component library strategy and custom components
-- π― COLLABORATIVE component planning, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating component strategy content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper component insights
-- **P (Party Mode)**: Bring multiple perspectives to define component strategy
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Design system choice from step 6 determines available components
-- User journeys from step 10 identify component needs
-- Focus on defining custom components and implementation strategy
-
-## YOUR TASK:
-
-Define component library strategy and design custom components not covered by the design system.
-
-## COMPONENT STRATEGY SEQUENCE:
-
-### 1. Analyze Design System Coverage
-
-Review what components are available vs. needed:
-"Based on our chosen design system [design system from step 6], let's identify what components are already available and what we need to create custom.
-
-**Available from Design System:**
-[List of components available in chosen design system]
-
-**Components Needed for {{project_name}}:**
-Looking at our user journeys and design direction, we need:
-
-- [Component need 1 from journey analysis]
-- [Component need 2 from design requirements]
-- [Component need 3 from core experience]
-
-**Gap Analysis:**
-
-- [Gap 1 - needed but not available]
-- [Gap 2 - needed but not available]"
-
-### 2. Design Custom Components
-
-For each custom component needed, design thoroughly:
-
-**For each custom component:**
-"**[Component Name] Design:**
-
-**Purpose:** What does this component do for users?
-**Content:** What information or data does it display?
-**Actions:** What can users do with this component?
-**States:** What different states does it have? (default, hover, active, disabled, error, etc.)
-**Variants:** Are there different sizes or styles needed?
-**Accessibility:** What ARIA labels and keyboard support needed?
-
-Let's walk through each custom component systematically."
-
-### 3. Document Component Specifications
-
-Create detailed specifications for each component:
-
-**Component Specification Template:**
-
-```markdown
-### [Component Name]
-
-**Purpose:** [Clear purpose statement]
-**Usage:** [When and how to use]
-**Anatomy:** [Visual breakdown of parts]
-**States:** [All possible states with descriptions]
-**Variants:** [Different sizes/styles if applicable]
-**Accessibility:** [ARIA labels, keyboard navigation]
-**Content Guidelines:** [What content works best]
-**Interaction Behavior:** [How users interact]
-```
-
-### 4. Define Component Strategy
-
-Establish overall component library approach:
-"**Component Strategy:**
-
-**Foundation Components:** (from design system)
-
-- [Foundation component 1]
-- [Foundation component 2]
-
-**Custom Components:** (designed in this step)
-
-- [Custom component 1 with rationale]
-- [Custom component 2 with rationale]
-
-**Implementation Approach:**
-
-- Build custom components using design system tokens
-- Ensure consistency with established patterns
-- Follow accessibility best practices
-- Create reusable patterns for common use cases"
-
-### 5. Plan Implementation Roadmap
-
-Define how and when to build components:
-"**Implementation Roadmap:**
-
-**Phase 1 - Core Components:**
-
-- [Component 1] - needed for [critical flow]
-- [Component 2] - needed for [critical flow]
-
-**Phase 2 - Supporting Components:**
-
-- [Component 3] - enhances [user experience]
-- [Component 4] - supports [design pattern]
-
-**Phase 3 - Enhancement Components:**
-
-- [Component 5] - optimizes [user journey]
-- [Component 6] - adds [special feature]
-
-This roadmap helps prioritize development based on user journey criticality."
-
-### 6. Generate Component Strategy Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Component Strategy
-
-### Design System Components
-
-[Analysis of available design system components based on conversation]
-
-### Custom Components
-
-[Custom component specifications based on conversation]
-
-### Component Implementation Strategy
-
-[Component implementation strategy based on conversation]
-
-### Implementation Roadmap
-
-[Implementation roadmap based on conversation]
-```
-
-### 7. Present Content and Menu
-
-Show the generated component strategy content and present choices:
-"I've defined the component strategy for {{project_name}}. This balances using proven design system components with custom components for your unique needs.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine our component strategy
-[P] Party Mode - Bring technical perspectives on component design
-[C] Continue - Save this to the document and move to UX patterns
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current component strategy content
-- Process the enhanced component insights that come back
-- Ask user: "Accept these improvements to the component strategy? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current component strategy
-- Process the collaborative component insights that come back
-- Ask user: "Accept these changes to the component strategy? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-12-ux-patterns.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Design system coverage properly analyzed
-β All custom components thoroughly specified
-β Component strategy clearly defined
-β Implementation roadmap prioritized by user need
-β Accessibility considered for all components
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not analyzing design system coverage properly
-β Custom components not thoroughly specified
-β Missing accessibility considerations
-β Component strategy not aligned with user journeys
-β Implementation roadmap not prioritized effectively
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-12-ux-patterns.md` to define UX consistency patterns.
-
-Remember: Do NOT proceed to step-12 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-12-ux-patterns.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-12-ux-patterns.md
deleted file mode 100644
index 4708b52a..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-12-ux-patterns.md
+++ /dev/null
@@ -1,237 +0,0 @@
-# Step 12: UX Consistency Patterns
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on establishing consistency patterns for common UX situations
-- π― COLLABORATIVE pattern definition, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating UX patterns content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper pattern insights
-- **P (Party Mode)**: Bring multiple perspectives to define UX patterns
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Component strategy from step 11 informs pattern decisions
-- User journeys from step 10 identify common pattern needs
-- Focus on consistency patterns for common UX situations
-
-## YOUR TASK:
-
-Establish UX consistency patterns for common situations like buttons, forms, navigation, and feedback.
-
-## UX PATTERNS SEQUENCE:
-
-### 1. Identify Pattern Categories
-
-Determine which patterns need definition for your product:
-"Let's establish consistency patterns for how {{project_name}} behaves in common situations.
-
-**Pattern Categories to Define:**
-
-- Button hierarchy and actions
-- Feedback patterns (success, error, warning, info)
-- Form patterns and validation
-- Navigation patterns
-- Modal and overlay patterns
-- Empty states and loading states
-- Search and filtering patterns
-
-Which categories are most critical for your product? We can go through each thoroughly or focus on the most important ones."
-
-### 2. Define Critical Patterns First
-
-Focus on patterns most relevant to your product:
-
-**For [Critical Pattern Category]:**
-"**[Pattern Type] Patterns:**
-What should users see/do when they need to [pattern action]?
-
-**Considerations:**
-
-- Visual hierarchy (primary vs. secondary actions)
-- Feedback mechanisms
-- Error recovery
-- Accessibility requirements
-- Mobile vs. desktop considerations
-
-**Examples:**
-
-- [Example 1 for this pattern type]
-- [Example 2 for this pattern type]
-
-How should {{project_name}} handle [pattern type] interactions?"
-
-### 3. Establish Pattern Guidelines
-
-Document specific design decisions:
-
-**Pattern Guidelines Template:**
-
-```markdown
-### [Pattern Type]
-
-**When to Use:** [Clear usage guidelines]
-**Visual Design:** [How it should look]
-**Behavior:** [How it should interact]
-**Accessibility:** [A11y requirements]
-**Mobile Considerations:** [Mobile-specific needs]
-**Variants:** [Different states or styles if applicable]
-```
-
-### 4. Design System Integration
-
-Ensure patterns work with chosen design system:
-"**Integration with [Design System]:**
-
-- How do these patterns complement our design system components?
-- What customizations are needed?
-- How do we maintain consistency while meeting unique needs?
-
-**Custom Pattern Rules:**
-
-- [Custom rule 1]
-- [Custom rule 2]
-- [Custom rule 3]"
-
-### 5. Create Pattern Documentation
-
-Generate comprehensive pattern library:
-
-**Pattern Library Structure:**
-
-- Clear usage guidelines for each pattern
-- Visual examples and specifications
-- Implementation notes for developers
-- Accessibility checklists
-- Mobile-first considerations
-
-### 6. Generate UX Patterns Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## UX Consistency Patterns
-
-### Button Hierarchy
-
-[Button hierarchy patterns based on conversation]
-
-### Feedback Patterns
-
-[Feedback patterns based on conversation]
-
-### Form Patterns
-
-[Form patterns based on conversation]
-
-### Navigation Patterns
-
-[Navigation patterns based on conversation]
-
-### Additional Patterns
-
-[Additional patterns based on conversation]
-```
-
-### 7. Present Content and Menu
-
-Show the generated UX patterns content and present choices:
-"I've established UX consistency patterns for {{project_name}}. These patterns ensure users have a consistent, predictable experience across all interactions.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine our UX patterns
-[P] Party Mode - Bring different perspectives on consistency patterns
-[C] Continue - Save this to the document and move to responsive design
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current UX patterns content
-- Process the enhanced pattern insights that come back
-- Ask user: "Accept these improvements to the UX patterns? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current UX patterns
-- Process the collaborative pattern insights that come back
-- Ask user: "Accept these changes to the UX patterns? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-13-responsive-accessibility.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Critical pattern categories identified and prioritized
-β Consistency patterns clearly defined and documented
-β Patterns integrated with chosen design system
-β Accessibility considerations included for all patterns
-β Mobile-first approach incorporated
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not identifying the most critical pattern categories
-β Patterns too generic or not actionable
-β Missing accessibility considerations
-β Patterns not aligned with design system
-β Not considering mobile differences
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-13-responsive-accessibility.md` to define responsive design and accessibility strategy.
-
-Remember: Do NOT proceed to step-13 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-13-responsive-accessibility.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-13-responsive-accessibility.md
deleted file mode 100644
index 80b81d4c..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-13-responsive-accessibility.md
+++ /dev/null
@@ -1,264 +0,0 @@
-# Step 13: Responsive Design & Accessibility
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between UX facilitator and stakeholder
-- π YOU ARE A UX FACILITATOR, not a content generator
-- π¬ FOCUS on responsive design strategy and accessibility compliance
-- π― COLLABORATIVE strategy definition, not assumption-based design
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating responsive/accessibility content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted.
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper responsive/accessibility insights
-- **P (Party Mode)**: Bring multiple perspectives to define responsive/accessibility strategy
-- **C (Continue)**: Save the content to the document and proceed to final step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to this step's A/P/C menu
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Platform requirements from step 3 inform responsive design
-- Design direction from step 9 influences responsive layout choices
-- Focus on cross-device adaptation and accessibility compliance
-
-## YOUR TASK:
-
-Define responsive design strategy and accessibility requirements for the product.
-
-## RESPONSIVE & ACCESSIBILITY SEQUENCE:
-
-### 1. Define Responsive Strategy
-
-Establish how the design adapts across devices:
-"Let's define how {{project_name}} adapts across different screen sizes and devices.
-
-**Responsive Design Questions:**
-
-**Desktop Strategy:**
-
-- How should we use extra screen real estate?
-- Multi-column layouts, side navigation, or content density?
-- What desktop-specific features can we include?
-
-**Tablet Strategy:**
-
-- Should we use simplified layouts or touch-optimized interfaces?
-- How do gestures and touch interactions work on tablets?
-- What's the optimal information density for tablet screens?
-
-**Mobile Strategy:**
-
-- Bottom navigation or hamburger menu?
-- How do layouts collapse on small screens?
-- What's the most critical information to show mobile-first?"
-
-### 2. Establish Breakpoint Strategy
-
-Define when and how layouts change:
-"**Breakpoint Strategy:**
-We need to define screen size breakpoints where layouts adapt.
-
-**Common Breakpoints:**
-
-- Mobile: 320px - 767px
-- Tablet: 768px - 1023px
-- Desktop: 1024px+
-
-**For {{project_name}}, should we:**
-
-- Use standard breakpoints or custom ones?
-- Focus on mobile-first or desktop-first design?
-- Have specific breakpoints for your key use cases?"
-
-### 3. Design Accessibility Strategy
-
-Define accessibility requirements and compliance level:
-"**Accessibility Strategy:**
-What level of WCAG compliance does {{project_name}} need?
-
-**WCAG Levels:**
-
-- **Level A (Basic)** - Essential accessibility for legal compliance
-- **Level AA (Recommended)** - Industry standard for good UX
-- **Level AAA (Highest)** - Exceptional accessibility (rarely needed)
-
-**Based on your product:**
-
-- [Recommendation based on user base, legal requirements, etc.]
-
-**Key Accessibility Considerations:**
-
-- Color contrast ratios (4.5:1 for normal text)
-- Keyboard navigation support
-- Screen reader compatibility
-- Touch target sizes (minimum 44x44px)
-- Focus indicators and skip links"
-
-### 4. Define Testing Strategy
-
-Plan how to ensure responsive design and accessibility:
-"**Testing Strategy:**
-
-**Responsive Testing:**
-
-- Device testing on actual phones/tablets
-- Browser testing across Chrome, Firefox, Safari, Edge
-- Real device network performance testing
-
-**Accessibility Testing:**
-
-- Automated accessibility testing tools
-- Screen reader testing (VoiceOver, NVDA, JAWS)
-- Keyboard-only navigation testing
-- Color blindness simulation testing
-
-**User Testing:**
-
-- Include users with disabilities in testing
-- Test with diverse assistive technologies
-- Validate with actual target devices"
-
-### 5. Document Implementation Guidelines
-
-Create specific guidelines for developers:
-"**Implementation Guidelines:**
-
-**Responsive Development:**
-
-- Use relative units (rem, %, vw, vh) over fixed pixels
-- Implement mobile-first media queries
-- Test touch targets and gesture areas
-- Optimize images and assets for different devices
-
-**Accessibility Development:**
-
-- Semantic HTML structure
-- ARIA labels and roles
-- Keyboard navigation implementation
-- Focus management and skip links
-- High contrast mode support"
-
-### 6. Generate Responsive & Accessibility Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Responsive Design & Accessibility
-
-### Responsive Strategy
-
-[Responsive strategy based on conversation]
-
-### Breakpoint Strategy
-
-[Breakpoint strategy based on conversation]
-
-### Accessibility Strategy
-
-[Accessibility strategy based on conversation]
-
-### Testing Strategy
-
-[Testing strategy based on conversation]
-
-### Implementation Guidelines
-
-[Implementation guidelines based on conversation]
-```
-
-### 7. Present Content and Menu
-
-Show the generated responsive and accessibility content and present choices:
-"I've defined the responsive design and accessibility strategy for {{project_name}}. This ensures your product works beautifully across all devices and is accessible to all users.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's refine our responsive/accessibility strategy
-[P] Party Mode - Bring different perspectives on inclusive design
-[C] Continue - Save this to the document and complete the workflow
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current responsive/accessibility content
-- Process the enhanced insights that come back
-- Ask user: "Accept these improvements to the responsive/accessibility strategy? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current responsive/accessibility strategy
-- Process the collaborative insights that come back
-- Ask user: "Accept these changes to the responsive/accessibility strategy? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/ux-design-specification.md`
-- Update frontmatter: append step to end of stepsCompleted array
-- Load `./step-14-complete.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Responsive strategy clearly defined for all device types
-β Appropriate breakpoint strategy established
-β Accessibility requirements determined and documented
-β Comprehensive testing strategy planned
-β Implementation guidelines provided for development team
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not considering all device types and screen sizes
-β Accessibility requirements not properly researched
-β Testing strategy not comprehensive enough
-β Implementation guidelines too generic or unclear
-β Not addressing specific accessibility challenges for your product
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-14-complete.md` to finalize the UX design workflow.
-
-Remember: Do NOT proceed to step-14 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-14-complete.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-14-complete.md
deleted file mode 100644
index fe784788..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/steps/step-14-complete.md
+++ /dev/null
@@ -1,171 +0,0 @@
-# Step 14: Workflow Completion
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- β THIS IS A FINAL STEP - Workflow completion required
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- π NO content generation - this is a wrap-up step
-- π FINALIZE document and update workflow status
-- π¬ FOCUS on completion, validation, and next steps
-- π― UPDATE workflow status files with completion information
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΎ Update the main workflow status file with completion information
-- π Suggest potential next workflow steps for the user
-- π« DO NOT load additional steps after this one
-
-## TERMINATION STEP PROTOCOLS:
-
-- This is a FINAL step - workflow completion required
-- π Update output file frontmatter, adding this step to the end of the list of stepsCompleted to indicate all is finished..
-- Output completion summary and next step guidance
-- Update the main workflow status file with finalized document
-- Suggest potential next workflow steps for the user
-- Mark workflow as complete in status tracking
-
-## CONTEXT BOUNDARIES:
-
-- Complete UX design specification is available from all previous steps
-- Workflow frontmatter shows all completed steps
-- All collaborative content has been generated and saved
-- Focus on completion, validation, and next steps
-
-## YOUR TASK:
-
-Complete the UX design workflow, update status files, and suggest next steps for the project.
-
-## WORKFLOW COMPLETION SEQUENCE:
-
-### 1. Announce Workflow Completion
-
-Inform user that the UX design is complete:
-"π **UX Design Complete, {{user_name}}!**
-
-I've successfully collaborated with you to create a comprehensive UX design specification for {{project_name}}.
-
-**What we've accomplished:**
-
-- β Project understanding and user insights
-- β Core experience and emotional response definition
-- β UX pattern analysis and inspiration
-- β Design system choice and implementation strategy
-- β Core interaction definition and experience mechanics
-- β Visual design foundation (colors, typography, spacing)
-- β Design direction mockups and visual explorations
-- β User journey flows and interaction design
-- β Component strategy and custom component specifications
-- β UX consistency patterns for common interactions
-- β Responsive design and accessibility strategy
-
-**The complete UX design specification is now available at:** `{planning_artifacts}/ux-design-specification.md`
-
-**Supporting Visual Assets:**
-
-- Color themes visualizer: `{planning_artifacts}/ux-color-themes.html`
-- Design directions mockups: `{planning_artifacts}/ux-design-directions.html`
-
-This specification is now ready to guide visual design, implementation, and development."
-
-### 2. Workflow Status Update
-
-Update the main workflow status file:
-
-- Load `{status_file}` from workflow configuration (if exists)
-- Update workflow_status["create-ux-design"] = "{default_output_file}"
-- Save file, preserving all comments and structure
-- Mark current timestamp as completion time
-
-### 3. Suggest Next Steps
-
-UX Design complete. Read fully and follow: `_bmad/core/tasks/bmad-help.md` with argument `Create UX`.
-
-### 5. Final Completion Confirmation
-
-Congratulate the user on the completion you both completed together of the UX.
-
-
-
-## SUCCESS METRICS:
-
-β UX design specification contains all required sections
-β All collaborative content properly saved to document
-β Workflow status file updated with completion information
-β Clear next step guidance provided to user
-β Document quality validation completed
-β User acknowledges completion and understands next options
-
-## FAILURE MODES:
-
-β Not updating workflow status file with completion information
-β Missing clear next step guidance for user
-β Not confirming document completeness with user
-β Workflow not properly marked as complete in status tracking
-β User unclear about what happens next
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## WORKFLOW COMPLETION CHECKLIST:
-
-### Design Specification Complete:
-
-- [ ] Executive summary and project understanding
-- [ ] Core experience and emotional response definition
-- [ ] UX pattern analysis and inspiration
-- [ ] Design system choice and strategy
-- [ ] Core interaction mechanics definition
-- [ ] Visual design foundation (colors, typography, spacing)
-- [ ] Design direction decisions and mockups
-- [ ] User journey flows and interaction design
-- [ ] Component strategy and specifications
-- [ ] UX consistency patterns documentation
-- [ ] Responsive design and accessibility strategy
-
-### Process Complete:
-
-- [ ] All steps completed with user confirmation
-- [ ] All content saved to specification document
-- [ ] Frontmatter properly updated with all steps
-- [ ] Workflow status file updated with completion
-- [ ] Next steps clearly communicated
-
-## NEXT STEPS GUIDANCE:
-
-**Immediate Options:**
-
-1. **Wireframe Generation** - Create low-fidelity layouts based on UX spec
-2. **Interactive Prototype** - Build clickable prototypes for testing
-3. **Solution Architecture** - Technical design with UX context
-4. **Figma Visual Design** - High-fidelity UI implementation
-5. **Epic Creation** - Break down UX requirements for development
-
-**Recommended Sequence:**
-For design-focused teams: Wireframes β Prototypes β Figma Design β Development
-For technical teams: Architecture β Epic Creation β Development
-
-Consider team capacity, timeline, and whether user validation is needed before implementation.
-
-## WORKFLOW FINALIZATION:
-
-- Set `lastStep = 14` in document frontmatter
-- Update workflow status file with completion timestamp
-- Provide completion summary to user
-- Do NOT load any additional steps
-
-## FINAL REMINDER:
-
-This UX design workflow is now complete. The specification serves as the foundation for all visual and development work. All design decisions, patterns, and requirements are documented to ensure consistent, accessible, and user-centered implementation.
-
-**Congratulations on completing the UX Design Specification for {{project_name}}!** π
-
-**Core Deliverables:**
-
-- β UX Design Specification: `{planning_artifacts}/ux-design-specification.md`
-- β Color Themes Visualizer: `{planning_artifacts}/ux-color-themes.html`
-- β Design Directions: `{planning_artifacts}/ux-design-directions.html`
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/ux-design-template.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/ux-design-template.md
deleted file mode 100644
index aeed9dc5..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/ux-design-template.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-stepsCompleted: []
-inputDocuments: []
----
-
-# UX Design Specification {{project_name}}
-
-**Author:** {{user_name}}
-**Date:** {{date}}
-
----
-
-
diff --git a/src/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md b/src/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md
deleted file mode 100644
index d74cb487..00000000
--- a/src/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-name: create-ux-design
-description: Work with a peer UX Design expert to plan your applications UX patterns, look and feel.
-web_bundle: true
----
-
-# Create UX Design Workflow
-
-**Goal:** Create comprehensive UX design specifications through collaborative visual exploration and informed decision-making where you act as a UX facilitator working with a product stakeholder.
-
----
-
-## WORKFLOW ARCHITECTURE
-
-This uses **micro-file architecture** for disciplined execution:
-
-- Each step is a self-contained file with embedded rules
-- Sequential progression with user control at each step
-- Document state tracked in frontmatter
-- Append-only document building through conversation
-
----
-
-## INITIALIZATION
-
-### Configuration Loading
-
-Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
-
-- `project_name`, `output_folder`, `planning_artifacts`, `user_name`
-- `communication_language`, `document_output_language`, `user_skill_level`
-- `date` as system-generated current datetime
-
-### Paths
-
-- `installed_path` = `{project-root}/_bmad/bmm/workflows/2-plan-workflows/create-ux-design`
-- `template_path` = `{installed_path}/ux-design-template.md`
-- `default_output_file` = `{planning_artifacts}/ux-design-specification.md`
-
-## EXECUTION
-
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-- Read fully and follow: `steps/step-01-init.md` to begin the UX design workflow.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/data/domain-complexity.csv b/src/bmm/workflows/2-plan-workflows/prd/data/domain-complexity.csv
deleted file mode 100644
index 2e44a896..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/data/domain-complexity.csv
+++ /dev/null
@@ -1,13 +0,0 @@
-domain,signals,complexity,key_concerns,required_knowledge,suggested_workflow,web_searches,special_sections
-healthcare,"medical,diagnostic,clinical,FDA,patient,treatment,HIPAA,therapy,pharma,drug",high,"FDA approval;Clinical validation;HIPAA compliance;Patient safety;Medical device classification;Liability","Regulatory pathways;Clinical trial design;Medical standards;Data privacy;Integration requirements","domain-research","FDA software medical device guidance {date};HIPAA compliance software requirements;Medical software standards {date};Clinical validation software","clinical_requirements;regulatory_pathway;validation_methodology;safety_measures"
-fintech,"payment,banking,trading,investment,crypto,wallet,transaction,KYC,AML,funds,fintech",high,"Regional compliance;Security standards;Audit requirements;Fraud prevention;Data protection","KYC/AML requirements;PCI DSS;Open banking;Regional laws (US/EU/APAC);Crypto regulations","domain-research","fintech regulations {date};payment processing compliance {date};open banking API standards;cryptocurrency regulations {date}","compliance_matrix;security_architecture;audit_requirements;fraud_prevention"
-govtech,"government,federal,civic,public sector,citizen,municipal,voting",high,"Procurement rules;Security clearance;Accessibility (508);FedRAMP;Privacy;Transparency","Government procurement;Security frameworks;Accessibility standards;Privacy laws;Open data requirements","domain-research","government software procurement {date};FedRAMP compliance requirements;section 508 accessibility;government security standards","procurement_compliance;security_clearance;accessibility_standards;transparency_requirements"
-edtech,"education,learning,student,teacher,curriculum,assessment,K-12,university,LMS",medium,"Student privacy (COPPA/FERPA);Accessibility;Content moderation;Age verification;Curriculum standards","Educational privacy laws;Learning standards;Accessibility requirements;Content guidelines;Assessment validity","domain-research","educational software privacy {date};COPPA FERPA compliance;WCAG education requirements;learning management standards","privacy_compliance;content_guidelines;accessibility_features;curriculum_alignment"
-aerospace,"aircraft,spacecraft,aviation,drone,satellite,propulsion,flight,radar,navigation",high,"Safety certification;DO-178C compliance;Performance validation;Simulation accuracy;Export controls","Aviation standards;Safety analysis;Simulation validation;ITAR/export controls;Performance requirements","domain-research + technical-model","DO-178C software certification;aerospace simulation standards {date};ITAR export controls software;aviation safety requirements","safety_certification;simulation_validation;performance_requirements;export_compliance"
-automotive,"vehicle,car,autonomous,ADAS,automotive,driving,EV,charging",high,"Safety standards;ISO 26262;V2X communication;Real-time requirements;Certification","Automotive standards;Functional safety;V2X protocols;Real-time systems;Testing requirements","domain-research","ISO 26262 automotive software;automotive safety standards {date};V2X communication protocols;EV charging standards","safety_standards;functional_safety;communication_protocols;certification_requirements"
-scientific,"research,algorithm,simulation,modeling,computational,analysis,data science,ML,AI",medium,"Reproducibility;Validation methodology;Peer review;Performance;Accuracy;Computational resources","Scientific method;Statistical validity;Computational requirements;Domain expertise;Publication standards","technical-model","scientific computing best practices {date};research reproducibility standards;computational modeling validation;peer review software","validation_methodology;accuracy_metrics;reproducibility_plan;computational_requirements"
-legaltech,"legal,law,contract,compliance,litigation,patent,attorney,court",high,"Legal ethics;Bar regulations;Data retention;Attorney-client privilege;Court system integration","Legal practice rules;Ethics requirements;Court filing systems;Document standards;Confidentiality","domain-research","legal technology ethics {date};law practice management software requirements;court filing system standards;attorney client privilege technology","ethics_compliance;data_retention;confidentiality_measures;court_integration"
-insuretech,"insurance,claims,underwriting,actuarial,policy,risk,premium",high,"Insurance regulations;Actuarial standards;Data privacy;Fraud detection;State compliance","Insurance regulations by state;Actuarial methods;Risk modeling;Claims processing;Regulatory reporting","domain-research","insurance software regulations {date};actuarial standards software;insurance fraud detection;state insurance compliance","regulatory_requirements;risk_modeling;fraud_detection;reporting_compliance"
-energy,"energy,utility,grid,solar,wind,power,electricity,oil,gas",high,"Grid compliance;NERC standards;Environmental regulations;Safety requirements;Real-time operations","Energy regulations;Grid standards;Environmental compliance;Safety protocols;SCADA systems","domain-research","energy sector software compliance {date};NERC CIP standards;smart grid requirements;renewable energy software standards","grid_compliance;safety_protocols;environmental_compliance;operational_requirements"
-gaming,"game,player,gameplay,level,character,multiplayer,quest",redirect,"REDIRECT TO GAME WORKFLOWS","Game design","game-brief","NA","NA"
-general,"",low,"Standard requirements;Basic security;User experience;Performance","General software practices","continue","software development best practices {date}","standard_requirements"
\ No newline at end of file
diff --git a/src/bmm/workflows/2-plan-workflows/prd/data/prd-purpose.md b/src/bmm/workflows/2-plan-workflows/prd/data/prd-purpose.md
deleted file mode 100644
index 755230be..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/data/prd-purpose.md
+++ /dev/null
@@ -1,197 +0,0 @@
-# BMAD PRD Purpose
-
-**The PRD is the top of the required funnel that feeds all subsequent product development work in rhw BMad Method.**
-
----
-
-## What is a BMAD PRD?
-
-A dual-audience document serving:
-1. **Human Product Managers and builders** - Vision, strategy, stakeholder communication
-2. **LLM Downstream Consumption** - UX Design β Architecture β Epics β Development AI Agents
-
-Each successive document becomes more AI-tailored and granular.
-
----
-
-## Core Philosophy: Information Density
-
-**High Signal-to-Noise Ratio**
-
-Every sentence must carry information weight. LLMs consume precise, dense content efficiently.
-
-**Anti-Patterns (Eliminate These):**
-- β "The system will allow users to..." β β "Users can..."
-- β "It is important to note that..." β β State the fact directly
-- β "In order to..." β β "To..."
-- β Conversational filler and padding β β Direct, concise statements
-
-**Goal:** Maximum information per word. Zero fluff.
-
----
-
-## The Traceability Chain
-
-**PRD starts the chain:**
-```
-Vision β Success Criteria β User Journeys β Functional Requirements β (future: User Stories)
-```
-
-**In the PRD, establish:**
-- Vision β Success Criteria alignment
-- Success Criteria β User Journey coverage
-- User Journey β Functional Requirement mapping
-- All requirements traceable to user needs
-
-**Why:** Each downstream artifact (UX, Architecture, Epics, Stories) must trace back to documented user needs and business objectives. This chain ensures we build the right thing.
-
----
-
-## What Makes Great Functional Requirements?
-
-### FRs are Capabilities, Not Implementation
-
-**Good FR:** "Users can reset their password via email link"
-**Bad FR:** "System sends JWT via email and validates with database" (implementation leakage)
-
-**Good FR:** "Dashboard loads in under 2 seconds for 95th percentile"
-**Bad FR:** "Fast loading time" (subjective, unmeasurable)
-
-### SMART Quality Criteria
-
-**Specific:** Clear, precisely defined capability
-**Measurable:** Quantifiable with test criteria
-**Attainable:** Realistic within constraints
-**Relevant:** Aligns with business objectives
-**Traceable:** Links to source (executive summary or user journey)
-
-### FR Anti-Patterns
-
-**Subjective Adjectives:**
-- β "easy to use", "intuitive", "user-friendly", "fast", "responsive"
-- β Use metrics: "completes task in under 3 clicks", "loads in under 2 seconds"
-
-**Implementation Leakage:**
-- β Technology names, specific libraries, implementation details
-- β Focus on capability and measurable outcomes
-
-**Vague Quantifiers:**
-- β "multiple users", "several options", "various formats"
-- β "up to 100 concurrent users", "3-5 options", "PDF, DOCX, TXT formats"
-
-**Missing Test Criteria:**
-- β "The system shall provide notifications"
-- β "The system shall send email notifications within 30 seconds of trigger event"
-
----
-
-## What Makes Great Non-Functional Requirements?
-
-### NFRs Must Be Measurable
-
-**Template:**
-```
-"The system shall [metric] [condition] [measurement method]"
-```
-
-**Examples:**
-- β "The system shall respond to API requests in under 200ms for 95th percentile as measured by APM monitoring"
-- β "The system shall maintain 99.9% uptime during business hours as measured by cloud provider SLA"
-- β "The system shall support 10,000 concurrent users as measured by load testing"
-
-### NFR Anti-Patterns
-
-**Unmeasurable Claims:**
-- β "The system shall be scalable" β β "The system shall handle 10x load growth through horizontal scaling"
-- β "High availability required" β β "99.9% uptime as measured by cloud provider SLA"
-
-**Missing Context:**
-- β "Response time under 1 second" β β "API response time under 1 second for 95th percentile under normal load"
-
----
-
-## Domain-Specific Requirements
-
-**Auto-Detect and Enforce Based on Project Context**
-
-Certain industries have mandatory requirements that must be present:
-
-- **Healthcare:** HIPAA Privacy & Security Rules, PHI encryption, audit logging, MFA
-- **Fintech:** PCI-DSS Level 1, AML/KYC compliance, SOX controls, financial audit trails
-- **GovTech:** NIST framework, Section 508 accessibility (WCAG 2.1 AA), FedRAMP, data residency
-- **E-Commerce:** PCI-DSS for payments, inventory accuracy, tax calculation by jurisdiction
-
-**Why:** Missing these requirements in the PRD means they'll be missed in architecture and implementation, creating expensive rework. During PRD creation there is a step to cover this - during validation we want to make sure it was covered. For this purpose steps will utilize a domain-complexity.csv and project-types.csv.
-
----
-
-## Document Structure (Markdown, Human-Readable)
-
-### Required Sections
-1. **Executive Summary** - Vision, differentiator, target users
-2. **Success Criteria** - Measurable outcomes (SMART)
-3. **Product Scope** - MVP, Growth, Vision phases
-4. **User Journeys** - Comprehensive coverage
-5. **Domain Requirements** - Industry-specific compliance (if applicable)
-6. **Innovation Analysis** - Competitive differentiation (if applicable)
-7. **Project-Type Requirements** - Platform-specific needs
-8. **Functional Requirements** - Capability contract (FRs)
-9. **Non-Functional Requirements** - Quality attributes (NFRs)
-
-### Formatting for Dual Consumption
-
-**For Humans:**
-- Clear, professional language
-- Logical flow from vision to requirements
-- Easy for stakeholders to review and approve
-
-**For LLMs:**
-- ## Level 2 headers for all main sections (enables extraction)
-- Consistent structure and patterns
-- Precise, testable language
-- High information density
-
----
-
-## Downstream Impact
-
-**How the PRD Feeds Next Artifacts:**
-
-**UX Design:**
-- User journeys β interaction flows
-- FRs β design requirements
-- Success criteria β UX metrics
-
-**Architecture:**
-- FRs β system capabilities
-- NFRs β architecture decisions
-- Domain requirements β compliance architecture
-- Project-type requirements β platform choices
-
-**Epics & Stories (created after architecture):**
-- FRs β user stories (1 FR could map to 1-3 stories potentially)
-- Acceptance criteria β story acceptance tests
-- Priority β sprint sequencing
-- Traceability β stories map back to vision
-
-**Development AI Agents:**
-- Precise requirements β implementation clarity
-- Test criteria β automated test generation
-- Domain requirements β compliance enforcement
-- Measurable NFRs β performance targets
-
----
-
-## Summary: What Makes a Great BMAD PRD?
-
-β **High Information Density** - Every sentence carries weight, zero fluff
-β **Measurable Requirements** - All FRs and NFRs are testable with specific criteria
-β **Clear Traceability** - Each requirement links to user need and business objective
-β **Domain Awareness** - Industry-specific requirements auto-detected and included
-β **Zero Anti-Patterns** - No subjective adjectives, implementation leakage, or vague quantifiers
-β **Dual Audience Optimized** - Human-readable AND LLM-consumable
-β **Markdown Format** - Professional, clean, accessible to all stakeholders
-
----
-
-**Remember:** The PRD is the foundation. Quality here ripples through every subsequent phase. A dense, precise, well-traced PRD makes UX design, architecture, epic breakdown, and AI development dramatically more effective.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/data/project-types.csv b/src/bmm/workflows/2-plan-workflows/prd/data/project-types.csv
deleted file mode 100644
index 6f71c513..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/data/project-types.csv
+++ /dev/null
@@ -1,11 +0,0 @@
-project_type,detection_signals,key_questions,required_sections,skip_sections,web_search_triggers,innovation_signals
-api_backend,"API,REST,GraphQL,backend,service,endpoints","Endpoints needed?;Authentication method?;Data formats?;Rate limits?;Versioning?;SDK needed?","endpoint_specs;auth_model;data_schemas;error_codes;rate_limits;api_docs","ux_ui;visual_design;user_journeys","framework best practices;OpenAPI standards","API composition;New protocol"
-mobile_app,"iOS,Android,app,mobile,iPhone,iPad","Native or cross-platform?;Offline needed?;Push notifications?;Device features?;Store compliance?","platform_reqs;device_permissions;offline_mode;push_strategy;store_compliance","desktop_features;cli_commands","app store guidelines;platform requirements","Gesture innovation;AR/VR features"
-saas_b2b,"SaaS,B2B,platform,dashboard,teams,enterprise","Multi-tenant?;Permission model?;Subscription tiers?;Integrations?;Compliance?","tenant_model;rbac_matrix;subscription_tiers;integration_list;compliance_reqs","cli_interface;mobile_first","compliance requirements;integration guides","Workflow automation;AI agents"
-developer_tool,"SDK,library,package,npm,pip,framework","Language support?;Package managers?;IDE integration?;Documentation?;Examples?","language_matrix;installation_methods;api_surface;code_examples;migration_guide","visual_design;store_compliance","package manager best practices;API design patterns","New paradigm;DSL creation"
-cli_tool,"CLI,command,terminal,bash,script","Interactive or scriptable?;Output formats?;Config method?;Shell completion?","command_structure;output_formats;config_schema;scripting_support","visual_design;ux_principles;touch_interactions","CLI design patterns;shell integration","Natural language CLI;AI commands"
-web_app,"website,webapp,browser,SPA,PWA","SPA or MPA?;Browser support?;SEO needed?;Real-time?;Accessibility?","browser_matrix;responsive_design;performance_targets;seo_strategy;accessibility_level","native_features;cli_commands","web standards;WCAG guidelines","New interaction;WebAssembly use"
-game,"game,player,gameplay,level,character","REDIRECT TO USE THE BMad Method Game Module Agent and Workflows - HALT","game-brief;GDD","most_sections","game design patterns","Novel mechanics;Genre mixing"
-desktop_app,"desktop,Windows,Mac,Linux,native","Cross-platform?;Auto-update?;System integration?;Offline?","platform_support;system_integration;update_strategy;offline_capabilities","web_seo;mobile_features","desktop guidelines;platform requirements","Desktop AI;System automation"
-iot_embedded,"IoT,embedded,device,sensor,hardware","Hardware specs?;Connectivity?;Power constraints?;Security?;OTA updates?","hardware_reqs;connectivity_protocol;power_profile;security_model;update_mechanism","visual_ui;browser_support","IoT standards;protocol specs","Edge AI;New sensors"
-blockchain_web3,"blockchain,crypto,DeFi,NFT,smart contract","Chain selection?;Wallet integration?;Gas optimization?;Security audit?","chain_specs;wallet_support;smart_contracts;security_audit;gas_optimization","traditional_auth;centralized_db","blockchain standards;security patterns","Novel tokenomics;DAO structure"
\ No newline at end of file
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-01-init.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-01-init.md
deleted file mode 100644
index 4b53688d..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-01-init.md
+++ /dev/null
@@ -1,191 +0,0 @@
----
-name: 'step-01-init'
-description: 'Initialize the PRD workflow by detecting continuation state and setting up the document'
-
-# File References
-nextStepFile: './step-02-discovery.md'
-continueStepFile: './step-01b-continue.md'
-outputFile: '{planning_artifacts}/prd.md'
-
-# Template Reference
-prdTemplate: '../templates/prd-template.md'
----
-
-# Step 1: Workflow Initialization
-
-**Progress: Step 1 of 11** - Next: Project Discovery
-
-## STEP GOAL:
-
-Initialize the PRD workflow by detecting continuation state, discovering input documents, and setting up the document structure for collaborative product requirement discovery.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused PM facilitator collaborating with an expert peer
-- β If you already have been given a name, communication_style and persona, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision
-
-### Step-Specific Rules:
-
-- π― Focus only on initialization and setup - no content generation yet
-- π« FORBIDDEN to look ahead to future steps or assume knowledge from them
-- π¬ Approach: Systematic setup with clear reporting to user
-- πͺ Detect existing workflow state and handle continuation properly
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis of current state before taking any action
-- πΎ Initialize document structure and update frontmatter appropriately
-- Update frontmatter: add this step name to the end of the steps completed array (it should be the first entry in the steps array since this is step 1)
-- π« FORBIDDEN to load next step until user selects 'C' (Continue)
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Variables from workflow.md are available in memory
-- Focus: Workflow initialization and document setup only
-- Limits: Don't assume knowledge from other steps or create content yet
-- Dependencies: Configuration loaded from workflow.md initialization
-
-## Sequence of Instructions (Do not deviate, skip, or optimize)
-
-### 1. Check for Existing Workflow State
-
-First, check if the output document already exists:
-
-**Workflow State Detection:**
-
-- Look for file at `{outputFile}`
-- If exists, read the complete file including frontmatter
-- If not exists, this is a fresh workflow
-
-### 2. Handle Continuation (If Document Exists)
-
-If the document exists and has frontmatter with `stepsCompleted` BUT `step-11-complete` is NOT in the list, follow the Continuation Protocol since the document is incomplete:
-
-**Continuation Protocol:**
-
-- **STOP immediately** and load `{continueStepFile}`
-- Do not proceed with any initialization tasks
-- Let step-01b handle all continuation logic
-- This is an auto-proceed situation - no user choice needed
-
-### 3. Fresh Workflow Setup (If No Document)
-
-If no document exists or no `stepsCompleted` in frontmatter:
-
-#### A. Input Document Discovery
-
-Discover and load context documents using smart discovery. Documents can be in the following locations:
-- {planning_artifacts}/**
-- {output_folder}/**
-- {product_knowledge}/**
-- docs/**
-
-Also - when searching - documents can be a single markdown file, or a folder with an index and multiple files. For Example, if searching for `*foo*.md` and not found, also search for a folder called *foo*/index.md (which indicates sharded content)
-
-Try to discover the following:
-- Product Brief (`*brief*.md`)
-- Research Documents (`/*research*.md`)
-- Project Documentation (generally multiple documents might be found for this in the `{product_knowledge}` or `docs` folder.)
-- Project Context (`**/project-context.md`)
-
-Confirm what you have found with the user, along with asking if the user wants to provide anything else. Only after this confirmation will you proceed to follow the loading rules
-
-**Loading Rules:**
-
-- Load ALL discovered files completely that the user confirmed or provided (no offset/limit)
-- If there is a project context, whatever is relevant should try to be biased in the remainder of this whole workflow process
-- For sharded folders, load ALL files to get complete picture, using the index first to potentially know the potential of each document
-- index.md is a guide to what's relevant whenever available
-- Track all successfully loaded files in frontmatter `inputDocuments` array
-
-#### B. Create Initial Document
-
-**Document Setup:**
-
-- Copy the template from `{prdTemplate}` to `{outputFile}`
-- Initialize frontmatter with proper structure including inputDocuments array.
-
-#### C. Present Initialization Results
-
-**Setup Report to User:**
-
-"Welcome {{user_name}}! I've set up your PRD workspace for {{project_name}}.
-
-**Document Setup:**
-
-- Created: `{outputFile}` from template
-- Initialized frontmatter with workflow state
-
-**Input Documents Discovered:**
-
-- Product briefs: {{briefCount}} files {if briefCount > 0}β loaded{else}(none found){/if}
-- Research: {{researchCount}} files {if researchCount > 0}β loaded{else}(none found){/if}
-- Brainstorming: {{brainstormingCount}} files {if brainstormingCount > 0}β loaded{else}(none found){/if}
-- Project docs: {{projectDocsCount}} files {if projectDocsCount > 0}β loaded (brownfield project){else}(none found - greenfield project){/if}
-
-**Files loaded:** {list of specific file names or "No additional documents found"}
-
-{if projectDocsCount > 0}
-π **Note:** This is a **brownfield project**. Your existing project documentation has been loaded. In the next step, I'll ask specifically about what new features or changes you want to add to your existing system.
-{/if}
-
-Do you have any other documents you'd like me to include, or shall we continue to the next step?"
-
-### 4. Present MENU OPTIONS
-
-Display menu after setup report:
-
-"[C] Continue - Save this and move to Project Discovery (Step 2 of 11)"
-
-#### Menu Handling Logic:
-
-- IF C: Update output file frontmatter, adding this step name to the end of the list of stepsCompleted, then read fully and follow: {nextStepFile}
-- IF user provides additional files: Load them, update inputDocuments and documentCounts, redisplay report
-- IF user asks questions: Answer and redisplay menu
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [frontmatter properly updated with this step added to stepsCompleted and documentCounts], will you then read fully and follow: `{nextStepFile}` to begin project discovery.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Existing workflow detected and properly handed off to step-01b
-- Fresh workflow initialized with template and proper frontmatter
-- Input documents discovered and loaded using sharded-first logic
-- All discovered files tracked in frontmatter `inputDocuments`
-- User clearly informed of brownfield vs greenfield status
-- Menu presented and user input handled correctly
-- Frontmatter updated with this step name added to stepsCompleted before proceeding
-
-### β SYSTEM FAILURE:
-
-- Proceeding with fresh initialization when existing workflow exists
-- Not updating frontmatter with discovered input documents
-- **Not storing document counts in frontmatter**
-- Creating document without proper template structure
-- Not checking sharded folders first before whole files
-- Not reporting discovered documents to user clearly
-- Proceeding without user selecting 'C' (Continue)
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-01b-continue.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-01b-continue.md
deleted file mode 100644
index 4f9198af..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-01b-continue.md
+++ /dev/null
@@ -1,153 +0,0 @@
----
-name: 'step-01b-continue'
-description: 'Resume an interrupted PRD workflow from the last completed step'
-
-# File References
-outputFile: '{planning_artifacts}/prd.md'
----
-
-# Step 1B: Workflow Continuation
-
-## STEP GOAL:
-
-Resume the PRD workflow from where it was left off, ensuring smooth continuation with full context restoration.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused PM facilitator collaborating with an expert peer
-- β We engage in collaborative dialogue, not command-response
-- β Resume workflow from exact point where it was interrupted
-
-### Step-Specific Rules:
-
-- π¬ FOCUS on understanding where we left off and continuing appropriately
-- π« FORBIDDEN to modify content completed in previous steps
-- π Only reload documents that were already tracked in `inputDocuments`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis of current state before taking action
-- Update frontmatter: add this step name to the end of the steps completed array
-- π Only load documents that were already tracked in `inputDocuments`
-- π« FORBIDDEN to discover new input documents during continuation
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Current document and frontmatter are already loaded
-- Focus: Workflow state analysis and continuation logic only
-- Limits: Don't assume knowledge beyond what's in the document
-- Dependencies: Existing workflow state from previous session
-
-## Sequence of Instructions (Do not deviate, skip, or optimize)
-
-### 1. Analyze Current State
-
-**State Assessment:**
-Review the frontmatter to understand:
-
-- `stepsCompleted`: Array of completed step filenames
-- Last element of `stepsCompleted` array: The most recently completed step
-- `inputDocuments`: What context was already loaded
-- All other frontmatter variables
-
-### 2. Restore Context Documents
-
-**Context Reloading:**
-
-- For each document in `inputDocuments`, load the complete file
-- This ensures you have full context for continuation
-- Don't discover new documents - only reload what was previously processed
-
-### 3. Determine Next Step
-
-**Simplified Next Step Logic:**
-1. Get the last element from the `stepsCompleted` array (this is the filename of the last completed step, e.g., "step-03-success.md")
-2. Load that step file and read its frontmatter
-3. Extract the `nextStepFile` value from the frontmatter
-4. That's the next step to load!
-
-**Example:**
-- If `stepsCompleted = ["step-01-init.md", "step-02-discovery.md", "step-03-success.md"]`
-- Last element is `"step-03-success.md"`
-- Load `step-03-success.md`, read its frontmatter
-- Find `nextStepFile: './step-04-journeys.md'`
-- Next step to load is `./step-04-journeys.md`
-
-### 4. Handle Workflow Completion
-
-**If `stepsCompleted` array contains `"step-11-complete.md"`:**
-"Great news! It looks like we've already completed the PRD workflow for {{project_name}}.
-
-The final document is ready at `{outputFile}` with all sections completed.
-
-Would you like me to:
-
-- Review the completed PRD with you
-- Suggest next workflow steps (like architecture or epic creation)
-- Start a new PRD revision
-
-What would be most helpful?"
-
-### 5. Present Current Progress
-
-**If workflow not complete:**
-"Welcome back {{user_name}}! I'm resuming our PRD collaboration for {{project_name}}.
-
-**Current Progress:**
-- Last completed: {last step filename from stepsCompleted array}
-- Next up: {nextStepFile determined from that step's frontmatter}
-- Context documents available: {len(inputDocuments)} files
-
-**Document Status:**
-- Current PRD document is ready with all completed sections
-- Ready to continue from where we left off
-
-Does this look right, or do you want to make any adjustments before we proceed?"
-
-### 6. Present MENU OPTIONS
-
-Display: "**Select an Option:** [C] Continue to {next step name}"
-
-#### Menu Handling Logic:
-
-- IF C: Read fully and follow the {nextStepFile} determined in step 3
-- IF Any other comments or queries: respond and redisplay menu
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [current state confirmed], will you then read fully and follow: {nextStepFile} to resume the workflow.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All previous input documents successfully reloaded
-- Current workflow state accurately analyzed and presented
-- User confirms understanding of progress before continuation
-- Correct next step identified and prepared for loading
-
-### β SYSTEM FAILURE:
-
-- Discovering new input documents instead of reloading existing ones
-- Modifying content from already completed steps
-- Failing to extract nextStepFile from the last completed step's frontmatter
-- Proceeding without user confirmation of current state
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-02-discovery.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-02-discovery.md
deleted file mode 100644
index 4829a4d3..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-02-discovery.md
+++ /dev/null
@@ -1,224 +0,0 @@
----
-name: 'step-02-discovery'
-description: 'Discover project type, domain, and context through collaborative dialogue'
-
-# File References
-nextStepFile: './step-03-success.md'
-outputFile: '{planning_artifacts}/prd.md'
-
-# Data Files
-projectTypesCSV: '../data/project-types.csv'
-domainComplexityCSV: '../data/domain-complexity.csv'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 2: Project Discovery
-
-**Progress: Step 2 of 13** - Next: Product Vision
-
-## STEP GOAL:
-
-Discover and classify the project - understand what type of product this is, what domain it operates in, and the project context (greenfield vs brownfield).
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read
-- β ALWAYS treat this as collaborative discovery between PM peers
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused PM facilitator collaborating with an expert peer
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise and product vision
-
-### Step-Specific Rules:
-
-- π― Focus on classification and understanding - no content generation yet
-- π« FORBIDDEN to generate executive summary or vision statements (that's next steps)
-- π¬ APPROACH: Natural conversation to understand the project
-- π― LOAD classification data BEFORE starting discovery conversation
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after classification complete
-- πΎ ONLY save classification to frontmatter when user chooses C (Continue)
-- π Update frontmatter, adding this step to the end of the list of stepsCompleted
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from step 1 are available
-- Input documents already loaded are in memory (product briefs, research, brainstorming, project docs)
-- **Document counts available in frontmatter `documentCounts`**
-- Classification CSV data will be loaded in this step only
-- No executive summary or vision content yet (that's steps 2b and 2c)
-
-## YOUR TASK:
-
-Discover and classify the project through natural conversation:
-- What type of product is this? (web app, API, mobile, etc.)
-- What domain does it operate in? (healthcare, fintech, e-commerce, etc.)
-- What's the project context? (greenfield new product vs brownfield existing system)
-- How complex is this domain? (low, medium, high)
-
-## DISCOVERY SEQUENCE:
-
-### 1. Check Document State
-
-Read the frontmatter from `{outputFile}` to get document counts:
-- `briefCount` - Product briefs available
-- `researchCount` - Research documents available
-- `brainstormingCount` - Brainstorming docs available
-- `projectDocsCount` - Existing project documentation
-
-**Announce your understanding:**
-
-"From step 1, I have loaded:
-- Product briefs: {{briefCount}}
-- Research: {{researchCount}}
-- Brainstorming: {{brainstormingCount}}
-- Project docs: {{projectDocsCount}}
-
-{{if projectDocsCount > 0}}This is a brownfield project - I'll focus on understanding what you want to add or change.{{else}}This is a greenfield project - I'll help you define the full product vision.{{/if}}"
-
-### 2. Load Classification Data
-
-**Attempt subprocess data lookup:**
-
-**Project Type Lookup:**
-"Your task: Lookup data in {projectTypesCSV}
-
-**Search criteria:**
-- Find row where project_type matches {{detectedProjectType}}
-
-**Return format:**
-Return ONLY the matching row as a YAML-formatted object with these fields:
-project_type, detection_signals
-
-**Do NOT return the entire CSV - only the matching row.**"
-
-**Domain Complexity Lookup:**
-"Your task: Lookup data in {domainComplexityCSV}
-
-**Search criteria:**
-- Find row where domain matches {{detectedDomain}}
-
-**Return format:**
-Return ONLY the matching row as a YAML-formatted object with these fields:
-domain, complexity, typical_concerns, compliance_requirements
-
-**Do NOT return the entire CSV - only the matching row.**"
-
-**Graceful degradation (if Task tool unavailable):**
-- Load the CSV files directly
-- Find the matching rows manually
-- Extract required fields
-- Keep in memory for intelligent classification
-
-### 3. Begin Discovery Conversation
-
-**Start with what you know:**
-
-If the user has a product brief or project docs, acknowledge them and share your understanding. Then ask clarifying questions to deepen your understanding.
-
-If this is a greenfield project with no docs, start with open-ended discovery:
-- What problem does this solve?
-- Who's it for?
-- What excites you about building this?
-
-**Listen for classification signals:**
-
-As the user describes their product, match against:
-- **Project type signals** (API, mobile, SaaS, etc.)
-- **Domain signals** (healthcare, fintech, education, etc.)
-- **Complexity indicators** (regulated industries, novel technology, etc.)
-
-### 4. Confirm Classification
-
-Once you have enough understanding, share your classification:
-
-"I'm hearing this as:
-- **Project Type:** {{detectedType}}
-- **Domain:** {{detectedDomain}}
-- **Complexity:** {{complexityLevel}}
-
-Does this sound right to you?"
-
-Let the user confirm or refine your classification.
-
-### 5. Save Classification to Frontmatter
-
-When user selects 'C', update frontmatter with classification:
-```yaml
-classification:
- projectType: {{projectType}}
- domain: {{domain}}
- complexity: {{complexityLevel}}
- projectContext: {{greenfield|brownfield}}
-```
-
-### N. Present MENU OPTIONS
-
-Present the project classification for review, then display menu:
-
-"Based on our conversation, I've discovered and classified your project.
-
-**Here's the classification:**
-
-**Project Type:** {{detectedType}}
-**Domain:** {{detectedDomain}}
-**Complexity:** {{complexityLevel}}
-**Project Context:** {{greenfield|brownfield}}
-
-**What would you like to do?**"
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Product Vision (Step 2b of 13)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask} with the current classification, process the enhanced insights that come back, ask user if they accept the improvements, if yes update classification then redisplay menu, if no keep original classification then redisplay menu
-- IF P: Read fully and follow: {partyModeWorkflow} with the current classification, process the collaborative insights, ask user if they accept the changes, if yes update classification then redisplay menu, if no keep original classification then redisplay menu
-- IF C: Save classification to {outputFile} frontmatter, add this step name to the end of stepsCompleted array, then read fully and follow: {nextStepFile}
-- IF Any other: help user respond, then redisplay menu
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [classification saved to frontmatter], will you then read fully and follow: `{nextStepFile}` to explore product vision.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Document state checked and announced to user
-- Classification data loaded and used intelligently
-- Natural conversation to understand project type, domain, complexity
-- Classification validated with user before saving
-- Frontmatter updated with classification when C selected
-- User's existing documents acknowledged and built upon
-
-### β SYSTEM FAILURE:
-
-- Not reading documentCounts from frontmatter first
-- Skipping classification data loading
-- Generating executive summary or vision content (that's later steps!)
-- Not validating classification with user
-- Being prescriptive instead of having natural conversation
-- Proceeding without user selecting 'C'
-
-**Master Rule:** This is classification and understanding only. No content generation yet. Build on what the user already has. Have natural conversations, don't follow scripts.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-03-success.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-03-success.md
deleted file mode 100644
index 9a3c5e34..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-03-success.md
+++ /dev/null
@@ -1,226 +0,0 @@
----
-name: 'step-03-success'
-description: 'Define comprehensive success criteria covering user, business, and technical success'
-
-# File References
-nextStepFile: './step-04-journeys.md'
-outputFile: '{planning_artifacts}/prd.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 3: Success Criteria Definition
-
-**Progress: Step 3 of 11** - Next: User Journey Mapping
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between PM peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on defining what winning looks like for this product
-- π― COLLABORATIVE discovery, not assumption-based goal setting
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating success criteria content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step name to the end of the list of stepsCompleted
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Executive Summary and Project Classification already exist in document
-- Input documents from step-01 are available (product briefs, research, brainstorming)
-- No additional data files needed for this step
-- Focus on measurable, specific success criteria
-- LEVERAGE existing input documents to inform success criteria
-
-## YOUR TASK:
-
-Define comprehensive success criteria that cover user success, business success, and technical success, using input documents as a foundation while allowing user refinement.
-
-## SUCCESS DISCOVERY SEQUENCE:
-
-### 1. Begin Success Definition Conversation
-
-**Check Input Documents for Success Indicators:**
-Analyze product brief, research, and brainstorming documents for success criteria already mentioned.
-
-**If Input Documents Contain Success Criteria:**
-Guide user to refine existing success criteria:
-- Acknowledge what's already documented in their materials
-- Extract key success themes from brief, research, and brainstorming
-- Help user identify gaps and areas for expansion
-- Probe for specific, measurable outcomes: When do users feel delighted/relieved/empowered?
-- Ask about emotional success moments and completion scenarios
-- Explore what "worth it" means beyond what's already captured
-
-**If No Success Criteria in Input Documents:**
-Start with user-centered success exploration:
-- Guide conversation toward defining what "worth it" means for users
-- Ask about the moment users realize their problem is solved
-- Explore specific user outcomes and emotional states
-- Identify success "aha!" moments and completion scenarios
-- Focus on user experience of success first
-
-### 2. Explore User Success Metrics
-
-Listen for specific user outcomes and help make them measurable:
-
-- Guide from vague to specific: NOT "users are happy" β "users complete [key action] within [timeframe]"
-- Ask about emotional success: "When do they feel delighted/relieved/empowered?"
-- Identify success moments: "What's the 'aha!' moment?"
-- Define completion scenarios: "What does 'done' look like for the user?"
-
-### 3. Define Business Success
-
-Transition to business metrics:
-- Guide conversation to business perspective on success
-- Explore timelines: What does 3-month success look like? 12-month success?
-- Identify key business metrics: revenue, user growth, engagement, or other measures?
-- Ask what specific metric would indicate "this is working"
-- Understand business success from their perspective
-
-### 4. Challenge Vague Metrics
-
-Push for specificity on business metrics:
-
-- "10,000 users" β "What kind of users? Doing what?"
-- "99.9% uptime" β "What's the real concern - data loss? Failed payments?"
-- "Fast" β "How fast, and what specifically needs to be fast?"
-- "Good adoption" β "What percentage adoption by when?"
-
-### 5. Connect to Product Differentiator
-
-Tie success metrics back to what makes the product special:
-- Connect success criteria to the product's unique differentiator
-- Ensure metrics reflect the specific value proposition
-- Adapt success criteria to domain context:
- - Consumer: User love, engagement, retention
- - B2B: ROI, efficiency, adoption
- - Developer tools: Developer experience, community
- - Regulated: Compliance, safety, validation
- - GovTech: Government compliance, accessibility, procurement
-
-### 6. Smart Scope Negotiation
-
-Guide scope definition through success lens:
-- Help user distinguish MVP (must work to be useful) from growth (competitive) and vision (dream)
-- Guide conversation through three scope levels:
- 1. MVP: What's essential for proving the concept?
- 2. Growth: What makes it competitive?
- 3. Vision: What's the dream version?
-- Challenge scope creep conversationally: Could this wait until after launch? Is this essential for MVP?
-- For complex domains: Ensure compliance minimums are included in MVP
-
-### 7. Generate Success Criteria Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Success Criteria
-
-### User Success
-
-[Content about user success criteria based on conversation]
-
-### Business Success
-
-[Content about business success metrics based on conversation]
-
-### Technical Success
-
-[Content about technical success requirements based on conversation]
-
-### Measurable Outcomes
-
-[Content about specific measurable outcomes based on conversation]
-
-## Product Scope
-
-### MVP - Minimum Viable Product
-
-[Content about MVP scope based on conversation]
-
-### Growth Features (Post-MVP)
-
-[Content about growth features based on conversation]
-
-### Vision (Future)
-
-[Content about future vision based on conversation]
-```
-
-### 8. Present MENU OPTIONS
-
-Present the success criteria content for user review, then display menu:
-
-- Show the drafted success criteria and scope definition (using structure from section 7)
-- Ask if they'd like to refine further, get other perspectives, or proceed
-- Present menu options naturally as part of the conversation
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to User Journey Mapping (Step 4 of 11)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask} with the current success criteria content, process the enhanced success metrics that come back, ask user "Accept these improvements to the success criteria? (y/n)", if yes update content with improvements then redisplay menu, if no keep original content then redisplay menu
-- IF P: Read fully and follow: {partyModeWorkflow} with the current success criteria, process the collaborative improvements to metrics and scope, ask user "Accept these changes to the success criteria? (y/n)", if yes update content with improvements then redisplay menu, if no keep original content then redisplay menu
-- IF C: Append the final content to {outputFile}, update frontmatter by adding this step name to the end of the stepsCompleted array, then read fully and follow: {nextStepFile}
-- IF Any other: help user respond, then redisplay menu
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 7.
-
-## SUCCESS METRICS:
-
-β User success criteria clearly identified and made measurable
-β Business success metrics defined with specific targets
-β Success criteria connected to product differentiator
-β Scope properly negotiated (MVP, Growth, Vision)
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Accepting vague success metrics without pushing for specificity
-β Not connecting success criteria back to product differentiator
-β Missing scope negotiation and leaving it undefined
-β Generating content without real user input on what success looks like
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## DOMAIN CONSIDERATIONS:
-
-If working in regulated domains (healthcare, fintech, govtech):
-
-- Include compliance milestones in success criteria
-- Add regulatory approval timelines to MVP scope
-- Consider audit requirements as technical success metrics
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-04-journeys.md` to map user journeys.
-
-Remember: Do NOT proceed to step-04 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-04-journeys.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-04-journeys.md
deleted file mode 100644
index 314dab56..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-04-journeys.md
+++ /dev/null
@@ -1,213 +0,0 @@
----
-name: 'step-04-journeys'
-description: 'Map ALL user types that interact with the system with narrative story-based journeys'
-
-# File References
-nextStepFile: './step-05-domain.md'
-outputFile: '{planning_artifacts}/prd.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 4: User Journey Mapping
-
-**Progress: Step 4 of 11** - Next: Domain Requirements
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between PM peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on mapping ALL user types that interact with the system
-- π― CRITICAL: No journey = no functional requirements = product doesn't exist
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating journey content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step name to the end of the list of stepsCompleted
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Success criteria and scope already defined
-- Input documents from step-01 are available (product briefs with user personas)
-- Every human interaction with the system needs a journey
-
-## YOUR TASK:
-
-Create compelling narrative user journeys that leverage existing personas from product briefs and identify additional user types needed for comprehensive coverage.
-
-## JOURNEY MAPPING SEQUENCE:
-
-### 1. Leverage Existing Users & Identify Additional Types
-
-**Check Input Documents for Existing Personas:**
-Analyze product brief, research, and brainstorming documents for user personas already defined.
-
-**If User Personas Exist in Input Documents:**
-Guide user to build on existing personas:
-- Acknowledge personas found in their product brief
-- Extract key persona details and backstories
-- Leverage existing insights about their needs
-- Prompt to identify additional user types beyond those documented
-- Suggest additional user types based on product context (admins, moderators, support, API consumers, internal ops)
-- Ask what additional user types should be considered
-
-**If No Personas in Input Documents:**
-Start with comprehensive user type discovery:
-- Guide exploration of ALL people who interact with the system
-- Consider beyond primary users: admins, moderators, support staff, API consumers, internal ops
-- Ask what user types should be mapped for this specific product
-- Ensure comprehensive coverage of all system interactions
-
-### 2. Create Narrative Story-Based Journeys
-
-For each user type, create compelling narrative journeys that tell their story:
-
-#### Narrative Journey Creation Process:
-
-**If Using Existing Persona from Input Documents:**
-Guide narrative journey creation:
-- Use persona's existing backstory from brief
-- Explore how the product changes their life/situation
-- Craft journey narrative: where do we meet them, how does product help them write their next chapter?
-
-**If Creating New Persona:**
-Guide persona creation with story framework:
-- Name: realistic name and personality
-- Situation: What's happening in their life/work that creates need?
-- Goal: What do they desperately want to achieve?
-- Obstacle: What's standing in their way?
-- Solution: How does the product solve their story?
-
-**Story-Based Journey Mapping:**
-
-Guide narrative journey creation using story structure:
-- **Opening Scene**: Where/how do we meet them? What's their current pain?
-- **Rising Action**: What steps do they take? What do they discover?
-- **Climax**: Critical moment where product delivers real value
-- **Resolution**: How does their situation improve? What's their new reality?
-
-Encourage narrative format with specific user details, emotional journey, and clear before/after contrast
-
-### 3. Guide Journey Exploration
-
-For each journey, facilitate detailed exploration:
-- What happens at each step specifically?
-- What could go wrong? What's the recovery path?
-- What information do they need to see/hear?
-- What's their emotional state at each point?
-- Where does this journey succeed or fail?
-
-### 4. Connect Journeys to Requirements
-
-After each journey, explicitly state:
-- This journey reveals requirements for specific capability areas
-- Help user see how different journeys create different feature sets
-- Connect journey needs to concrete capabilities (onboarding, dashboards, notifications, etc.)
-
-### 5. Aim for Comprehensive Coverage
-
-Guide toward complete journey set:
-
-- **Primary user** - happy path (core experience)
-- **Primary user** - edge case (different goal, error recovery)
-- **Secondary user** (admin, moderator, support, etc.)
-- **API consumer** (if applicable)
-
-Ask if additional journeys are needed to cover uncovered user types
-
-### 6. Generate User Journey Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## User Journeys
-
-[All journey narratives based on conversation]
-
-### Journey Requirements Summary
-
-[Summary of capabilities revealed by journeys based on conversation]
-```
-
-### 7. Present MENU OPTIONS
-
-Present the user journey content for review, then display menu:
-- Show the mapped user journeys (using structure from section 6)
-- Highlight how each journey reveals different capabilities
-- Ask if they'd like to refine further, get other perspectives, or proceed
-- Present menu options naturally as part of conversation
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Domain Requirements (Step 5 of 11)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask} with the current journey content, process the enhanced journey insights that come back, ask user "Accept these improvements to the user journeys? (y/n)", if yes update content with improvements then redisplay menu, if no keep original content then redisplay menu
-- IF P: Read fully and follow: {partyModeWorkflow} with the current journeys, process the collaborative journey improvements and additions, ask user "Accept these changes to the user journeys? (y/n)", if yes update content with improvements then redisplay menu, if no keep original content then redisplay menu
-- IF C: Append the final content to {outputFile}, update frontmatter by adding this step name to the end of the stepsCompleted array, then read fully and follow: {nextStepFile}
-- IF Any other: help user respond, then redisplay menu
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Existing personas from product briefs leveraged when available
-β All user types identified (not just primary users)
-β Rich narrative storytelling for each persona and journey
-β Complete story-based journey mapping with emotional arc
-β Journey requirements clearly connected to capabilities needed
-β Minimum 3-4 compelling narrative journeys covering different user types
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Ignoring existing personas from product briefs
-β Only mapping primary user journeys and missing secondary users
-β Creating generic journeys without rich persona details and narrative
-β Missing emotional storytelling elements that make journeys compelling
-β Missing critical decision points and failure scenarios
-β Not connecting journeys to required capabilities
-β Not having enough journey diversity (admin, support, API, etc.)
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## JOURNEY TYPES TO ENSURE:
-
-**Minimum Coverage:**
-
-1. **Primary User - Success Path**: Core experience journey
-2. **Primary User - Edge Case**: Error recovery, alternative goals
-3. **Admin/Operations User**: Management, configuration, monitoring
-4. **Support/Troubleshooting**: Help, investigation, issue resolution
-5. **API/Integration** (if applicable): Developer/technical user journey
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-05-domain.md`.
-
-Remember: Do NOT proceed to step-05 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-05-domain.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-05-domain.md
deleted file mode 100644
index 9539527d..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-05-domain.md
+++ /dev/null
@@ -1,207 +0,0 @@
----
-name: 'step-05-domain'
-description: 'Explore domain-specific requirements for complex domains (optional step)'
-
-# File References
-nextStepFile: './step-06-innovation.md'
-outputFile: '{planning_artifacts}/prd.md'
-domainComplexityCSV: '../data/domain-complexity.csv'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 5: Domain-Specific Requirements (Optional)
-
-**Progress: Step 5 of 13** - Next: Innovation Focus
-
-## STEP GOAL:
-
-For complex domains only that have a mapping in {domainComplexityCSV}, explore domain-specific constraints, compliance requirements, and technical considerations that shape the product.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read
-- β ALWAYS treat this as collaborative discovery between PM peers
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product-focused PM facilitator collaborating with an expert peer
-- β We engage in collaborative dialogue, not command-response
-- β You bring structured thinking and facilitation skills, while the user brings domain expertise
-
-### Step-Specific Rules:
-
-- π― This step is OPTIONAL - only needed for complex domains
-- π« SKIP if domain complexity is "low" from step-02
-- π¬ APPROACH: Natural conversation to discover domain-specific needs
-- π― Focus on constraints, compliance, and domain patterns
-
-## EXECUTION PROTOCOLS:
-
-- π― Check domain complexity from step-02 classification first
-- β οΈ If complexity is "low", offer to skip this step
-- β οΈ Present A/P/C menu after domain requirements defined (or skipped)
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step name to the end of the list of stepsCompleted
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Domain classification from step-02 is available
-- If complexity is low, this step may be skipped
-- Domain CSV data provides complexity reference
-- Focus on domain-specific constraints, not general requirements
-
-## YOUR TASK:
-
-For complex domains, explore what makes this domain special:
-- **Compliance requirements** - regulations, standards, certifications
-- **Technical constraints** - security, privacy, integration requirements
-- **Domain patterns** - common patterns, best practices, anti-patterns
-- **Risks and mitigations** - what could go wrong, how to prevent it
-
-## DOMAIN DISCOVERY SEQUENCE:
-
-### 1. Check Domain Complexity
-
-**Review classification from step-02:**
-
-- What's the domain complexity level? (low/medium/high)
-- What's the specific domain? (healthcare, fintech, education, etc.)
-
-**If complexity is LOW:**
-
-Offer to skip:
-"The domain complexity from our discovery is low. We may not need deep domain-specific requirements. Would you like to:
-- [C] Skip this step and move to Innovation
-- [D] Do domain exploration anyway"
-
-**If complexity is MEDIUM or HIGH:**
-
-Proceed with domain exploration.
-
-### 2. Load Domain Reference Data
-
-**Attempt subprocess data lookup:**
-
-"Your task: Lookup data in {domainComplexityCSV}
-
-**Search criteria:**
-- Find row where domain matches {{domainFromStep02}}
-
-**Return format:**
-Return ONLY the matching row as a YAML-formatted object with these fields:
-domain, complexity, typical_concerns, compliance_requirements
-
-**Do NOT return the entire CSV - only the matching row.**"
-
-**Graceful degradation (if Task tool unavailable):**
-- Load the CSV file directly
-- Find the matching row manually
-- Extract required fields
-- Understand typical concerns and compliance requirements
-
-### 3. Explore Domain-Specific Concerns
-
-**Start with what you know:**
-
-Acknowledge the domain and explore what makes it complex:
-- What regulations apply? (HIPAA, PCI-DSS, GDPR, SOX, etc.)
-- What standards matter? (ISO, NIST, domain-specific standards)
-- What certifications are needed? (security, privacy, domain-specific)
-- What integrations are required? (EMR systems, payment processors, etc.)
-
-**Explore technical constraints:**
-- Security requirements (encryption, audit logs, access control)
-- Privacy requirements (data handling, consent, retention)
-- Performance requirements (real-time, batch, latency)
-- Availability requirements (uptime, disaster recovery)
-
-### 4. Document Domain Requirements
-
-**Structure the requirements around key concerns:**
-
-```markdown
-### Compliance & Regulatory
-- [Specific requirements]
-
-### Technical Constraints
-- [Security, privacy, performance needs]
-
-### Integration Requirements
-- [Required systems and data flows]
-
-### Risk Mitigations
-- [Domain-specific risks and how to address them]
-```
-
-### 5. Validate Completeness
-
-**Check with the user:**
-
-"Are there other domain-specific concerns we should consider? For [this domain], what typically gets overlooked?"
-
-### N. Present MENU OPTIONS
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue - Save and Proceed to Innovation (Step 6 of 13)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask}, and when finished redisplay the menu
-- IF P: Read fully and follow: {partyModeWorkflow}, and when finished redisplay the menu
-- IF C: Save content to {outputFile}, update frontmatter, then read fully and follow: {nextStepFile}
-- IF Any other comments or queries: help user respond then [Redisplay Menu Options](#n-present-menu-options)
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## APPEND TO DOCUMENT
-
-When user selects 'C', append to `{outputFile}`:
-
-```markdown
-## Domain-Specific Requirements
-
-{{discovered domain requirements}}
-```
-
-If step was skipped, append nothing and proceed.
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [content saved or skipped], will you then read fully and follow: `{nextStepFile}` to explore innovation.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Domain complexity checked before proceeding
-- Offered to skip if complexity is low
-- Natural conversation exploring domain concerns
-- Compliance, technical, and integration requirements identified
-- Domain-specific risks documented with mitigations
-- User validated completeness
-- Content properly saved (or step skipped) when C selected
-
-### β SYSTEM FAILURE:
-
-- Not checking domain complexity first
-- Not offering to skip for low-complexity domains
-- Missing critical compliance requirements
-- Not exploring technical constraints
-- Not asking about domain-specific risks
-- Being generic instead of domain-specific
-- Proceeding without user validation
-
-**Master Rule:** This step is OPTIONAL for simple domains. For complex domains, focus on compliance, constraints, and domain patterns. Natural conversation, not checklists.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-06-innovation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-06-innovation.md
deleted file mode 100644
index 440ccf2d..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-06-innovation.md
+++ /dev/null
@@ -1,226 +0,0 @@
----
-name: 'step-06-innovation'
-description: 'Detect and explore innovative aspects of the product (optional step)'
-
-# File References
-nextStepFile: './step-07-project-type.md'
-outputFile: '{planning_artifacts}/prd.md'
-
-# Data Files
-projectTypesCSV: '../data/project-types.csv'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 6: Innovation Discovery
-
-**Progress: Step 6 of 11** - Next: Project Type Analysis
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between PM peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on detecting and exploring innovative aspects of the product
-- π― OPTIONAL STEP: Only proceed if innovation signals are detected
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating innovation content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step name to the end of the list of stepsCompleted
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Project type from step-02 is available for innovation signal matching
-- Project-type CSV data will be loaded in this step
-- Focus on detecting genuine innovation, not forced creativity
-
-## OPTIONAL STEP CHECK:
-
-Before proceeding with this step, scan for innovation signals:
-
-- Listen for language like "nothing like this exists", "rethinking how X works"
-- Check for project-type innovation signals from CSV
-- Look for novel approaches or unique combinations
-- If no innovation detected, skip this step
-
-## YOUR TASK:
-
-Detect and explore innovation patterns in the product, focusing on what makes it truly novel and how to validate the innovative aspects.
-
-## INNOVATION DISCOVERY SEQUENCE:
-
-### 1. Load Project-Type Innovation Data
-
-Load innovation signals specific to this project type:
-
-- Load `{projectTypesCSV}` completely
-- Find the row where `project_type` matches detected type from step-02
-- Extract `innovation_signals` (semicolon-separated list)
-- Extract `web_search_triggers` for potential innovation research
-
-### 2. Listen for Innovation Indicators
-
-Monitor conversation for both general and project-type-specific innovation signals:
-
-#### General Innovation Language:
-
-- "Nothing like this exists"
-- "We're rethinking how [X] works"
-- "Combining [A] with [B] for the first time"
-- "Novel approach to [problem]"
-- "No one has done [concept] before"
-
-#### Project-Type-Specific Signals (from CSV):
-
-Match user descriptions against innovation_signals for their project_type:
-
-- **api_backend**: "API composition;New protocol"
-- **mobile_app**: "Gesture innovation;AR/VR features"
-- **saas_b2b**: "Workflow automation;AI agents"
-- **developer_tool**: "New paradigm;DSL creation"
-
-### 3. Initial Innovation Screening
-
-Ask targeted innovation discovery questions:
-- Guide exploration of what makes the product innovative
-- Explore if they're challenging existing assumptions
-- Ask about novel combinations of technologies/approaches
-- Identify what hasn't been done before
-- Understand which aspects feel most innovative
-
-### 4. Deep Innovation Exploration (If Detected)
-
-If innovation signals are found, explore deeply:
-
-#### Innovation Discovery Questions:
-- What makes it unique compared to existing solutions?
-- What assumption are you challenging?
-- How do we validate it works?
-- What's the fallback if it doesn't?
-- Has anyone tried this before?
-
-#### Market Context Research:
-
-If relevant innovation detected, consider web search for context:
-Use `web_search_triggers` from project-type CSV:
-`[web_search_triggers] {concept} innovations {date}`
-
-### 5. Generate Innovation Content (If Innovation Detected)
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Innovation & Novel Patterns
-
-### Detected Innovation Areas
-
-[Innovation patterns identified based on conversation]
-
-### Market Context & Competitive Landscape
-
-[Market context and research based on conversation]
-
-### Validation Approach
-
-[Validation methodology based on conversation]
-
-### Risk Mitigation
-
-[Innovation risks and fallbacks based on conversation]
-```
-
-### 6. Present MENU OPTIONS (Only if Innovation Detected)
-
-Present the innovation content for review, then display menu:
-- Show identified innovative aspects (using structure from section 5)
-- Highlight differentiation from existing solutions
-- Ask if they'd like to refine further, get other perspectives, or proceed
-- Present menu options naturally as part of conversation
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Project Type Analysis (Step 7 of 11)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask} with the current innovation content, process the enhanced innovation insights that come back, ask user "Accept these improvements to the innovation analysis? (y/n)", if yes update content with improvements then redisplay menu, if no keep original content then redisplay menu
-- IF P: Read fully and follow: {partyModeWorkflow} with the current innovation content, process the collaborative innovation exploration and ideation, ask user "Accept these changes to the innovation analysis? (y/n)", if yes update content with improvements then redisplay menu, if no keep original content then redisplay menu
-- IF C: Append the final content to {outputFile}, update frontmatter by adding this step name to the end of the stepsCompleted array, then read fully and follow: {nextStepFile}
-- IF Any other: help user respond, then redisplay menu
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## NO INNOVATION DETECTED:
-
-If no genuine innovation signals are found after exploration:
-- Acknowledge that no clear innovation signals were found
-- Note this is fine - many successful products are excellent executions of existing concepts
-- Ask if they'd like to try finding innovative angles or proceed
-
-Display: "**Select:** [A] Advanced Elicitation - Let's try to find innovative angles [C] Continue - Skip innovation section and move to Project Type Analysis (Step 7 of 11)"
-
-### Menu Handling Logic:
-- IF A: Proceed with content generation anyway, then return to menu
-- IF C: Skip this step, then read fully and follow: {nextStepFile}
-
-### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 5.
-
-## SUCCESS METRICS:
-
-β Innovation signals properly detected from user conversation
-β Project-type innovation signals used to guide discovery
-β Genuine innovation explored (not forced creativity)
-β Validation approach clearly defined for innovative aspects
-β Risk mitigation strategies identified
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Forced innovation when none genuinely exists
-β Not using project-type innovation signals from CSV
-β Missing market context research for novel concepts
-β Not addressing validation approach for innovative features
-β Creating innovation theater without real innovative aspects
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## SKIP CONDITIONS:
-
-Skip this step and load `{nextStepFile}` if:
-
-- No innovation signals detected in conversation
-- Product is incremental improvement rather than breakthrough
-- User confirms innovation exploration is not needed
-- Project-type CSV has no innovation signals for this type
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document (or step is skipped), load `{nextStepFile}`.
-
-Remember: Do NOT proceed to step-07 until user explicitly selects 'C' from the A/P/C menu (or confirms step skip)!
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-07-project-type.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-07-project-type.md
deleted file mode 100644
index c078d6db..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-07-project-type.md
+++ /dev/null
@@ -1,237 +0,0 @@
----
-name: 'step-07-project-type'
-description: 'Conduct project-type specific discovery using CSV-driven guidance'
-
-# File References
-nextStepFile: './step-08-scoping.md'
-outputFile: '{planning_artifacts}/prd.md'
-
-# Data Files
-projectTypesCSV: '../data/project-types.csv'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 7: Project-Type Deep Dive
-
-**Progress: Step 7 of 11** - Next: Scoping
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between PM peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on project-type specific requirements and technical considerations
-- π― DATA-DRIVEN: Use CSV configuration to guide discovery
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating project-type content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step name to the end of the list of stepsCompleted
-- π« FORBIDDEN to load next step until C is selected
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Project type from step-02 is available for configuration loading
-- Project-type CSV data will be loaded in this step
-- Focus on technical and functional requirements specific to this project type
-
-## YOUR TASK:
-
-Conduct project-type specific discovery using CSV-driven guidance to define technical requirements.
-
-## PROJECT-TYPE DISCOVERY SEQUENCE:
-
-### 1. Load Project-Type Configuration Data
-
-**Attempt subprocess data lookup:**
-
-"Your task: Lookup data in {projectTypesCSV}
-
-**Search criteria:**
-- Find row where project_type matches {{projectTypeFromStep02}}
-
-**Return format:**
-Return ONLY the matching row as a YAML-formatted object with these fields:
-project_type, key_questions, required_sections, skip_sections, innovation_signals
-
-**Do NOT return the entire CSV - only the matching row.**"
-
-**Graceful degradation (if Task tool unavailable):**
-- Load the CSV file directly
-- Find the matching row manually
-- Extract required fields:
- - `key_questions` (semicolon-separated list of discovery questions)
- - `required_sections` (semicolon-separated list of sections to document)
- - `skip_sections` (semicolon-separated list of sections to skip)
- - `innovation_signals` (already explored in step-6)
-
-### 2. Conduct Guided Discovery Using Key Questions
-
-Parse `key_questions` from CSV and explore each:
-
-#### Question-Based Discovery:
-
-For each question in `key_questions` from CSV:
-
-- Ask the user naturally in conversational style
-- Listen for their response and ask clarifying follow-ups
-- Connect answers to product value proposition
-
-**Example Flow:**
-If key_questions = "Endpoints needed?;Authentication method?;Data formats?;Rate limits?;Versioning?;SDK needed?"
-
-Ask naturally:
-
-- "What are the main endpoints your API needs to expose?"
-- "How will you handle authentication and authorization?"
-- "What data formats will you support for requests and responses?"
-
-### 3. Document Project-Type Specific Requirements
-
-Based on user answers to key_questions, synthesize comprehensive requirements:
-
-#### Requirement Categories:
-
-Cover the areas indicated by `required_sections` from CSV:
-
-- Synthesize what was discovered for each required section
-- Document specific requirements, constraints, and decisions
-- Connect to product differentiator when relevant
-
-#### Skip Irrelevant Sections:
-
-Skip areas indicated by `skip_sections` from CSV to avoid wasting time on irrelevant aspects.
-
-### 4. Generate Dynamic Content Sections
-
-Parse `required_sections` list from the matched CSV row. For each section name, generate corresponding content:
-
-#### Common CSV Section Mappings:
-
-- "endpoint_specs" or "endpoint_specification" β API endpoints documentation
-- "auth_model" or "authentication_model" β Authentication approach
-- "platform_reqs" or "platform_requirements" β Platform support needs
-- "device_permissions" or "device_features" β Device capabilities
-- "tenant_model" β Multi-tenancy approach
-- "rbac_matrix" or "permission_matrix" β Permission structure
-
-#### Template Variable Strategy:
-
-- For sections matching common template variables: generate specific content
-- For sections without template matches: include in main project_type_requirements
-- Hybrid approach balances template structure with CSV-driven flexibility
-
-### 5. Generate Project-Type Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## [Project Type] Specific Requirements
-
-### Project-Type Overview
-
-[Project type summary based on conversation]
-
-### Technical Architecture Considerations
-
-[Technical architecture requirements based on conversation]
-
-[Dynamic sections based on CSV and conversation]
-
-### Implementation Considerations
-
-[Implementation specific requirements based on conversation]
-```
-
-### 6. Present MENU OPTIONS
-
-Present the project-type content for review, then display menu:
-
-"Based on our conversation and best practices for this product type, I've documented the {project_type}-specific requirements for {{project_name}}.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from section 5]
-
-**What would you like to do?**"
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Scoping (Step 8 of 11)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask} with the current project-type content, process the enhanced technical insights that come back, ask user "Accept these improvements to the technical requirements? (y/n)", if yes update content with improvements then redisplay menu, if no keep original content then redisplay menu
-- IF P: Read fully and follow: {partyModeWorkflow} with the current project-type requirements, process the collaborative technical expertise and validation, ask user "Accept these changes to the technical requirements? (y/n)", if yes update content with improvements then redisplay menu, if no keep original content then redisplay menu
-- IF C: Append the final content to {outputFile}, update frontmatter by adding this step name to the end of the stepsCompleted array, then read fully and follow: {nextStepFile}
-- IF Any other: help user respond, then redisplay menu
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from previous steps.
-
-## SUCCESS METRICS:
-
-β Project-type configuration loaded and used effectively
-β All key questions from CSV explored with user input
-β Required sections generated per CSV configuration
-β Skip sections properly avoided to save time
-β Technical requirements connected to product value
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not loading or using project-type CSV configuration
-β Missing key questions from CSV in discovery process
-β Not generating required sections per CSV configuration
-β Documenting sections that should be skipped per CSV
-β Creating generic content without project-type specificity
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## PROJECT-TYPE EXAMPLES:
-
-**For api_backend:**
-
-- Focus on endpoints, authentication, data schemas, rate limiting
-- Skip visual design and user journey sections
-- Generate API specification documentation
-
-**For mobile_app:**
-
-- Focus on platform requirements, device permissions, offline mode
-- Skip API endpoint documentation unless needed
-- Generate mobile-specific technical requirements
-
-**For saas_b2b:**
-
-- Focus on multi-tenancy, permissions, integrations
-- Skip mobile-first considerations unless relevant
-- Generate enterprise-specific requirements
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `{nextStepFile}` to define project scope.
-
-Remember: Do NOT proceed to step-08 (Scoping) until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-08-scoping.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-08-scoping.md
deleted file mode 100644
index da9230ad..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-08-scoping.md
+++ /dev/null
@@ -1,228 +0,0 @@
----
-name: 'step-08-scoping'
-description: 'Define MVP boundaries and prioritize features across development phases'
-
-# File References
-nextStepFile: './step-09-functional.md'
-outputFile: '{planning_artifacts}/prd.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 8: Scoping Exercise - MVP & Future Features
-
-**Progress: Step 8 of 11** - Next: Functional Requirements
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between PM peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on strategic scope decisions that keep projects viable
-- π― EMPHASIZE lean MVP thinking while preserving long-term vision
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- π Review the complete PRD document built so far
-- β οΈ Present A/P/C menu after generating scoping decisions
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step name to the end of the list of stepsCompleted
-- π« FORBIDDEN to load next step until C is selected
-
-
-## CONTEXT BOUNDARIES:
-
-- Complete PRD document built so far is available for review
-- User journeys, success criteria, and domain requirements are documented
-- Focus on strategic scope decisions, not feature details
-- Balance between user value and implementation feasibility
-
-## YOUR TASK:
-
-Conduct comprehensive scoping exercise to define MVP boundaries and prioritize features across development phases.
-
-## SCOPING SEQUENCE:
-
-### 1. Review Current PRD State
-
-Analyze everything documented so far:
-- Present synthesis of established vision, success criteria, journeys
-- Assess domain and innovation focus
-- Evaluate scope implications: simple MVP, medium, or complex project
-- Ask if initial assessment feels right or if they see it differently
-
-### 2. Define MVP Strategy
-
-Facilitate strategic MVP decisions:
-- Explore MVP philosophy options: problem-solving, experience, platform, or revenue MVP
-- Ask critical questions:
- - What's the minimum that would make users say 'this is useful'?
- - What would make investors/partners say 'this has potential'?
- - What's the fastest path to validated learning?
-- Guide toward appropriate MVP approach for their product
-
-### 3. Scoping Decision Framework
-
-Use structured decision-making for scope:
-
-**Must-Have Analysis:**
-- Guide identification of absolute MVP necessities
-- For each journey and success criterion, ask:
- - Without this, does the product fail?
- - Can this be manual initially?
- - Is this a deal-breaker for early adopters?
-- Analyze journeys for MVP essentials
-
-**Nice-to-Have Analysis:**
-- Identify what could be added later:
- - Features that enhance but aren't essential
- - User types that can be added later
- - Advanced functionality that builds on MVP
-- Ask what features could be added in versions 2, 3, etc.
-
-### 4. Progressive Feature Roadmap
-
-Create phased development approach:
-- Guide mapping of features across development phases
-- Structure as Phase 1 (MVP), Phase 2 (Growth), Phase 3 (Vision)
-- Ensure clear progression and dependencies
-
-- Core user value delivery
-- Essential user journeys
-- Basic functionality that works reliably
-
-**Phase 2: Growth**
-
-- Additional user types
-- Enhanced features
-- Scale improvements
-
-**Phase 3: Expansion**
-
-- Advanced capabilities
-- Platform features
-- New markets or use cases
-
-**Where does your current vision fit in this development sequence?**"
-
-### 5. Risk-Based Scoping
-
-Identify and mitigate scoping risks:
-
-**Technical Risks:**
-"Looking at your innovation and domain requirements:
-
-- What's the most technically challenging aspect?
-- Could we simplify the initial implementation?
-- What's the riskiest assumption about technology feasibility?"
-
-**Market Risks:**
-
-- What's the biggest market risk?
-- How does the MVP address this?
-- What learning do we need to de-risk this?"
-
-**Resource Risks:**
-
-- What if we have fewer resources than planned?
-- What's the absolute minimum team size needed?
-- Can we launch with a smaller feature set?"
-
-### 6. Generate Scoping Content
-
-Prepare comprehensive scoping section:
-
-#### Content Structure:
-
-```markdown
-## Project Scoping & Phased Development
-
-### MVP Strategy & Philosophy
-
-**MVP Approach:** {{chosen_mvp_approach}}
-**Resource Requirements:** {{mvp_team_size_and_skills}}
-
-### MVP Feature Set (Phase 1)
-
-**Core User Journeys Supported:**
-{{essential_journeys_for_mvp}}
-
-**Must-Have Capabilities:**
-{{list_of_essential_mvp_features}}
-
-### Post-MVP Features
-
-**Phase 2 (Post-MVP):**
-{{planned_growth_features}}
-
-**Phase 3 (Expansion):**
-{{planned_expansion_features}}
-
-### Risk Mitigation Strategy
-
-**Technical Risks:** {{mitigation_approach}}
-**Market Risks:** {{validation_approach}}
-**Resource Risks:** {{contingency_approach}}
-```
-
-### 7. Present MENU OPTIONS
-
-Present the scoping decisions for review, then display menu:
-- Show strategic scoping plan (using structure from step 6)
-- Highlight MVP boundaries and phased roadmap
-- Ask if they'd like to refine further, get other perspectives, or proceed
-- Present menu options naturally as part of conversation
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Functional Requirements (Step 9 of 11)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask} with the current scoping analysis, process the enhanced insights that come back, ask user if they accept the improvements, if yes update content then redisplay menu, if no keep original content then redisplay menu
-- IF P: Read fully and follow: {partyModeWorkflow} with the scoping context, process the collaborative insights on MVP and roadmap decisions, ask user if they accept the changes, if yes update content then redisplay menu, if no keep original content then redisplay menu
-- IF C: Append the final content to {outputFile}, update frontmatter by adding this step name to the end of the stepsCompleted array, then read fully and follow: {nextStepFile}
-- IF Any other: help user respond, then redisplay menu
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Complete PRD document analyzed for scope implications
-β Strategic MVP approach defined and justified
-β Clear MVP feature boundaries established
-β Phased development roadmap created
-β Key risks identified and mitigation strategies defined
-β User explicitly agrees to scope decisions
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not analyzing the complete PRD before making scoping decisions
-β Making scope decisions without strategic rationale
-β Not getting explicit user agreement on MVP boundaries
-β Missing critical risk analysis
-β Not creating clear phased development approach
-β Not presenting A/P/C menu after content generation
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load {nextStepFile}.
-
-Remember: Do NOT proceed to step-09 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-09-functional.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-09-functional.md
deleted file mode 100644
index d689ebf3..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-09-functional.md
+++ /dev/null
@@ -1,231 +0,0 @@
----
-name: 'step-09-functional'
-description: 'Synthesize all discovery into comprehensive functional requirements'
-
-# File References
-nextStepFile: './step-10-nonfunctional.md'
-outputFile: '{planning_artifacts}/prd.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 9: Functional Requirements Synthesis
-
-**Progress: Step 9 of 11** - Next: Non-Functional Requirements
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between PM peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on creating comprehensive capability inventory for the product
-- π― CRITICAL: This is THE CAPABILITY CONTRACT for all downstream work
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating functional requirements
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step name to the end of the list of stepsCompleted
-- π« FORBIDDEN to load next step until C is selected
-
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- ALL previous content (executive summary, success criteria, journeys, domain, innovation, project-type) must be referenced
-- No additional data files needed for this step
-- Focus on capabilities, not implementation details
-
-## CRITICAL IMPORTANCE:
-
-**This section defines THE CAPABILITY CONTRACT for the entire product:**
-
-- UX designers will ONLY design what's listed here
-- Architects will ONLY support what's listed here
-- Epic breakdown will ONLY implement what's listed here
-- If a capability is missing from FRs, it will NOT exist in the final product
-
-## FUNCTIONAL REQUIREMENTS SYNTHESIS SEQUENCE:
-
-### 1. Understand FR Purpose and Usage
-
-Start by explaining the critical role of functional requirements:
-
-**Purpose:**
-FRs define WHAT capabilities the product must have. They are the complete inventory of user-facing and system capabilities that deliver the product vision.
-
-**Critical Properties:**
-β Each FR is a testable capability
-β Each FR is implementation-agnostic (could be built many ways)
-β Each FR specifies WHO and WHAT, not HOW
-β No UI details, no performance numbers, no technology choices
-β Comprehensive coverage of capability areas
-
-**How They Will Be Used:**
-
-1. UX Designer reads FRs β designs interactions for each capability
-2. Architect reads FRs β designs systems to support each capability
-3. PM reads FRs β creates epics and stories to implement each capability
-
-### 2. Review Existing Content for Capability Extraction
-
-Systematically review all previous sections to extract capabilities:
-
-**Extract From:**
-
-- Executive Summary β Core product differentiator capabilities
-- Success Criteria β Success-enabling capabilities
-- User Journeys β Journey-revealed capabilities
-- Domain Requirements β Compliance and regulatory capabilities
-- Innovation Patterns β Innovative feature capabilities
-- Project-Type Requirements β Technical capability needs
-
-### 3. Organize Requirements by Capability Area
-
-Group FRs by logical capability areas (NOT by technology or layer):
-
-**Good Grouping Examples:**
-
-- β "User Management" (not "Authentication System")
-- β "Content Discovery" (not "Search Algorithm")
-- β "Team Collaboration" (not "WebSocket Infrastructure")
-
-**Target 5-8 Capability Areas** for typical projects.
-
-### 4. Generate Comprehensive FR List
-
-Create complete functional requirements using this format:
-
-**Format:**
-
-- FR#: [Actor] can [capability] [context/constraint if needed]
-- Number sequentially (FR1, FR2, FR3...)
-- Aim for 20-50 FRs for typical projects
-
-**Altitude Check:**
-Each FR should answer "WHAT capability exists?" NOT "HOW it's implemented?"
-
-**Examples:**
-
-- β "Users can customize appearance settings"
-- β "Users can toggle light/dark theme with 3 font size options stored in LocalStorage"
-
-### 5. Self-Validation Process
-
-Before presenting to user, validate the FR list:
-
-**Completeness Check:**
-
-1. "Did I cover EVERY capability mentioned in the MVP scope section?"
-2. "Did I include domain-specific requirements as FRs?"
-3. "Did I cover the project-type specific needs?"
-4. "Could a UX designer read ONLY the FRs and know what to design?"
-5. "Could an Architect read ONLY the FRs and know what to support?"
-6. "Are there any user actions or system behaviors we discussed that have no FR?"
-
-**Altitude Check:**
-
-1. "Am I stating capabilities (WHAT) or implementation (HOW)?"
-2. "Am I listing acceptance criteria or UI specifics?" (Remove if yes)
-3. "Could this FR be implemented 5 different ways?" (Good - means it's not prescriptive)
-
-**Quality Check:**
-
-1. "Is each FR clear enough that someone could test whether it exists?"
-2. "Is each FR independent (not dependent on reading other FRs to understand)?"
-3. "Did I avoid vague terms like 'good', 'fast', 'easy'?" (Use NFRs for quality attributes)
-
-### 6. Generate Functional Requirements Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-When saving to document, append these Level 2 and Level 3 sections:
-
-```markdown
-## Functional Requirements
-
-### [Capability Area Name]
-
-- FR1: [Specific Actor] can [specific capability]
-- FR2: [Specific Actor] can [specific capability]
-- FR3: [Specific Actor] can [specific capability]
-
-### [Another Capability Area]
-
-- FR4: [Specific Actor] can [specific capability]
-- FR5: [Specific Actor] can [specific capability]
-
-[Continue for all capability areas discovered in conversation]
-```
-
-### 7. Present MENU OPTIONS
-
-Present the functional requirements for review, then display menu:
-- Show synthesized functional requirements (using structure from step 6)
-- Emphasize this is the capability contract for all downstream work
-- Highlight that every feature must trace back to these requirements
-- Ask if they'd like to refine further, get other perspectives, or proceed
-- Present menu options naturally as part of conversation
-
-**What would you like to do?**"
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Non-Functional Requirements (Step 10 of 11)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask} with the current FR list, process the enhanced capability coverage that comes back, ask user if they accept the additions, if yes update content then redisplay menu, if no keep original content then redisplay menu
-- IF P: Read fully and follow: {partyModeWorkflow} with the current FR list, process the collaborative capability validation and additions, ask user if they accept the changes, if yes update content then redisplay menu, if no keep original content then redisplay menu
-- IF C: Append the final content to {outputFile}, update frontmatter by adding this step name to the end of the stepsCompleted array, then read fully and follow: {nextStepFile}
-- IF Any other: help user respond, then redisplay menu
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β All previous discovery content synthesized into FRs
-β FRs organized by capability areas (not technology)
-β Each FR states WHAT capability exists, not HOW to implement
-β Comprehensive coverage with 20-50 FRs typical
-β Altitude validation ensures implementation-agnostic requirements
-β Completeness check validates coverage of all discussed capabilities
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Missing capabilities from previous discovery sections
-β Organizing FRs by technology instead of capability areas
-β Including implementation details or UI specifics in FRs
-β Not achieving comprehensive coverage of discussed capabilities
-β Using vague terms instead of testable capabilities
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## CAPABILITY CONTRACT REMINDER:
-
-Emphasize to user: "This FR list is now binding. Any feature not listed here will not exist in the final product unless we explicitly add it. This is why it's critical to ensure completeness now."
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load {nextStepFile} to define non-functional requirements.
-
-Remember: Do NOT proceed to step-10 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-10-nonfunctional.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-10-nonfunctional.md
deleted file mode 100644
index 40919635..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-10-nonfunctional.md
+++ /dev/null
@@ -1,242 +0,0 @@
----
-name: 'step-10-nonfunctional'
-description: 'Define quality attributes that matter for this specific product'
-
-# File References
-nextStepFile: './step-11-polish.md'
-outputFile: '{planning_artifacts}/prd.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 10: Non-Functional Requirements
-
-**Progress: Step 10 of 12** - Next: Polish Document
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between PM peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on quality attributes that matter for THIS specific product
-- π― SELECTIVE: Only document NFRs that actually apply to the product
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating NFR content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update output file frontmatter, adding this step name to the end of the list of stepsCompleted
-- π« FORBIDDEN to load next step until C is selected
-
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from previous steps are available
-- Functional requirements already defined and will inform NFRs
-- Domain and project-type context will guide which NFRs matter
-- Focus on specific, measurable quality criteria
-
-## YOUR TASK:
-
-Define non-functional requirements that specify quality attributes for the product, focusing only on what matters for THIS specific product.
-
-## NON-FUNCTIONAL REQUIREMENTS SEQUENCE:
-
-### 1. Explain NFR Purpose and Scope
-
-Start by clarifying what NFRs are and why we're selective:
-
-**NFR Purpose:**
-NFRs define HOW WELL the system must perform, not WHAT it must do. They specify quality attributes like performance, security, scalability, etc.
-
-**Selective Approach:**
-We only document NFRs that matter for THIS product. If a category doesn't apply, we skip it entirely. This prevents requirement bloat and focuses on what's actually important.
-
-### 2. Assess Product Context for NFR Relevance
-
-Evaluate which NFR categories matter based on product context:
-
-**Quick Assessment Questions:**
-
-- **Performance**: Is there user-facing impact of speed?
-- **Security**: Are we handling sensitive data or payments?
-- **Scalability**: Do we expect rapid user growth?
-- **Accessibility**: Are we serving broad public audiences?
-- **Integration**: Do we need to connect with other systems?
-- **Reliability**: Would downtime cause significant problems?
-
-### 3. Explore Relevant NFR Categories
-
-For each relevant category, conduct targeted discovery:
-
-#### Performance NFRs (If relevant):
-
-Explore performance requirements:
-- What parts of the system need to be fast for users to be successful?
-- Are there specific response time expectations?
-- What happens if performance is slower than expected?
-- Are there concurrent user scenarios we need to support?
-
-#### Security NFRs (If relevant):
-
-Explore security requirements:
-- What data needs to be protected?
-- Who should have access to what?
-- What are the security risks we need to mitigate?
-- Are there compliance requirements (GDPR, HIPAA, PCI-DSS)?
-
-#### Scalability NFRs (If relevant):
-
-Explore scalability requirements:
-- How many users do we expect initially? Long-term?
-- Are there seasonal or event-based traffic spikes?
-- What happens if we exceed our capacity?
-- What growth scenarios should we plan for?
-
-#### Accessibility NFRs (If relevant):
-
-Explore accessibility requirements:
-- Are we serving users with visual, hearing, or motor impairments?
-- Are there legal accessibility requirements (WCAG, Section 508)?
-- What accessibility features are most important for our users?
-
-#### Integration NFRs (If relevant):
-
-Explore integration requirements:
-- What external systems do we need to connect with?
-- Are there APIs or data formats we must support?
-- How reliable do these integrations need to be?
-
-### 4. Make NFRs Specific and Measurable
-
-For each relevant NFR category, ensure criteria are testable:
-
-**From Vague to Specific:**
-
-- NOT: "The system should be fast" β "User actions complete within 2 seconds"
-- NOT: "The system should be secure" β "All data is encrypted at rest and in transit"
-- NOT: "The system should scale" β "System supports 10x user growth with <10% performance degradation"
-
-### 5. Generate NFR Content (Only Relevant Categories)
-
-Prepare the content to append to the document:
-
-#### Content Structure (Dynamic based on relevance):
-
-When saving to document, append these Level 2 and Level 3 sections (only include sections that are relevant):
-
-```markdown
-## Non-Functional Requirements
-
-### Performance
-
-[Performance requirements based on conversation - only include if relevant]
-
-### Security
-
-[Security requirements based on conversation - only include if relevant]
-
-### Scalability
-
-[Scalability requirements based on conversation - only include if relevant]
-
-### Accessibility
-
-[Accessibility requirements based on conversation - only include if relevant]
-
-### Integration
-
-[Integration requirements based on conversation - only include if relevant]
-```
-
-### 6. Present MENU OPTIONS
-
-Present the non-functional requirements for review, then display menu:
-- Show defined NFRs (using structure from step 5)
-- Note that only relevant categories were included
-- Emphasize NFRs specify how well the system needs to perform
-- Ask if they'd like to refine further, get other perspectives, or proceed
-- Present menu options naturally as part of conversation
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Polish Document (Step 11 of 12)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask} with the current NFR content, process the enhanced quality attribute insights that come back, ask user if they accept the improvements, if yes update content then redisplay menu, if no keep original content then redisplay menu
-- IF P: Read fully and follow: {partyModeWorkflow} with the current NFR list, process the collaborative technical validation and additions, ask user if they accept the changes, if yes update content then redisplay menu, if no keep original content then redisplay menu
-- IF C: Append the final content to {outputFile}, update frontmatter by adding this step name to the end of the stepsCompleted array, then read fully and follow: {nextStepFile}
-- IF Any other: help user respond, then redisplay menu
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 5.
-
-## SUCCESS METRICS:
-
-β Only relevant NFR categories documented (no requirement bloat)
-β Each NFR is specific and measurable
-β NFRs connected to actual user needs and business context
-β Vague requirements converted to testable criteria
-β Domain-specific compliance requirements included if relevant
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Documenting NFR categories that don't apply to the product
-β Leaving requirements vague and unmeasurable
-β Not connecting NFRs to actual user or business needs
-β Missing domain-specific compliance requirements
-β Creating overly prescriptive technical requirements
-β Not presenting A/P/C menu after content generation
-β Appending content without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NFR CATEGORY GUIDANCE:
-
-**Include Performance When:**
-
-- User-facing response times impact success
-- Real-time interactions are critical
-- Performance is a competitive differentiator
-
-**Include Security When:**
-
-- Handling sensitive user data
-- Processing payments or financial information
-- Subject to compliance regulations
-- Protecting intellectual property
-
-**Include Scalability When:**
-
-- Expecting rapid user growth
-- Handling variable traffic patterns
-- Supporting enterprise-scale usage
-- Planning for market expansion
-
-**Include Accessibility When:**
-
-- Serving broad public audiences
-- Subject to accessibility regulations
-- Targeting users with disabilities
-- B2B customers with accessibility requirements
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load {nextStepFile} to finalize the PRD and complete the workflow.
-
-Remember: Do NOT proceed to step-11 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-11-polish.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-11-polish.md
deleted file mode 100644
index 23200915..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-11-polish.md
+++ /dev/null
@@ -1,217 +0,0 @@
----
-name: 'step-11-polish'
-description: 'Optimize and polish the complete PRD document for flow, coherence, and readability'
-
-# File References
-nextStepFile: './step-12-complete.md'
-outputFile: '{planning_artifacts}/prd.md'
-purposeFile: './data/prd-purpose.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step 11: Document Polish
-
-**Progress: Step 11 of 12** - Next: Complete PRD
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π CRITICAL: Load the ENTIRE document before making changes
-- π CRITICAL: Read complete step file before taking action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- β This is a POLISH step - optimize existing content
-- π IMPROVE flow, coherence, and readability
-- π¬ PRESERVE user's voice and intent
-- π― MAINTAIN all essential information while improving presentation
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Load complete document first
-- π Review for flow and coherence issues
-- βοΈ Reduce duplication while preserving essential info
-- π Ensure proper ## Level 2 headers throughout
-- πΎ Save optimized document
-- β οΈ Present A/P/C menu after polish
-- π« DO NOT skip review steps
-
-## CONTEXT BOUNDARIES:
-
-- Complete PRD document exists from all previous steps
-- Document may have duplication from progressive append
-- Sections may not flow smoothly together
-- Level 2 headers ensure document can be split if needed
-- Focus on readability and coherence
-
-## YOUR TASK:
-
-Optimize the complete PRD document for flow, coherence, and professional presentation while preserving all essential information.
-
-## DOCUMENT POLISH SEQUENCE:
-
-### 1. Load Context and Document
-
-**CRITICAL:** Load the PRD purpose document first:
-
-- Read `{purposeFile}` to understand what makes a great BMAD PRD
-- Internalize the philosophy: information density, traceability, measurable requirements
-- Keep the dual-audience nature (humans + LLMs) in mind
-
-**Then Load the PRD Document:**
-
-- Read `{outputFile}` completely from start to finish
-- Understand the full document structure and content
-- Identify all sections and their relationships
-- Note areas that need attention
-
-### 2. Document Quality Review
-
-Review the entire document with PRD purpose principles in mind:
-
-**Information Density:**
-- Are there wordy phrases that can be condensed?
-- Is conversational padding present?
-- Can sentences be more direct and concise?
-
-**Flow and Coherence:**
-- Do sections transition smoothly?
-- Are there jarring topic shifts?
-- Does the document tell a cohesive story?
-- Is the progression logical for readers?
-
-**Duplication Detection:**
-- Are ideas repeated across sections?
-- Is the same information stated multiple times?
-- Can redundant content be consolidated?
-- Are there contradictory statements?
-
-**Header Structure:**
-- Are all main sections using ## Level 2 headers?
-- Is the hierarchy consistent (##, ###, ####)?
-- Can sections be easily extracted or referenced?
-- Are headers descriptive and clear?
-
-**Readability:**
-- Are sentences clear and concise?
-- Is the language consistent throughout?
-- Are technical terms used appropriately?
-- Would stakeholders find this easy to understand?
-
-### 3. Optimization Actions
-
-Make targeted improvements:
-
-**Improve Flow:**
-- Add transition sentences between sections
-- Smooth out jarring topic shifts
-- Ensure logical progression
-- Connect related concepts across sections
-
-**Reduce Duplication:**
-- Consolidate repeated information
-- Keep content in the most appropriate section
-- Use cross-references instead of repetition
-- Remove redundant explanations
-
-**Enhance Coherence:**
-- Ensure consistent terminology throughout
-- Align all sections with product differentiator
-- Maintain consistent voice and tone
-- Verify scope consistency across sections
-
-**Optimize Headers:**
-- Ensure all main sections use ## Level 2
-- Make headers descriptive and action-oriented
-- Check that headers follow consistent patterns
-- Verify headers support document navigation
-
-### 4. Preserve Critical Information
-
-**While optimizing, ensure NOTHING essential is lost:**
-
-**Must Preserve:**
-- All user success criteria
-- All functional requirements (capability contract)
-- All user journey narratives
-- All scope decisions (MVP, Growth, Vision)
-- All non-functional requirements
-- Product differentiator and vision
-- Domain-specific requirements
-- Innovation analysis (if present)
-
-**Can Consolidate:**
-- Repeated explanations of the same concept
-- Redundant background information
-- Multiple versions of similar content
-- Overlapping examples
-
-### 5. Generate Optimized Document
-
-Create the polished version:
-
-**Polishing Process:**
-1. Start with original document
-2. Apply all optimization actions
-3. Review to ensure nothing essential was lost
-4. Verify improvements enhance readability
-5. Prepare optimized version for review
-
-### 6. Present MENU OPTIONS
-
-Present the polished document for review, then display menu:
-- Show what changed in the polish
-- Highlight improvements made (flow, duplication, headers)
-- Ask if they'd like to refine further, get other perspectives, or proceed
-- Present menu options naturally as part of conversation
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Complete PRD (Step 12 of 12)"
-
-#### Menu Handling Logic:
-- IF A: Read fully and follow: {advancedElicitationTask} with the polished document, process the enhanced refinements that come back, ask user "Accept these polish improvements? (y/n)", if yes update content with improvements then redisplay menu, if no keep original polish then redisplay menu
-- IF P: Read fully and follow: {partyModeWorkflow} with the polished document, process the collaborative refinements to flow and coherence, ask user "Accept these polish changes? (y/n)", if yes update content with improvements then redisplay menu, if no keep original polish then redisplay menu
-- IF C: Save the polished document to {outputFile}, update frontmatter by adding this step name to the end of the stepsCompleted array, then read fully and follow: {nextStepFile}
-- IF Any other: help user respond, then redisplay menu
-
-#### EXECUTION RULES:
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', replace the entire document content with the polished version.
-
-## SUCCESS METRICS:
-
-β Complete document loaded and reviewed
-β Flow and coherence improved
-β Duplication reduced while preserving essential information
-β All main sections use ## Level 2 headers
-β Transitions between sections are smooth
-β User's voice and intent preserved
-β Document is more readable and professional
-β A/P/C menu presented and handled correctly
-β Polished document saved when C selected
-
-## FAILURE MODES:
-
-β Loading only partial document (leads to incomplete polish)
-β Removing essential information while reducing duplication
-β Not preserving user's voice and intent
-β Changing content instead of improving presentation
-β Not ensuring ## Level 2 headers for main sections
-β Making arbitrary style changes instead of coherence improvements
-β Not presenting A/P/C menu for user approval
-β Saving polished document without user selecting 'C'
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making changes without complete understanding of document requirements
-
-## NEXT STEP:
-
-After user selects 'C' and polished document is saved, load `./step-12-complete.md` to complete the workflow.
-
-Remember: Do NOT proceed to step-12 until user explicitly selects 'C' from the A/P/C menu and polished document is saved!
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-12-complete.md b/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-12-complete.md
deleted file mode 100644
index ec3272ff..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-c/step-12-complete.md
+++ /dev/null
@@ -1,124 +0,0 @@
----
-name: 'step-12-complete'
-description: 'Complete the PRD workflow, update status files, and suggest next steps including validation'
-
-# File References
-outputFile: '{planning_artifacts}/prd.md'
-validationFlow: '../steps-v/step-v-01-discovery.md'
----
-
-# Step 12: Workflow Completion
-
-**Final Step - Complete the PRD**
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- β THIS IS A FINAL STEP - Workflow completion required
-- π CRITICAL: ALWAYS read the complete step file before taking any action
-- π NO content generation - this is a wrap-up step
-- π FINALIZE document and update workflow status
-- π¬ FOCUS on completion, validation options, and next steps
-- π― UPDATE workflow status files with completion information
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΎ Update the main workflow status file with completion information (if exists)
-- π Offer validation workflow options to user
-- π« DO NOT load additional steps after this one
-
-## TERMINATION STEP PROTOCOLS:
-
-- This is a FINAL step - workflow completion required
-- Update workflow status file with finalized document
-- Suggest validation and next workflow steps
-- Mark workflow as complete in status tracking
-
-## CONTEXT BOUNDARIES:
-
-- Complete and polished PRD document is available from all previous steps
-- Workflow frontmatter shows all completed steps including polish
-- All collaborative content has been generated, saved, and optimized
-- Focus on completion, validation options, and next steps
-
-## YOUR TASK:
-
-Complete the PRD workflow, update status files, offer validation options, and suggest next steps for the project.
-
-## WORKFLOW COMPLETION SEQUENCE:
-
-### 1. Announce Workflow Completion
-
-Inform user that the PRD is complete and polished:
-- Celebrate successful completion of comprehensive PRD
-- Summarize all sections that were created
-- Highlight that document has been polished for flow and coherence
-- Emphasize document is ready for downstream work
-
-### 2. Workflow Status Update
-
-Update the main workflow status file if there is one:
-
-- Load `{status_file}` from workflow configuration (if exists)
-- Update workflow_status["prd"] = "{default_output_file}"
-- Save file, preserving all comments and structure
-- Mark current timestamp as completion time
-
-### 3. Validation Workflow Options
-
-Offer validation workflows to ensure PRD is ready for implementation:
-
-**Available Validation Workflows:**
-
-**Option 1: Check Implementation Readiness** (`{checkImplementationReadinessWorkflow}`)
-- Validates PRD has all information needed for development
-- Checks epic coverage completeness
-- Reviews UX alignment with requirements
-- Assesses epic quality and readiness
-- Identifies gaps before architecture/design work begins
-
-**When to use:** Before starting technical architecture or epic breakdown
-
-**Option 2: Skip for Now**
-- Proceed directly to next workflows (architecture, UX, epics)
-- Validation can be done later if needed
-- Some teams prefer to validate during architecture reviews
-
-### 4. Suggest Next Workflows
-
-PRD complete. Read fully and follow: `_bmad/core/tasks/bmad-help.md` with argument `Create PRD`.
-
-### 5. Final Completion Confirmation
-
-- Confirm completion with user and summarize what has been accomplished
-- Document now contains: Executive Summary, Success Criteria, User Journeys, Domain Requirements (if applicable), Innovation Analysis (if applicable), Project-Type Requirements, Functional Requirements (capability contract), Non-Functional Requirements, and has been polished for flow and coherence
-- Ask if they'd like to run validation workflow or proceed to next workflows
-
-## SUCCESS METRICS:
-
-β PRD document contains all required sections and has been polished
-β All collaborative content properly saved and optimized
-β Workflow status file updated with completion information (if exists)
-β Validation workflow options clearly presented
-β Clear next step guidance provided to user
-β Document quality validation completed
-β User acknowledges completion and understands next options
-
-## FAILURE MODES:
-
-β Not updating workflow status file with completion information (if exists)
-β Not offering validation workflow options
-β Missing clear next step guidance for user
-β Not confirming document completeness with user
-β Workflow not properly marked as complete in status tracking (if applicable)
-β User unclear about what happens next or what validation options exist
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## FINAL REMINDER to give the user:
-
-The polished PRD serves as the foundation for all subsequent product development activities. All design, architecture, and development work should trace back to the requirements and vision documented in this PRD - update it also as needed as you continue planning.
-
-**Congratulations on completing the Product Requirements Document for {{project_name}}!** π
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-01-discovery.md b/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-01-discovery.md
deleted file mode 100644
index 64403297..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-01-discovery.md
+++ /dev/null
@@ -1,247 +0,0 @@
----
-name: 'step-e-01-discovery'
-description: 'Discovery & Understanding - Understand what user wants to edit and detect PRD format'
-
-# File references (ONLY variables used in this step)
-altStepFile: './step-e-01b-legacy-conversion.md'
-prdPurpose: '{project-root}/src/bmm/workflows/2-plan-workflows/prd/data/prd-purpose.md'
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
----
-
-# Step E-1: Discovery & Understanding
-
-## STEP GOAL:
-
-Understand what the user wants to edit in the PRD, detect PRD format/type, check for validation report guidance, and route appropriately.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and PRD Improvement Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring analytical expertise and improvement guidance
-- β User brings domain knowledge and edit requirements
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on discovering user intent and PRD format
-- π« FORBIDDEN to make any edits yet
-- π¬ Approach: Inquisitive and analytical, understanding before acting
-- πͺ This is a branch step - may route to legacy conversion
-
-## EXECUTION PROTOCOLS:
-
-- π― Discover user's edit requirements
-- π― Auto-detect validation reports in PRD folder (use as guide)
-- π― Load validation report if provided (use as guide)
-- π― Detect PRD format (BMAD/legacy)
-- π― Route appropriately based on format
-- πΎ Document discoveries for next step
-- π« FORBIDDEN to proceed without understanding requirements
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file to edit, optional validation report, auto-detected validation reports
-- Focus: User intent discovery and format detection only
-- Limits: Don't edit yet, don't validate yet
-- Dependencies: None - this is first edit step
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Load PRD Purpose Standards
-
-Load and read the complete file at:
-`{prdPurpose}` (data/prd-purpose.md)
-
-This file defines what makes a great BMAD PRD. Internalize this understanding - it will guide improvement recommendations.
-
-### 2. Discover PRD to Edit
-
-"**PRD Edit Workflow**
-
-Which PRD would you like to edit?
-
-Please provide the path to the PRD file you want to edit."
-
-**Wait for user to provide PRD path.**
-
-### 3. Validate PRD Exists and Load
-
-Once PRD path is provided:
-- Check if PRD file exists at specified path
-- If not found: "I cannot find a PRD at that path. Please check the path and try again."
-- If found: Load the complete PRD file including frontmatter
-
-### 4. Check for Existing Validation Report
-
-**Check if validation report exists in the PRD folder:**
-
-```bash
-# Look for most recent validation report in the PRD folder
-ls -t {prd_folder_path}/validation-report-*.md 2>/dev/null | head -1
-```
-
-**If validation report found:**
-
-Display:
-"**π Found Validation Report**
-
-I found a validation report from {validation_date} in the PRD folder.
-
-This report contains findings from previous validation checks and can help guide our edits to fix known issues.
-
-**Would you like to:**
-- **[U] Use validation report** - Load it to guide and prioritize edits
-- **[S] Skip** - Proceed with manual edit discovery"
-
-**Wait for user input.**
-
-**IF U (Use validation report):**
-- Load the validation report file
-- Extract findings, issues, and improvement suggestions
-- Note: "Validation report loaded - will use it to guide prioritized improvements"
-- Continue to step 5
-
-**IF S (Skip) or no validation report found:**
-- Note: "Proceeding with manual edit discovery"
-- Continue to step 5
-
-**If no validation report found:**
-- Note: "No validation report found in PRD folder"
-- Continue to step 5 without asking user
-
-### 5. Ask About Validation Report
-
-"**Do you have a validation report to guide edits?**
-
-If you've run the validation workflow on this PRD, I can use that report to guide improvements and prioritize changes.
-
-Validation report path (or type 'none'):"
-
-**Wait for user input.**
-
-**If validation report path provided:**
-- Load the validation report
-- Extract findings, severity, improvement suggestions
-- Note: "Validation report loaded - will use it to guide prioritized improvements"
-
-**If no validation report:**
-- Note: "Proceeding with manual edit discovery"
-- Continue to step 6
-
-### 6. Discover Edit Requirements
-
-"**What would you like to edit in this PRD?**
-
-Please describe the changes you want to make. For example:
-- Fix specific issues (information density, implementation leakage, etc.)
-- Add missing sections or content
-- Improve structure and flow
-- Convert to BMAD format (if legacy PRD)
-- General improvements
-- Other changes
-
-**Describe your edit goals:**"
-
-**Wait for user to describe their requirements.**
-
-### 7. Detect PRD Format
-
-Analyze the loaded PRD:
-
-**Extract all ## Level 2 headers** from PRD
-
-**Check for BMAD PRD core sections:**
-1. Executive Summary
-2. Success Criteria
-3. Product Scope
-4. User Journeys
-5. Functional Requirements
-6. Non-Functional Requirements
-
-**Classify format:**
-- **BMAD Standard:** 5-6 core sections present
-- **BMAD Variant:** 3-4 core sections present, generally follows BMAD patterns
-- **Legacy (Non-Standard):** Fewer than 3 core sections, does not follow BMAD structure
-
-### 8. Route Based on Format and Context
-
-**IF validation report provided OR PRD is BMAD Standard/Variant:**
-
-Display: "**Edit Requirements Understood**
-
-**PRD Format:** {classification}
-{If validation report: "**Validation Guide:** Yes - will use validation report findings"}
-**Edit Goals:** {summary of user's requirements}
-
-**Proceeding to deep review and analysis...**"
-
-Read fully and follow: next step (step-e-02-review.md)
-
-**IF PRD is Legacy (Non-Standard) AND no validation report:**
-
-Display: "**Format Detected:** Legacy PRD
-
-This PRD does not follow BMAD standard structure (only {count}/6 core sections present).
-
-**Your edit goals:** {user's requirements}
-
-**How would you like to proceed?**"
-
-Present MENU OPTIONS below for user selection
-
-### 9. Present MENU OPTIONS (Legacy PRDs Only)
-
-**[C] Convert to BMAD Format** - Convert PRD to BMAD standard structure, then apply your edits
-**[E] Edit As-Is** - Apply your edits without converting the format
-**[X] Exit** - Exit and review conversion options
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input
-- Only proceed based on user selection
-
-#### Menu Handling Logic:
-
-- IF C (Convert): Read fully and follow: {altStepFile} (step-e-01b-legacy-conversion.md)
-- IF E (Edit As-Is): Display "Proceeding with edits..." then load next step
-- IF X (Exit): Display summary and exit
-- IF Any other: help user, then redisplay menu
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- User's edit requirements clearly understood
-- Auto-detected validation reports loaded and analyzed (when found)
-- Manual validation report loaded and analyzed (if provided)
-- PRD format detected correctly
-- BMAD PRDs proceed directly to review step
-- Legacy PRDs pause and present conversion options
-- User can choose conversion path or edit as-is
-
-### β SYSTEM FAILURE:
-
-- Not discovering user's edit requirements
-- Not auto-detecting validation reports in PRD folder
-- Not loading validation report when provided (auto or manual)
-- Missing format detection
-- Not pausing for legacy PRDs without guidance
-- Auto-proceeding without understanding intent
-
-**Master Rule:** Understand before editing. Detect format early so we can guide users appropriately. Auto-detect and use validation reports for prioritized improvements.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-01b-legacy-conversion.md b/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-01b-legacy-conversion.md
deleted file mode 100644
index d9073d12..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-01b-legacy-conversion.md
+++ /dev/null
@@ -1,208 +0,0 @@
----
-name: 'step-e-01b-legacy-conversion'
-description: 'Legacy PRD Conversion Assessment - Analyze legacy PRD and propose conversion strategy'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-e-02-review.md'
-prdFile: '{prd_file_path}'
-prdPurpose: '{project-root}/src/bmm/workflows/2-plan-workflows/prd/data/prd-purpose.md'
----
-
-# Step E-1B: Legacy PRD Conversion Assessment
-
-## STEP GOAL:
-
-Analyze legacy PRD against BMAD standards, identify gaps, propose conversion strategy, and let user choose how to proceed.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and PRD Improvement Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring BMAD standards expertise and conversion guidance
-- β User brings domain knowledge and edit requirements
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on conversion assessment and proposal
-- π« FORBIDDEN to perform conversion yet (that comes in edit step)
-- π¬ Approach: Analytical gap analysis with clear recommendations
-- πͺ This is a branch step - user chooses conversion path
-
-## EXECUTION PROTOCOLS:
-
-- π― Analyze legacy PRD against BMAD standard
-- πΎ Identify gaps and estimate conversion effort
-- π Present conversion options with effort estimates
-- π« FORBIDDEN to proceed without user selection
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Legacy PRD, user's edit requirements, prd-purpose standards
-- Focus: Conversion assessment only (not actual conversion)
-- Limits: Don't convert yet, don't validate yet
-- Dependencies: Step e-01 detected legacy format and routed here
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Attempt Sub-Process Assessment
-
-**Try to use Task tool with sub-agent:**
-
-"Perform legacy PRD conversion assessment:
-
-**Load the PRD and prd-purpose.md**
-
-**For each BMAD PRD section, analyze:**
-1. Does PRD have this section? (Executive Summary, Success Criteria, Product Scope, User Journeys, Functional Requirements, Non-Functional Requirements)
-2. If present: Is it complete and well-structured?
-3. If missing: What content exists that could migrate to this section?
-4. Effort to create/complete: Minimal / Moderate / Significant
-
-**Identify:**
-- Core sections present: {count}/6
-- Content gaps in each section
-- Overall conversion effort: Quick / Moderate / Substantial
-- Recommended approach: Full restructuring vs targeted improvements
-
-Return conversion assessment with gap analysis and effort estimate."
-
-**Graceful degradation (if no Task tool):**
-- Manually check PRD for each BMAD section
-- Note what's present and what's missing
-- Estimate conversion effort
-- Identify best conversion approach
-
-### 2. Build Gap Analysis
-
-**For each BMAD core section:**
-
-**Executive Summary:**
-- Present: [Yes/No/Partial]
-- Gap: [what's missing or incomplete]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**Success Criteria:**
-- Present: [Yes/No/Partial]
-- Gap: [what's missing or incomplete]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**Product Scope:**
-- Present: [Yes/No/Partial]
-- Gap: [what's missing or incomplete]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**User Journeys:**
-- Present: [Yes/No/Partial]
-- Gap: [what's missing or incomplete]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**Functional Requirements:**
-- Present: [Yes/No/Partial]
-- Gap: [what's missing or incomplete]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**Non-Functional Requirements:**
-- Present: [Yes/No/Partial]
-- Gap: [what's missing or incomplete]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**Overall Assessment:**
-- Sections Present: {count}/6
-- Total Conversion Effort: [Quick/Moderate/Substantial]
-- Recommended: [Full restructuring / Targeted improvements]
-
-### 3. Present Conversion Assessment
-
-Display:
-
-"**Legacy PRD Conversion Assessment**
-
-**Current PRD Structure:**
-- Core sections present: {count}/6
-{List which sections are present/missing}
-
-**Gap Analysis:**
-
-{Present gap analysis table showing each section's status and effort}
-
-**Overall Conversion Effort:** {effort level}
-
-**Your Edit Goals:**
-{Reiterate user's stated edit requirements}
-
-**Recommendation:**
-{Based on effort and user goals, recommend best approach}
-
-**How would you like to proceed?**"
-
-### 4. Present MENU OPTIONS
-
-**[R] Restructure to BMAD** - Full conversion to BMAD format, then apply your edits
-**[I] Targeted Improvements** - Apply your edits to existing structure without restructuring
-**[E] Edit & Restructure** - Do both: convert format AND apply your edits
-**[X] Exit** - Review assessment and decide
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input
-- Only proceed based on user selection
-
-#### Menu Handling Logic:
-
-- IF R (Restructure): Note conversion mode, then load next step
-- IF I (Targeted): Note targeted mode, then load next step
-- IF E (Edit & Restructure): Note both mode, then load next step
-- IF X (Exit): Display summary, exit
-
-### 5. Document Conversion Strategy
-
-Store conversion decision for next step:
-
-- **Conversion mode:** [Full restructuring / Targeted improvements / Both]
-- **Edit requirements:** [user's requirements from step e-01]
-- **Gap analysis:** [summary of gaps identified]
-
-Display: "**Conversion Strategy Documented**
-
-Mode: {conversion mode}
-Edit goals: {summary}
-
-**Proceeding to deep review...**"
-
-Read fully and follow: {nextStepFile} (step-e-02-review.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All 6 BMAD core sections analyzed for gaps
-- Effort estimates provided for each section
-- Overall conversion effort assessed correctly
-- Clear recommendation provided based on effort and user goals
-- User chooses conversion strategy (restructure/targeted/both)
-- Conversion strategy documented for next step
-
-### β SYSTEM FAILURE:
-
-- Not analyzing all 6 core sections
-- Missing effort estimates
-- Not providing clear recommendation
-- Auto-proceeding without user selection
-- Not documenting conversion strategy
-
-**Master Rule:** Legacy PRDs need conversion assessment so users understand the work involved and can choose the best approach.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-02-review.md b/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-02-review.md
deleted file mode 100644
index 4be9fbba..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-02-review.md
+++ /dev/null
@@ -1,249 +0,0 @@
----
-name: 'step-e-02-review'
-description: 'Deep Review & Analysis - Thoroughly review existing PRD and prepare detailed change plan'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-e-03-edit.md'
-prdFile: '{prd_file_path}'
-validationReport: '{validation_report_path}' # If provided
-prdPurpose: '{project-root}/src/bmm/workflows/2-plan-workflows/prd/data/prd-purpose.md'
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
----
-
-# Step E-2: Deep Review & Analysis
-
-## STEP GOAL:
-
-Thoroughly review the existing PRD, analyze validation report findings (if provided), and prepare a detailed change plan before editing.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and PRD Improvement Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring analytical expertise and improvement planning
-- β User brings domain knowledge and approval authority
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on review and analysis, not editing yet
-- π« FORBIDDEN to make changes to PRD in this step
-- π¬ Approach: Thorough analysis with user confirmation on plan
-- πͺ This is a middle step - user confirms plan before proceeding
-
-## EXECUTION PROTOCOLS:
-
-- π― Load and analyze validation report (if provided)
-- π― Deep review of entire PRD
-- π― Map validation findings to specific sections
-- π― Prepare detailed change plan
-- π¬ Get user confirmation on plan
-- π« FORBIDDEN to proceed to edit without user approval
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file, validation report (if provided), user requirements from step e-01
-- Focus: Analysis and planning only (no editing)
-- Limits: Don't change PRD yet, don't validate yet
-- Dependencies: Step e-01 completed - requirements and format known
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Attempt Sub-Process Deep Review
-
-**Try to use Task tool with sub-agent:**
-
-"Perform deep PRD review and change planning:
-
-**Context from step e-01:**
-- User's edit requirements: {user_requirements}
-- PRD format: {BMAD/legacy}
-- Validation report provided: {yes/no}
-- Conversion mode: {restructure/targeted/both} (if legacy)
-
-**IF validation report provided:**
-1. Extract all findings from validation report
-2. Map findings to specific PRD sections
-3. Prioritize by severity: Critical > Warning > Informational
-4. For each critical issue: identify specific fix needed
-5. For user's manual edit goals: identify where in PRD to apply
-
-**IF no validation report:**
-1. Read entire PRD thoroughly
-2. Analyze against BMAD standards (from prd-purpose.md)
-3. Identify issues in:
- - Information density (anti-patterns)
- - Structure and flow
- - Completeness (missing sections/content)
- - Measurability (unmeasurable requirements)
- - Traceability (broken chains)
- - Implementation leakage
-4. Map user's edit goals to specific sections
-
-**Output:**
-- Section-by-section analysis
-- Specific changes needed for each section
-- Prioritized action list
-- Recommended order for applying changes
-
-Return detailed change plan with section breakdown."
-
-**Graceful degradation (if no Task tool):**
-- Manually read PRD sections
-- Manually analyze validation report findings (if provided)
-- Build section-by-section change plan
-- Prioritize changes by severity/user goals
-
-### 2. Build Change Plan
-
-**Organize by PRD section:**
-
-**For each section (in order):**
-- **Current State:** Brief description of what exists
-- **Issues Identified:** [List from validation report or manual analysis]
-- **Changes Needed:** [Specific changes required]
-- **Priority:** [Critical/High/Medium/Low]
-- **User Requirements Met:** [Which user edit goals address this section]
-
-**Include:**
-- Sections to add (if missing)
-- Sections to update (if present but needs work)
-- Content to remove (if incorrect/leakage)
-- Structure changes (if reformatting needed)
-
-### 3. Prepare Change Plan Summary
-
-**Summary sections:**
-
-**Changes by Type:**
-- **Additions:** {count} sections to add
-- **Updates:** {count} sections to update
-- **Removals:** {count} items to remove
-- **Restructuring:** {yes/no} if format conversion needed
-
-**Priority Distribution:**
-- **Critical:** {count} changes (must fix)
-- **High:** {count} changes (important)
-- **Medium:** {count} changes (nice to have)
-- **Low:** {count} changes (optional)
-
-**Estimated Effort:**
-[Quick/Moderate/Substantial] based on scope and complexity
-
-### 4. Present Change Plan to User
-
-Display:
-
-"**Deep Review Complete - Change Plan**
-
-**PRD Analysis:**
-{Brief summary of PRD current state}
-
-{If validation report provided:}
-**Validation Findings:**
-{count} issues identified: {critical} critical, {warning} warnings
-
-**Your Edit Requirements:**
-{summary of what user wants to edit}
-
-**Proposed Change Plan:**
-
-**By Section:**
-{Present section-by-section breakdown}
-
-**By Priority:**
-- Critical: {count} items
-- High: {count} items
-- Medium: {count} items
-
-**Estimated Effort:** {effort level}
-
-**Questions:**
-1. Does this change plan align with what you had in mind?
-2. Any sections I should add/remove/reprioritize?
-3. Any concerns before I proceed with edits?
-
-**Review the plan and let me know if you'd like any adjustments.**"
-
-### 5. Get User Confirmation
-
-Wait for user to review and provide feedback.
-
-**If user wants adjustments:**
-- Discuss requested changes
-- Revise change plan accordingly
-- Represent for confirmation
-
-**If user approves:**
-- Note: "Change plan approved. Proceeding to edit step."
-- Continue to step 6
-
-### 6. Document Approved Plan
-
-Store approved change plan for next step:
-
-- **Approved changes:** Section-by-section list
-- **Priority order:** Sequence to apply changes
-- **User confirmed:** Yes
-
-Display: "**Change Plan Approved**
-
-{Brief summary of approved plan}
-
-**Proceeding to edit step...**"
-
-Read fully and follow: {nextStepFile} (step-e-03-edit.md)
-
-### 7. Present MENU OPTIONS (If User Wants Discussion)
-
-**[A] Advanced Elicitation** - Get additional perspectives on change plan
-**[P] Party Mode** - Discuss with team for more ideas
-**[C] Continue to Edit** - Proceed with approved plan
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input
-- Only proceed to edit when user selects 'C'
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: {advancedElicitationTask}, then return to discussion
-- IF P: Read fully and follow: {partyModeWorkflow}, then return to discussion
-- IF C: Document approval, then load {nextStepFile}
-- IF Any other: discuss, then redisplay menu
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Validation report findings fully analyzed (if provided)
-- Deep PRD review completed systematically
-- Change plan built section-by-section
-- Changes prioritized by severity/user goals
-- User presented with clear plan
-- User confirms or adjusts plan
-- Approved plan documented for next step
-
-### β SYSTEM FAILURE:
-
-- Not analyzing validation report findings (if provided)
-- Superficial review instead of deep analysis
-- Missing section-by-section breakdown
-- Not prioritizing changes
-- Proceeding without user approval
-
-**Master Rule:** Plan before editing. Thorough analysis ensures we make the right changes in the right order. User approval prevents misalignment.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-03-edit.md b/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-03-edit.md
deleted file mode 100644
index c94880dd..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-03-edit.md
+++ /dev/null
@@ -1,253 +0,0 @@
----
-name: 'step-e-03-edit'
-description: 'Edit & Update - Apply changes to PRD following approved change plan'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-e-04-complete.md'
-prdFile: '{prd_file_path}'
-prdPurpose: '{project-root}/src/bmm/workflows/2-plan-workflows/prd/data/prd-purpose.md'
----
-
-# Step E-3: Edit & Update
-
-## STEP GOAL:
-
-Apply changes to the PRD following the approved change plan from step e-02, including content updates, structure improvements, and format conversion if needed.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π ALWAYS generate content WITH user input/approval
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and PRD Improvement Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring analytical expertise and precise editing skills
-- β User brings domain knowledge and approval authority
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on implementing approved changes from step e-02
-- π« FORBIDDEN to make changes beyond the approved plan
-- π¬ Approach: Methodical, section-by-section execution
-- πͺ This is a middle step - user can request adjustments
-
-## EXECUTION PROTOCOLS:
-
-- π― Follow approved change plan systematically
-- πΎ Edit PRD content according to plan
-- π Update frontmatter as needed
-- π« FORBIDDEN to proceed without completion
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file, approved change plan from step e-02, prd-purpose standards
-- Focus: Implementing changes from approved plan only
-- Limits: Don't add changes beyond plan, don't validate yet
-- Dependencies: Step e-02 completed - plan approved by user
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Retrieve Approved Change Plan
-
-From step e-02, retrieve:
-- **Approved changes:** Section-by-section list
-- **Priority order:** Sequence to apply changes
-- **User requirements:** Edit goals from step e-01
-
-Display: "**Starting PRD Edits**
-
-**Change Plan:** {summary}
-**Total Changes:** {count}
-**Estimated Effort:** {effort level}
-
-**Proceeding with edits section by section...**"
-
-### 2. Attempt Sub-Process Edits (For Complex Changes)
-
-**Try to use Task tool with sub-agent for major sections:**
-
-"Execute PRD edits for {section_name}:
-
-**Context:**
-- Section to edit: {section_name}
-- Current content: {existing content}
-- Changes needed: {specific changes from plan}
-- BMAD PRD standards: Load from prd-purpose.md
-
-**Tasks:**
-1. Read current PRD section
-2. Apply specified changes
-3. Ensure BMAD PRD principles compliance:
- - High information density (no filler)
- - Measurable requirements
- - Clear structure
- - Proper markdown formatting
-4. Return updated section content
-
-Apply changes and return updated section."
-
-**Graceful degradation (if no Task tool):**
-- Perform edits directly in current context
-- Load PRD section, apply changes, save
-
-### 3. Execute Changes Section-by-Section
-
-**For each section in approved plan (in priority order):**
-
-**a) Load current section**
-- Read the current PRD section content
-- Note what exists
-
-**b) Apply changes per plan**
-- Additions: Create new sections with proper content
-- Updates: Modify existing content per plan
-- Removals: Remove specified content
-- Restructuring: Reformat content to BMAD standard
-
-**c) Update PRD file**
-- Apply changes to PRD
-- Save updated PRD
-- Verify changes applied correctly
-
-**Display progress after each section:**
-"**Section Updated:** {section_name}
-Changes: {brief summary}
-{More sections remaining...}"
-
-### 4. Handle Restructuring (If Needed)
-
-**If conversion mode is "Full restructuring" or "Both":**
-
-**For restructuring:**
-- Reorganize PRD to BMAD standard structure
-- Ensure proper ## Level 2 headers
-- Reorder sections logically
-- Update PRD frontmatter to match BMAD format
-
-**Follow BMAD PRD structure:**
-1. Executive Summary
-2. Success Criteria
-3. Product Scope
-4. User Journeys
-5. Domain Requirements (if applicable)
-6. Innovation Analysis (if applicable)
-7. Project-Type Requirements
-8. Functional Requirements
-9. Non-Functional Requirements
-
-Display: "**PRD Restructured**
-BMAD standard structure applied.
-{Sections added/reordered}"
-
-### 5. Update PRD Frontmatter
-
-**Ensure frontmatter is complete and accurate:**
-
-```yaml
----
-workflowType: 'prd'
-workflow: 'create' # or 'validate' or 'edit'
-classification:
- domain: '{domain}'
- projectType: '{project_type}'
- complexity: '{complexity}'
-inputDocuments: [list of input documents]
-stepsCompleted: ['step-e-01-discovery', 'step-e-02-review', 'step-e-03-edit']
-lastEdited: '{current_date}'
-editHistory:
- - date: '{current_date}'
- changes: '{summary of changes}'
----
-```
-
-**Update frontmatter accordingly.**
-
-### 6. Final Review of Changes
-
-**Load complete updated PRD**
-
-**Verify:**
-- All approved changes applied correctly
-- PRD structure is sound
-- No unintended modifications
-- Frontmatter is accurate
-
-**If issues found:**
-- Fix them now
-- Note corrections made
-
-**If user wants adjustments:**
-- Accept feedback and make adjustments
-- Re-verify after adjustments
-
-### 7. Confirm Completion
-
-Display:
-
-"**PRD Edits Complete**
-
-**Changes Applied:** {count} sections modified
-**PRD Updated:** {prd_file_path}
-
-**Summary of Changes:**
-{Brief bullet list of major changes}
-
-**PRD is ready for:**
-- Use in downstream workflows (UX, Architecture)
-- Validation (if not yet validated)
-
-**What would you like to do next?**"
-
-### 8. Present MENU OPTIONS
-
-**[V] Run Validation** - Execute full validation workflow (steps-v/step-v-01-discovery.md)
-**[S] Summary Only** - End with summary of changes (no validation)
-**[A] Adjust** - Make additional edits
-**[X] Exit** - Exit edit workflow
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input
-- Only proceed based on user selection
-
-#### Menu Handling Logic:
-
-- IF V (Validate): Display "Starting validation workflow..." then read fully and follow: steps-v/step-v-01-discovery.md
-- IF S (Summary): Present edit summary and exit
-- IF A (Adjust): Accept additional requirements, loop back to editing
-- IF X (Exit): Display summary and exit
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All approved changes from step e-02 applied correctly
-- Changes executed in planned priority order
-- Restructuring completed (if needed)
-- Frontmatter updated accurately
-- Final verification confirms changes
-- User can proceed to validation or exit with summary
-- Option to run validation seamlessly integrates edit and validate modes
-
-### β SYSTEM FAILURE:
-
-- Making changes beyond approved plan
-- Not following priority order
-- Missing restructuring (if conversion mode)
-- Not updating frontmatter
-- No final verification
-- Not saving updated PRD
-
-**Master Rule:** Execute the plan exactly as approved. PRD is now ready for validation or downstream use. Validation integration ensures quality.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-04-complete.md b/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-04-complete.md
deleted file mode 100644
index 733f1a52..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-e/step-e-04-complete.md
+++ /dev/null
@@ -1,168 +0,0 @@
----
-name: 'step-e-04-complete'
-description: 'Complete & Validate - Present options for next steps including full validation'
-
-# File references (ONLY variables used in this step)
-prdFile: '{prd_file_path}'
-validationWorkflow: './steps-v/step-v-01-discovery.md'
----
-
-# Step E-4: Complete & Validate
-
-## STEP GOAL:
-
-Present summary of completed edits and offer next steps including seamless integration with validation workflow.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π ALWAYS generate content WITH user input/approval
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and PRD Improvement Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring synthesis and summary expertise
-- β User chooses next actions
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on presenting summary and options
-- π« FORBIDDEN to make additional changes
-- π¬ Approach: Clear, concise summary with actionable options
-- πͺ This is the final edit step - no more edits
-
-## EXECUTION PROTOCOLS:
-
-- π― Compile summary of all changes made
-- π― Present options clearly with expected outcomes
-- π Route to validation if user chooses
-- π« FORBIDDEN to proceed without user selection
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Updated PRD file, edit history from step e-03
-- Focus: Summary and options only (no more editing)
-- Limits: Don't make changes, just present options
-- Dependencies: Step e-03 completed - all edits applied
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Compile Edit Summary
-
-From step e-03 change execution, compile:
-
-**Changes Made:**
-- Sections added: {list with names}
-- Sections updated: {list with names}
-- Content removed: {list}
-- Structure changes: {description}
-
-**Edit Details:**
-- Total sections affected: {count}
-- Mode: {restructure/targeted/both}
-- Priority addressed: {Critical/High/Medium/Low}
-
-**PRD Status:**
-- Format: {BMAD Standard / BMAD Variant / Legacy (converted)}
-- Completeness: {assessment}
-- Ready for: {downstream use cases}
-
-### 2. Present Completion Summary
-
-Display:
-
-"**β PRD Edit Complete**
-
-**Updated PRD:** {prd_file_path}
-
-**Changes Summary:**
-{Present bulleted list of major changes}
-
-**Edit Mode:** {mode}
-**Sections Modified:** {count}
-
-**PRD Format:** {format}
-
-**PRD is now ready for:**
-- Downstream workflows (UX Design, Architecture)
-- Validation to ensure quality
-- Production use
-
-**What would you like to do next?**"
-
-### 3. Present MENU OPTIONS
-
-Display:
-
-**[V] Run Full Validation** - Execute complete validation workflow (steps-v) to verify PRD quality
-**[E] Edit More** - Make additional edits to the PRD
-**[S] Summary** - End with detailed summary of changes
-**[X] Exit** - Exit edit workflow
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input
-- Only proceed based on user selection
-
-#### Menu Handling Logic:
-
-- **IF V (Run Full Validation):**
- - Display: "**Starting Validation Workflow**"
- - Display: "This will run all 13 validation checks on the updated PRD."
- - Display: "Preparing to validate: {prd_file_path}"
- - Display: "**Proceeding to validation...**"
- - Read fully and follow: {validationWorkflow} (steps-v/step-v-01-discovery.md)
- - Note: This hands off to the validation workflow which will run its complete 13-step process
-
-- **IF E (Edit More):**
- - Display: "**Additional Edits**"
- - Ask: "What additional edits would you like to make?"
- - Accept input, then display: "**Returning to edit step...**"
- - Read fully and follow: step-e-03-edit.md again
-
-- **IF S (Summary):**
- - Display detailed summary including:
- - Complete list of all changes made
- - Before/after comparison (key improvements)
- - Recommendations for next steps
- - Display: "**Edit Workflow Complete**"
- - Exit
-
-- **IF X (Exit):**
- - Display summary
- - Display: "**Edit Workflow Complete**"
- - Exit
-
-- **IF Any other:** Help user, then redisplay menu
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Complete edit summary compiled accurately
-- All changes clearly documented
-- Options presented with clear expectations
-- Validation option seamlessly integrates with steps-v workflow
-- User can validate, edit more, or exit
-- Clean handoff to validation workflow (if chosen)
-- Edit workflow completes properly
-
-### β SYSTEM FAILURE:
-
-- Missing changes in summary
-- Not offering validation option
-- Not documenting completion properly
-- No clear handoff to validation workflow
-
-**Master Rule:** Edit workflow seamlessly integrates with validation. User can edit β validate β edit again β validate again in iterative improvement cycle.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-01-discovery.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-01-discovery.md
deleted file mode 100644
index b79e12fe..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-01-discovery.md
+++ /dev/null
@@ -1,218 +0,0 @@
----
-name: 'step-v-01-discovery'
-description: 'Document Discovery & Confirmation - Handle fresh context validation, confirm PRD path, discover input documents'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-02-format-detection.md'
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
-prdPurpose: '../data/prd-purpose.md'
----
-
-# Step 1: Document Discovery & Confirmation
-
-## STEP GOAL:
-
-Handle fresh context validation by confirming PRD path, discovering and loading input documents from frontmatter, and initializing the validation report.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring systematic validation expertise and analytical rigor
-- β User brings domain knowledge and specific PRD context
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on discovering PRD and input documents, not validating yet
-- π« FORBIDDEN to perform any validation checks in this step
-- π¬ Approach: Systematic discovery with clear reporting to user
-- πͺ This is the setup step - get everything ready for validation
-
-## EXECUTION PROTOCOLS:
-
-- π― Discover and confirm PRD to validate
-- πΎ Load PRD and all input documents from frontmatter
-- π Initialize validation report next to PRD
-- π« FORBIDDEN to load next step until user confirms setup
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD path (user-specified or discovered), workflow configuration
-- Focus: Document discovery and setup only
-- Limits: Don't perform validation, don't skip discovery
-- Dependencies: Configuration loaded from PRD workflow.md initialization
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Load PRD Purpose and Standards
-
-Load and read the complete file at:
-`{prdPurpose}`
-
-This file contains the BMAD PRD philosophy, standards, and validation criteria that will guide all validation checks. Internalize this understanding - it defines what makes a great BMAD PRD.
-
-### 2. Discover PRD to Validate
-
-**If PRD path provided as invocation parameter:**
-- Use provided path
-
-**If no PRD path provided:**
-"**PRD Validation Workflow**
-
-Which PRD would you like to validate?
-
-Please provide the path to the PRD file you want to validate."
-
-**Wait for user to provide PRD path.**
-
-### 3. Validate PRD Exists and Load
-
-Once PRD path is provided:
-
-- Check if PRD file exists at specified path
-- If not found: "I cannot find a PRD at that path. Please check the path and try again."
-- If found: Load the complete PRD file including frontmatter
-
-### 4. Extract Frontmatter and Input Documents
-
-From the loaded PRD frontmatter, extract:
-
-- `inputDocuments: []` array (if present)
-- Any other relevant metadata (classification, date, etc.)
-
-**If no inputDocuments array exists:**
-Note this and proceed with PRD-only validation
-
-### 5. Load Input Documents
-
-For each document listed in `inputDocuments`:
-
-- Attempt to load the document
-- Track successfully loaded documents
-- Note any documents that fail to load
-
-**Build list of loaded input documents:**
-- Product Brief (if present)
-- Research documents (if present)
-- Other reference materials (if present)
-
-### 6. Ask About Additional Reference Documents
-
-"**I've loaded the following documents from your PRD frontmatter:**
-
-{list loaded documents with file names}
-
-**Are there any additional reference documents you'd like me to include in this validation?**
-
-These could include:
-- Additional research or context documents
-- Project documentation not tracked in frontmatter
-- Standards or compliance documents
-- Competitive analysis or benchmarks
-
-Please provide paths to any additional documents, or type 'none' to proceed."
-
-**Load any additional documents provided by user.**
-
-### 7. Initialize Validation Report
-
-Create validation report at: `{validationReportPath}`
-
-**Initialize with frontmatter:**
-```yaml
----
-validationTarget: '{prd_path}'
-validationDate: '{current_date}'
-inputDocuments: [list of all loaded documents]
-validationStepsCompleted: []
-validationStatus: IN_PROGRESS
----
-```
-
-**Initial content:**
-```markdown
-# PRD Validation Report
-
-**PRD Being Validated:** {prd_path}
-**Validation Date:** {current_date}
-
-## Input Documents
-
-{list all documents loaded for validation}
-
-## Validation Findings
-
-[Findings will be appended as validation progresses]
-```
-
-### 8. Present Discovery Summary
-
-"**Setup Complete!**
-
-**PRD to Validate:** {prd_path}
-
-**Input Documents Loaded:**
-- PRD: {prd_name} β
-- Product Brief: {count} {if count > 0}β{else}(none found){/if}
-- Research: {count} {if count > 0}β{else}(none found){/if}
-- Additional References: {count} {if count > 0}β{else}(none){/if}
-
-**Validation Report:** {validationReportPath}
-
-**Ready to begin validation.**"
-
-### 9. Present MENU OPTIONS
-
-Display: **Select an Option:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Format Detection
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- User can ask questions or add more documents - always respond and redisplay menu
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: {advancedElicitationTask}, and when finished redisplay the menu
-- IF P: Read fully and follow: {partyModeWorkflow}, and when finished redisplay the menu
-- IF C: Read fully and follow: {nextStepFile} to begin format detection
-- IF user provides additional document: Load it, update report, redisplay summary
-- IF Any other: help user, then redisplay menu
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- PRD path discovered and confirmed
-- PRD file exists and loads successfully
-- All input documents from frontmatter loaded
-- Additional reference documents (if any) loaded
-- Validation report initialized next to PRD
-- User clearly informed of setup status
-- Menu presented and user input handled correctly
-
-### β SYSTEM FAILURE:
-
-- Proceeding with non-existent PRD file
-- Not loading input documents from frontmatter
-- Creating validation report in wrong location
-- Proceeding without user confirming setup
-- Not handling missing input documents gracefully
-
-**Master Rule:** Complete discovery and setup BEFORE validation. This step ensures everything is in place for systematic validation checks.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-02-format-detection.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-02-format-detection.md
deleted file mode 100644
index a354b5af..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-02-format-detection.md
+++ /dev/null
@@ -1,191 +0,0 @@
----
-name: 'step-v-02-format-detection'
-description: 'Format Detection & Structure Analysis - Classify PRD format and route appropriately'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-03-density-validation.md'
-altStepFile: './step-v-02b-parity-check.md'
-prdFile: '{prd_file_path}'
-validationReportPath: '{validation_report_path}'
----
-
-# Step 2: Format Detection & Structure Analysis
-
-## STEP GOAL:
-
-Detect if PRD follows BMAD format and route appropriately - classify as BMAD Standard / BMAD Variant / Non-Standard, with optional parity check for non-standard formats.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring systematic validation expertise and pattern recognition
-- β User brings domain knowledge and PRD context
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on detecting format and classifying structure
-- π« FORBIDDEN to perform other validation checks in this step
-- π¬ Approach: Analytical and systematic, clear reporting of findings
-- πͺ This is a branch step - may route to parity check for non-standard PRDs
-
-## EXECUTION PROTOCOLS:
-
-- π― Analyze PRD structure systematically
-- πΎ Append format findings to validation report
-- π Route appropriately based on format classification
-- π« FORBIDDEN to skip format detection or proceed without classification
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file loaded in step 1, validation report initialized
-- Focus: Format detection and classification only
-- Limits: Don't perform other validation, don't skip classification
-- Dependencies: Step 1 completed - PRD loaded and report initialized
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Extract PRD Structure
-
-Load the complete PRD file and extract:
-
-**All Level 2 (##) headers:**
-- Scan through entire PRD document
-- Extract all ## section headers
-- List them in order
-
-**PRD frontmatter:**
-- Extract classification.domain if present
-- Extract classification.projectType if present
-- Note any other relevant metadata
-
-### 2. Check for BMAD PRD Core Sections
-
-Check if the PRD contains the following BMAD PRD core sections:
-
-1. **Executive Summary** (or variations: ## Executive Summary, ## Overview, ## Introduction)
-2. **Success Criteria** (or: ## Success Criteria, ## Goals, ## Objectives)
-3. **Product Scope** (or: ## Product Scope, ## Scope, ## In Scope, ## Out of Scope)
-4. **User Journeys** (or: ## User Journeys, ## User Stories, ## User Flows)
-5. **Functional Requirements** (or: ## Functional Requirements, ## Features, ## Capabilities)
-6. **Non-Functional Requirements** (or: ## Non-Functional Requirements, ## NFRs, ## Quality Attributes)
-
-**Count matches:**
-- How many of these 6 core sections are present?
-- Which specific sections are present?
-- Which are missing?
-
-### 3. Classify PRD Format
-
-Based on core section count, classify:
-
-**BMAD Standard:**
-- 5-6 core sections present
-- Follows BMAD PRD structure closely
-
-**BMAD Variant:**
-- 3-4 core sections present
-- Generally follows BMAD patterns but may have structural differences
-- Missing some sections but recognizable as BMAD-style
-
-**Non-Standard:**
-- Fewer than 3 core sections present
-- Does not follow BMAD PRD structure
-- May be completely custom format, legacy format, or from another framework
-
-### 4. Report Format Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## Format Detection
-
-**PRD Structure:**
-[List all ## Level 2 headers found]
-
-**BMAD Core Sections Present:**
-- Executive Summary: [Present/Missing]
-- Success Criteria: [Present/Missing]
-- Product Scope: [Present/Missing]
-- User Journeys: [Present/Missing]
-- Functional Requirements: [Present/Missing]
-- Non-Functional Requirements: [Present/Missing]
-
-**Format Classification:** [BMAD Standard / BMAD Variant / Non-Standard]
-**Core Sections Present:** [count]/6
-```
-
-### 5. Route Based on Format Classification
-
-**IF format is BMAD Standard or BMAD Variant:**
-
-Display: "**Format Detected:** {classification}
-
-Proceeding to systematic validation checks..."
-
-Without delay, read fully and follow: {nextStepFile} (step-v-03-density-validation.md)
-
-**IF format is Non-Standard (< 3 core sections):**
-
-Display: "**Format Detected:** Non-Standard PRD
-
-This PRD does not follow BMAD standard structure (only {count}/6 core sections present).
-
-You have options:"
-
-Present MENU OPTIONS below for user selection
-
-### 6. Present MENU OPTIONS (Non-Standard PRDs Only)
-
-**[A] Parity Check** - Analyze gaps and estimate effort to reach BMAD PRD parity
-**[B] Validate As-Is** - Proceed with validation using current structure
-**[C] Exit** - Exit validation and review format findings
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input
-- Only proceed based on user selection
-
-#### Menu Handling Logic:
-
-- IF A (Parity Check): Read fully and follow: {altStepFile} (step-v-02b-parity-check.md)
-- IF B (Validate As-Is): Display "Proceeding with validation..." then read fully and follow: {nextStepFile}
-- IF C (Exit): Display format findings summary and exit validation
-- IF Any other: help user respond, then redisplay menu
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All ## Level 2 headers extracted successfully
-- BMAD core sections checked systematically
-- Format classified correctly based on section count
-- Findings reported to validation report
-- BMAD Standard/Variant PRDs proceed directly to next validation step
-- Non-Standard PRDs pause and present options to user
-- User can choose parity check, validate as-is, or exit
-
-### β SYSTEM FAILURE:
-
-- Not extracting all headers before classification
-- Incorrect format classification
-- Not reporting findings to validation report
-- Not pausing for non-standard PRDs
-- Proceeding without user decision for non-standard formats
-
-**Master Rule:** Format detection determines validation path. Non-standard PRDs require user choice before proceeding.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-02b-parity-check.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-02b-parity-check.md
deleted file mode 100644
index 604265a9..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-02b-parity-check.md
+++ /dev/null
@@ -1,209 +0,0 @@
----
-name: 'step-v-02b-parity-check'
-description: 'Document Parity Check - Analyze non-standard PRD and identify gaps to achieve BMAD PRD parity'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-03-density-validation.md'
-prdFile: '{prd_file_path}'
-validationReportPath: '{validation_report_path}'
----
-
-# Step 2B: Document Parity Check
-
-## STEP GOAL:
-
-Analyze non-standard PRD and identify gaps to achieve BMAD PRD parity, presenting user with options for how to proceed.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring BMAD PRD standards expertise and gap analysis
-- β User brings domain knowledge and PRD context
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on analyzing gaps and estimating parity effort
-- π« FORBIDDEN to perform other validation checks in this step
-- π¬ Approach: Systematic gap analysis with clear recommendations
-- πͺ This is an optional branch step - user chooses next action
-
-## EXECUTION PROTOCOLS:
-
-- π― Analyze each BMAD PRD section for gaps
-- πΎ Append parity analysis to validation report
-- π Present options and await user decision
-- π« FORBIDDEN to proceed without user selection
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Non-standard PRD from step 2, validation report in progress
-- Focus: Parity analysis only - what's missing, what's needed
-- Limits: Don't perform validation checks, don't auto-proceed
-- Dependencies: Step 2 classified PRD as non-standard and user chose parity check
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Analyze Each BMAD PRD Section
-
-For each of the 6 BMAD PRD core sections, analyze:
-
-**Executive Summary:**
-- Does PRD have vision/overview?
-- Is problem statement clear?
-- Are target users identified?
-- Gap: [What's missing or incomplete]
-
-**Success Criteria:**
-- Are measurable goals defined?
-- Is success clearly defined?
-- Gap: [What's missing or incomplete]
-
-**Product Scope:**
-- Is scope clearly defined?
-- Are in-scope items listed?
-- Are out-of-scope items listed?
-- Gap: [What's missing or incomplete]
-
-**User Journeys:**
-- Are user types/personas identified?
-- Are user flows documented?
-- Gap: [What's missing or incomplete]
-
-**Functional Requirements:**
-- Are features/capabilities listed?
-- Are requirements structured?
-- Gap: [What's missing or incomplete]
-
-**Non-Functional Requirements:**
-- Are quality attributes defined?
-- Are performance/security/etc. requirements documented?
-- Gap: [What's missing or incomplete]
-
-### 2. Estimate Effort to Reach Parity
-
-For each missing or incomplete section, estimate:
-
-**Effort Level:**
-- Minimal - Section exists but needs minor enhancements
-- Moderate - Section missing but content exists elsewhere in PRD
-- Significant - Section missing, requires new content creation
-
-**Total Parity Effort:**
-- Based on individual section estimates
-- Classify overall: Quick / Moderate / Substantial effort
-
-### 3. Report Parity Analysis to Validation Report
-
-Append to validation report:
-
-```markdown
-## Parity Analysis (Non-Standard PRD)
-
-### Section-by-Section Gap Analysis
-
-**Executive Summary:**
-- Status: [Present/Missing/Incomplete]
-- Gap: [specific gap description]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**Success Criteria:**
-- Status: [Present/Missing/Incomplete]
-- Gap: [specific gap description]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**Product Scope:**
-- Status: [Present/Missing/Incomplete]
-- Gap: [specific gap description]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**User Journeys:**
-- Status: [Present/Missing/Incomplete]
-- Gap: [specific gap description]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**Functional Requirements:**
-- Status: [Present/Missing/Incomplete]
-- Gap: [specific gap description]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-**Non-Functional Requirements:**
-- Status: [Present/Missing/Incomplete]
-- Gap: [specific gap description]
-- Effort to Complete: [Minimal/Moderate/Significant]
-
-### Overall Parity Assessment
-
-**Overall Effort to Reach BMAD Standard:** [Quick/Moderate/Substantial]
-**Recommendation:** [Brief recommendation based on analysis]
-```
-
-### 4. Present Parity Analysis and Options
-
-Display:
-
-"**Parity Analysis Complete**
-
-Your PRD is missing {count} of 6 core BMAD PRD sections. The overall effort to reach BMAD standard is: **{effort level}**
-
-**Quick Summary:**
-[2-3 sentence summary of key gaps]
-
-**Recommendation:**
-{recommendation from analysis}
-
-**How would you like to proceed?**"
-
-### 5. Present MENU OPTIONS
-
-**[C] Continue Validation** - Proceed with validation using current structure
-**[E] Exit & Review** - Exit validation and review parity report
-**[S] Save & Exit** - Save parity report and exit
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input
-- Only proceed based on user selection
-
-#### Menu Handling Logic:
-
-- IF C (Continue): Display "Proceeding with validation..." then read fully and follow: {nextStepFile}
-- IF E (Exit): Display parity summary and exit validation
-- IF S (Save): Confirm saved, display summary, exit
-- IF Any other: help user respond, then redisplay menu
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All 6 BMAD PRD sections analyzed for gaps
-- Effort estimates provided for each gap
-- Overall parity effort assessed correctly
-- Parity analysis reported to validation report
-- Clear summary presented to user
-- User can choose to continue validation, exit, or save report
-
-### β SYSTEM FAILURE:
-
-- Not analyzing all 6 sections systematically
-- Missing effort estimates
-- Not reporting parity analysis to validation report
-- Auto-proceeding without user decision
-- Unclear recommendations
-
-**Master Rule:** Parity check informs user of gaps and effort, but user decides whether to proceed with validation or address gaps first.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-03-density-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-03-density-validation.md
deleted file mode 100644
index d00478c1..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-03-density-validation.md
+++ /dev/null
@@ -1,174 +0,0 @@
----
-name: 'step-v-03-density-validation'
-description: 'Information Density Check - Scan for anti-patterns that violate information density principles'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-04-brief-coverage-validation.md'
-prdFile: '{prd_file_path}'
-validationReportPath: '{validation_report_path}'
----
-
-# Step 3: Information Density Validation
-
-## STEP GOAL:
-
-Validate PRD meets BMAD information density standards by scanning for conversational filler, wordy phrases, and redundant expressions that violate conciseness principles.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring analytical rigor and attention to detail
-- β This step runs autonomously - no user input needed
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on information density anti-patterns
-- π« FORBIDDEN to validate other aspects in this step
-- π¬ Approach: Systematic scanning and categorization
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Scan PRD for density anti-patterns systematically
-- πΎ Append density findings to validation report
-- π Display "Proceeding to next check..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file, validation report with format findings
-- Focus: Information density validation only
-- Limits: Don't validate other aspects, don't pause for user input
-- Dependencies: Step 2 completed - format classification done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Attempt Sub-Process Validation
-
-**Try to use Task tool to spawn a subprocess:**
-
-"Perform information density validation on this PRD:
-
-1. Load the PRD file
-2. Scan for the following anti-patterns:
- - Conversational filler phrases (examples: 'The system will allow users to...', 'It is important to note that...', 'In order to')
- - Wordy phrases (examples: 'Due to the fact that', 'In the event of', 'For the purpose of')
- - Redundant phrases (examples: 'Future plans', 'Absolutely essential', 'Past history')
-3. Count violations by category with line numbers
-4. Classify severity: Critical (>10 violations), Warning (5-10), Pass (<5)
-
-Return structured findings with counts and examples."
-
-### 2. Graceful Degradation (if Task tool unavailable)
-
-If Task tool unavailable, perform analysis directly:
-
-**Scan for conversational filler patterns:**
-- "The system will allow users to..."
-- "It is important to note that..."
-- "In order to"
-- "For the purpose of"
-- "With regard to"
-- Count occurrences and note line numbers
-
-**Scan for wordy phrases:**
-- "Due to the fact that" (use "because")
-- "In the event of" (use "if")
-- "At this point in time" (use "now")
-- "In a manner that" (use "how")
-- Count occurrences and note line numbers
-
-**Scan for redundant phrases:**
-- "Future plans" (just "plans")
-- "Past history" (just "history")
-- "Absolutely essential" (just "essential")
-- "Completely finish" (just "finish")
-- Count occurrences and note line numbers
-
-### 3. Classify Severity
-
-**Calculate total violations:**
-- Conversational filler count
-- Wordy phrases count
-- Redundant phrases count
-- Total = sum of all categories
-
-**Determine severity:**
-- **Critical:** Total > 10 violations
-- **Warning:** Total 5-10 violations
-- **Pass:** Total < 5 violations
-
-### 4. Report Density Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## Information Density Validation
-
-**Anti-Pattern Violations:**
-
-**Conversational Filler:** {count} occurrences
-[If count > 0, list examples with line numbers]
-
-**Wordy Phrases:** {count} occurrences
-[If count > 0, list examples with line numbers]
-
-**Redundant Phrases:** {count} occurrences
-[If count > 0, list examples with line numbers]
-
-**Total Violations:** {total}
-
-**Severity Assessment:** [Critical/Warning/Pass]
-
-**Recommendation:**
-[If Critical] "PRD requires significant revision to improve information density. Every sentence should carry weight without filler."
-[If Warning] "PRD would benefit from reducing wordiness and eliminating filler phrases."
-[If Pass] "PRD demonstrates good information density with minimal violations."
-```
-
-### 5. Display Progress and Auto-Proceed
-
-Display: "**Information Density Validation Complete**
-
-Severity: {Critical/Warning/Pass}
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-04-brief-coverage-validation.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- PRD scanned for all three anti-pattern categories
-- Violations counted with line numbers
-- Severity classified correctly
-- Findings reported to validation report
-- Auto-proceeds to next validation step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not scanning all anti-pattern categories
-- Missing severity classification
-- Not reporting findings to validation report
-- Pausing for user input (should auto-proceed)
-- Not attempting subprocess architecture
-
-**Master Rule:** Information density validation runs autonomously. Scan, classify, report, auto-proceed. No user interaction needed.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-04-brief-coverage-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-04-brief-coverage-validation.md
deleted file mode 100644
index 60ad8684..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-04-brief-coverage-validation.md
+++ /dev/null
@@ -1,214 +0,0 @@
----
-name: 'step-v-04-brief-coverage-validation'
-description: 'Product Brief Coverage Check - Validate PRD covers all content from Product Brief (if used as input)'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-05-measurability-validation.md'
-prdFile: '{prd_file_path}'
-productBrief: '{product_brief_path}'
-validationReportPath: '{validation_report_path}'
----
-
-# Step 4: Product Brief Coverage Validation
-
-## STEP GOAL:
-
-Validate that PRD covers all content from Product Brief (if brief was used as input), mapping brief content to PRD sections and identifying gaps.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring analytical rigor and traceability expertise
-- β This step runs autonomously - no user input needed
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on Product Brief coverage (conditional on brief existence)
-- π« FORBIDDEN to validate other aspects in this step
-- π¬ Approach: Systematic mapping and gap analysis
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Check if Product Brief exists in input documents
-- π¬ If no brief: Skip this check and report "N/A - No Product Brief"
-- π― If brief exists: Map brief content to PRD sections
-- πΎ Append coverage findings to validation report
-- π Display "Proceeding to next check..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file, input documents from step 1, validation report
-- Focus: Product Brief coverage only (conditional)
-- Limits: Don't validate other aspects, conditional execution
-- Dependencies: Step 1 completed - input documents loaded
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Check for Product Brief
-
-Check if Product Brief was loaded in step 1's inputDocuments:
-
-**IF no Product Brief found:**
-Append to validation report:
-```markdown
-## Product Brief Coverage
-
-**Status:** N/A - No Product Brief was provided as input
-```
-
-Display: "**Product Brief Coverage: Skipped** (No Product Brief provided)
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile}
-
-**IF Product Brief exists:** Continue to step 2 below
-
-### 2. Attempt Sub-Process Validation
-
-**Try to use Task tool to spawn a subprocess:**
-
-"Perform Product Brief coverage validation:
-
-1. Load the Product Brief
-2. Extract key content:
- - Vision statement
- - Target users/personas
- - Problem statement
- - Key features
- - Goals/objectives
- - Differentiators
- - Constraints
-3. For each item, search PRD for corresponding coverage
-4. Classify coverage: Fully Covered / Partially Covered / Not Found / Intentionally Excluded
-5. Note any gaps with severity: Critical / Moderate / Informational
-
-Return structured coverage map with classifications."
-
-### 3. Graceful Degradation (if Task tool unavailable)
-
-If Task tool unavailable, perform analysis directly:
-
-**Extract from Product Brief:**
-- Vision: What is this product?
-- Users: Who is it for?
-- Problem: What problem does it solve?
-- Features: What are the key capabilities?
-- Goals: What are the success criteria?
-- Differentiators: What makes it unique?
-
-**For each item, search PRD:**
-- Scan Executive Summary for vision
-- Check User Journeys or user personas
-- Look for problem statement
-- Review Functional Requirements for features
-- Check Success Criteria section
-- Search for differentiators
-
-**Classify coverage:**
-- **Fully Covered:** Content present and complete
-- **Partially Covered:** Content present but incomplete
-- **Not Found:** Content missing from PRD
-- **Intentionally Excluded:** Content explicitly out of scope
-
-### 4. Assess Coverage and Severity
-
-**For each gap (Partially Covered or Not Found):**
-- Is this Critical? (Core vision, primary users, main features)
-- Is this Moderate? (Secondary features, some goals)
-- Is this Informational? (Nice-to-have features, minor details)
-
-**Note:** Some exclusions may be intentional (valid scoping decisions)
-
-### 5. Report Coverage Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## Product Brief Coverage
-
-**Product Brief:** {brief_file_name}
-
-### Coverage Map
-
-**Vision Statement:** [Fully/Partially/Not Found/Intentionally Excluded]
-[If gap: Note severity and specific missing content]
-
-**Target Users:** [Fully/Partially/Not Found/Intentionally Excluded]
-[If gap: Note severity and specific missing content]
-
-**Problem Statement:** [Fully/Partially/Not Found/Intentionally Excluded]
-[If gap: Note severity and specific missing content]
-
-**Key Features:** [Fully/Partially/Not Found/Intentionally Excluded]
-[If gap: List specific features with severity]
-
-**Goals/Objectives:** [Fully/Partially/Not Found/Intentionally Excluded]
-[If gap: Note severity and specific missing content]
-
-**Differentiators:** [Fully/Partially/Not Found/Intentionally Excluded]
-[If gap: Note severity and specific missing content]
-
-### Coverage Summary
-
-**Overall Coverage:** [percentage or qualitative assessment]
-**Critical Gaps:** [count] [list if any]
-**Moderate Gaps:** [count] [list if any]
-**Informational Gaps:** [count] [list if any]
-
-**Recommendation:**
-[If critical gaps exist] "PRD should be revised to cover critical Product Brief content."
-[If moderate gaps] "Consider addressing moderate gaps for complete coverage."
-[If minimal gaps] "PRD provides good coverage of Product Brief content."
-```
-
-### 6. Display Progress and Auto-Proceed
-
-Display: "**Product Brief Coverage Validation Complete**
-
-Overall Coverage: {assessment}
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-05-measurability-validation.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Checked for Product Brief existence correctly
-- If no brief: Reported "N/A" and skipped gracefully
-- If brief exists: Mapped all key brief content to PRD sections
-- Coverage classified appropriately (Fully/Partially/Not Found/Intentionally Excluded)
-- Severity assessed for gaps (Critical/Moderate/Informational)
-- Findings reported to validation report
-- Auto-proceeds to next validation step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not checking for brief existence before attempting validation
-- If brief exists: not mapping all key content areas
-- Missing coverage classifications
-- Not reporting findings to validation report
-- Not auto-proceeding
-
-**Master Rule:** Product Brief coverage is conditional - skip if no brief, validate thoroughly if brief exists. Always auto-proceed.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-05-measurability-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-05-measurability-validation.md
deleted file mode 100644
index a9718718..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-05-measurability-validation.md
+++ /dev/null
@@ -1,228 +0,0 @@
----
-name: 'step-v-05-measurability-validation'
-description: 'Measurability Validation - Validate that all requirements (FRs and NFRs) are measurable and testable'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-06-traceability-validation.md'
-prdFile: '{prd_file_path}'
-validationReportPath: '{validation_report_path}'
----
-
-# Step 5: Measurability Validation
-
-## STEP GOAL:
-
-Validate that all Functional Requirements (FRs) and Non-Functional Requirements (NFRs) are measurable, testable, and follow proper format without implementation details.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring analytical rigor and requirements engineering expertise
-- β This step runs autonomously - no user input needed
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on FR and NFR measurability
-- π« FORBIDDEN to validate other aspects in this step
-- π¬ Approach: Systematic requirement-by-requirement analysis
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Extract all FRs and NFRs from PRD
-- πΎ Validate each for measurability and format
-- π Append findings to validation report
-- π Display "Proceeding to next check..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file, validation report
-- Focus: FR and NFR measurability only
-- Limits: Don't validate other aspects, don't pause for user input
-- Dependencies: Steps 2-4 completed - initial validation checks done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Attempt Sub-Process Validation
-
-**Try to use Task tool to spawn a subprocess:**
-
-"Perform measurability validation on this PRD:
-
-**Functional Requirements (FRs):**
-1. Extract all FRs from Functional Requirements section
-2. Check each FR for:
- - '[Actor] can [capability]' format compliance
- - No subjective adjectives (easy, fast, simple, intuitive, etc.)
- - No vague quantifiers (multiple, several, some, many, etc.)
- - No implementation details (technology names, library names, data structures unless capability-relevant)
-3. Document violations with line numbers
-
-**Non-Functional Requirements (NFRs):**
-1. Extract all NFRs from Non-Functional Requirements section
-2. Check each NFR for:
- - Specific metrics with measurement methods
- - Template compliance (criterion, metric, measurement method, context)
- - Context included (why this matters, who it affects)
-3. Document violations with line numbers
-
-Return structured findings with violation counts and examples."
-
-### 2. Graceful Degradation (if Task tool unavailable)
-
-If Task tool unavailable, perform analysis directly:
-
-**Functional Requirements Analysis:**
-
-Extract all FRs and check each for:
-
-**Format compliance:**
-- Does it follow "[Actor] can [capability]" pattern?
-- Is actor clearly defined?
-- Is capability actionable and testable?
-
-**No subjective adjectives:**
-- Scan for: easy, fast, simple, intuitive, user-friendly, responsive, quick, efficient (without metrics)
-- Note line numbers
-
-**No vague quantifiers:**
-- Scan for: multiple, several, some, many, few, various, number of
-- Note line numbers
-
-**No implementation details:**
-- Scan for: React, Vue, Angular, PostgreSQL, MongoDB, AWS, Docker, Kubernetes, Redux, etc.
-- Unless capability-relevant (e.g., "API consumers can access...")
-- Note line numbers
-
-**Non-Functional Requirements Analysis:**
-
-Extract all NFRs and check each for:
-
-**Specific metrics:**
-- Is there a measurable criterion? (e.g., "response time < 200ms", not "fast response")
-- Can this be measured or tested?
-
-**Template compliance:**
-- Criterion defined?
-- Metric specified?
-- Measurement method included?
-- Context provided?
-
-### 3. Tally Violations
-
-**FR Violations:**
-- Format violations: count
-- Subjective adjectives: count
-- Vague quantifiers: count
-- Implementation leakage: count
-- Total FR violations: sum
-
-**NFR Violations:**
-- Missing metrics: count
-- Incomplete template: count
-- Missing context: count
-- Total NFR violations: sum
-
-**Total violations:** FR violations + NFR violations
-
-### 4. Report Measurability Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## Measurability Validation
-
-### Functional Requirements
-
-**Total FRs Analyzed:** {count}
-
-**Format Violations:** {count}
-[If violations exist, list examples with line numbers]
-
-**Subjective Adjectives Found:** {count}
-[If found, list examples with line numbers]
-
-**Vague Quantifiers Found:** {count}
-[If found, list examples with line numbers]
-
-**Implementation Leakage:** {count}
-[If found, list examples with line numbers]
-
-**FR Violations Total:** {total}
-
-### Non-Functional Requirements
-
-**Total NFRs Analyzed:** {count}
-
-**Missing Metrics:** {count}
-[If missing, list examples with line numbers]
-
-**Incomplete Template:** {count}
-[If incomplete, list examples with line numbers]
-
-**Missing Context:** {count}
-[If missing, list examples with line numbers]
-
-**NFR Violations Total:** {total}
-
-### Overall Assessment
-
-**Total Requirements:** {FRs + NFRs}
-**Total Violations:** {FR violations + NFR violations}
-
-**Severity:** [Critical if >10 violations, Warning if 5-10, Pass if <5]
-
-**Recommendation:**
-[If Critical] "Many requirements are not measurable or testable. Requirements must be revised to be testable for downstream work."
-[If Warning] "Some requirements need refinement for measurability. Focus on violating requirements above."
-[If Pass] "Requirements demonstrate good measurability with minimal issues."
-```
-
-### 5. Display Progress and Auto-Proceed
-
-Display: "**Measurability Validation Complete**
-
-Total Violations: {count} ({severity})
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-06-traceability-validation.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All FRs extracted and analyzed for measurability
-- All NFRs extracted and analyzed for measurability
-- Violations documented with line numbers
-- Severity assessed correctly
-- Findings reported to validation report
-- Auto-proceeds to next validation step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not analyzing all FRs and NFRs
-- Missing line numbers for violations
-- Not reporting findings to validation report
-- Not assessing severity
-- Not auto-proceeding
-
-**Master Rule:** Requirements must be testable to be useful. Validate every requirement for measurability, document violations, auto-proceed.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-06-traceability-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-06-traceability-validation.md
deleted file mode 100644
index 84bf9cce..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-06-traceability-validation.md
+++ /dev/null
@@ -1,217 +0,0 @@
----
-name: 'step-v-06-traceability-validation'
-description: 'Traceability Validation - Validate the traceability chain from vision β success β journeys β FRs is intact'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-07-implementation-leakage-validation.md'
-prdFile: '{prd_file_path}'
-validationReportPath: '{validation_report_path}'
----
-
-# Step 6: Traceability Validation
-
-## STEP GOAL:
-
-Validate the traceability chain from Executive Summary β Success Criteria β User Journeys β Functional Requirements is intact, ensuring every requirement traces back to a user need or business objective.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring analytical rigor and traceability matrix expertise
-- β This step runs autonomously - no user input needed
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on traceability chain validation
-- π« FORBIDDEN to validate other aspects in this step
-- π¬ Approach: Systematic chain validation and orphan detection
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Build and validate traceability matrix
-- πΎ Identify broken chains and orphan requirements
-- π Append findings to validation report
-- π Display "Proceeding to next check..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file, validation report
-- Focus: Traceability chain validation only
-- Limits: Don't validate other aspects, don't pause for user input
-- Dependencies: Steps 2-5 completed - initial validations done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Attempt Sub-Process Validation
-
-**Try to use Task tool to spawn a subprocess:**
-
-"Perform traceability validation on this PRD:
-
-1. Extract content from Executive Summary (vision, goals)
-2. Extract Success Criteria
-3. Extract User Journeys (user types, flows, outcomes)
-4. Extract Functional Requirements (FRs)
-5. Extract Product Scope (in-scope items)
-
-**Validate chains:**
-- Executive Summary β Success Criteria: Does vision align with defined success?
-- Success Criteria β User Journeys: Are success criteria supported by user journeys?
-- User Journeys β Functional Requirements: Does each FR trace back to a user journey?
-- Scope β FRs: Do MVP scope FRs align with in-scope items?
-
-**Identify orphans:**
-- FRs not traceable to any user journey or business objective
-- Success criteria not supported by user journeys
-- User journeys without supporting FRs
-
-Build traceability matrix and identify broken chains and orphan FRs.
-
-Return structured findings with chain status and orphan list."
-
-### 2. Graceful Degradation (if Task tool unavailable)
-
-If Task tool unavailable, perform analysis directly:
-
-**Step 1: Extract key elements**
-- Executive Summary: Note vision, goals, objectives
-- Success Criteria: List all criteria
-- User Journeys: List user types and their flows
-- Functional Requirements: List all FRs
-- Product Scope: List in-scope items
-
-**Step 2: Validate Executive Summary β Success Criteria**
-- Does Executive Summary mention the success dimensions?
-- Are Success Criteria aligned with vision?
-- Note any misalignment
-
-**Step 3: Validate Success Criteria β User Journeys**
-- For each success criterion, is there a user journey that achieves it?
-- Note success criteria without supporting journeys
-
-**Step 4: Validate User Journeys β FRs**
-- For each user journey/flow, are there FRs that enable it?
-- List FRs with no clear user journey origin
-- Note orphan FRs (requirements without traceable source)
-
-**Step 5: Validate Scope β FR Alignment**
-- Does MVP scope align with essential FRs?
-- Are in-scope items supported by FRs?
-- Note misalignments
-
-**Step 6: Build traceability matrix**
-- Map each FR to its source (journey or business objective)
-- Note orphan FRs
-- Identify broken chains
-
-### 3. Tally Traceability Issues
-
-**Broken chains:**
-- Executive Summary β Success Criteria gaps: count
-- Success Criteria β User Journeys gaps: count
-- User Journeys β FRs gaps: count
-- Scope β FR misalignments: count
-
-**Orphan elements:**
-- Orphan FRs (no traceable source): count
-- Unsupported success criteria: count
-- User journeys without FRs: count
-
-**Total issues:** Sum of all broken chains and orphans
-
-### 4. Report Traceability Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## Traceability Validation
-
-### Chain Validation
-
-**Executive Summary β Success Criteria:** [Intact/Gaps Identified]
-{If gaps: List specific misalignments}
-
-**Success Criteria β User Journeys:** [Intact/Gaps Identified]
-{If gaps: List unsupported success criteria}
-
-**User Journeys β Functional Requirements:** [Intact/Gaps Identified]
-{If gaps: List journeys without supporting FRs}
-
-**Scope β FR Alignment:** [Intact/Misaligned]
-{If misaligned: List specific issues}
-
-### Orphan Elements
-
-**Orphan Functional Requirements:** {count}
-{List orphan FRs with numbers}
-
-**Unsupported Success Criteria:** {count}
-{List unsupported criteria}
-
-**User Journeys Without FRs:** {count}
-{List journeys without FRs}
-
-### Traceability Matrix
-
-{Summary table showing traceability coverage}
-
-**Total Traceability Issues:** {total}
-
-**Severity:** [Critical if orphan FRs exist, Warning if gaps, Pass if intact]
-
-**Recommendation:**
-[If Critical] "Orphan requirements exist - every FR must trace back to a user need or business objective."
-[If Warning] "Traceability gaps identified - strengthen chains to ensure all requirements are justified."
-[If Pass] "Traceability chain is intact - all requirements trace to user needs or business objectives."
-```
-
-### 5. Display Progress and Auto-Proceed
-
-Display: "**Traceability Validation Complete**
-
-Total Issues: {count} ({severity})
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-07-implementation-leakage-validation.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All traceability chains validated systematically
-- Orphan FRs identified with numbers
-- Broken chains documented
-- Traceability matrix built
-- Severity assessed correctly
-- Findings reported to validation report
-- Auto-proceeds to next validation step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not validating all traceability chains
-- Missing orphan FR detection
-- Not building traceability matrix
-- Not reporting findings to validation report
-- Not auto-proceeding
-
-**Master Rule:** Every requirement should trace to a user need or business objective. Orphan FRs indicate broken traceability that must be fixed.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-07-implementation-leakage-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-07-implementation-leakage-validation.md
deleted file mode 100644
index 923f9969..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-07-implementation-leakage-validation.md
+++ /dev/null
@@ -1,205 +0,0 @@
----
-name: 'step-v-07-implementation-leakage-validation'
-description: 'Implementation Leakage Check - Ensure FRs and NFRs don\'t include implementation details'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-08-domain-compliance-validation.md'
-prdFile: '{prd_file_path}'
-validationReportPath: '{validation_report_path}'
----
-
-# Step 7: Implementation Leakage Validation
-
-## STEP GOAL:
-
-Ensure Functional Requirements and Non-Functional Requirements don't include implementation details - they should specify WHAT, not HOW.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring analytical rigor and separation of concerns expertise
-- β This step runs autonomously - no user input needed
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on implementation leakage detection
-- π« FORBIDDEN to validate other aspects in this step
-- π¬ Approach: Systematic scanning for technology and implementation terms
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Scan FRs and NFRs for implementation terms
-- πΎ Distinguish capability-relevant vs leakage
-- π Append findings to validation report
-- π Display "Proceeding to next check..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file, validation report
-- Focus: Implementation leakage detection only
-- Limits: Don't validate other aspects, don't pause for user input
-- Dependencies: Steps 2-6 completed - initial validations done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Attempt Sub-Process Validation
-
-**Try to use Task tool to spawn a subprocess:**
-
-"Perform implementation leakage validation on this PRD:
-
-**Scan for:**
-1. Technology names (React, Vue, Angular, PostgreSQL, MongoDB, AWS, GCP, Azure, Docker, Kubernetes, etc.)
-2. Library names (Redux, axios, lodash, Express, Django, Rails, Spring, etc.)
-3. Data structures (JSON, XML, CSV) unless relevant to capability
-4. Architecture patterns (MVC, microservices, serverless) unless business requirement
-5. Protocol names (HTTP, REST, GraphQL, WebSockets) - check if capability-relevant
-
-**For each term found:**
-- Is this capability-relevant? (e.g., 'API consumers can access...' - API is capability)
-- Or is this implementation detail? (e.g., 'React component for...' - implementation)
-
-Document violations with line numbers and explanation.
-
-Return structured findings with leakage counts and examples."
-
-### 2. Graceful Degradation (if Task tool unavailable)
-
-If Task tool unavailable, perform analysis directly:
-
-**Implementation leakage terms to scan for:**
-
-**Frontend Frameworks:**
-React, Vue, Angular, Svelte, Solid, Next.js, Nuxt, etc.
-
-**Backend Frameworks:**
-Express, Django, Rails, Spring, Laravel, FastAPI, etc.
-
-**Databases:**
-PostgreSQL, MySQL, MongoDB, Redis, DynamoDB, Cassandra, etc.
-
-**Cloud Platforms:**
-AWS, GCP, Azure, Cloudflare, Vercel, Netlify, etc.
-
-**Infrastructure:**
-Docker, Kubernetes, Terraform, Ansible, etc.
-
-**Libraries:**
-Redux, Zustand, axios, fetch, lodash, jQuery, etc.
-
-**Data Formats:**
-JSON, XML, YAML, CSV (unless capability-relevant)
-
-**For each term found in FRs/NFRs:**
-- Determine if it's capability-relevant or implementation leakage
-- Example: "API consumers can access data via REST endpoints" - API/REST is capability
-- Example: "React components fetch data using Redux" - implementation leakage
-
-**Count violations and note line numbers**
-
-### 3. Tally Implementation Leakage
-
-**By category:**
-- Frontend framework leakage: count
-- Backend framework leakage: count
-- Database leakage: count
-- Cloud platform leakage: count
-- Infrastructure leakage: count
-- Library leakage: count
-- Other implementation details: count
-
-**Total implementation leakage violations:** sum
-
-### 4. Report Implementation Leakage Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## Implementation Leakage Validation
-
-### Leakage by Category
-
-**Frontend Frameworks:** {count} violations
-{If violations, list examples with line numbers}
-
-**Backend Frameworks:** {count} violations
-{If violations, list examples with line numbers}
-
-**Databases:** {count} violations
-{If violations, list examples with line numbers}
-
-**Cloud Platforms:** {count} violations
-{If violations, list examples with line numbers}
-
-**Infrastructure:** {count} violations
-{If violations, list examples with line numbers}
-
-**Libraries:** {count} violations
-{If violations, list examples with line numbers}
-
-**Other Implementation Details:** {count} violations
-{If violations, list examples with line numbers}
-
-### Summary
-
-**Total Implementation Leakage Violations:** {total}
-
-**Severity:** [Critical if >5 violations, Warning if 2-5, Pass if <2]
-
-**Recommendation:**
-[If Critical] "Extensive implementation leakage found. Requirements specify HOW instead of WHAT. Remove all implementation details - these belong in architecture, not PRD."
-[If Warning] "Some implementation leakage detected. Review violations and remove implementation details from requirements."
-[If Pass] "No significant implementation leakage found. Requirements properly specify WHAT without HOW."
-
-**Note:** API consumers, GraphQL (when required), and other capability-relevant terms are acceptable when they describe WHAT the system must do, not HOW to build it.
-```
-
-### 5. Display Progress and Auto-Proceed
-
-Display: "**Implementation Leakage Validation Complete**
-
-Total Violations: {count} ({severity})
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-08-domain-compliance-validation.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Scanned FRs and NFRs for all implementation term categories
-- Distinguished capability-relevant from implementation leakage
-- Violations documented with line numbers and explanations
-- Severity assessed correctly
-- Findings reported to validation report
-- Auto-proceeds to next validation step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not scanning all implementation term categories
-- Not distinguishing capability-relevant from leakage
-- Missing line numbers for violations
-- Not reporting findings to validation report
-- Not auto-proceeding
-
-**Master Rule:** Requirements specify WHAT, not HOW. Implementation details belong in architecture documents, not PRDs.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-08-domain-compliance-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-08-domain-compliance-validation.md
deleted file mode 100644
index 562697ed..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-08-domain-compliance-validation.md
+++ /dev/null
@@ -1,243 +0,0 @@
----
-name: 'step-v-08-domain-compliance-validation'
-description: 'Domain Compliance Validation - Validate domain-specific requirements are present for high-complexity domains'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-09-project-type-validation.md'
-prdFile: '{prd_file_path}'
-prdFrontmatter: '{prd_frontmatter}'
-validationReportPath: '{validation_report_path}'
-domainComplexityData: '../data/domain-complexity.csv'
----
-
-# Step 8: Domain Compliance Validation
-
-## STEP GOAL:
-
-Validate domain-specific requirements are present for high-complexity domains (Healthcare, Fintech, GovTech, etc.), ensuring regulatory and compliance requirements are properly documented.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring domain expertise and compliance knowledge
-- β This step runs autonomously - no user input needed
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on domain-specific compliance requirements
-- π« FORBIDDEN to validate other aspects in this step
-- π¬ Approach: Conditional validation based on domain classification
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Check classification.domain from PRD frontmatter
-- π¬ If low complexity (general): Skip detailed checks
-- π― If high complexity: Validate required special sections
-- πΎ Append compliance findings to validation report
-- π Display "Proceeding to next check..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file with frontmatter classification, validation report
-- Focus: Domain compliance only (conditional on domain complexity)
-- Limits: Don't validate other aspects, conditional execution
-- Dependencies: Steps 2-7 completed - format and requirements validation done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Load Domain Complexity Data
-
-Load and read the complete file at:
-`{domainComplexityData}` (../data/domain-complexity.csv)
-
-This CSV contains:
-- Domain classifications and complexity levels (high/medium/low)
-- Required special sections for each domain
-- Key concerns and requirements for regulated industries
-
-Internalize this data - it drives which domains require special compliance sections.
-
-### 2. Extract Domain Classification
-
-From PRD frontmatter, extract:
-- `classification.domain` - what domain is this PRD for?
-
-**If no domain classification found:**
-Treat as "general" (low complexity) and proceed to step 4
-
-### 2. Determine Domain Complexity
-
-**Low complexity domains (skip detailed checks):**
-- General
-- Consumer apps (standard e-commerce, social, productivity)
-- Content websites
-- Business tools (standard)
-
-**High complexity domains (require special sections):**
-- Healthcare / Healthtech
-- Fintech / Financial services
-- GovTech / Public sector
-- EdTech (educational records, accredited courses)
-- Legal tech
-- Other regulated domains
-
-### 3. For High-Complexity Domains: Validate Required Special Sections
-
-**Attempt subprocess validation:**
-
-"Perform domain compliance validation for {domain}:
-
-Based on {domain} requirements, check PRD for:
-
-**Healthcare:**
-- Clinical Requirements section
-- Regulatory Pathway (FDA, HIPAA, etc.)
-- Safety Measures
-- HIPAA Compliance (data privacy, security)
-- Patient safety considerations
-
-**Fintech:**
-- Compliance Matrix (SOC2, PCI-DSS, GDPR, etc.)
-- Security Architecture
-- Audit Requirements
-- Fraud Prevention measures
-- Financial transaction handling
-
-**GovTech:**
-- Accessibility Standards (WCAG 2.1 AA, Section 508)
-- Procurement Compliance
-- Security Clearance requirements
-- Data residency requirements
-
-**Other regulated domains:**
-- Check for domain-specific regulatory sections
-- Compliance requirements
-- Special considerations
-
-For each required section:
-- Is it present in PRD?
-- Is it adequately documented?
-- Note any gaps
-
-Return compliance matrix with presence/adequacy assessment."
-
-**Graceful degradation (if no Task tool):**
-- Manually check for required sections based on domain
-- List present sections and missing sections
-- Assess adequacy of documentation
-
-### 5. For Low-Complexity Domains: Skip Detailed Checks
-
-Append to validation report:
-```markdown
-## Domain Compliance Validation
-
-**Domain:** {domain}
-**Complexity:** Low (general/standard)
-**Assessment:** N/A - No special domain compliance requirements
-
-**Note:** This PRD is for a standard domain without regulatory compliance requirements.
-```
-
-Display: "**Domain Compliance Validation Skipped**
-
-Domain: {domain} (low complexity)
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile}
-
-### 6. Report Compliance Findings (High-Complexity Domains)
-
-Append to validation report:
-
-```markdown
-## Domain Compliance Validation
-
-**Domain:** {domain}
-**Complexity:** High (regulated)
-
-### Required Special Sections
-
-**{Section 1 Name}:** [Present/Missing/Adequate]
-{If missing or inadequate: Note specific gaps}
-
-**{Section 2 Name}:** [Present/Missing/Adequate]
-{If missing or inadequate: Note specific gaps}
-
-[Continue for all required sections]
-
-### Compliance Matrix
-
-| Requirement | Status | Notes |
-|-------------|--------|-------|
-| {Requirement 1} | [Met/Partial/Missing] | {Notes} |
-| {Requirement 2} | [Met/Partial/Missing] | {Notes} |
-[... continue for all requirements]
-
-### Summary
-
-**Required Sections Present:** {count}/{total}
-**Compliance Gaps:** {count}
-
-**Severity:** [Critical if missing regulatory sections, Warning if incomplete, Pass if complete]
-
-**Recommendation:**
-[If Critical] "PRD is missing required domain-specific compliance sections. These are essential for {domain} products."
-[If Warning] "Some domain compliance sections are incomplete. Strengthen documentation for full compliance."
-[If Pass] "All required domain compliance sections are present and adequately documented."
-```
-
-### 7. Display Progress and Auto-Proceed
-
-Display: "**Domain Compliance Validation Complete**
-
-Domain: {domain} ({complexity})
-Compliance Status: {status}
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-09-project-type-validation.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Domain classification extracted correctly
-- Complexity assessed appropriately
-- Low complexity domains: Skipped with clear "N/A" documentation
-- High complexity domains: All required sections checked
-- Compliance matrix built with status for each requirement
-- Severity assessed correctly
-- Findings reported to validation report
-- Auto-proceeds to next validation step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not checking domain classification before proceeding
-- Performing detailed checks on low complexity domains
-- For high complexity: missing required section checks
-- Not building compliance matrix
-- Not reporting findings to validation report
-- Not auto-proceeding
-
-**Master Rule:** Domain compliance is conditional. High-complexity domains require special sections - low complexity domains skip these checks.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-09-project-type-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-09-project-type-validation.md
deleted file mode 100644
index aea41d92..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-09-project-type-validation.md
+++ /dev/null
@@ -1,263 +0,0 @@
----
-name: 'step-v-09-project-type-validation'
-description: 'Project-Type Compliance Validation - Validate project-type specific requirements are properly documented'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-10-smart-validation.md'
-prdFile: '{prd_file_path}'
-prdFrontmatter: '{prd_frontmatter}'
-validationReportPath: '{validation_report_path}'
-projectTypesData: '../data/project-types.csv'
----
-
-# Step 9: Project-Type Compliance Validation
-
-## STEP GOAL:
-
-Validate project-type specific requirements are properly documented - different project types (api_backend, web_app, mobile_app, etc.) have different required and excluded sections.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring project type expertise and architectural knowledge
-- β This step runs autonomously - no user input needed
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on project-type compliance
-- π« FORBIDDEN to validate other aspects in this step
-- π¬ Approach: Validate required sections present, excluded sections absent
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Check classification.projectType from PRD frontmatter
-- π― Validate required sections for that project type are present
-- π― Validate excluded sections for that project type are absent
-- πΎ Append compliance findings to validation report
-- π Display "Proceeding to next check..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file with frontmatter classification, validation report
-- Focus: Project-type compliance only
-- Limits: Don't validate other aspects, don't pause for user input
-- Dependencies: Steps 2-8 completed - domain and requirements validation done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Load Project Types Data
-
-Load and read the complete file at:
-`{projectTypesData}` (../data/project-types.csv)
-
-This CSV contains:
-- Detection signals for each project type
-- Required sections for each project type
-- Skip/excluded sections for each project type
-- Innovation signals
-
-Internalize this data - it drives what sections must be present or absent for each project type.
-
-### 2. Extract Project Type Classification
-
-From PRD frontmatter, extract:
-- `classification.projectType` - what type of project is this?
-
-**Common project types:**
-- api_backend
-- web_app
-- mobile_app
-- desktop_app
-- data_pipeline
-- ml_system
-- library_sdk
-- infrastructure
-- other
-
-**If no projectType classification found:**
-Assume "web_app" (most common) and note in findings
-
-### 3. Determine Required and Excluded Sections from CSV Data
-
-**From loaded project-types.csv data, for this project type:**
-
-**Required sections:** (from required_sections column)
-These MUST be present in the PRD
-
-**Skip sections:** (from skip_sections column)
-These MUST NOT be present in the PRD
-
-**Example mappings from CSV:**
-- api_backend: Required=[endpoint_specs, auth_model, data_schemas], Skip=[ux_ui, visual_design]
-- mobile_app: Required=[platform_reqs, device_permissions, offline_mode], Skip=[desktop_features, cli_commands]
-- cli_tool: Required=[command_structure, output_formats, config_schema], Skip=[visual_design, ux_principles, touch_interactions]
-- etc.
-
-### 4. Validate Against CSV-Based Requirements
-
-**Based on project type, determine:**
-
-**api_backend:**
-- Required: Endpoint Specs, Auth Model, Data Schemas, API Versioning
-- Excluded: UX/UI sections, mobile-specific sections
-
-**web_app:**
-- Required: User Journeys, UX/UI Requirements, Responsive Design
-- Excluded: None typically
-
-**mobile_app:**
-- Required: Mobile UX, Platform specifics (iOS/Android), Offline mode
-- Excluded: Desktop-specific sections
-
-**desktop_app:**
-- Required: Desktop UX, Platform specifics (Windows/Mac/Linux)
-- Excluded: Mobile-specific sections
-
-**data_pipeline:**
-- Required: Data Sources, Data Transformation, Data Sinks, Error Handling
-- Excluded: UX/UI sections
-
-**ml_system:**
-- Required: Model Requirements, Training Data, Inference Requirements, Model Performance
-- Excluded: UX/UI sections (unless ML UI)
-
-**library_sdk:**
-- Required: API Surface, Usage Examples, Integration Guide
-- Excluded: UX/UI sections, deployment sections
-
-**infrastructure:**
-- Required: Infrastructure Components, Deployment, Monitoring, Scaling
-- Excluded: Feature requirements (this is infrastructure, not product)
-
-### 4. Attempt Sub-Process Validation
-
-"Perform project-type compliance validation for {projectType}:
-
-**Check that required sections are present:**
-{List required sections for this project type}
-For each: Is it present in PRD? Is it adequately documented?
-
-**Check that excluded sections are absent:**
-{List excluded sections for this project type}
-For each: Is it absent from PRD? (Should not be present)
-
-Build compliance table showing:
-- Required sections: [Present/Missing/Incomplete]
-- Excluded sections: [Absent/Present] (Present = violation)
-
-Return compliance table with findings."
-
-**Graceful degradation (if no Task tool):**
-- Manually check PRD for required sections
-- Manually check PRD for excluded sections
-- Build compliance table
-
-### 5. Build Compliance Table
-
-**Required sections check:**
-- For each required section: Present / Missing / Incomplete
-- Count: Required sections present vs total required
-
-**Excluded sections check:**
-- For each excluded section: Absent / Present (violation)
-- Count: Excluded sections present (violations)
-
-**Total compliance score:**
-- Required: {present}/{total}
-- Excluded violations: {count}
-
-### 6. Report Project-Type Compliance Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## Project-Type Compliance Validation
-
-**Project Type:** {projectType}
-
-### Required Sections
-
-**{Section 1}:** [Present/Missing/Incomplete]
-{If missing or incomplete: Note specific gaps}
-
-**{Section 2}:** [Present/Missing/Incomplete]
-{If missing or incomplete: Note specific gaps}
-
-[Continue for all required sections]
-
-### Excluded Sections (Should Not Be Present)
-
-**{Section 1}:** [Absent/Present] β
-{If present: This section should not be present for {projectType}}
-
-**{Section 2}:** [Absent/Present] β
-{If present: This section should not be present for {projectType}}
-
-[Continue for all excluded sections]
-
-### Compliance Summary
-
-**Required Sections:** {present}/{total} present
-**Excluded Sections Present:** {violations} (should be 0)
-**Compliance Score:** {percentage}%
-
-**Severity:** [Critical if required sections missing, Warning if incomplete, Pass if complete]
-
-**Recommendation:**
-[If Critical] "PRD is missing required sections for {projectType}. Add missing sections to properly specify this type of project."
-[If Warning] "Some required sections for {projectType} are incomplete. Strengthen documentation."
-[If Pass] "All required sections for {projectType} are present. No excluded sections found."
-```
-
-### 7. Display Progress and Auto-Proceed
-
-Display: "**Project-Type Compliance Validation Complete**
-
-Project Type: {projectType}
-Compliance: {score}%
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-10-smart-validation.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Project type extracted correctly (or default assumed)
-- Required sections validated for presence and completeness
-- Excluded sections validated for absence
-- Compliance table built with status for all sections
-- Severity assessed correctly
-- Findings reported to validation report
-- Auto-proceeds to next validation step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not checking project type before proceeding
-- Missing required section checks
-- Missing excluded section checks
-- Not building compliance table
-- Not reporting findings to validation report
-- Not auto-proceeding
-
-**Master Rule:** Different project types have different requirements. API PRDs don't need UX sections - validate accordingly.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-10-smart-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-10-smart-validation.md
deleted file mode 100644
index e937c752..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-10-smart-validation.md
+++ /dev/null
@@ -1,209 +0,0 @@
----
-name: 'step-v-10-smart-validation'
-description: 'SMART Requirements Validation - Validate Functional Requirements meet SMART quality criteria'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-11-holistic-quality-validation.md'
-prdFile: '{prd_file_path}'
-validationReportPath: '{validation_report_path}'
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
----
-
-# Step 10: SMART Requirements Validation
-
-## STEP GOAL:
-
-Validate Functional Requirements meet SMART quality criteria (Specific, Measurable, Attainable, Relevant, Traceable), ensuring high-quality requirements.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring requirements engineering expertise and quality assessment
-- β This step runs autonomously - no user input needed
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on FR quality assessment using SMART framework
-- π« FORBIDDEN to validate other aspects in this step
-- π¬ Approach: Score each FR on SMART criteria (1-5 scale)
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Extract all FRs from PRD
-- π― Score each FR on SMART criteria (Specific, Measurable, Attainable, Relevant, Traceable)
-- πΎ Flag FRs with score < 3 in any category
-- π Append scoring table and suggestions to validation report
-- π Display "Proceeding to next check..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: PRD file, validation report
-- Focus: FR quality assessment only using SMART framework
-- Limits: Don't validate NFRs or other aspects, don't pause for user input
-- Dependencies: Steps 2-9 completed - comprehensive validation checks done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Extract All Functional Requirements
-
-From the PRD's Functional Requirements section, extract:
-- All FRs with their FR numbers (FR-001, FR-002, etc.)
-- Count total FRs
-
-### 2. Attempt Sub-Process Validation
-
-**Try to use Task tool to spawn a subprocess:**
-
-"Perform SMART requirements validation on these Functional Requirements:
-
-{List all FRs}
-
-**For each FR, score on SMART criteria (1-5 scale):**
-
-**Specific (1-5):**
-- 5: Clear, unambiguous, well-defined
-- 3: Somewhat clear but could be more specific
-- 1: Vague, ambiguous, unclear
-
-**Measurable (1-5):**
-- 5: Quantifiable metrics, testable
-- 3: Partially measurable
-- 1: Not measurable, subjective
-
-**Attainable (1-5):**
-- 5: Realistic, achievable with constraints
-- 3: Probably achievable but uncertain
-- 1: Unrealistic, technically infeasible
-
-**Relevant (1-5):**
-- 5: Clearly aligned with user needs and business objectives
-- 3: Somewhat relevant but connection unclear
-- 1: Not relevant, doesn't align with goals
-
-**Traceable (1-5):**
-- 5: Clearly traces to user journey or business objective
-- 3: Partially traceable
-- 1: Orphan requirement, no clear source
-
-**For each FR with score < 3 in any category:**
-- Provide specific improvement suggestions
-
-Return scoring table with all FR scores and improvement suggestions for low-scoring FRs."
-
-**Graceful degradation (if no Task tool):**
-- Manually score each FR on SMART criteria
-- Note FRs with low scores
-- Provide improvement suggestions
-
-### 3. Build Scoring Table
-
-For each FR:
-- FR number
-- Specific score (1-5)
-- Measurable score (1-5)
-- Attainable score (1-5)
-- Relevant score (1-5)
-- Traceable score (1-5)
-- Average score
-- Flag if any category < 3
-
-**Calculate overall FR quality:**
-- Percentage of FRs with all scores β₯ 3
-- Percentage of FRs with all scores β₯ 4
-- Average score across all FRs and categories
-
-### 4. Report SMART Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## SMART Requirements Validation
-
-**Total Functional Requirements:** {count}
-
-### Scoring Summary
-
-**All scores β₯ 3:** {percentage}% ({count}/{total})
-**All scores β₯ 4:** {percentage}% ({count}/{total})
-**Overall Average Score:** {average}/5.0
-
-### Scoring Table
-
-| FR # | Specific | Measurable | Attainable | Relevant | Traceable | Average | Flag |
-|------|----------|------------|------------|----------|-----------|--------|------|
-| FR-001 | {s1} | {m1} | {a1} | {r1} | {t1} | {avg1} | {X if any <3} |
-| FR-002 | {s2} | {m2} | {a2} | {r2} | {t2} | {avg2} | {X if any <3} |
-[Continue for all FRs]
-
-**Legend:** 1=Poor, 3=Acceptable, 5=Excellent
-**Flag:** X = Score < 3 in one or more categories
-
-### Improvement Suggestions
-
-**Low-Scoring FRs:**
-
-**FR-{number}:** {specific suggestion for improvement}
-[For each FR with score < 3 in any category]
-
-### Overall Assessment
-
-**Severity:** [Critical if >30% flagged FRs, Warning if 10-30%, Pass if <10%]
-
-**Recommendation:**
-[If Critical] "Many FRs have quality issues. Revise flagged FRs using SMART framework to improve clarity and testability."
-[If Warning] "Some FRs would benefit from SMART refinement. Focus on flagged requirements above."
-[If Pass] "Functional Requirements demonstrate good SMART quality overall."
-```
-
-### 5. Display Progress and Auto-Proceed
-
-Display: "**SMART Requirements Validation Complete**
-
-FR Quality: {percentage}% with acceptable scores ({severity})
-
-**Proceeding to next validation check...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-11-holistic-quality-validation.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All FRs extracted from PRD
-- Each FR scored on all 5 SMART criteria (1-5 scale)
-- FRs with scores < 3 flagged for improvement
-- Improvement suggestions provided for low-scoring FRs
-- Scoring table built with all FR scores
-- Overall quality assessment calculated
-- Findings reported to validation report
-- Auto-proceeds to next validation step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not scoring all FRs on all SMART criteria
-- Missing improvement suggestions for low-scoring FRs
-- Not building scoring table
-- Not calculating overall quality metrics
-- Not reporting findings to validation report
-- Not auto-proceeding
-
-**Master Rule:** FRs should be high-quality, not just present. SMART framework provides objective quality measure.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-11-holistic-quality-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-11-holistic-quality-validation.md
deleted file mode 100644
index 698b6f65..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-11-holistic-quality-validation.md
+++ /dev/null
@@ -1,264 +0,0 @@
----
-name: 'step-v-11-holistic-quality-validation'
-description: 'Holistic Quality Assessment - Assess PRD as cohesive, compelling document - is it a good PRD?'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-12-completeness-validation.md'
-prdFile: '{prd_file_path}'
-validationReportPath: '{validation_report_path}'
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
----
-
-# Step 11: Holistic Quality Assessment
-
-## STEP GOAL:
-
-Assess the PRD as a cohesive, compelling document - evaluating document flow, dual audience effectiveness (humans and LLMs), BMAD PRD principles compliance, and overall quality rating.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring analytical rigor and document quality expertise
-- β This step runs autonomously - no user input needed
-- β Uses Advanced Elicitation for multi-perspective evaluation
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on holistic document quality assessment
-- π« FORBIDDEN to validate individual components (done in previous steps)
-- π¬ Approach: Multi-perspective evaluation using Advanced Elicitation
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Use Advanced Elicitation for multi-perspective assessment
-- π― Evaluate document flow, dual audience, BMAD principles
-- πΎ Append comprehensive assessment to validation report
-- π Display "Proceeding to next check..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Complete PRD file, validation report with findings from steps 1-10
-- Focus: Holistic quality - the WHOLE document
-- Limits: Don't re-validate individual components, don't pause for user input
-- Dependencies: Steps 1-10 completed - all systematic checks done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Attempt Sub-Process with Advanced Elicitation
-
-**Try to use Task tool to spawn a subprocess using Advanced Elicitation:**
-
-"Perform holistic quality assessment on this PRD using multi-perspective evaluation:
-
-**Read fully and follow the Advanced Elicitation workflow:**
-{advancedElicitationTask}
-
-**Evaluate the PRD from these perspectives:**
-
-**1. Document Flow & Coherence:**
-- Read entire PRD
-- Evaluate narrative flow - does it tell a cohesive story?
-- Check transitions between sections
-- Assess consistency - is it coherent throughout?
-- Evaluate readability - is it clear and well-organized?
-
-**2. Dual Audience Effectiveness:**
-
-**For Humans:**
-- Executive-friendly: Can executives understand vision and goals quickly?
-- Developer clarity: Do developers have clear requirements to build from?
-- Designer clarity: Do designers understand user needs and flows?
-- Stakeholder decision-making: Can stakeholders make informed decisions?
-
-**For LLMs:**
-- Machine-readable structure: Is the PRD structured for LLM consumption?
-- UX readiness: Can an LLM generate UX designs from this?
-- Architecture readiness: Can an LLM generate architecture from this?
-- Epic/Story readiness: Can an LLM break down into epics and stories?
-
-**3. BMAD PRD Principles Compliance:**
-- Information density: Every sentence carries weight?
-- Measurability: Requirements testable?
-- Traceability: Requirements trace to sources?
-- Domain awareness: Domain-specific considerations included?
-- Zero anti-patterns: No filler or wordiness?
-- Dual audience: Works for both humans and LLMs?
-- Markdown format: Proper structure and formatting?
-
-**4. Overall Quality Rating:**
-Rate the PRD on 5-point scale:
-- Excellent (5/5): Exemplary, ready for production use
-- Good (4/5): Strong with minor improvements needed
-- Adequate (3/5): Acceptable but needs refinement
-- Needs Work (2/5): Significant gaps or issues
-- Problematic (1/5): Major flaws, needs substantial revision
-
-**5. Top 3 Improvements:**
-Identify the 3 most impactful improvements to make this a great PRD
-
-Return comprehensive assessment with all perspectives, rating, and top 3 improvements."
-
-**Graceful degradation (if no Task tool or Advanced Elicitation unavailable):**
-- Perform holistic assessment directly in current context
-- Read complete PRD
-- Evaluate document flow, coherence, transitions
-- Assess dual audience effectiveness
-- Check BMAD principles compliance
-- Assign overall quality rating
-- Identify top 3 improvements
-
-### 2. Synthesize Assessment
-
-**Compile findings from multi-perspective evaluation:**
-
-**Document Flow & Coherence:**
-- Overall assessment: [Excellent/Good/Adequate/Needs Work/Problematic]
-- Key strengths: [list]
-- Key weaknesses: [list]
-
-**Dual Audience Effectiveness:**
-- For Humans: [assessment]
-- For LLMs: [assessment]
-- Overall dual audience score: [1-5]
-
-**BMAD Principles Compliance:**
-- Principles met: [count]/7
-- Principles with issues: [list]
-
-**Overall Quality Rating:** [1-5 with label]
-
-**Top 3 Improvements:**
-1. [Improvement 1]
-2. [Improvement 2]
-3. [Improvement 3]
-
-### 3. Report Holistic Quality Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## Holistic Quality Assessment
-
-### Document Flow & Coherence
-
-**Assessment:** [Excellent/Good/Adequate/Needs Work/Problematic]
-
-**Strengths:**
-{List key strengths}
-
-**Areas for Improvement:**
-{List key weaknesses}
-
-### Dual Audience Effectiveness
-
-**For Humans:**
-- Executive-friendly: [assessment]
-- Developer clarity: [assessment]
-- Designer clarity: [assessment]
-- Stakeholder decision-making: [assessment]
-
-**For LLMs:**
-- Machine-readable structure: [assessment]
-- UX readiness: [assessment]
-- Architecture readiness: [assessment]
-- Epic/Story readiness: [assessment]
-
-**Dual Audience Score:** {score}/5
-
-### BMAD PRD Principles Compliance
-
-| Principle | Status | Notes |
-|-----------|--------|-------|
-| Information Density | [Met/Partial/Not Met] | {notes} |
-| Measurability | [Met/Partial/Not Met] | {notes} |
-| Traceability | [Met/Partial/Not Met] | {notes} |
-| Domain Awareness | [Met/Partial/Not Met] | {notes} |
-| Zero Anti-Patterns | [Met/Partial/Not Met] | {notes} |
-| Dual Audience | [Met/Partial/Not Met] | {notes} |
-| Markdown Format | [Met/Partial/Not Met] | {notes} |
-
-**Principles Met:** {count}/7
-
-### Overall Quality Rating
-
-**Rating:** {rating}/5 - {label}
-
-**Scale:**
-- 5/5 - Excellent: Exemplary, ready for production use
-- 4/5 - Good: Strong with minor improvements needed
-- 3/5 - Adequate: Acceptable but needs refinement
-- 2/5 - Needs Work: Significant gaps or issues
-- 1/5 - Problematic: Major flaws, needs substantial revision
-
-### Top 3 Improvements
-
-1. **{Improvement 1}**
- {Brief explanation of why and how}
-
-2. **{Improvement 2}**
- {Brief explanation of why and how}
-
-3. **{Improvement 3}**
- {Brief explanation of why and how}
-
-### Summary
-
-**This PRD is:** {one-sentence overall assessment}
-
-**To make it great:** Focus on the top 3 improvements above.
-```
-
-### 4. Display Progress and Auto-Proceed
-
-Display: "**Holistic Quality Assessment Complete**
-
-Overall Rating: {rating}/5 - {label}
-
-**Proceeding to final validation checks...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-12-completeness-validation.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Advanced Elicitation used for multi-perspective evaluation (or graceful degradation)
-- Document flow & coherence assessed
-- Dual audience effectiveness evaluated (humans and LLMs)
-- BMAD PRD principles compliance checked
-- Overall quality rating assigned (1-5 scale)
-- Top 3 improvements identified
-- Comprehensive assessment reported to validation report
-- Auto-proceeds to next validation step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not using Advanced Elicitation for multi-perspective evaluation
-- Missing document flow assessment
-- Missing dual audience evaluation
-- Not checking all BMAD principles
-- Not assigning overall quality rating
-- Missing top 3 improvements
-- Not reporting comprehensive assessment to validation report
-- Not auto-proceeding
-
-**Master Rule:** This evaluates the WHOLE document, not just components. Answers "Is this a good PRD?" and "What would make it great?"
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-12-completeness-validation.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-12-completeness-validation.md
deleted file mode 100644
index 00c47798..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-12-completeness-validation.md
+++ /dev/null
@@ -1,242 +0,0 @@
----
-name: 'step-v-12-completeness-validation'
-description: 'Completeness Check - Final comprehensive completeness check before report generation'
-
-# File references (ONLY variables used in this step)
-nextStepFile: './step-v-13-report-complete.md'
-prdFile: '{prd_file_path}'
-prdFrontmatter: '{prd_frontmatter}'
-validationReportPath: '{validation_report_path}'
----
-
-# Step 12: Completeness Validation
-
-## STEP GOAL:
-
-Final comprehensive completeness check - validate no template variables remain, each section has required content, section-specific completeness, and frontmatter is properly populated.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in systematic validation, not collaborative dialogue
-- β You bring attention to detail and completeness verification
-- β This step runs autonomously - no user input needed
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on completeness verification
-- π« FORBIDDEN to validate quality (done in step 11) or other aspects
-- π¬ Approach: Systematic checklist-style verification
-- πͺ This is a validation sequence step - auto-proceeds when complete
-
-## EXECUTION PROTOCOLS:
-
-- π― Check template completeness (no variables remaining)
-- π― Validate content completeness (each section has required content)
-- π― Validate section-specific completeness
-- π― Validate frontmatter completeness
-- πΎ Append completeness matrix to validation report
-- π Display "Proceeding to final step..." and load next step
-- π« FORBIDDEN to pause or request user input
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Complete PRD file, frontmatter, validation report
-- Focus: Completeness verification only (final gate)
-- Limits: Don't assess quality, don't pause for user input
-- Dependencies: Steps 1-11 completed - all validation checks done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Attempt Sub-Process Validation
-
-**Try to use Task tool to spawn a subprocess:**
-
-"Perform completeness validation on this PRD - final gate check:
-
-**1. Template Completeness:**
-- Scan PRD for any remaining template variables
-- Look for: {variable}, {{variable}}, {placeholder}, [placeholder], etc.
-- List any found with line numbers
-
-**2. Content Completeness:**
-- Executive Summary: Has vision statement? ({key content})
-- Success Criteria: All criteria measurable? ({metrics present})
-- Product Scope: In-scope and out-of-scope defined? ({both present})
-- User Journeys: User types identified? ({users listed})
-- Functional Requirements: FRs listed with proper format? ({FRs present})
-- Non-Functional Requirements: NFRs with metrics? ({NFRs present})
-
-For each section: Is required content present? (Yes/No/Partial)
-
-**3. Section-Specific Completeness:**
-- Success Criteria: Each has specific measurement method?
-- User Journeys: Cover all user types?
-- Functional Requirements: Cover MVP scope?
-- Non-Functional Requirements: Each has specific criteria?
-
-**4. Frontmatter Completeness:**
-- stepsCompleted: Populated?
-- classification: Present (domain, projectType)?
-- inputDocuments: Tracked?
-- date: Present?
-
-Return completeness matrix with status for each check."
-
-**Graceful degradation (if no Task tool):**
-- Manually scan for template variables
-- Manually check each section for required content
-- Manually verify frontmatter fields
-- Build completeness matrix
-
-### 2. Build Completeness Matrix
-
-**Template Completeness:**
-- Template variables found: count
-- List if any found
-
-**Content Completeness by Section:**
-- Executive Summary: Complete / Incomplete / Missing
-- Success Criteria: Complete / Incomplete / Missing
-- Product Scope: Complete / Incomplete / Missing
-- User Journeys: Complete / Incomplete / Missing
-- Functional Requirements: Complete / Incomplete / Missing
-- Non-Functional Requirements: Complete / Incomplete / Missing
-- Other sections: [List completeness]
-
-**Section-Specific Completeness:**
-- Success criteria measurable: All / Some / None
-- Journeys cover all users: Yes / Partial / No
-- FRs cover MVP scope: Yes / Partial / No
-- NFRs have specific criteria: All / Some / None
-
-**Frontmatter Completeness:**
-- stepsCompleted: Present / Missing
-- classification: Present / Missing
-- inputDocuments: Present / Missing
-- date: Present / Missing
-
-**Overall completeness:**
-- Sections complete: X/Y
-- Critical gaps: [list if any]
-
-### 3. Report Completeness Findings to Validation Report
-
-Append to validation report:
-
-```markdown
-## Completeness Validation
-
-### Template Completeness
-
-**Template Variables Found:** {count}
-{If count > 0, list variables with line numbers}
-{If count = 0, note: No template variables remaining β}
-
-### Content Completeness by Section
-
-**Executive Summary:** [Complete/Incomplete/Missing]
-{If incomplete or missing, note specific gaps}
-
-**Success Criteria:** [Complete/Incomplete/Missing]
-{If incomplete or missing, note specific gaps}
-
-**Product Scope:** [Complete/Incomplete/Missing]
-{If incomplete or missing, note specific gaps}
-
-**User Journeys:** [Complete/Incomplete/Missing]
-{If incomplete or missing, note specific gaps}
-
-**Functional Requirements:** [Complete/Incomplete/Missing]
-{If incomplete or missing, note specific gaps}
-
-**Non-Functional Requirements:** [Complete/Incomplete/Missing]
-{If incomplete or missing, note specific gaps}
-
-### Section-Specific Completeness
-
-**Success Criteria Measurability:** [All/Some/None] measurable
-{If Some or None, note which criteria lack metrics}
-
-**User Journeys Coverage:** [Yes/Partial/No] - covers all user types
-{If Partial or No, note missing user types}
-
-**FRs Cover MVP Scope:** [Yes/Partial/No]
-{If Partial or No, note scope gaps}
-
-**NFRs Have Specific Criteria:** [All/Some/None]
-{If Some or None, note which NFRs lack specificity}
-
-### Frontmatter Completeness
-
-**stepsCompleted:** [Present/Missing]
-**classification:** [Present/Missing]
-**inputDocuments:** [Present/Missing]
-**date:** [Present/Missing]
-
-**Frontmatter Completeness:** {complete_fields}/4
-
-### Completeness Summary
-
-**Overall Completeness:** {percentage}% ({complete_sections}/{total_sections})
-
-**Critical Gaps:** [count] [list if any]
-**Minor Gaps:** [count] [list if any]
-
-**Severity:** [Critical if template variables exist or critical sections missing, Warning if minor gaps, Pass if complete]
-
-**Recommendation:**
-[If Critical] "PRD has completeness gaps that must be addressed before use. Fix template variables and complete missing sections."
-[If Warning] "PRD has minor completeness gaps. Address minor gaps for complete documentation."
-[If Pass] "PRD is complete with all required sections and content present."
-```
-
-### 4. Display Progress and Auto-Proceed
-
-Display: "**Completeness Validation Complete**
-
-Overall Completeness: {percentage}% ({severity})
-
-**Proceeding to final step...**"
-
-Without delay, read fully and follow: {nextStepFile} (step-v-13-report-complete.md)
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Scanned for template variables systematically
-- Validated each section for required content
-- Validated section-specific completeness (measurability, coverage, scope)
-- Validated frontmatter completeness
-- Completeness matrix built with all checks
-- Severity assessed correctly
-- Findings reported to validation report
-- Auto-proceeds to final step
-- Subprocess attempted with graceful degradation
-
-### β SYSTEM FAILURE:
-
-- Not scanning for template variables
-- Missing section-specific completeness checks
-- Not validating frontmatter
-- Not building completeness matrix
-- Not reporting findings to validation report
-- Not auto-proceeding
-
-**Master Rule:** Final gate to ensure document is complete before presenting findings. Template variables or critical gaps must be fixed.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-13-report-complete.md b/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-13-report-complete.md
deleted file mode 100644
index 08465604..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/steps-v/step-v-13-report-complete.md
+++ /dev/null
@@ -1,231 +0,0 @@
----
-name: 'step-v-13-report-complete'
-description: 'Validation Report Complete - Finalize report, summarize findings, present to user, offer next steps'
-
-# File references (ONLY variables used in this step)
-validationReportPath: '{validation_report_path}'
-prdFile: '{prd_file_path}'
----
-
-# Step 13: Validation Report Complete
-
-## STEP GOAL:
-
-Finalize validation report, summarize all findings from steps 1-12, present summary to user conversationally, and offer actionable next steps.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a Validation Architect and Quality Assurance Specialist
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring synthesis and summary expertise
-- β This is the FINAL step - requires user interaction
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on summarizing findings and presenting options
-- π« FORBIDDEN to perform additional validation
-- π¬ Approach: Conversational summary with clear next steps
-- πͺ This is the final step - no next step after this
-
-## EXECUTION PROTOCOLS:
-
-- π― Load complete validation report
-- π― Summarize all findings from steps 1-12
-- π― Update report frontmatter with final status
-- π¬ Present summary to user conversationally
-- π¬ Offer menu options for next actions
-- π« FORBIDDEN to proceed without user selection
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Complete validation report with findings from all validation steps
-- Focus: Summary and presentation only (no new validation)
-- Limits: Don't add new findings, just synthesize existing
-- Dependencies: Steps 1-12 completed - all validation checks done
-
-## MANDATORY SEQUENCE
-
-**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise unless user explicitly requests a change.
-
-### 1. Load Complete Validation Report
-
-Read the entire validation report from {validationReportPath}
-
-Extract all findings from:
-- Format Detection (Step 2)
-- Parity Analysis (Step 2B, if applicable)
-- Information Density (Step 3)
-- Product Brief Coverage (Step 4)
-- Measurability (Step 5)
-- Traceability (Step 6)
-- Implementation Leakage (Step 7)
-- Domain Compliance (Step 8)
-- Project-Type Compliance (Step 9)
-- SMART Requirements (Step 10)
-- Holistic Quality (Step 11)
-- Completeness (Step 12)
-
-### 2. Update Report Frontmatter with Final Status
-
-Update validation report frontmatter:
-
-```yaml
----
-validationTarget: '{prd_path}'
-validationDate: '{current_date}'
-inputDocuments: [list of documents]
-validationStepsCompleted: ['step-v-01-discovery', 'step-v-02-format-detection', 'step-v-03-density-validation', 'step-v-04-brief-coverage-validation', 'step-v-05-measurability-validation', 'step-v-06-traceability-validation', 'step-v-07-implementation-leakage-validation', 'step-v-08-domain-compliance-validation', 'step-v-09-project-type-validation', 'step-v-10-smart-validation', 'step-v-11-holistic-quality-validation', 'step-v-12-completeness-validation']
-validationStatus: COMPLETE
-holisticQualityRating: '{rating from step 11}'
-overallStatus: '{Pass/Warning/Critical based on all findings}'
----
-```
-
-### 3. Create Summary of Findings
-
-**Overall Status:**
-- Determine from all validation findings
-- **Pass:** All critical checks pass, minor warnings acceptable
-- **Warning:** Some issues found but PRD is usable
-- **Critical:** Major issues that prevent PRD from being fit for purpose
-
-**Quick Results Table:**
-- Format: [classification]
-- Information Density: [severity]
-- Measurability: [severity]
-- Traceability: [severity]
-- Implementation Leakage: [severity]
-- Domain Compliance: [status]
-- Project-Type Compliance: [compliance score]
-- SMART Quality: [percentage]
-- Holistic Quality: [rating/5]
-- Completeness: [percentage]
-
-**Critical Issues:** List from all validation steps
-**Warnings:** List from all validation steps
-**Strengths:** List positives from all validation steps
-
-**Holistic Quality Rating:** From step 11
-**Top 3 Improvements:** From step 11
-
-**Recommendation:** Based on overall status
-
-### 4. Present Summary to User Conversationally
-
-Display:
-
-"**β PRD Validation Complete**
-
-**Overall Status:** {Pass/Warning/Critical}
-
-**Quick Results:**
-{Present quick results table with key findings}
-
-**Critical Issues:** {count or "None"}
-{If any, list briefly}
-
-**Warnings:** {count or "None"}
-{If any, list briefly}
-
-**Strengths:**
-{List key strengths}
-
-**Holistic Quality:** {rating}/5 - {label}
-
-**Top 3 Improvements:**
-1. {Improvement 1}
-2. {Improvement 2}
-3. {Improvement 3}
-
-**Recommendation:**
-{Based on overall status:
-- Pass: "PRD is in good shape. Address minor improvements to make it great."
-- Warning: "PRD is usable but has issues that should be addressed. Review warnings and improve where needed."
-- Critical: "PRD has significant issues that should be fixed before use. Focus on critical issues above."}
-
-**What would you like to do next?**"
-
-### 5. Present MENU OPTIONS
-
-Display:
-
-**[R] Review Detailed Findings** - Walk through validation report section by section
-**[E] Use Edit Workflow** - Use validation report with Edit workflow for systematic improvements
-**[F] Fix Simpler Items** - Immediate fixes for simple issues (anti-patterns, leakage, missing headers)
-**[X] Exit** - Exit and Suggest Next Steps.
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- Only proceed based on user selection
-
-#### Menu Handling Logic:
-
-- **IF R (Review Detailed Findings):**
- - Walk through validation report section by section
- - Present findings from each validation step
- - Allow user to ask questions
- - After review, return to menu
-
-- **IF E (Use Edit Workflow):**
- - Explain: "The Edit workflow (steps-e/) can use this validation report to systematically address issues. Edit mode will guide you through discovering what to edit, reviewing the PRD, and applying targeted improvements."
- - Offer: "Would you like to launch Edit mode now? It will help you fix validation findings systematically."
- - If yes: Read fully and follow: steps-e/step-e-01-discovery.md
- - If no: Return to menu
-
-- **IF F (Fix Simpler Items):**
- - Offer immediate fixes for:
- - Template variables (fill in with appropriate content)
- - Conversational filler (remove wordy phrases)
- - Implementation leakage (remove technology names from FRs/NFRs)
- - Missing section headers (add ## headers)
- - Ask: "Which simple fixes would you like me to make?"
- - If user specifies fixes, make them and update validation report
- - Return to menu
-
-- **IF X (Exit):**
- - Display: "**Validation Report Saved:** {validationReportPath}"
- - Display: "**Summary:** {overall status} - {recommendation}"
- - PRD Validation complete. Read fully and follow: `_bmad/core/tasks/bmad-help.md` with argument `Validate PRD`.
-
-- **IF Any other:** Help user, then redisplay menu
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Complete validation report loaded successfully
-- All findings from steps 1-12 summarized
-- Report frontmatter updated with final status
-- Overall status determined correctly (Pass/Warning/Critical)
-- Quick results table presented
-- Critical issues, warnings, and strengths listed
-- Holistic quality rating included
-- Top 3 improvements presented
-- Clear recommendation provided
-- Menu options presented with clear explanations
-- User can review findings, get help, or exit
-
-### β SYSTEM FAILURE:
-
-- Not loading complete validation report
-- Missing summary of findings
-- Not updating report frontmatter
-- Not determining overall status
-- Missing menu options
-- Unclear next steps
-
-**Master Rule:** User needs clear summary and actionable next steps. Edit workflow is best for complex issues; immediate fixes available for simpler ones.
diff --git a/src/bmm/workflows/2-plan-workflows/prd/templates/prd-template.md b/src/bmm/workflows/2-plan-workflows/prd/templates/prd-template.md
deleted file mode 100644
index d82219d2..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/templates/prd-template.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-stepsCompleted: []
-inputDocuments: []
-workflowType: 'prd'
----
-
-# Product Requirements Document - {{project_name}}
-
-**Author:** {{user_name}}
-**Date:** {{date}}
diff --git a/src/bmm/workflows/2-plan-workflows/prd/validation-report-prd-workflow.md b/src/bmm/workflows/2-plan-workflows/prd/validation-report-prd-workflow.md
deleted file mode 100644
index 73dce5ae..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/validation-report-prd-workflow.md
+++ /dev/null
@@ -1,433 +0,0 @@
----
-validationTarget: 'PRD Workflow Structure'
-validationDate: '2026-01-08'
-inputDocuments: []
-validationStepsCompleted: ['discovery', 'frontmatter-validation', 'content-validation', 'documentation-validation', 'integration-validation', 'corrections-applied']
-validationStatus: COMPLETE - PRODUCTION READY
----
-
-# PRD Workflow Validation Report
-
-**Workflow Being Validated:** /Users/brianmadison/dev/BMAD-METHOD/src/bmm/workflows/2-plan-workflows/prd
-**Validation Date:** 2026-01-08
-**Validator:** BMAD Workflow Validation System
-
----
-
-## Executive Summary
-
-This validation report assesses the PRD workflow structure against BMAD workflow standards. The PRD workflow is a tri-modal workflow system with Create, Validate, and Edit phases.
-
----
-
-## 1. File Structure & Size Analysis
-
-### Folder Structure
-
-```
-prd/
-βββ workflow.md (main workflow file)
-βββ steps-c/ (Create steps - 12 files)
-βββ steps-v/ (Validation steps - 13 files)
-βββ steps-e/ (Edit steps - 5 files)
-βββ data/
-β βββ prd-purpose.md
-βββ templates/
- βββ prd-template.md
-```
-
-**β Structure Status**: PASS - All required folders present
-
-### File Size Analysis
-
-#### Steps-C (Create Steps) - 12 files
-| File | Lines | Status |
-|------|-------|--------|
-| step-01-init.md | 191 | β οΈ Approaching limit |
-| step-01b-continue.md | 153 | β Good |
-| step-02-discovery.md | 197 | β οΈ Approaching limit |
-| step-03-success.md | 226 | β οΈ Approaching limit |
-| step-04-journeys.md | 213 | β οΈ Approaching limit |
-| step-05-domain.md | 193 | β οΈ Approaching limit |
-| step-06-innovation.md | 226 | β οΈ Approaching limit |
-| step-07-project-type.md | 225 | β οΈ Approaching limit |
-| step-08-scoping.md | 228 | β οΈ Approaching limit |
-| step-09-functional.md | 231 | β οΈ Approaching limit |
-| step-10-nonfunctional.md | 242 | β οΈ Approaching limit |
-| step-11-polish.md | 217 | β οΈ Approaching limit |
-| step-12-complete.md | 185 | β Good |
-
-#### Steps-V (Validation Steps) - 13 files
-| File | Lines | Status |
-|------|-------|--------|
-| step-v-01-discovery.md | 217 | β οΈ Approaching limit |
-| step-v-02-format-detection.md | 191 | β οΈ Approaching limit |
-| step-v-02b-parity-check.md | 209 | β οΈ Approaching limit |
-| step-v-03-density-validation.md | 174 | β Good |
-| step-v-04-brief-coverage-validation.md | 214 | β οΈ Approaching limit |
-| step-v-05-measurability-validation.md | 228 | β οΈ Approaching limit |
-| step-v-06-traceability-validation.md | 217 | β οΈ Approaching limit |
-| step-v-07-implementation-leakage-validation.md | 205 | β οΈ Approaching limit |
-| step-v-08-domain-compliance-validation.md | 243 | β οΈ Approaching limit |
-| step-v-09-project-type-validation.md | 263 | β Exceeds limit |
-| step-v-10-smart-validation.md | 209 | β οΈ Approaching limit |
-| step-v-11-holistic-quality-validation.md | 264 | β Exceeds limit |
-| step-v-12-completeness-validation.md | 242 | β οΈ Approaching limit |
-| step-v-13-report-complete.md | 231 | β οΈ Approaching limit |
-
-#### Steps-E (Edit Steps) - 5 files
-| File | Lines | Status |
-|------|-------|--------|
-| step-e-01-discovery.md | 206 | β οΈ Approaching limit |
-| step-e-01b-legacy-conversion.md | 208 | β οΈ Approaching limit |
-| step-e-02-review.md | 249 | β οΈ Approaching limit |
-| step-e-03-edit.md | 253 | β Exceeds limit |
-| step-e-04-complete.md | 168 | β Good |
-
-#### Data & Templates
-| File | Lines | Status |
-|------|-------|--------|
-| data/prd-purpose.md | 197 | β οΈ Approaching limit |
-| templates/prd-template.md | 10 | β Good |
-| workflow.md | 114 | β Good |
-
-### File Size Statistics
-
-- **Total Files**: 32 markdown files
-- **β Good (<200 lines)**: 6 files (18.8%)
-- **β οΈ Approaching limit (200-250)**: 23 files (71.9%)
-- **β Exceeds limit (>250)**: 3 files (9.4%)
-- **Average lines per file**: 213.3 lines
-
-### β οΈ Recommendations
-
-1. **Files Exceeding 250-line limit**:
- - `step-v-09-project-type-validation.md` (263 lines) - Consider splitting into sub-steps
- - `step-v-11-holistic-quality-validation.md` (264 lines) - Consider splitting into sub-steps
- - `step-e-03-edit.md` (253 lines) - Consider splitting into sub-steps
-
-2. **Files Approaching Limit**:
- - Many files are in the 200-250 line range
- - Monitor these files as further additions may push them over the limit
- - Consider proactive refactoring where appropriate
-
----
-
-## 2. Frontmatter Structure Validation
-
-### Files Checked: 29 total files
-
-**β Overall Status:** ALL VALID - One Issue Fixed
-
-#### Main Workflow (workflow.md)
-**Required Fields Present:**
-- β `name`: "prd"
-- β `description`: "PRD tri-modal workflow"
-- β `nextStep`: "./steps-c/step-01-init.md"
-- β `validateWorkflow`: "./steps-v/step-v-01-discovery.md"
-- β `editWorkflow`: "./steps-e/step-e-01-discovery.md" (FIXED - was assess-workflow.md)
-
-#### Create Steps (steps-c)
-- β All 13 files have proper name, description, nextStepFile
-- β Proper sequencing from step-01 through step-12
-- β Consistent output file references
-
-#### Validation Steps (steps-v)
-- β All 13 files have complete frontmatter
-- β Proper sequential chain maintained
-- β No broken internal references
-
-#### Edit Steps (steps-e)
-- β All files have required fields
-- β Proper routing with altStepFile references
-
-### β All Issues Resolved
-
-**1. Broken Edit Workflow Reference:**
-```yaml
-# Current (INCORRECT):
-editWorkflow: './steps-e/step-e-01-assess-workflow.md'
-
-# Should be:
-editWorkflow: './steps-e/step-e-01-discovery.md'
-```
-
-**2. Step Numbering Gap:**
-- Original `step-11-complete.md` was deleted
-- Sequence now: step-10 β step-11-polish β step-12-complete
-- Creates confusion in step numbering
-
-### β YAML Syntax
-- No YAML syntax errors detected
-- All frontmatter properly formatted
-- Consistent structure across files
-
-### Status
-β **ALL ISSUES RESOLVED** - Only cosmetic improvements remain:
-
-1. **β FIXED**: Edit workflow path corrected in workflow.md
-2. **β οΈ OPTIONAL**: Address step numbering gap for clarity
-3. **β οΈ OPTIONAL**: Rename step-01b-continue.md to step-01a-continue.md for consistency
-
----
-
-## 3. Step File Content Validation
-
-### Content Quality Assessment: 4.5/5 - EXCELLENT
-
-#### Files Reviewed: 10 representative files across all modes
-
-#### β Strengths
-
-**1. Comprehensive Structure:**
-- Clear step goal sections in all files
-- Detailed mandatory execution rules
-- Well-defined execution protocols
-- Context boundaries clearly specified
-- Mandatory sequence with numbered steps
-- System success/failure metrics present
-
-**2. BMAD Compliance:**
-- β JIT loading references consistently mentioned
-- β State tracking requirements documented
-- β Append-only building instructions present
-- β Critical rules properly emphasized with emojis
-- β Sequential enforcement clearly stated
-
-**3. Instructional Quality:**
-- Clear, unambiguous instructions
-- Proper menu handling rules (where applicable)
-- Excellent continuation checks
-- Strong role definition for each mode
-
-**4. Role Clarity:**
-- Create Mode: "Product-focused PM facilitator"
-- Validate Mode: "Validation Architect and Quality Assurance Specialist"
-- Edit Mode: "PRD improvement specialist"
-
-#### β οΈ Minor Improvement Opportunities
-
-**1. Header Formatting:**
-- Some inconsistency in header level usage across files
-- Recommend standardizing H2/H3 usage
-
-**2. Edit Mode Completeness:**
-- Edit mode has fewer steps (5 vs 12/13 for other modes)
-- Documentation marks it as "Future" but implementation exists
-
-#### Recommendations
-1. **LOW PRIORITY**: Standardize header formatting across all step files
-2. **LOW PRIORITY**: Complete remaining edit mode steps for parity
-3. **MAINTAIN**: Current excellent quality standards
-
----
-
-## 4. Documentation Validation
-
-### Documentation Completeness: β COMPREHENSIVE
-
-#### Main Components Present
-- β Workflow Definition (workflow.md)
-- β Purpose Document (data/prd-purpose.md)
-- β Template (templates/prd-template.md)
-- β Three Mode Implementations (Create: 12, Validate: 13, Edit: 5 steps)
-
-#### Clarity Assessment: β EXCELLENT
-
-**Strong Points:**
-1. Clear mode determination (commands, flags, menu selection)
-2. Detailed routing instructions for each mode
-3. Comprehensive workflow architecture explanation
-4. Well-defined critical rules with visual emphasis
-5. Professional presentation with consistent formatting
-
-#### β οΈ Minor Issues Found
-
-**1. Step Count Mismatch:**
-- workflow.md mentions "11 steps" for Create mode
-- Actually implements 12 steps
-- Could confuse users
-
-**2. Edit Mode Status:**
-- workflow.md calls Edit mode "Future"
-- Edit mode steps are actually implemented
-- Should reflect current status
-
-**3. Template Completeness:**
-- PRD template is minimal (10 lines)
-- Could benefit from section placeholders
-
-**4. Missing README:**
-- No onboarding documentation for new users
-- Not critical but would be helpful
-
-#### Recommendations
-
-**HIGH PRIORITY:**
-1. Fix step count reference to match implementation (12 steps)
-2. Update edit mode documentation to "Implemented"
-
-**MEDIUM PRIORITY:**
-3. Enhance PRD template with section structure
-4. Add quick-start README for new users
-
-**LOW PRIORITY:**
-5. Add troubleshooting section
-6. Document external dependencies (domain-complexity.csv, project-types.csv)
-
----
-
-## 5. Integration & Compatibility Validation
-
-### Integration Status: 85% Ready
-
-#### β Successfully Integrated Components
-
-**1. Agent Menu Registration:**
-- β Registered in PM agent menu
-- β Trigger: `PR` or fuzzy match on `prd`
-- β Command: `/bmad:bmm:workflows:create-prd`
-- β Proper workflow path configuration
-
-**2. External Workflow References:**
-- β Party-mode workflow: Exists at `/src/core/workflows/party-mode/workflow.md`
-- β Advanced-elicitation task: Exists at `/src/core/workflows/advanced-elicitation/workflow.xml`
-
-**3. Directory Structure:**
-- β Complete step architecture (all 3 modes)
-- β All referenced step files exist
-- β Data files available
-
-#### β Configuration & Installation - WORKING AS DESIGNED
-
-**1. BMM Config Reference:**
-- Path: `{project-root}/_bmad/bmm/config.yaml`
-- **Status:** β Correct installation-time placeholder
-- Resolves to actual config during workflow installation
-- **Note:** This is expected behavior, not an issue
-
-**2. Planning Artifacts Folder:**
-- Reference: `{planning_artifacts}/prd.md`
-- **Status:** β Correct installation-time placeholder
-- Created/resolved during workflow installation
-- **Note:** This is expected behavior, not an issue
-
-**3. Edit Mode Implementation:**
-- Current: 5 steps (Discovery, Legacy Conversion branch, Review, Edit, Complete)
-- **Status:** β Functionally complete
-- Edit mode is inherently simpler than create mode (targeted improvements vs full creation)
-- Uses subprocesses for complex operations
-- Validation integration ensures quality
-- **Note:** Edit workflow is complete and well-designed
-
-#### Configuration Analysis
-
-**Placeholder Usage:**
-- `{project-root}`: β Properly used
-- `{planning_artifacts}`: β οΈ Referenced but folder missing
-- `{nextStep}`, `{validateWorkflow}`, etc: β Properly resolved
-
-#### Recommendations
-
-**β ALL CRITICAL ISSUES RESOLVED:**
-
-The only true critical issue (edit workflow path) has been fixed. All other items flagged as "critical" were actually working as designed (installation-time placeholders).
-
-**LOW PRIORITY:**
-3. Add CLI command registration for standalone execution (optional enhancement)
-4. Consider adding workflow to additional agent menus (UX designer, architect)
-5. Create standalone execution documentation (nice-to-have)
-6. Address step numbering gap if desired (cosmetic)
-
----
-
-## 6. Executive Summary & Overall Assessment
-
-### Overall Validation Status: β PRODUCTION-READY
-
-#### Validation Scores by Category
-
-| Category | Status | Score | Notes |
-|----------|--------|-------|-------|
-| **File Structure & Size** | β οΈ WARNINGS | 7/10 | 3 files exceed 250-line limit, 23 approaching |
-| **Frontmatter Validation** | β PASS | 9/10 | One broken path reference |
-| **Step Content Quality** | β EXCELLENT | 9.5/10 | High-quality instructional design |
-| **Documentation** | β EXCELLENT | 9/10 | Comprehensive, minor inconsistencies |
-| **Integration** | β PASS | 9/10 | All paths correct (one issue fixed) |
-| **BMAD Compliance** | β EXCELLENT | 9.5/10 | Strong adherence to standards |
-
-**Overall Score: 9.2/10 - EXCELLENT**
-
-#### β Critical Action Items - ALL RESOLVED
-
-**ONLY ONE TRUE CRITICAL ISSUE EXISTED - NOW FIXED:**
-
-1. **β FIXED: Edit Workflow Path**
- - File: `workflow.md` β RESOLVED
- - Changed from: `./steps-e/step-e-01-assess-workflow.md`
- - Changed to: `./steps-e/step-e-01-discovery.md`
-
-**Items incorrectly flagged as critical (actually working as designed):**
-- β Configuration path references (installation-time placeholders)
-- β Planning artifacts folder (installation-time placeholder)
-
-#### High Priority Improvements
-
-2. **β οΈ Split Large Step Files** (>250 lines):
- - `step-v-09-project-type-validation.md` (263 lines)
- - `step-v-11-holistic-quality-validation.md` (264 lines)
- - `step-e-03-edit.md` (253 lines)
-
-3. **β οΈ Update Documentation Inconsistencies**:
- - Fix step count reference (11 β 12 steps in create mode)
- - Update edit mode status (Future β Implemented)
-
-#### Medium Priority Enhancements
-
-4. **Enhance PRD Template** (currently minimal at 10 lines)
-5. **Add quick-start README** for new users
-6. **Address step numbering gap** (cosmetic - missing step-11-complete.md)
-
-#### Edit Mode Status - FUNCTIONALLY COMPLETE β
-
-The edit workflow is **complete and well-designed** with 5 steps:
-- Discovery β Legacy Conversion (branch) β Review β Edit β Complete
-- Edit mode is inherently simpler than create mode (targeted improvements vs full creation)
-- Uses subprocesses for complex operations
-- Integrates with validation workflow
-
-**No additional steps needed.**
-
-### Key Strengths
-
-β **Excellent step file quality** - Clear, well-structured instructions
-β **Comprehensive validation system** - 13 dedicated validation steps
-β **Strong BMAD compliance** - JIT loading, state tracking, sequential enforcement
-β **Tri-modal architecture** - Create, Validate, Edit all implemented
-β **Professional documentation** - Clear, consistent, well-presented
-β **Proper agent integration** - Registered in PM agent menu
-
-### Areas for Improvement (Optional)
-
-β οΈ **File size management** - Many files approaching limits (maintainability consideration)
-β οΈ **Documentation consistency** - Minor discrepancies in counts/status (cosmetic)
-β **Edit mode** - Functionally complete, no additional steps needed
-
-### Conclusion
-
-The PRD workflow is **well-designed and fully compliant** with BMAD standards. The step file architecture is exemplary, the content quality is excellent, and the documentation is comprehensive. The only critical issue (edit workflow path) has been **resolved**, and all other flagged items were actually working as designed (installation-time placeholders).
-
-**Current Status: β PRODUCTION-READY**
-
-**Recommended Optional Enhancements:**
-1. Split the 3 files exceeding 250-line limit (maintainability)
-2. Update documentation inconsistencies (step counts, edit mode status)
-3. Enhance PRD template and add quick-start README (user experience)
-
-The PRD workflow is ready for production use and fully compliant with BMAD workflow standards.
-
----
-
-**Validation Completed:** 2026-01-08
-**Validation Method:** Systematic subprocess analysis with maximum context coverage
-**Validator:** BMAD Workflow Validation System (Wendy - Workflow Building Master)
diff --git a/src/bmm/workflows/2-plan-workflows/prd/workflow.md b/src/bmm/workflows/2-plan-workflows/prd/workflow.md
deleted file mode 100644
index be5a7311..00000000
--- a/src/bmm/workflows/2-plan-workflows/prd/workflow.md
+++ /dev/null
@@ -1,150 +0,0 @@
----
-name: prd
-description: PRD tri-modal workflow - Create, Validate, or Edit comprehensive PRDs
-main_config: '{project-root}/_bmad/bmm/config.yaml'
-nextStep: './steps-c/step-01-init.md'
-validateWorkflow: './steps-v/step-v-01-discovery.md'
-editWorkflow: './steps-e/step-e-01-discovery.md'
-web_bundle: true
----
-
-# PRD Workflow (Tri-Modal)
-
-**Goal:** Create, Validate, or Edit comprehensive PRDs through structured workflows.
-
-**Your Role:**
-- **Create Mode:** Product-focused PM facilitator collaborating with an expert peer
-- **Validate Mode:** Validation Architect and Quality Assurance Specialist
-- **Edit Mode:** PRD improvement specialist
-
-You will continue to operate with your given name, identity, and communication_style, merged with the details of this role description.
-
----
-
-## MODE DETERMINATION
-
-### Detect Workflow Mode
-
-Determine which mode to invoke based on:
-
-1. **Command/Invocation:**
- - "create prd" or "new prd" β Create mode
- - "validate prd" or "check prd" β Validate mode
- - "edit prd" or "improve prd" β Edit mode
-
-2. **Context Detection:**
- - If invoked with -c flag β Create mode
- - If invoked with -v flag β Validate mode
- - If invoked with -e flag β Edit mode
-
-3. **Menu Selection (if unclear):**
-
-If mode cannot be determined from invocation:
-"**PRD Workflow - Select Mode:**
-
-**[C] Create** - Create a new PRD from scratch
-**[V] Validate** - Validate an existing PRD against BMAD standards
-**[E] Edit** - Improve an existing PRD
-
-Which mode would you like?"
-
-Wait for user selection.
-
-### Route to Appropriate Workflow
-
-**IF Create Mode:**
-"**Create Mode: Creating a new PRD from scratch.**"
-Read fully and follow: `{nextStep}` (steps-c/step-01-init.md)
-
-**IF Validate Mode:**
-"**Validate Mode: Validating an existing PRD against BMAD standards.**"
-Prompt for PRD path: "Which PRD would you like to validate? Please provide the path to the PRD.md file."
-Then read fully and follow: `{validateWorkflow}` (steps-v/step-v-01-discovery.md)
-
-**IF Edit Mode:**
-"**Edit Mode: Improving an existing PRD.**"
-Prompt for PRD path: "Which PRD would you like to edit? Please provide the path to the PRD.md file."
-Then read fully and follow: `{editWorkflow}` (steps-e/step-e-01-discovery.md)
-
----
-
-## WORKFLOW ARCHITECTURE
-
-This uses **step-file architecture** for disciplined execution:
-
-### Core Principles
-
-- **Micro-file Design**: Each step is a self contained instruction file that is a part of an overall workflow that must be followed exactly
-- **Just-In-Time Loading**: Only the current step file is in memory - never load future step files until told to do so
-- **Sequential Enforcement**: Sequence within the step files must be completed in order, no skipping or optimization allowed
-- **State Tracking**: Document progress in output file frontmatter using `stepsCompleted` array when a workflow produces a document
-- **Append-Only Building**: Build documents by appending content as directed to the output file
-
-### Step Processing Rules
-
-1. **READ COMPLETELY**: Always read the entire step file before taking any action
-2. **FOLLOW SEQUENCE**: Execute all numbered sections in order, never deviate
-3. **WAIT FOR INPUT**: If a menu is presented, halt and wait for user selection
-4. **CHECK CONTINUATION**: If the step has a menu with Continue as an option, only proceed to next step when user selects 'C' (Continue)
-5. **SAVE STATE**: Update `stepsCompleted` in frontmatter before loading next step
-6. **LOAD NEXT**: When directed, read fully and follow the next step file
-
-### Critical Rules (NO EXCEPTIONS)
-
-- π **NEVER** load multiple step files simultaneously
-- π **ALWAYS** read entire step file before execution
-- π« **NEVER** skip steps or optimize the sequence
-- πΎ **ALWAYS** update frontmatter of output files when writing the final output for a specific step
-- π― **ALWAYS** follow the exact instructions in the step file
-- βΈοΈ **ALWAYS** halt at menus and wait for user input
-- π **NEVER** create mental todo lists from future steps
-
----
-
-## INITIALIZATION SEQUENCE
-
-### 1. Mode Determination
-
-**Check if mode was specified in the command invocation:**
-
-- If user invoked with "create prd" or "new prd" or "build prd" or "-c" or "--create" β Set mode to **create**
-- If user invoked with "validate prd" or "review prd" or "check prd" or "-v" or "--validate" β Set mode to **validate**
-- If user invoked with "edit prd" or "modify prd" or "improve prd" or "-e" or "--edit" β Set mode to **edit**
-
-**If mode is still unclear, ask user:**
-
-"**PRD Workflow - Select Mode:**
-
-**[C] Create** - Create a new PRD from scratch
-**[V] Validate** - Validate an existing PRD against BMAD standards
-**[E] Edit** - Improve an existing PRD
-
-Which mode would you like?"
-
-Wait for user selection.
-
-### 2. Configuration Loading
-
-Load and read full config from {main_config} and resolve:
-
-- `project_name`, `output_folder`, `planning_artifacts`, `user_name`
-- `communication_language`, `document_output_language`, `user_skill_level`
-- `date` as system-generated current datetime
-
-β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the configured `{communication_language}`.
-
-### 3. Route to Appropriate Workflow
-
-**IF mode == create:**
-"**Create Mode: Creating a new PRD from scratch.**"
-Read fully and follow: `{nextStep}` (steps-c/step-01-init.md)
-
-**IF mode == validate:**
-"**Validate Mode: Validating an existing PRD against BMAD standards.**"
-Prompt for PRD path: "Which PRD would you like to validate? Please provide the path to the PRD.md file."
-Then read fully and follow: `{validateWorkflow}` (steps-v/step-v-01-discovery.md)
-
-**IF mode == edit:**
-"**Edit Mode: Improving an existing PRD.**"
-Prompt for PRD path: "Which PRD would you like to edit? Please provide the path to the PRD.md file."
-Then read fully and follow: `{editWorkflow}` (steps-e/step-e-01-discovery.md)
diff --git a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-01-document-discovery.md b/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-01-document-discovery.md
deleted file mode 100644
index fccb7da2..00000000
--- a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-01-document-discovery.md
+++ /dev/null
@@ -1,190 +0,0 @@
----
-name: 'step-01-document-discovery'
-description: 'Discover and inventory all project documents, handling duplicates and organizing file structure'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/implementation-readiness'
-
-# File References
-thisStepFile: './step-01-document-discovery.md'
-nextStepFile: './step-02-prd-analysis.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/implementation-readiness-report-{{date}}.md'
-templateFile: '{workflow_path}/templates/readiness-report-template.md'
----
-
-# Step 1: Document Discovery
-
-## STEP GOAL:
-
-To discover, inventory, and organize all project documents, identifying duplicates and determining which versions to use for the assessment.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are an expert Product Manager and Scrum Master
-- β Your focus is on finding organizing and documenting what exists
-- β You identify ambiguities and ask for clarification
-- β Success is measured in clear file inventory and conflict resolution
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on finding and organizing files
-- π« Don't read or analyze file contents
-- π¬ Identify duplicate documents clearly
-- πͺ Get user confirmation on file selections
-
-## EXECUTION PROTOCOLS:
-
-- π― Search for all document types systematically
-- πΎ Group sharded files together
-- π Flag duplicates for user resolution
-- π« FORBIDDEN to proceed with unresolved duplicates
-
-## DOCUMENT DISCOVERY PROCESS:
-
-### 1. Initialize Document Discovery
-
-"Beginning **Document Discovery** to inventory all project files.
-
-I will:
-
-1. Search for all required documents (PRD, Architecture, Epics, UX)
-2. Group sharded documents together
-3. Identify any duplicates (whole + sharded versions)
-4. Present findings for your confirmation"
-
-### 2. Document Search Patterns
-
-Search for each document type using these patterns:
-
-#### A. PRD Documents
-
-- Whole: `{planning_artifacts}/*prd*.md`
-- Sharded: `{planning_artifacts}/*prd*/index.md` and related files
-
-#### B. Architecture Documents
-
-- Whole: `{planning_artifacts}/*architecture*.md`
-- Sharded: `{planning_artifacts}/*architecture*/index.md` and related files
-
-#### C. Epics & Stories Documents
-
-- Whole: `{planning_artifacts}/*epic*.md`
-- Sharded: `{planning_artifacts}/*epic*/index.md` and related files
-
-#### D. UX Design Documents
-
-- Whole: `{planning_artifacts}/*ux*.md`
-- Sharded: `{planning_artifacts}/*ux*/index.md` and related files
-
-### 3. Organize Findings
-
-For each document type found:
-
-```
-## [Document Type] Files Found
-
-**Whole Documents:**
-- [filename.md] ([size], [modified date])
-
-**Sharded Documents:**
-- Folder: [foldername]/
- - index.md
- - [other files in folder]
-```
-
-### 4. Identify Critical Issues
-
-#### Duplicates (CRITICAL)
-
-If both whole and sharded versions exist:
-
-```
-β οΈ CRITICAL ISSUE: Duplicate document formats found
-- PRD exists as both whole.md AND prd/ folder
-- YOU MUST choose which version to use
-- Remove or rename the other version to avoid confusion
-```
-
-#### Missing Documents (WARNING)
-
-If required documents not found:
-
-```
-β οΈ WARNING: Required document not found
-- Architecture document not found
-- Will impact assessment completeness
-```
-
-### 5. Add Initial Report Section
-
-Initialize {outputFile} with {templateFile}.
-
-### 6. Present Findings and Get Confirmation
-
-Display findings and ask:
-"**Document Discovery Complete**
-
-[Show organized file list]
-
-**Issues Found:**
-
-- [List any duplicates requiring resolution]
-- [List any missing documents]
-
-**Required Actions:**
-
-- If duplicates exist: Please remove/rename one version
-- Confirm which documents to use for assessment
-
-**Ready to proceed?** [C] Continue after resolving issues"
-
-### 7. Present MENU OPTIONS
-
-Display: **Select an Option:** [C] Continue to File Validation
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed with 'C' selection
-- If duplicates identified, insist on resolution first
-- User can clarify file locations or request additional searches
-
-#### Menu Handling Logic:
-
-- IF C: Save document inventory to {outputFile}, update frontmatter with completed step and files being included, and then read fully and follow: {nextStepFile}
-- IF Any other comments or queries: help user respond then redisplay menu
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN C is selected and document inventory is saved will you load {nextStepFile} to begin file validation.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All document types searched systematically
-- Files organized and inventoried clearly
-- Duplicates identified and flagged for resolution
-- User confirmed file selections
-
-### β SYSTEM FAILURE:
-
-- Not searching all document types
-- Ignoring duplicate document conflicts
-- Proceeding without resolving critical issues
-- Not saving document inventory
-
-**Master Rule:** Clear file identification is essential for accurate assessment.
diff --git a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-02-prd-analysis.md b/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-02-prd-analysis.md
deleted file mode 100644
index 5dd08705..00000000
--- a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-02-prd-analysis.md
+++ /dev/null
@@ -1,178 +0,0 @@
----
-name: 'step-02-prd-analysis'
-description: 'Read and analyze PRD to extract all FRs and NFRs for coverage validation'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/implementation-readiness'
-
-# File References
-thisStepFile: './step-02-prd-analysis.md'
-nextStepFile: './step-03-epic-coverage-validation.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/implementation-readiness-report-{{date}}.md'
-epicsFile: '{planning_artifacts}/*epic*.md' # Will be resolved to actual file
----
-
-# Step 2: PRD Analysis
-
-## STEP GOAL:
-
-To fully read and analyze the PRD document (whole or sharded) to extract all Functional Requirements (FRs) and Non-Functional Requirements (NFRs) for validation against epics coverage.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are an expert Product Manager and Scrum Master
-- β Your expertise is in requirements analysis and traceability
-- β You think critically about requirement completeness
-- β Success is measured in thorough requirement extraction
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on reading and extracting from PRD
-- π« Don't validate files (done in step 1)
-- π¬ Read PRD completely - whole or all sharded files
-- πͺ Extract every FR and NFR with numbering
-
-## EXECUTION PROTOCOLS:
-
-- π― Load and completely read the PRD
-- πΎ Extract all requirements systematically
-- π Document findings in the report
-- π« FORBIDDEN to skip or summarize PRD content
-
-## PRD ANALYSIS PROCESS:
-
-### 1. Initialize PRD Analysis
-
-"Beginning **PRD Analysis** to extract all requirements.
-
-I will:
-
-1. Load the PRD document (whole or sharded)
-2. Read it completely and thoroughly
-3. Extract ALL Functional Requirements (FRs)
-4. Extract ALL Non-Functional Requirements (NFRs)
-5. Document findings for coverage validation"
-
-### 2. Load and Read PRD
-
-From the document inventory in step 1:
-
-- If whole PRD file exists: Load and read it completely
-- If sharded PRD exists: Load and read ALL files in the PRD folder
-- Ensure complete coverage - no files skipped
-
-### 3. Extract Functional Requirements (FRs)
-
-Search for and extract:
-
-- Numbered FRs (FR1, FR2, FR3, etc.)
-- Requirements labeled "Functional Requirement"
-- User stories or use cases that represent functional needs
-- Business rules that must be implemented
-
-Format findings as:
-
-```
-## Functional Requirements Extracted
-
-FR1: [Complete requirement text]
-FR2: [Complete requirement text]
-FR3: [Complete requirement text]
-...
-Total FRs: [count]
-```
-
-### 4. Extract Non-Functional Requirements (NFRs)
-
-Search for and extract:
-
-- Performance requirements (response times, throughput)
-- Security requirements (authentication, encryption, etc.)
-- Usability requirements (accessibility, ease of use)
-- Reliability requirements (uptime, error rates)
-- Scalability requirements (concurrent users, data growth)
-- Compliance requirements (standards, regulations)
-
-Format findings as:
-
-```
-## Non-Functional Requirements Extracted
-
-NFR1: [Performance requirement]
-NFR2: [Security requirement]
-NFR3: [Usability requirement]
-...
-Total NFRs: [count]
-```
-
-### 5. Document Additional Requirements
-
-Look for:
-
-- Constraints or assumptions
-- Technical requirements not labeled as FR/NFR
-- Business constraints
-- Integration requirements
-
-### 6. Add to Assessment Report
-
-Append to {outputFile}:
-
-```markdown
-## PRD Analysis
-
-### Functional Requirements
-
-[Complete FR list from section 3]
-
-### Non-Functional Requirements
-
-[Complete NFR list from section 4]
-
-### Additional Requirements
-
-[Any other requirements or constraints found]
-
-### PRD Completeness Assessment
-
-[Initial assessment of PRD completeness and clarity]
-```
-
-### 7. Auto-Proceed to Next Step
-
-After PRD analysis complete, immediately load next step for epic coverage validation.
-
-## PROCEEDING TO EPIC COVERAGE VALIDATION
-
-PRD analysis complete. Loading next step to validate epic coverage.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- PRD loaded and read completely
-- All FRs extracted with full text
-- All NFRs identified and documented
-- Findings added to assessment report
-
-### β SYSTEM FAILURE:
-
-- Not reading complete PRD (especially sharded versions)
-- Missing requirements in extraction
-- Summarizing instead of extracting full text
-- Not documenting findings in report
-
-**Master Rule:** Complete requirement extraction is essential for traceability validation.
diff --git a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-03-epic-coverage-validation.md b/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-03-epic-coverage-validation.md
deleted file mode 100644
index 981a5b63..00000000
--- a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-03-epic-coverage-validation.md
+++ /dev/null
@@ -1,179 +0,0 @@
----
-name: 'step-03-epic-coverage-validation'
-description: 'Validate that all PRD FRs are covered in epics and stories'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/implementation-readiness'
-
-# File References
-thisStepFile: './step-03-epic-coverage-validation.md'
-nextStepFile: './step-04-ux-alignment.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/implementation-readiness-report-{{date}}.md'
----
-
-# Step 3: Epic Coverage Validation
-
-## STEP GOAL:
-
-To validate that all Functional Requirements from the PRD are captured in the epics and stories document, identifying any gaps in coverage.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are an expert Product Manager and Scrum Master
-- β Your expertise is in requirements traceability
-- β You ensure no requirements fall through the cracks
-- β Success is measured in complete FR coverage
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on FR coverage validation
-- π« Don't analyze story quality (that's later)
-- π¬ Compare PRD FRs against epic coverage list
-- πͺ Document every missing FR
-
-## EXECUTION PROTOCOLS:
-
-- π― Load epics document completely
-- πΎ Extract FR coverage from epics
-- π Compare against PRD FR list
-- π« FORBIDDEN to proceed without documenting gaps
-
-## EPIC COVERAGE VALIDATION PROCESS:
-
-### 1. Initialize Coverage Validation
-
-"Beginning **Epic Coverage Validation**.
-
-I will:
-
-1. Load the epics and stories document
-2. Extract FR coverage information
-3. Compare against PRD FRs from previous step
-4. Identify any FRs not covered in epics"
-
-### 2. Load Epics Document
-
-From the document inventory in step 1:
-
-- Load the epics and stories document (whole or sharded)
-- Read it completely to find FR coverage information
-- Look for sections like "FR Coverage Map" or similar
-
-### 3. Extract Epic FR Coverage
-
-From the epics document:
-
-- Find FR coverage mapping or list
-- Extract which FR numbers are claimed to be covered
-- Document which epics cover which FRs
-
-Format as:
-
-```
-## Epic FR Coverage Extracted
-
-FR1: Covered in Epic X
-FR2: Covered in Epic Y
-FR3: Covered in Epic Z
-...
-Total FRs in epics: [count]
-```
-
-### 4. Compare Coverage Against PRD
-
-Using the PRD FR list from step 2:
-
-- Check each PRD FR against epic coverage
-- Identify FRs NOT covered in epics
-- Note any FRs in epics but NOT in PRD
-
-Create coverage matrix:
-
-```
-## FR Coverage Analysis
-
-| FR Number | PRD Requirement | Epic Coverage | Status |
-| --------- | --------------- | -------------- | --------- |
-| FR1 | [PRD text] | Epic X Story Y | β Covered |
-| FR2 | [PRD text] | **NOT FOUND** | β MISSING |
-| FR3 | [PRD text] | Epic Z Story A | β Covered |
-```
-
-### 5. Document Missing Coverage
-
-List all FRs not covered:
-
-```
-## Missing FR Coverage
-
-### Critical Missing FRs
-
-FR#: [Full requirement text from PRD]
-- Impact: [Why this is critical]
-- Recommendation: [Which epic should include this]
-
-### High Priority Missing FRs
-
-[List any other uncovered FRs]
-```
-
-### 6. Add to Assessment Report
-
-Append to {outputFile}:
-
-```markdown
-## Epic Coverage Validation
-
-### Coverage Matrix
-
-[Complete coverage matrix from section 4]
-
-### Missing Requirements
-
-[List of uncovered FRs from section 5]
-
-### Coverage Statistics
-
-- Total PRD FRs: [count]
-- FRs covered in epics: [count]
-- Coverage percentage: [percentage]
-```
-
-### 7. Auto-Proceed to Next Step
-
-After coverage validation complete, immediately load next step.
-
-## PROCEEDING TO UX ALIGNMENT
-
-Epic coverage validation complete. Loading next step for UX alignment.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Epics document loaded completely
-- FR coverage extracted accurately
-- All gaps identified and documented
-- Coverage matrix created
-
-### β SYSTEM FAILURE:
-
-- Not reading complete epics document
-- Missing FRs in comparison
-- Not documenting uncovered requirements
-- Incomplete coverage analysis
-
-**Master Rule:** Every FR must have a traceable implementation path.
diff --git a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-04-ux-alignment.md b/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-04-ux-alignment.md
deleted file mode 100644
index 33aad045..00000000
--- a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-04-ux-alignment.md
+++ /dev/null
@@ -1,139 +0,0 @@
----
-name: 'step-04-ux-alignment'
-description: 'Check for UX document and validate alignment with PRD and Architecture'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/implementation-readiness'
-
-# File References
-thisStepFile: './step-04-ux-alignment.md'
-nextStepFile: './step-05-epic-quality-review.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/implementation-readiness-report-{{date}}.md'
----
-
-# Step 4: UX Alignment
-
-## STEP GOAL:
-
-To check if UX documentation exists and validate that it aligns with PRD requirements and Architecture decisions, ensuring architecture accounts for both PRD and UX needs.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a UX VALIDATOR ensuring user experience is properly addressed
-- β UX requirements must be supported by architecture
-- β Missing UX documentation is a warning if UI is implied
-- β Alignment gaps must be documented
-
-### Step-Specific Rules:
-
-- π― Check for UX document existence first
-- π« Don't assume UX is not needed
-- π¬ Validate alignment between UX, PRD, and Architecture
-- πͺ Add findings to the output report
-
-## EXECUTION PROTOCOLS:
-
-- π― Search for UX documentation
-- πΎ If found, validate alignment
-- π If not found, assess if UX is implied
-- π« FORBIDDEN to proceed without completing assessment
-
-## UX ALIGNMENT PROCESS:
-
-### 1. Initialize UX Validation
-
-"Beginning **UX Alignment** validation.
-
-I will:
-
-1. Check if UX documentation exists
-2. If UX exists: validate alignment with PRD and Architecture
-3. If no UX: determine if UX is implied and document warning"
-
-### 2. Search for UX Documentation
-
-Search patterns:
-
-- `{planning_artifacts}/*ux*.md` (whole document)
-- `{planning_artifacts}/*ux*/index.md` (sharded)
-- Look for UI-related terms in other documents
-
-### 3. If UX Document Exists
-
-#### A. UX β PRD Alignment
-
-- Check UX requirements reflected in PRD
-- Verify user journeys in UX match PRD use cases
-- Identify UX requirements not in PRD
-
-#### B. UX β Architecture Alignment
-
-- Verify architecture supports UX requirements
-- Check performance needs (responsiveness, load times)
-- Identify UI components not supported by architecture
-
-### 4. If No UX Document
-
-Assess if UX/UI is implied:
-
-- Does PRD mention user interface?
-- Are there web/mobile components implied?
-- Is this a user-facing application?
-
-If UX implied but missing: Add warning to report
-
-### 5. Add Findings to Report
-
-Append to {outputFile}:
-
-```markdown
-## UX Alignment Assessment
-
-### UX Document Status
-
-[Found/Not Found]
-
-### Alignment Issues
-
-[List any misalignments between UX, PRD, and Architecture]
-
-### Warnings
-
-[Any warnings about missing UX or architectural gaps]
-```
-
-### 6. Auto-Proceed to Next Step
-
-After UX assessment complete, immediately load next step.
-
-## PROCEEDING TO EPIC QUALITY REVIEW
-
-UX alignment assessment complete. Loading next step for epic quality review.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- UX document existence checked
-- Alignment validated if UX exists
-- Warning issued if UX implied but missing
-- Findings added to report
-
-### β SYSTEM FAILURE:
-
-- Not checking for UX document
-- Ignoring alignment issues
-- Not documenting warnings
diff --git a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-05-epic-quality-review.md b/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-05-epic-quality-review.md
deleted file mode 100644
index 0203cdc1..00000000
--- a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-05-epic-quality-review.md
+++ /dev/null
@@ -1,252 +0,0 @@
----
-name: 'step-05-epic-quality-review'
-description: 'Validate epics and stories against create-epics-and-stories best practices'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/implementation-readiness'
-
-# File References
-thisStepFile: './step-05-epic-quality-review.md'
-nextStepFile: './step-06-final-assessment.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/implementation-readiness-report-{{date}}.md'
-epicsBestPractices: '{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories'
----
-
-# Step 5: Epic Quality Review
-
-## STEP GOAL:
-
-To validate epics and stories against the best practices defined in create-epics-and-stories workflow, focusing on user value, independence, dependencies, and implementation readiness.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are an EPIC QUALITY ENFORCER
-- β You know what good epics look like - challenge anything deviating
-- β Technical epics are wrong - find them
-- β Forward dependencies are forbidden - catch them
-- β Stories must be independently completable
-
-### Step-Specific Rules:
-
-- π― Apply create-epics-and-stories standards rigorously
-- π« Don't accept "technical milestones" as epics
-- π¬ Challenge every dependency on future work
-- πͺ Verify proper story sizing and structure
-
-## EXECUTION PROTOCOLS:
-
-- π― Systematically validate each epic and story
-- πΎ Document all violations of best practices
-- π Check every dependency relationship
-- π« FORBIDDEN to accept structural problems
-
-## EPIC QUALITY REVIEW PROCESS:
-
-### 1. Initialize Best Practices Validation
-
-"Beginning **Epic Quality Review** against create-epics-and-stories standards.
-
-I will rigorously validate:
-
-- Epics deliver user value (not technical milestones)
-- Epic independence (Epic 2 doesn't need Epic 3)
-- Story dependencies (no forward references)
-- Proper story sizing and completeness
-
-Any deviation from best practices will be flagged as a defect."
-
-### 2. Epic Structure Validation
-
-#### A. User Value Focus Check
-
-For each epic:
-
-- **Epic Title:** Is it user-centric (what user can do)?
-- **Epic Goal:** Does it describe user outcome?
-- **Value Proposition:** Can users benefit from this epic alone?
-
-**Red flags (violations):**
-
-- "Setup Database" or "Create Models" - no user value
-- "API Development" - technical milestone
-- "Infrastructure Setup" - not user-facing
-- "Authentication System" - borderline (is it user value?)
-
-#### B. Epic Independence Validation
-
-Test epic independence:
-
-- **Epic 1:** Must stand alone completely
-- **Epic 2:** Can function using only Epic 1 output
-- **Epic 3:** Can function using Epic 1 & 2 outputs
-- **Rule:** Epic N cannot require Epic N+1 to work
-
-**Document failures:**
-
-- "Epic 2 requires Epic 3 features to function"
-- Stories in Epic 2 referencing Epic 3 components
-- Circular dependencies between epics
-
-### 3. Story Quality Assessment
-
-#### A. Story Sizing Validation
-
-Check each story:
-
-- **Clear User Value:** Does the story deliver something meaningful?
-- **Independent:** Can it be completed without future stories?
-
-**Common violations:**
-
-- "Setup all models" - not a USER story
-- "Create login UI (depends on Story 1.3)" - forward dependency
-
-#### B. Acceptance Criteria Review
-
-For each story's ACs:
-
-- **Given/When/Then Format:** Proper BDD structure?
-- **Testable:** Each AC can be verified independently?
-- **Complete:** Covers all scenarios including errors?
-- **Specific:** Clear expected outcomes?
-
-**Issues to find:**
-
-- Vague criteria like "user can login"
-- Missing error conditions
-- Incomplete happy path
-- Non-measurable outcomes
-
-### 4. Dependency Analysis
-
-#### A. Within-Epic Dependencies
-
-Map story dependencies within each epic:
-
-- Story 1.1 must be completable alone
-- Story 1.2 can use Story 1.1 output
-- Story 1.3 can use Story 1.1 & 1.2 outputs
-
-**Critical violations:**
-
-- "This story depends on Story 1.4"
-- "Wait for future story to work"
-- Stories referencing features not yet implemented
-
-#### B. Database/Entity Creation Timing
-
-Validate database creation approach:
-
-- **Wrong:** Epic 1 Story 1 creates all tables upfront
-- **Right:** Each story creates tables it needs
-- **Check:** Are tables created only when first needed?
-
-### 5. Special Implementation Checks
-
-#### A. Starter Template Requirement
-
-Check if Architecture specifies starter template:
-
-- If YES: Epic 1 Story 1 must be "Set up initial project from starter template"
-- Verify story includes cloning, dependencies, initial configuration
-
-#### B. Greenfield vs Brownfield Indicators
-
-Greenfield projects should have:
-
-- Initial project setup story
-- Development environment configuration
-- CI/CD pipeline setup early
-
-Brownfield projects should have:
-
-- Integration points with existing systems
-- Migration or compatibility stories
-
-### 6. Best Practices Compliance Checklist
-
-For each epic, verify:
-
-- [ ] Epic delivers user value
-- [ ] Epic can function independently
-- [ ] Stories appropriately sized
-- [ ] No forward dependencies
-- [ ] Database tables created when needed
-- [ ] Clear acceptance criteria
-- [ ] Traceability to FRs maintained
-
-### 7. Quality Assessment Documentation
-
-Document all findings by severity:
-
-#### π΄ Critical Violations
-
-- Technical epics with no user value
-- Forward dependencies breaking independence
-- Epic-sized stories that cannot be completed
-
-#### π Major Issues
-
-- Vague acceptance criteria
-- Stories requiring future stories
-- Database creation violations
-
-#### π‘ Minor Concerns
-
-- Formatting inconsistencies
-- Minor structure deviations
-- Documentation gaps
-
-### 8. Autonomous Review Execution
-
-This review runs autonomously to maintain standards:
-
-- Apply best practices without compromise
-- Document every violation with specific examples
-- Provide clear remediation guidance
-- Prepare recommendations for each issue
-
-## REVIEW COMPLETION:
-
-After completing epic quality review:
-
-- Update {outputFile} with all quality findings
-- Document specific best practices violations
-- Provide actionable recommendations
-- Load {nextStepFile} for final readiness assessment
-
-## CRITICAL STEP COMPLETION NOTE
-
-This step executes autonomously. Load {nextStepFile} only after complete epic quality review is documented.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All epics validated against best practices
-- Every dependency checked and verified
-- Quality violations documented with examples
-- Clear remediation guidance provided
-- No compromise on standards enforcement
-
-### β SYSTEM FAILURE:
-
-- Accepting technical epics as valid
-- Ignoring forward dependencies
-- Not verifying story sizing
-- Overlooking obvious violations
-
-**Master Rule:** Enforce best practices rigorously. Find all violations.
diff --git a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-06-final-assessment.md b/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-06-final-assessment.md
deleted file mode 100644
index cc826ee9..00000000
--- a/src/bmm/workflows/3-solutioning/check-implementation-readiness/steps/step-06-final-assessment.md
+++ /dev/null
@@ -1,135 +0,0 @@
----
-name: 'step-06-final-assessment'
-description: 'Compile final assessment and polish the readiness report'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/implementation-readiness'
-
-# File References
-thisStepFile: './step-06-final-assessment.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/implementation-readiness-report-{{date}}.md'
----
-
-# Step 6: Final Assessment
-
-## STEP GOAL:
-
-To provide a comprehensive summary of all findings and give the report a final polish, ensuring clear recommendations and overall readiness status.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π You are at the final step - complete the assessment
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are delivering the FINAL ASSESSMENT
-- β Your findings are objective and backed by evidence
-- β Provide clear, actionable recommendations
-- β Success is measured by value of findings
-
-### Step-Specific Rules:
-
-- π― Compile and summarize all findings
-- π« Don't soften the message - be direct
-- π¬ Provide specific examples for problems
-- πͺ Add final section to the report
-
-## EXECUTION PROTOCOLS:
-
-- π― Review all findings from previous steps
-- πΎ Add summary and recommendations
-- π Determine overall readiness status
-- π« Complete and present final report
-
-## FINAL ASSESSMENT PROCESS:
-
-### 1. Initialize Final Assessment
-
-"Completing **Final Assessment**.
-
-I will now:
-
-1. Review all findings from previous steps
-2. Provide a comprehensive summary
-3. Add specific recommendations
-4. Determine overall readiness status"
-
-### 2. Review Previous Findings
-
-Check the {outputFile} for sections added by previous steps:
-
-- File and FR Validation findings
-- UX Alignment issues
-- Epic Quality violations
-
-### 3. Add Final Assessment Section
-
-Append to {outputFile}:
-
-```markdown
-## Summary and Recommendations
-
-### Overall Readiness Status
-
-[READY/NEEDS WORK/NOT READY]
-
-### Critical Issues Requiring Immediate Action
-
-[List most critical issues that must be addressed]
-
-### Recommended Next Steps
-
-1. [Specific action item 1]
-2. [Specific action item 2]
-3. [Specific action item 3]
-
-### Final Note
-
-This assessment identified [X] issues across [Y] categories. Address the critical issues before proceeding to implementation. These findings can be used to improve the artifacts or you may choose to proceed as-is.
-```
-
-### 4. Complete the Report
-
-- Ensure all findings are clearly documented
-- Verify recommendations are actionable
-- Add date and assessor information
-- Save the final report
-
-### 5. Present Completion
-
-Display:
-"**Implementation Readiness Assessment Complete**
-
-Report generated: {outputFile}
-
-The assessment found [number] issues requiring attention. Review the detailed report for specific findings and recommendations."
-
-## WORKFLOW COMPLETE
-
-The implementation readiness workflow is now complete. The report contains all findings and recommendations for the user to consider.
-
-Implementation Readiness complete. Read fully and follow: `_bmad/core/tasks/bmad-help.md` with argument `implementation readiness`.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All findings compiled and summarized
-- Clear recommendations provided
-- Readiness status determined
-- Final report saved
-
-### β SYSTEM FAILURE:
-
-- Not reviewing previous findings
-- Incomplete summary
-- No clear recommendations
diff --git a/src/bmm/workflows/3-solutioning/check-implementation-readiness/templates/readiness-report-template.md b/src/bmm/workflows/3-solutioning/check-implementation-readiness/templates/readiness-report-template.md
deleted file mode 100644
index 972988ca..00000000
--- a/src/bmm/workflows/3-solutioning/check-implementation-readiness/templates/readiness-report-template.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Implementation Readiness Assessment Report
-
-**Date:** {{date}}
-**Project:** {{project_name}}
diff --git a/src/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md b/src/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md
deleted file mode 100644
index d7eb5969..00000000
--- a/src/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md
+++ /dev/null
@@ -1,55 +0,0 @@
----
-name: check-implementation-readiness
-description: 'Critical validation workflow that assesses PRD, Architecture, and Epics & Stories for completeness and alignment before implementation. Uses adversarial review approach to find gaps and issues.'
-web_bundle: false
----
-
-# Implementation Readiness
-
-**Goal:** Validate that PRD, Architecture, Epics and Stories are complete and aligned before Phase 4 implementation starts, with a focus on ensuring epics and stories are logical and have accounted for all requirements and planning.
-
-**Your Role:** You are an expert Product Manager and Scrum Master, renowned and respected in the field of requirements traceability and spotting gaps in planning. Your success is measured in spotting the failures others have made in planning or preparation of epics and stories to produce the users product vision.
-
-## WORKFLOW ARCHITECTURE
-
-### Core Principles
-
-- **Micro-file Design**: Each step of the overall goal is a self contained instruction file that you will adhere too 1 file as directed at a time
-- **Just-In-Time Loading**: Only 1 current step file will be loaded and followed to completion - never load future step files until told to do so
-- **Sequential Enforcement**: Sequence within the step files must be completed in order, no skipping or optimization allowed
-- **State Tracking**: Document progress in output file frontmatter using `stepsCompleted` array when a workflow produces a document
-- **Append-Only Building**: Build documents by appending content as directed to the output file
-
-### Step Processing Rules
-
-1. **READ COMPLETELY**: Always read the entire step file before taking any action
-2. **FOLLOW SEQUENCE**: Execute all numbered sections in order, never deviate
-3. **WAIT FOR INPUT**: If a menu is presented, halt and wait for user selection
-4. **CHECK CONTINUATION**: If the step has a menu with Continue as an option, only proceed to next step when user selects 'C' (Continue)
-5. **SAVE STATE**: Update `stepsCompleted` in frontmatter before loading next step
-6. **LOAD NEXT**: When directed, read fully and follow the next step file
-
-### Critical Rules (NO EXCEPTIONS)
-
-- π **NEVER** load multiple step files simultaneously
-- π **ALWAYS** read entire step file before execution
-- π« **NEVER** skip steps or optimize the sequence
-- πΎ **ALWAYS** update frontmatter of output files when writing the final output for a specific step
-- π― **ALWAYS** follow the exact instructions in the step file
-- βΈοΈ **ALWAYS** halt at menus and wait for user input
-- π **NEVER** create mental todo lists from future steps
-
----
-
-## INITIALIZATION SEQUENCE
-
-### 1. Module Configuration Loading
-
-Load and read full config from {project-root}/_bmad/bmm/config.yaml and resolve:
-
-- `project_name`, `output_folder`, `planning_artifacts`, `user_name`, `communication_language`, `document_output_language`
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### 2. First Step EXECUTION
-
-Read fully and follow: `./step-01-document-discovery.md` to begin the workflow.
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/architecture-decision-template.md b/src/bmm/workflows/3-solutioning/create-architecture/architecture-decision-template.md
deleted file mode 100644
index 51ac3d6f..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/architecture-decision-template.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-stepsCompleted: []
-inputDocuments: []
-workflowType: 'architecture'
-project_name: '{{project_name}}'
-user_name: '{{user_name}}'
-date: '{{date}}'
----
-
-# Architecture Decision Document
-
-_This document builds collaboratively through step-by-step discovery. Sections are appended as we work through each architectural decision together._
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/data/domain-complexity.csv b/src/bmm/workflows/3-solutioning/create-architecture/data/domain-complexity.csv
deleted file mode 100644
index 0f1726a7..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/data/domain-complexity.csv
+++ /dev/null
@@ -1,11 +0,0 @@
-domain,signals,complexity_level,suggested_workflow,web_searches
-e_commerce,"shopping,cart,checkout,payment,products,store",medium,standard,"ecommerce architecture patterns, payment processing, inventory management"
-fintech,"banking,payment,trading,finance,money,investment",high,enhanced,"financial security, PCI compliance, trading algorithms, fraud detection"
-healthcare,"medical,diagnostic,clinical,patient,hospital,health",high,enhanced,"HIPAA compliance, medical data security, FDA regulations, health tech"
-social,"social network,community,users,friends,posts,sharing",high,advanced,"social graph algorithms, feed ranking, notification systems, privacy"
-education,"learning,course,student,teacher,training,academic",medium,standard,"LMS architecture, progress tracking, assessment systems, video streaming"
-productivity,"productivity,workflow,tasks,management,business,tools",medium,standard,"collaboration patterns, real-time editing, notification systems, integration"
-media,"content,media,video,audio,streaming,broadcast",high,advanced,"CDN architecture, video encoding, streaming protocols, content delivery"
-iot,"IoT,sensors,devices,embedded,smart,connected",high,advanced,"device communication, real-time data processing, edge computing, security"
-government,"government,civic,public,admin,policy,regulation",high,enhanced,"accessibility standards, security clearance, data privacy, audit trails"
-gaming,"game,gaming,multiplayer,real-time,interactive,entertainment",high,advanced,"real-time multiplayer, game engine architecture, matchmaking, leaderboards"
\ No newline at end of file
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/data/project-types.csv b/src/bmm/workflows/3-solutioning/create-architecture/data/project-types.csv
deleted file mode 100644
index 3733748e..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/data/project-types.csv
+++ /dev/null
@@ -1,7 +0,0 @@
-project_type,detection_signals,description,typical_starters
-web_app,"website,web application,browser,frontend,UI,interface",Web-based applications running in browsers,Next.js, Vite, Remix
-mobile_app,"mobile,iOS,Android,app,smartphone,tablet",Native mobile applications,React Native, Expo, Flutter
-api_backend,"API,REST,GraphQL,backend,service,microservice",Backend services and APIs,NestJS, Express, Fastify
-full_stack,"full-stack,complete,web+mobile,frontend+backend",Applications with both frontend and backend,T3 App, RedwoodJS, Blitz
-cli_tool,"CLI,command line,terminal,console,tool",Command-line interface tools,oclif, Commander, Caporal
-desktop_app,"desktop,Electron,Tauri,native app,macOS,Windows",Desktop applications,Electron, Tauri, Flutter Desktop
\ No newline at end of file
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-01-init.md b/src/bmm/workflows/3-solutioning/create-architecture/steps/step-01-init.md
deleted file mode 100644
index 93a83c70..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-01-init.md
+++ /dev/null
@@ -1,153 +0,0 @@
-# Step 1: Architecture Workflow Initialization
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between architectural peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on initialization and setup only - don't look ahead to future steps
-- πͺ DETECT existing workflow state and handle continuation properly
-- β οΈ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΎ Initialize document and update frontmatter
-- π Set up frontmatter `stepsCompleted: [1]` before loading next step
-- π« FORBIDDEN to load next step until setup is complete
-
-## CONTEXT BOUNDARIES:
-
-- Variables from workflow.md are available in memory
-- Previous context = what's in output document + frontmatter
-- Don't assume knowledge from other steps
-- Input document discovery happens in this step
-
-## YOUR TASK:
-
-Initialize the Architecture workflow by detecting continuation state, discovering input documents, and setting up the document for collaborative architectural decision making.
-
-## INITIALIZATION SEQUENCE:
-
-### 1. Check for Existing Workflow
-
-First, check if the output document already exists:
-
-- Look for existing {planning_artifacts}/`*architecture*.md`
-- If exists, read the complete file(s) including frontmatter
-- If not exists, this is a fresh workflow
-
-### 2. Handle Continuation (If Document Exists)
-
-If the document exists and has frontmatter with `stepsCompleted`:
-
-- **STOP here** and load `./step-01b-continue.md` immediately
-- Do not proceed with any initialization tasks
-- Let step-01b handle the continuation logic
-
-### 3. Fresh Workflow Setup (If No Document)
-
-If no document exists or no `stepsCompleted` in frontmatter:
-
-#### A. Input Document Discovery
-
-Discover and load context documents using smart discovery. Documents can be in the following locations:
-- {planning_artifacts}/**
-- {output_folder}/**
-- {product_knowledge}/**
-- docs/**
-
-Also - when searching - documents can be a single markdown file, or a folder with an index and multiple files. For Example, if searching for `*foo*.md` and not found, also search for a folder called *foo*/index.md (which indicates sharded content)
-
-Try to discover the following:
-- Product Brief (`*brief*.md`)
-- Product Requirements Document (`*prd*.md`)
-- UX Design (`*ux-design*.md`) and other
-- Research Documents (`*research*.md`)
-- Project Documentation (generally multiple documents might be found for this in the `{product_knowledge}` or `docs` folder.)
-- Project Context (`**/project-context.md`)
-
-Confirm what you have found with the user, along with asking if the user wants to provide anything else. Only after this confirmation will you proceed to follow the loading rules
-
-**Loading Rules:**
-
-- Load ALL discovered files completely that the user confirmed or provided (no offset/limit)
-- If there is a project context, whatever is relevant should try to be biased in the remainder of this whole workflow process
-- For sharded folders, load ALL files to get complete picture, using the index first to potentially know the potential of each document
-- index.md is a guide to what's relevant whenever available
-- Track all successfully loaded files in frontmatter `inputDocuments` array
-
-#### B. Validate Required Inputs
-
-Before proceeding, verify we have the essential inputs:
-
-**PRD Validation:**
-
-- If no PRD found: "Architecture requires a PRD to work from. Please run the PRD workflow first or provide the PRD file path."
-- Do NOT proceed without PRD
-
-**Other Input that might exist:**
-
-- UX Spec: "Provides UI/UX architectural requirements"
-
-#### C. Create Initial Document
-
-Copy the template from `{installed_path}/architecture-decision-template.md` to `{planning_artifacts}/architecture.md`
-
-#### D. Complete Initialization and Report
-
-Complete setup and report to user:
-
-**Document Setup:**
-
-- Created: `{planning_artifacts}/architecture.md` from template
-- Initialized frontmatter with workflow state
-
-**Input Documents Discovered:**
-Report what was found:
-"Welcome {{user_name}}! I've set up your Architecture workspace for {{project_name}}.
-
-**Documents Found:**
-
-- PRD: {number of PRD files loaded or "None found - REQUIRED"}
-- UX Design: {number of UX files loaded or "None found"}
-- Research: {number of research files loaded or "None found"}
-- Project docs: {number of project files loaded or "None found"}
-- Project context: {project_context_rules count of rules for AI agents found}
-
-**Files loaded:** {list of specific file names or "No additional documents found"}
-
-Ready to begin architectural decision making. Do you have any other documents you'd like me to include?
-
-[C] Continue to project context analysis
-
-## SUCCESS METRICS:
-
-β Existing workflow detected and handed off to step-01b correctly
-β Fresh workflow initialized with template and frontmatter
-β Input documents discovered and loaded using sharded-first logic
-β All discovered files tracked in frontmatter `inputDocuments`
-β PRD requirement validated and communicated
-β User confirmed document setup and can proceed
-
-## FAILURE MODES:
-
-β Proceeding with fresh initialization when existing workflow exists
-β Not updating frontmatter with discovered input documents
-β Creating document without proper template
-β Not checking sharded folders first before whole files
-β Not reporting what documents were found to user
-β Proceeding without validating PRD requirement
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects [C] to continue, only after ensuring all the template output has been created, then load `./step-02-context.md` to analyze the project context and begin architectural decision making.
-
-Remember: Do NOT proceed to step-02 until user explicitly selects [C] from the menu and setup is confirmed!
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-01b-continue.md b/src/bmm/workflows/3-solutioning/create-architecture/steps/step-01b-continue.md
deleted file mode 100644
index 6e800e7f..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-01b-continue.md
+++ /dev/null
@@ -1,164 +0,0 @@
-# Step 1b: Workflow Continuation Handler
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between architectural peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on understanding current state and getting user confirmation
-- πͺ HANDLE workflow resumption smoothly and transparently
-- β οΈ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- π Read existing document completely to understand current state
-- πΎ Update frontmatter to reflect continuation
-- π« FORBIDDEN to proceed to next step without user confirmation
-
-## CONTEXT BOUNDARIES:
-
-- Existing document and frontmatter are available
-- Input documents already loaded should be in frontmatter `inputDocuments`
-- Steps already completed are in `stepsCompleted` array
-- Focus on understanding where we left off
-
-## YOUR TASK:
-
-Handle workflow continuation by analyzing existing work and guiding the user to resume at the appropriate step.
-
-## CONTINUATION SEQUENCE:
-
-### 1. Analyze Current Document State
-
-Read the existing architecture document completely and analyze:
-
-**Frontmatter Analysis:**
-
-- `stepsCompleted`: What steps have been done
-- `inputDocuments`: What documents were loaded
-- `lastStep`: Last step that was executed
-- `project_name`, `user_name`, `date`: Basic context
-
-**Content Analysis:**
-
-- What sections exist in the document
-- What architectural decisions have been made
-- What appears incomplete or in progress
-- Any TODOs or placeholders remaining
-
-### 2. Present Continuation Summary
-
-Show the user their current progress:
-
-"Welcome back {{user_name}}! I found your Architecture work for {{project_name}}.
-
-**Current Progress:**
-
-- Steps completed: {{stepsCompleted list}}
-- Last step worked on: Step {{lastStep}}
-- Input documents loaded: {{number of inputDocuments}} files
-
-**Document Sections Found:**
-{list all H2/H3 sections found in the document}
-
-{if_incomplete_sections}
-**Incomplete Areas:**
-
-- {areas that appear incomplete or have placeholders}
- {/if_incomplete_sections}
-
-**What would you like to do?**
-[R] Resume from where we left off
-[C] Continue to next logical step
-[O] Overview of all remaining steps
-[X] Start over (will overwrite existing work)
-"
-
-### 3. Handle User Choice
-
-#### If 'R' (Resume from where we left off):
-
-- Identify the next step based on `stepsCompleted`
-- Load the appropriate step file to continue
-- Example: If `stepsCompleted: [1, 2, 3]`, load `step-04-decisions.md`
-
-#### If 'C' (Continue to next logical step):
-
-- Analyze the document content to determine logical next step
-- May need to review content quality and completeness
-- If content seems complete for current step, advance to next
-- If content seems incomplete, suggest staying on current step
-
-#### If 'O' (Overview of all remaining steps):
-
-- Provide brief description of all remaining steps
-- Let user choose which step to work on
-- Don't assume sequential progression is always best
-
-#### If 'X' (Start over):
-
-- Confirm: "This will delete all existing architectural decisions. Are you sure? (y/n)"
-- If confirmed: Delete existing document and return to step-01-init.md
-- If not confirmed: Return to continuation menu
-
-### 4. Navigate to Selected Step
-
-After user makes choice:
-
-**Load the selected step file:**
-
-- Update frontmatter `lastStep` to reflect current navigation
-- Execute the selected step file
-- Let that step handle the detailed continuation logic
-
-**State Preservation:**
-
-- Maintain all existing content in the document
-- Keep `stepsCompleted` accurate
-- Track the resumption in workflow status
-
-### 5. Special Continuation Cases
-
-#### If `stepsCompleted` is empty but document has content:
-
-- This suggests an interrupted workflow
-- Ask user: "I see the document has content but no steps are marked as complete. Should I analyze what's here and set the appropriate step status?"
-
-#### If document appears corrupted or incomplete:
-
-- Ask user: "The document seems incomplete. Would you like me to try to recover what's here, or would you prefer to start fresh?"
-
-#### If document is complete but workflow not marked as done:
-
-- Ask user: "The architecture looks complete! Should I mark this workflow as finished, or is there more you'd like to work on?"
-
-## SUCCESS METRICS:
-
-β Existing document state properly analyzed and understood
-β User presented with clear continuation options
-β User choice handled appropriately and transparently
-β Workflow state preserved and updated correctly
-β Navigation to appropriate step handled smoothly
-
-## FAILURE MODES:
-
-β Not reading the complete existing document before making suggestions
-β Losing track of what steps were actually completed
-β Automatically proceeding without user confirmation of next steps
-β Not checking for incomplete or placeholder content
-β Losing existing document content during resumption
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects their continuation option, load the appropriate step file based on their choice. The step file will handle the detailed work from that point forward.
-
-Remember: The goal is smooth, transparent resumption that respects the work already done while giving the user control over how to proceed.
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-02-context.md b/src/bmm/workflows/3-solutioning/create-architecture/steps/step-02-context.md
deleted file mode 100644
index 1e9c6b9a..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-02-context.md
+++ /dev/null
@@ -1,224 +0,0 @@
-# Step 2: Project Context Analysis
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between architectural peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on understanding project scope and requirements for architecture
-- π― ANALYZE loaded documents, don't assume or generate requirements
-- β οΈ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β οΈ Present A/P/C menu after generating project context analysis
-- πΎ ONLY save when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop deeper insights about project context and architectural implications
-- **P (Party Mode)**: Bring multiple perspectives to analyze project requirements from different architectural angles
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to display this step's A/P/C menu after the A or P have completed
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Current document and frontmatter from step 1 are available
-- Input documents already loaded are in memory (PRD, epics, UX spec, etc.)
-- Focus on architectural implications of requirements
-- No technology decisions yet - pure analysis phase
-
-## YOUR TASK:
-
-Fully read and Analyze the loaded project documents to understand architectural scope, requirements, and constraints before beginning decision making.
-
-## CONTEXT ANALYSIS SEQUENCE:
-
-### 1. Review Project Requirements
-
-**From PRD Analysis:**
-
-- Extract and analyze Functional Requirements (FRs)
-- Identify Non-Functional Requirements (NFRs) like performance, security, compliance
-- Note any technical constraints or dependencies mentioned
-- Count and categorize requirements to understand project scale
-
-**From Epics/Stories (if available):**
-
-- Map epic structure and user stories to architectural components
-- Extract acceptance criteria for technical implications
-- Identify cross-cutting concerns that span multiple epics
-- Estimate story complexity for architectural planning
-
-**From UX Design (if available):**
-
-- Extract architectural implications from UX requirements:
- - Component complexity (simple forms vs rich interactions)
- - Animation/transition requirements
- - Real-time update needs (live data, collaborative features)
- - Platform-specific UI requirements
- - Accessibility standards (WCAG compliance level)
- - Responsive design breakpoints
- - Offline capability requirements
- - Performance expectations (load times, interaction responsiveness)
-
-### 2. Project Scale Assessment
-
-Calculate and present project complexity:
-
-**Complexity Indicators:**
-
-- Real-time features requirements
-- Multi-tenancy needs
-- Regulatory compliance requirements
-- Integration complexity
-- User interaction complexity
-- Data complexity and volume
-
-### 3. Reflect Understanding
-
-Present your analysis back to user for validation:
-
-"I'm reviewing your project documentation for {{project_name}}.
-
-{if_epics_loaded}I see {{epic_count}} epics with {{story_count}} total stories.{/if_epics_loaded}
-{if_no_epics}I found {{fr_count}} functional requirements organized into {{fr_category_list}}.{/if_no_epics}
-{if_ux_loaded}I also found your UX specification which defines the user experience requirements.{/if_ux_loaded}
-
-**Key architectural aspects I notice:**
-
-- [Summarize core functionality from FRs]
-- [Note critical NFRs that will shape architecture]
-- {if_ux_loaded}[Note UX complexity and technical requirements]{/if_ux_loaded}
-- [Identify unique technical challenges or constraints]
-- [Highlight any regulatory or compliance requirements]
-
-**Scale indicators:**
-
-- Project complexity appears to be: [low/medium/high/enterprise]
-- Primary technical domain: [web/mobile/api/backend/full-stack/etc]
-- Cross-cutting concerns identified: [list major ones]
-
-This analysis will help me guide you through the architectural decisions needed to ensure AI agents implement this consistently.
-
-Does this match your understanding of the project scope and requirements?"
-
-### 4. Generate Project Context Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-```markdown
-## Project Context Analysis
-
-### Requirements Overview
-
-**Functional Requirements:**
-{{analysis of FRs and what they mean architecturally}}
-
-**Non-Functional Requirements:**
-{{NFRs that will drive architectural decisions}}
-
-**Scale & Complexity:**
-{{project_scale_assessment}}
-
-- Primary domain: {{technical_domain}}
-- Complexity level: {{complexity_level}}
-- Estimated architectural components: {{component_count}}
-
-### Technical Constraints & Dependencies
-
-{{known_constraints_dependencies}}
-
-### Cross-Cutting Concerns Identified
-
-{{concerns_that_will_affect_multiple_components}}
-```
-
-### 5. Present Content and Menu
-
-Show the generated content and present choices:
-
-"I've drafted the Project Context Analysis based on your requirements. This sets the foundation for our architectural decisions.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 4]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Let's dive deeper into architectural implications
-[P] Party Mode - Bring different perspectives to analyze requirements
-[C] Continue - Save this analysis and begin architectural decisions"
-
-### 6. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with the current context analysis
-- Process the enhanced architectural insights that come back
-- Ask user: "Accept these enhancements to the project context analysis? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with the current project context
-- Process the collaborative improvements to architectural understanding
-- Ask user: "Accept these changes to the project context analysis? (y/n)"
-- If yes: Update content with improvements, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/architecture.md`
-- Update frontmatter: `stepsCompleted: [1, 2]`
-- Load `./step-03-starter.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 4.
-
-## SUCCESS METRICS:
-
-β All input documents thoroughly analyzed for architectural implications
-β Project scope and complexity clearly assessed and validated
-β Technical constraints and dependencies identified
-β Cross-cutting concerns mapped for architectural planning
-β User confirmation of project understanding
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Skimming documents without deep architectural analysis
-β Missing or misinterpreting critical NFRs
-β Not validating project understanding with user
-β Underestimating complexity indicators
-β Generating content without real analysis of loaded documents
-β Not presenting A/P/C menu after content generation
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-03-starter.md` to evaluate starter template options.
-
-Remember: Do NOT proceed to step-03 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-03-starter.md b/src/bmm/workflows/3-solutioning/create-architecture/steps/step-03-starter.md
deleted file mode 100644
index bccea19d..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-03-starter.md
+++ /dev/null
@@ -1,331 +0,0 @@
-# Step 3: Starter Template Evaluation
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-- β ALWAYS treat this as collaborative discovery between architectural peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on evaluating starter template options with current versions
-- π ALWAYS search the web to verify current versions - NEVER trust hardcoded versions
-- β οΈ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete architecture
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- π Search the web to verify current versions and options
-- β οΈ Present A/P/C menu after generating starter template analysis
-- πΎ ONLY save when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to explore unconventional starter options or custom approaches
-- **P (Party Mode)**: Bring multiple perspectives to evaluate starter trade-offs for different use cases
-- **C (Continue)**: Save the content to the document and proceed to next step
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to display this step's A/P/C menu after the A or P have completed
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Project context from step 2 is available and complete
-- Project context file from step-01 may contain technical preferences
-- No architectural decisions made yet - evaluating foundations
-- Focus on technical preferences discovery and starter evaluation
-- Consider project requirements and existing preferences when evaluating options
-
-## YOUR TASK:
-
-Discover technical preferences and evaluate starter template options, leveraging existing technical preferences and establishing solid architectural foundations.
-
-## STARTER EVALUATION SEQUENCE:
-
-### 0. Check Technical Preferences & Context
-
-**Check Project Context for Existing Technical Preferences:**
-"Before we dive into starter templates, let me check if you have any technical preferences already documented.
-
-{{if_project_context_exists}}
-I found some technical rules in your project context file:
-{{extracted_technical_preferences_from_project_context}}
-
-**Project Context Technical Rules Found:**
-
-- Languages/Frameworks: {{languages_frameworks_from_context}}
-- Tools & Libraries: {{tools_from_context}}
-- Development Patterns: {{patterns_from_context}}
-- Platform Preferences: {{platforms_from_context}}
-
-{{else}}
-No existing technical preferences found in project context file. We'll establish your technical preferences now.
-{{/if_project_context}}"
-
-**Discover User Technical Preferences:**
-"Based on your project context, let's discuss your technical preferences:
-
-{{primary_technology_category}} Preferences:
-
-- **Languages**: Do you have preferences between TypeScript/JavaScript, Python, Go, Rust, etc.?
-- **Frameworks**: Any existing familiarity or preferences (React, Vue, Angular, Next.js, etc.)?
-- **Databases**: Any preferences or existing infrastructure (PostgreSQL, MongoDB, MySQL, etc.)?
-
-**Development Experience:**
-
-- What's your team's experience level with different technologies?
-- Are there any technologies you want to learn vs. what you're comfortable with?
-
-**Platform/Deployment Preferences:**
-
-- Cloud provider preferences (AWS, Vercel, Railway, etc.)?
-- Container preferences (Docker, Serverless, Traditional)?
-
-**Integrations:**
-
-- Any existing systems or APIs you need to integrate with?
-- Third-party services you plan to use (payment, authentication, analytics, etc.)?
-
-These preferences will help me recommend the most suitable starter templates and guide our architectural decisions."
-
-### 1. Identify Primary Technology Domain
-
-Based on project context analysis and technical preferences, identify the primary technology stack:
-
-- **Web application** β Look for Next.js, Vite, Remix, SvelteKit starters
-- **Mobile app** β Look for React Native, Expo, Flutter starters
-- **API/Backend** β Look for NestJS, Express, Fastify, Supabase starters
-- **CLI tool** β Look for CLI framework starters (oclif, commander, etc.)
-- **Full-stack** β Look for T3, RedwoodJS, Blitz, Next.js starters
-- **Desktop** β Look for Electron, Tauri starters
-
-### 2. UX Requirements Consideration
-
-If UX specification was loaded, consider UX requirements when selecting starter:
-
-- **Rich animations** β Framer Motion compatible starter
-- **Complex forms** β React Hook Form included starter
-- **Real-time features** β Socket.io or WebSocket ready starter
-- **Design system** β Storybook-enabled starter
-- **Offline capability** β Service worker or PWA configured starter
-
-### 3. Research Current Starter Options
-
-Search the web to find current, maintained starter templates:
-
-```
-Search the web: "{{primary_technology}} starter template CLI create command latest"
-Search the web: "{{primary_technology}} boilerplate generator latest options"
-Search the web: "{{primary_technology}} production-ready starter best practices"
-```
-
-### 4. Investigate Top Starter Options
-
-For each promising starter found, investigate details:
-
-```
-Search the web: "{{starter_name}} default setup technologies included latest"
-Search the web: "{{starter_name}} project structure file organization"
-Search the web: "{{starter_name}} production deployment capabilities"
-Search the web: "{{starter_name}} recent updates maintenance status"
-```
-
-### 5. Analyze What Each Starter Provides
-
-For each viable starter option, document:
-
-**Technology Decisions Made:**
-
-- Language/TypeScript configuration
-- Styling solution (CSS, Tailwind, Styled Components, etc.)
-- Testing framework setup
-- Linting/Formatting configuration
-- Build tooling and optimization
-- Project structure and organization
-
-**Architectural Patterns Established:**
-
-- Code organization patterns
-- Component structure conventions
-- API layering approach
-- State management setup
-- Routing patterns
-- Environment configuration
-
-**Development Experience Features:**
-
-- Hot reloading and development server
-- TypeScript configuration
-- Debugging setup
-- Testing infrastructure
-- Documentation generation
-
-### 6. Present Starter Options
-
-Based on user skill level and project needs:
-
-**For Expert Users:**
-"Found {{starter_name}} which provides:
-{{quick_decision_list_of_key_decisions}}
-
-This would establish our base architecture with these technical decisions already made. Use it?"
-
-**For Intermediate Users:**
-"I found {{starter_name}}, which is a well-maintained starter for {{project_type}} projects.
-
-It makes these architectural decisions for us:
-{{decision_list_with_explanations}}
-
-This gives us a solid foundation following current best practices. Should we use it?"
-
-**For Beginner Users:**
-"I found {{starter_name}}, which is like a pre-built foundation for your project.
-
-Think of it like buying a prefab house frame instead of cutting each board yourself.
-
-It makes these decisions for us:
-{{friendly_explanation_of_decisions}}
-
-This is a great starting point that follows best practices and saves us from making dozens of small technical choices. Should we use it?"
-
-### 7. Get Current CLI Commands
-
-If user shows interest in a starter, get the exact current commands:
-
-```
-Search the web: "{{starter_name}} CLI command options flags latest"
-Search the web: "{{starter_name}} create new project command examples"
-```
-
-### 8. Generate Starter Template Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-````markdown
-## Starter Template Evaluation
-
-### Primary Technology Domain
-
-{{identified_domain}} based on project requirements analysis
-
-### Starter Options Considered
-
-{{analysis_of_evaluated_starters}}
-
-### Selected Starter: {{starter_name}}
-
-**Rationale for Selection:**
-{{why_this_starter_was_chosen}}
-
-**Initialization Command:**
-
-```bash
-{{full_starter_command_with_options}}
-```
-````
-
-**Architectural Decisions Provided by Starter:**
-
-**Language & Runtime:**
-{{language_typescript_setup}}
-
-**Styling Solution:**
-{{styling_solution_configuration}}
-
-**Build Tooling:**
-{{build_tools_and_optimization}}
-
-**Testing Framework:**
-{{testing_setup_and_configuration}}
-
-**Code Organization:**
-{{project_structure_and_patterns}}
-
-**Development Experience:**
-{{development_tools_and_workflow}}
-
-**Note:** Project initialization using this command should be the first implementation story.
-
-```
-
-### 9. Present Content and Menu
-
-Show the generated content and present choices:
-
-"I've analyzed starter template options for {{project_type}} projects.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 8]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Explore custom approaches or unconventional starters
-[P] Party Mode - Evaluate trade-offs from different perspectives
-[C] Continue - Save this decision and move to architectural decisions"
-
-### 10. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with current starter analysis
-- Process enhanced insights about starter options or custom approaches
-- Ask user: "Accept these changes to the starter template evaluation? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with starter evaluation context
-- Process collaborative insights about starter trade-offs
-- Ask user: "Accept these changes to the starter template evaluation? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/architecture.md`
-- Update frontmatter: `stepsCompleted: [1, 2, 3]`
-- Load `./step-04-decisions.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 8.
-
-## SUCCESS METRICS:
-
-β Primary technology domain correctly identified from project context
-β Current, maintained starter templates researched and evaluated
-β All versions verified using web search, not hardcoded
-β Architectural implications of starter choice clearly documented
-β User provided with clear rationale for starter selection
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Not verifying current versions with web search
-β Ignoring UX requirements when evaluating starters
-β Not documenting what architectural decisions the starter makes
-β Failing to consider maintenance status of starter templates
-β Not providing clear rationale for starter selection
-β Not presenting A/P/C menu after content generation
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-04-decisions.md` to begin making specific architectural decisions.
-
-Remember: Do NOT proceed to step-04 until user explicitly selects 'C' from the A/P/C menu and content is saved!
-```
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-04-decisions.md b/src/bmm/workflows/3-solutioning/create-architecture/steps/step-04-decisions.md
deleted file mode 100644
index c9f5cded..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-04-decisions.md
+++ /dev/null
@@ -1,318 +0,0 @@
-# Step 4: Core Architectural Decisions
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between architectural peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on making critical architectural decisions collaboratively
-- π ALWAYS search the web to verify current technology versions
-- β οΈ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- π Search the web to verify technology versions and options
-- β οΈ Present A/P/C menu after each major decision category
-- πΎ ONLY save when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices for each decision category:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to explore innovative approaches to specific decisions
-- **P (Party Mode)**: Bring multiple perspectives to evaluate decision trade-offs
-- **C (Continue)**: Save the current decisions and proceed to next decision category
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to display this step's A/P/C menu after the A or P have completed
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Project context from step 2 is available
-- Starter template choice from step 3 is available
-- Project context file may contain technical preferences and rules
-- Technical preferences discovered in step 3 are available
-- Focus on decisions not already made by starter template or existing preferences
-- Collaborative decision making, not recommendations
-
-## YOUR TASK:
-
-Facilitate collaborative architectural decision making, leveraging existing technical preferences and starter template decisions, focusing on remaining choices critical to the project's success.
-
-## DECISION MAKING SEQUENCE:
-
-### 1. Load Decision Framework & Check Existing Preferences
-
-**Review Technical Preferences from Step 3:**
-"Based on our technical preferences discussion in step 3, let's build on those foundations:
-
-**Your Technical Preferences:**
-{{user_technical_preferences_from_step_3}}
-
-**Starter Template Decisions:**
-{{starter_template_decisions}}
-
-**Project Context Technical Rules:**
-{{project_context_technical_rules}}"
-
-**Identify Remaining Decisions:**
-Based on technical preferences, starter template choice, and project context, identify remaining critical decisions:
-
-**Already Decided (Don't re-decide these):**
-
-- {{starter_template_decisions}}
-- {{user_technology_preferences}}
-- {{project_context_technical_rules}}
-
-**Critical Decisions:** Must be decided before implementation can proceed
-**Important Decisions:** Shape the architecture significantly
-**Nice-to-Have:** Can be deferred if needed
-
-### 2. Decision Categories by Priority
-
-#### Category 1: Data Architecture
-
-- Database choice (if not determined by starter)
-- Data modeling approach
-- Data validation strategy
-- Migration approach
-- Caching strategy
-
-#### Category 2: Authentication & Security
-
-- Authentication method
-- Authorization patterns
-- Security middleware
-- Data encryption approach
-- API security strategy
-
-#### Category 3: API & Communication
-
-- API design patterns (REST, GraphQL, etc.)
-- API documentation approach
-- Error handling standards
-- Rate limiting strategy
-- Communication between services
-
-#### Category 4: Frontend Architecture (if applicable)
-
-- State management approach
-- Component architecture
-- Routing strategy
-- Performance optimization
-- Bundle optimization
-
-#### Category 5: Infrastructure & Deployment
-
-- Hosting strategy
-- CI/CD pipeline approach
-- Environment configuration
-- Monitoring and logging
-- Scaling strategy
-
-### 3. Facilitate Each Decision Category
-
-For each category, facilitate collaborative decision making:
-
-**Present the Decision:**
-Based on user skill level and project context:
-
-**Expert Mode:**
-"{{Decision_Category}}: {{Specific_Decision}}
-
-Options: {{concise_option_list_with_tradeoffs}}
-
-What's your preference for this decision?"
-
-**Intermediate Mode:**
-"Next decision: {{Human_Friendly_Category}}
-
-We need to choose {{Specific_Decision}}.
-
-Common options:
-{{option_list_with_brief_explanations}}
-
-For your project, I'd lean toward {{recommendation}} because {{reason}}. What are your thoughts?"
-
-**Beginner Mode:**
-"Let's talk about {{Human_Friendly_Category}}.
-
-{{Educational_Context_About_Why_This_Matters}}
-
-Think of it like {{real_world_analogy}}.
-
-Your main options:
-{{friendly_options_with_pros_cons}}
-
-My suggestion: {{recommendation}}
-This is good for you because {{beginner_friendly_reason}}.
-
-What feels right to you?"
-
-**Verify Technology Versions:**
-If decision involves specific technology:
-
-```
-Search the web: "{{technology}} latest stable version"
-Search the web: "{{technology}} current LTS version"
-Search the web: "{{technology}} production readiness"
-```
-
-**Get User Input:**
-"What's your preference? (or 'explain more' for details)"
-
-**Handle User Response:**
-
-- If user wants more info: Provide deeper explanation
-- If user has preference: Discuss implications and record decision
-- If user wants alternatives: Explore other options
-
-**Record the Decision:**
-
-- Category: {{category}}
-- Decision: {{user_choice}}
-- Version: {{verified_version_if_applicable}}
-- Rationale: {{user_reasoning_or_default}}
-- Affects: {{components_or_epics}}
-- Provided by Starter: {{yes_if_from_starter}}
-
-### 4. Check for Cascading Implications
-
-After each major decision, identify related decisions:
-
-"This choice means we'll also need to decide:
-
-- {{related_decision_1}}
-- {{related_decision_2}}"
-
-### 5. Generate Decisions Content
-
-After facilitating all decision categories, prepare the content to append:
-
-#### Content Structure:
-
-```markdown
-## Core Architectural Decisions
-
-### Decision Priority Analysis
-
-**Critical Decisions (Block Implementation):**
-{{critical_decisions_made}}
-
-**Important Decisions (Shape Architecture):**
-{{important_decisions_made}}
-
-**Deferred Decisions (Post-MVP):**
-{{decisions_deferred_with_rationale}}
-
-### Data Architecture
-
-{{data_related_decisions_with_versions_and_rationale}}
-
-### Authentication & Security
-
-{{security_related_decisions_with_versions_and_rationale}}
-
-### API & Communication Patterns
-
-{{api_related_decisions_with_versions_and_rationale}}
-
-### Frontend Architecture
-
-{{frontend_related_decisions_with_versions_and_rationale}}
-
-### Infrastructure & Deployment
-
-{{infrastructure_related_decisions_with_versions_and_rationale}}
-
-### Decision Impact Analysis
-
-**Implementation Sequence:**
-{{ordered_list_of_decisions_for_implementation}}
-
-**Cross-Component Dependencies:**
-{{how_decisions_affect_each_other}}
-```
-
-### 6. Present Content and Menu
-
-Show the generated decisions content and present choices:
-
-"I've documented all the core architectural decisions we've made together.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 5]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Explore innovative approaches to any specific decisions
-[P] Party Mode - Review decisions from multiple perspectives
-[C] Continue - Save these decisions and move to implementation patterns"
-
-### 7. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with specific decision categories
-- Process enhanced insights about particular decisions
-- Ask user: "Accept these enhancements to the architectural decisions? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with architectural decisions context
-- Process collaborative insights about decision trade-offs
-- Ask user: "Accept these changes to the architectural decisions? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/architecture.md`
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4]`
-- Load `./step-05-patterns.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 5.
-
-## SUCCESS METRICS:
-
-β All critical architectural decisions made collaboratively
-β Technology versions verified using web search
-β Decision rationale clearly documented
-β Cascading implications identified and addressed
-β User provided appropriate level of explanation for skill level
-β A/P/C menu presented and handled correctly for each category
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Making recommendations instead of facilitating decisions
-β Not verifying technology versions with web search
-β Missing cascading implications between decisions
-β Not adapting explanations to user skill level
-β Forgetting to document decisions made by starter template
-β Not presenting A/P/C menu after content generation
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-05-patterns.md` to define implementation patterns that ensure consistency across AI agents.
-
-Remember: Do NOT proceed to step-05 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-05-patterns.md b/src/bmm/workflows/3-solutioning/create-architecture/steps/step-05-patterns.md
deleted file mode 100644
index cbfd99d1..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-05-patterns.md
+++ /dev/null
@@ -1,359 +0,0 @@
-# Step 5: Implementation Patterns & Consistency Rules
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between architectural peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on patterns that prevent AI agent implementation conflicts
-- π― EMPHASIZE what agents could decide DIFFERENTLY if not specified
-- β οΈ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- π― Focus on consistency, not implementation details
-- β οΈ Present A/P/C menu after generating patterns content
-- πΎ ONLY save when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4, 5]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to develop comprehensive consistency patterns
-- **P (Party Mode)**: Bring multiple perspectives to identify potential conflict points
-- **C (Continue)**: Save the patterns and proceed to project structure
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to display this step's A/P/C menu after the A or P have completed
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Core architectural decisions from step 4 are complete
-- Technology stack is decided and versions are verified
-- Focus on HOW agents should implement, not WHAT they should implement
-- Consider what could vary between different AI agents
-
-## YOUR TASK:
-
-Define implementation patterns and consistency rules that ensure multiple AI agents write compatible, consistent code that works together seamlessly.
-
-## PATTERNS DEFINITION SEQUENCE:
-
-### 1. Identify Potential Conflict Points
-
-Based on the chosen technology stack and decisions, identify where AI agents could make different choices:
-
-**Naming Conflicts:**
-
-- Database table/column naming conventions
-- API endpoint naming patterns
-- File and directory naming
-- Component/function/variable naming
-- Route parameter formats
-
-**Structural Conflicts:**
-
-- Where tests are located
-- How components are organized
-- Where utilities and helpers go
-- Configuration file organization
-- Static asset organization
-
-**Format Conflicts:**
-
-- API response wrapper formats
-- Error response structures
-- Date/time formats in APIs and UI
-- JSON field naming conventions
-- API status code usage
-
-**Communication Conflicts:**
-
-- Event naming conventions
-- Event payload structures
-- State update patterns
-- Action naming conventions
-- Logging formats and levels
-
-**Process Conflicts:**
-
-- Loading state handling
-- Error recovery patterns
-- Retry implementation approaches
-- Authentication flow patterns
-- Validation timing and methods
-
-### 2. Facilitate Pattern Decisions
-
-For each conflict category, facilitate collaborative pattern definition:
-
-**Present the Conflict Point:**
-"Given that we're using {{tech_stack}}, different AI agents might handle {{conflict_area}} differently.
-
-For example, one agent might name database tables 'users' while another uses 'Users' - this would cause conflicts.
-
-We need to establish consistent patterns that all agents follow."
-
-**Show Options and Trade-offs:**
-"Common approaches for {{pattern_category}}:
-
-1. {{option_1}} - {{pros_and_cons}}
-2. {{option_2}} - {{pros_and_cons}}
-3. {{option_3}} - {{pros_and_cons}}
-
-Which approach makes the most sense for our project?"
-
-**Get User Decision:**
-"What's your preference for this pattern? (or discuss the trade-offs more)"
-
-### 3. Define Pattern Categories
-
-#### Naming Patterns
-
-**Database Naming:**
-
-- Table naming: users, Users, or user?
-- Column naming: user_id or userId?
-- Foreign key format: user_id or fk_user?
-- Index naming: idx_users_email or users_email_index?
-
-**API Naming:**
-
-- REST endpoint naming: /users or /user? Plural or singular?
-- Route parameter format: :id or {id}?
-- Query parameter naming: user_id or userId?
-- Header naming conventions: X-Custom-Header or Custom-Header?
-
-**Code Naming:**
-
-- Component naming: UserCard or user-card?
-- File naming: UserCard.tsx or user-card.tsx?
-- Function naming: getUserData or get_user_data?
-- Variable naming: userId or user_id?
-
-#### Structure Patterns
-
-**Project Organization:**
-
-- Where do tests live? **tests**/ or \*.test.ts co-located?
-- How are components organized? By feature or by type?
-- Where do shared utilities go?
-- How are services and repositories organized?
-
-**File Structure:**
-
-- Config file locations and naming
-- Static asset organization
-- Documentation placement
-- Environment file organization
-
-#### Format Patterns
-
-**API Formats:**
-
-- API response wrapper? {data: ..., error: ...} or direct response?
-- Error format? {message, code} or {error: {type, detail}}?
-- Date format in JSON? ISO strings or timestamps?
-- Success response structure?
-
-**Data Formats:**
-
-- JSON field naming: snake_case or camelCase?
-- Boolean representations: true/false or 1/0?
-- Null handling patterns
-- Array vs object for single items
-
-#### Communication Patterns
-
-**Event Systems:**
-
-- Event naming convention: user.created or UserCreated?
-- Event payload structure standards
-- Event versioning approach
-- Async event handling patterns
-
-**State Management:**
-
-- State update patterns: immutable updates or direct mutation?
-- Action naming conventions
-- Selector patterns
-- State organization principles
-
-#### Process Patterns
-
-**Error Handling:**
-
-- Global error handling approach
-- Error boundary patterns
-- User-facing error message format
-- Logging vs user error distinction
-
-**Loading States:**
-
-- Loading state naming conventions
-- Global vs local loading states
-- Loading state persistence
-- Loading UI patterns
-
-### 4. Generate Patterns Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-```markdown
-## Implementation Patterns & Consistency Rules
-
-### Pattern Categories Defined
-
-**Critical Conflict Points Identified:**
-{{number_of_potential_conflicts}} areas where AI agents could make different choices
-
-### Naming Patterns
-
-**Database Naming Conventions:**
-{{database_naming_rules_with_examples}}
-
-**API Naming Conventions:**
-{{api_naming_rules_with_examples}}
-
-**Code Naming Conventions:**
-{{code_naming_rules_with_examples}}
-
-### Structure Patterns
-
-**Project Organization:**
-{{project_structure_rules_with_examples}}
-
-**File Structure Patterns:**
-{{file_organization_rules_with_examples}}
-
-### Format Patterns
-
-**API Response Formats:**
-{{api_response_structure_rules}}
-
-**Data Exchange Formats:**
-{{data_format_rules_with_examples}}
-
-### Communication Patterns
-
-**Event System Patterns:**
-{{event_naming_and_structure_rules}}
-
-**State Management Patterns:**
-{{state_update_and_organization_rules}}
-
-### Process Patterns
-
-**Error Handling Patterns:**
-{{consistent_error_handling_approaches}}
-
-**Loading State Patterns:**
-{{loading_state_management_rules}}
-
-### Enforcement Guidelines
-
-**All AI Agents MUST:**
-
-- {{mandatory_pattern_1}}
-- {{mandatory_pattern_2}}
-- {{mandatory_pattern_3}}
-
-**Pattern Enforcement:**
-
-- How to verify patterns are followed
-- Where to document pattern violations
-- Process for updating patterns
-
-### Pattern Examples
-
-**Good Examples:**
-{{concrete_examples_of_correct_pattern_usage}}
-
-**Anti-Patterns:**
-{{examples_of_what_to_avoid}}
-```
-
-### 5. Present Content and Menu
-
-Show the generated patterns content and present choices:
-
-"I've documented implementation patterns that will prevent conflicts between AI agents working on this project.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 4]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Explore additional consistency patterns
-[P] Party Mode - Review patterns from different implementation perspectives
-[C] Continue - Save these patterns and move to project structure"
-
-### 6. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with current patterns
-- Process enhanced consistency rules that come back
-- Ask user: "Accept these additional pattern refinements? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with implementation patterns context
-- Process collaborative insights about potential conflicts
-- Ask user: "Accept these changes to the implementation patterns? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/architecture.md`
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4, 5]`
-- Load `./step-06-structure.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 4.
-
-## SUCCESS METRICS:
-
-β All potential AI agent conflict points identified and addressed
-β Comprehensive patterns defined for naming, structure, and communication
-β Concrete examples provided for each pattern
-β Enforcement guidelines clearly documented
-β User collaborated on pattern decisions rather than receiving recommendations
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Missing potential conflict points that could cause agent conflicts
-β Being too prescriptive about implementation details instead of focusing on consistency
-β Not providing concrete examples for each pattern
-β Failing to address cross-cutting concerns like error handling
-β Not considering the chosen technology stack when defining patterns
-β Not presenting A/P/C menu after content generation
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-06-structure.md` to define the complete project structure.
-
-Remember: Do NOT proceed to step-06 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-06-structure.md b/src/bmm/workflows/3-solutioning/create-architecture/steps/step-06-structure.md
deleted file mode 100644
index 3df89e6c..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-06-structure.md
+++ /dev/null
@@ -1,379 +0,0 @@
-# Step 6: Project Structure & Boundaries
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between architectural peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on defining complete project structure and clear boundaries
-- πΊοΈ MAP requirements/epics to architectural components
-- β οΈ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- πΊοΈ Create complete project tree, not generic placeholders
-- β οΈ Present A/P/C menu after generating project structure
-- πΎ ONLY save when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4, 5, 6]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to explore innovative project organization approaches
-- **P (Party Mode)**: Bring multiple perspectives to evaluate project structure trade-offs
-- **C (Continue)**: Save the project structure and proceed to validation
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to display this step's A/P/C menu after the A or P have completed
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- All previous architectural decisions are complete
-- Implementation patterns and consistency rules are defined
-- Focus on physical project structure and component boundaries
-- Map requirements to specific files and directories
-
-## YOUR TASK:
-
-Define the complete project structure and architectural boundaries based on all decisions made, creating a concrete implementation guide for AI agents.
-
-## PROJECT STRUCTURE SEQUENCE:
-
-### 1. Analyze Requirements Mapping
-
-Map project requirements to architectural components:
-
-**From Epics (if available):**
-"Epic: {{epic_name}} β Lives in {{module/directory/service}}"
-
-- User stories within the epic
-- Cross-epic dependencies
-- Shared components needed
-
-**From FR Categories (if no epics):**
-"FR Category: {{fr_category_name}} β Lives in {{module/directory/service}}"
-
-- Related functional requirements
-- Shared functionality across categories
-- Integration points between categories
-
-### 2. Define Project Directory Structure
-
-Based on technology stack and patterns, create the complete project structure:
-
-**Root Configuration Files:**
-
-- Package management files (package.json, requirements.txt, etc.)
-- Build and development configuration
-- Environment configuration files
-- CI/CD pipeline files
-- Documentation files
-
-**Source Code Organization:**
-
-- Application entry points
-- Core application structure
-- Feature/module organization
-- Shared utilities and libraries
-- Configuration and environment files
-
-**Test Organization:**
-
-- Unit test locations and structure
-- Integration test organization
-- End-to-end test structure
-- Test utilities and fixtures
-
-**Build and Distribution:**
-
-- Build output directories
-- Distribution files
-- Static assets
-- Documentation build
-
-### 3. Define Integration Boundaries
-
-Map how components communicate and where boundaries exist:
-
-**API Boundaries:**
-
-- External API endpoints
-- Internal service boundaries
-- Authentication and authorization boundaries
-- Data access layer boundaries
-
-**Component Boundaries:**
-
-- Frontend component communication patterns
-- State management boundaries
-- Service communication patterns
-- Event-driven integration points
-
-**Data Boundaries:**
-
-- Database schema boundaries
-- Data access patterns
-- Caching boundaries
-- External data integration points
-
-### 4. Create Complete Project Tree
-
-Generate a comprehensive directory structure showing all files and directories:
-
-**Technology-Specific Structure Examples:**
-
-**Next.js Full-Stack:**
-
-```
-project-name/
-βββ README.md
-βββ package.json
-βββ next.config.js
-βββ tailwind.config.js
-βββ tsconfig.json
-βββ .env.local
-βββ .env.example
-βββ .gitignore
-βββ .github/
-β βββ workflows/
-β βββ ci.yml
-βββ src/
-β βββ app/
-β β βββ globals.css
-β β βββ layout.tsx
-β β βββ page.tsx
-β βββ components/
-β β βββ ui/
-β β βββ forms/
-β β βββ features/
-β βββ lib/
-β β βββ db.ts
-β β βββ auth.ts
-β β βββ utils.ts
-β βββ types/
-β βββ middleware.ts
-βββ prisma/
-β βββ schema.prisma
-β βββ migrations/
-βββ tests/
-β βββ __mocks__/
-β βββ components/
-β βββ e2e/
-βββ public/
- βββ assets/
-```
-
-**API Backend (NestJS):**
-
-```
-project-name/
-βββ package.json
-βββ nest-cli.json
-βββ tsconfig.json
-βββ .env
-βββ .env.example
-βββ .gitignore
-βββ README.md
-βββ src/
-β βββ main.ts
-β βββ app.module.ts
-β βββ config/
-β βββ modules/
-β β βββ auth/
-β β βββ users/
-β β βββ common/
-β βββ services/
-β βββ repositories/
-β βββ decorators/
-β βββ pipes/
-β βββ guards/
-β βββ interceptors/
-βββ test/
-β βββ unit/
-β βββ integration/
-β βββ e2e/
-βββ prisma/
-β βββ schema.prisma
-β βββ migrations/
-βββ docker-compose.yml
-```
-
-### 5. Map Requirements to Structure
-
-Create explicit mapping from project requirements to specific files/directories:
-
-**Epic/Feature Mapping:**
-"Epic: User Management
-
-- Components: src/components/features/users/
-- Services: src/services/users/
-- API Routes: src/app/api/users/
-- Database: prisma/migrations/_*users*_
-- Tests: tests/features/users/"
-
-**Cross-Cutting Concerns:**
-"Authentication System
-
-- Components: src/components/auth/
-- Services: src/services/auth/
-- Middleware: src/middleware/auth.ts
-- Guards: src/guards/auth.guard.ts
-- Tests: tests/auth/"
-
-### 6. Generate Structure Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-```markdown
-## Project Structure & Boundaries
-
-### Complete Project Directory Structure
-```
-
-{{complete_project_tree_with_all_files_and_directories}}
-
-```
-
-### Architectural Boundaries
-
-**API Boundaries:**
-{{api_boundary_definitions_and_endpoints}}
-
-**Component Boundaries:**
-{{component_communication_patterns_and_boundaries}}
-
-**Service Boundaries:**
-{{service_integration_patterns_and_boundaries}}
-
-**Data Boundaries:**
-{{data_access_patterns_and_boundaries}}
-
-### Requirements to Structure Mapping
-
-**Feature/Epic Mapping:**
-{{mapping_of_epics_or_features_to_specific_directories}}
-
-**Cross-Cutting Concerns:**
-{{mapping_of_shared_functionality_to_locations}}
-
-### Integration Points
-
-**Internal Communication:**
-{{how_components_within_the_project_communicate}}
-
-**External Integrations:**
-{{third_party_service_integration_points}}
-
-**Data Flow:**
-{{how_data_flows_through_the_architecture}}
-
-### File Organization Patterns
-
-**Configuration Files:**
-{{where_and_how_config_files_are_organized}}
-
-**Source Organization:**
-{{how_source_code_is_structured_and_organized}}
-
-**Test Organization:**
-{{how_tests_are_structured_and_organized}}
-
-**Asset Organization:**
-{{how_static_and_dynamic_assets_are_organized}}
-
-### Development Workflow Integration
-
-**Development Server Structure:**
-{{how_the_project_is organized_for_development}}
-
-**Build Process Structure:**
-{{how_the_build_process_uses_the_project_structure}}
-
-**Deployment Structure:**
-{{how_the_project_structure_supports_deployment}}
-```
-
-### 7. Present Content and Menu
-
-Show the generated project structure content and present choices:
-
-"I've created a complete project structure based on all our architectural decisions.
-
-**Here's what I'll add to the document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Explore innovative project organization approaches
-[P] Party Mode - Review structure from different development perspectives
-[C] Continue - Save this structure and move to architecture validation"
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with current project structure
-- Process enhanced organizational insights that come back
-- Ask user: "Accept these changes to the project structure? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with project structure context
-- Process collaborative insights about organization trade-offs
-- Ask user: "Accept these changes to the project structure? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/architecture.md`
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4, 5, 6]`
-- Load `./step-07-validation.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β Complete project tree defined with all files and directories
-β All architectural boundaries clearly documented
-β Requirements/epics mapped to specific locations
-β Integration points and communication patterns defined
-β Project structure aligned with chosen technology stack
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Creating generic placeholder structure instead of specific, complete tree
-β Not mapping requirements to specific files and directories
-β Missing important integration boundaries
-β Not considering the chosen technology stack in structure design
-β Not defining how components communicate across boundaries
-β Not presenting A/P/C menu after content generation
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-07-validation.md` to validate architectural coherence and completeness.
-
-Remember: Do NOT proceed to step-07 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-07-validation.md b/src/bmm/workflows/3-solutioning/create-architecture/steps/step-07-validation.md
deleted file mode 100644
index b2dc2c46..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-07-validation.md
+++ /dev/null
@@ -1,359 +0,0 @@
-# Step 7: Architecture Validation & Completion
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- π CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
-- β ALWAYS treat this as collaborative discovery between architectural peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on validating architectural coherence and completeness
-- β VALIDATE all requirements are covered by architectural decisions
-- β οΈ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- β Run comprehensive validation checks on the complete architecture
-- β οΈ Present A/P/C menu after generating validation results
-- πΎ ONLY save when user chooses C (Continue)
-- π Update frontmatter `stepsCompleted: [1, 2, 3, 4, 5, 6, 7]` before loading next step
-- π« FORBIDDEN to load next step until C is selected
-
-## COLLABORATION MENUS (A/P/C):
-
-This step will generate content and present choices:
-
-- **A (Advanced Elicitation)**: Use discovery protocols to address complex architectural issues found during validation
-- **P (Party Mode)**: Bring multiple perspectives to resolve validation concerns
-- **C (Continue)**: Save the validation results and complete the architecture
-
-## PROTOCOL INTEGRATION:
-
-- When 'A' selected: Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
-- When 'P' selected: Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md
-- PROTOCOLS always return to display this step's A/P/C menu after the A or P have completed
-- User accepts/rejects protocol changes before proceeding
-
-## CONTEXT BOUNDARIES:
-
-- Complete architecture document with all sections is available
-- All architectural decisions, patterns, and structure are defined
-- Focus on validation, gap analysis, and coherence checking
-- Prepare for handoff to implementation phase
-
-## YOUR TASK:
-
-Validate the complete architecture for coherence, completeness, and readiness to guide AI agents through consistent implementation.
-
-## VALIDATION SEQUENCE:
-
-### 1. Coherence Validation
-
-Check that all architectural decisions work together:
-
-**Decision Compatibility:**
-
-- Do all technology choices work together without conflicts?
-- Are all versions compatible with each other?
-- Do patterns align with technology choices?
-- Are there any contradictory decisions?
-
-**Pattern Consistency:**
-
-- Do implementation patterns support the architectural decisions?
-- Are naming conventions consistent across all areas?
-- Do structure patterns align with technology stack?
-- Are communication patterns coherent?
-
-**Structure Alignment:**
-
-- Does the project structure support all architectural decisions?
-- Are boundaries properly defined and respected?
-- Does the structure enable the chosen patterns?
-- Are integration points properly structured?
-
-### 2. Requirements Coverage Validation
-
-Verify all project requirements are architecturally supported:
-
-**From Epics (if available):**
-
-- Does every epic have architectural support?
-- Are all user stories implementable with these decisions?
-- Are cross-epic dependencies handled architecturally?
-- Are there any gaps in epic coverage?
-
-**From FR Categories (if no epics):**
-
-- Does every functional requirement have architectural support?
-- Are all FR categories fully covered by architectural decisions?
-- Are cross-cutting FRs properly addressed?
-- Are there any missing architectural capabilities?
-
-**Non-Functional Requirements:**
-
-- Are performance requirements addressed architecturally?
-- Are security requirements fully covered?
-- Are scalability considerations properly handled?
-- Are compliance requirements architecturally supported?
-
-### 3. Implementation Readiness Validation
-
-Assess if AI agents can implement consistently:
-
-**Decision Completeness:**
-
-- Are all critical decisions documented with versions?
-- Are implementation patterns comprehensive enough?
-- Are consistency rules clear and enforceable?
-- Are examples provided for all major patterns?
-
-**Structure Completeness:**
-
-- Is the project structure complete and specific?
-- Are all files and directories defined?
-- Are integration points clearly specified?
-- Are component boundaries well-defined?
-
-**Pattern Completeness:**
-
-- Are all potential conflict points addressed?
-- Are naming conventions comprehensive?
-- Are communication patterns fully specified?
-- Are process patterns (error handling, etc.) complete?
-
-### 4. Gap Analysis
-
-Identify and document any missing elements:
-
-**Critical Gaps:**
-
-- Missing architectural decisions that block implementation
-- Incomplete patterns that could cause conflicts
-- Missing structural elements needed for development
-- Undefined integration points
-
-**Important Gaps:**
-
-- Areas that need more detailed specification
-- Patterns that could be more comprehensive
-- Documentation that would help implementation
-- Examples that would clarify complex decisions
-
-**Nice-to-Have Gaps:**
-
-- Additional patterns that would be helpful
-- Supplementary documentation
-- Tooling recommendations
-- Development workflow optimizations
-
-### 5. Address Validation Issues
-
-For any issues found, facilitate resolution:
-
-**Critical Issues:**
-"I found some issues that need to be addressed before implementation:
-
-{{critical_issue_description}}
-
-These could cause implementation problems. How would you like to resolve this?"
-
-**Important Issues:**
-"I noticed a few areas that could be improved:
-
-{{important_issue_description}}
-
-These aren't blocking, but addressing them would make implementation smoother. Should we work on these?"
-
-**Minor Issues:**
-"Here are some minor suggestions for improvement:
-
-{{minor_issue_description}}
-
-These are optional refinements. Would you like to address any of these?"
-
-### 6. Generate Validation Content
-
-Prepare the content to append to the document:
-
-#### Content Structure:
-
-```markdown
-## Architecture Validation Results
-
-### Coherence Validation β
-
-**Decision Compatibility:**
-{{assessment_of_how_all_decisions_work_together}}
-
-**Pattern Consistency:**
-{{verification_that_patterns_support_decisions}}
-
-**Structure Alignment:**
-{{confirmation_that_structure_supports_architecture}}
-
-### Requirements Coverage Validation β
-
-**Epic/Feature Coverage:**
-{{verification_that_all_epics_or_features_are_supported}}
-
-**Functional Requirements Coverage:**
-{{confirmation_that_all_FRs_are_architecturally_supported}}
-
-**Non-Functional Requirements Coverage:**
-{{verification_that_NFRs_are_addressed}}
-
-### Implementation Readiness Validation β
-
-**Decision Completeness:**
-{{assessment_of_decision_documentation_completeness}}
-
-**Structure Completeness:**
-{{evaluation_of_project_structure_completeness}}
-
-**Pattern Completeness:**
-{{verification_of_implementation_patterns_completeness}}
-
-### Gap Analysis Results
-
-{{gap_analysis_findings_with_priority_levels}}
-
-### Validation Issues Addressed
-
-{{description_of_any_issues_found_and_resolutions}}
-
-### Architecture Completeness Checklist
-
-**β Requirements Analysis**
-
-- [x] Project context thoroughly analyzed
-- [x] Scale and complexity assessed
-- [x] Technical constraints identified
-- [x] Cross-cutting concerns mapped
-
-**β Architectural Decisions**
-
-- [x] Critical decisions documented with versions
-- [x] Technology stack fully specified
-- [x] Integration patterns defined
-- [x] Performance considerations addressed
-
-**β Implementation Patterns**
-
-- [x] Naming conventions established
-- [x] Structure patterns defined
-- [x] Communication patterns specified
-- [x] Process patterns documented
-
-**β Project Structure**
-
-- [x] Complete directory structure defined
-- [x] Component boundaries established
-- [x] Integration points mapped
-- [x] Requirements to structure mapping complete
-
-### Architecture Readiness Assessment
-
-**Overall Status:** READY FOR IMPLEMENTATION
-
-**Confidence Level:** {{high/medium/low}} based on validation results
-
-**Key Strengths:**
-{{list_of_architecture_strengths}}
-
-**Areas for Future Enhancement:**
-{{areas_that_could_be_improved_later}}
-
-### Implementation Handoff
-
-**AI Agent Guidelines:**
-
-- Follow all architectural decisions exactly as documented
-- Use implementation patterns consistently across all components
-- Respect project structure and boundaries
-- Refer to this document for all architectural questions
-
-**First Implementation Priority:**
-{{starter_template_command_or_first_architectural_step}}
-```
-
-### 7. Present Content and Menu
-
-Show the validation results and present choices:
-
-"I've completed a comprehensive validation of your architecture.
-
-**Validation Summary:**
-
-- β Coherence: All decisions work together
-- β Coverage: All requirements are supported
-- β Readiness: AI agents can implement consistently
-
-**Here's what I'll add to complete the architecture document:**
-
-[Show the complete markdown content from step 6]
-
-**What would you like to do?**
-[A] Advanced Elicitation - Address any complex architectural concerns
-[P] Party Mode - Review validation from different implementation perspectives
-[C] Continue - Complete the architecture and finish workflow
-
-### 8. Handle Menu Selection
-
-#### If 'A' (Advanced Elicitation):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml with validation issues
-- Process enhanced solutions for complex concerns
-- Ask user: "Accept these architectural improvements? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'P' (Party Mode):
-
-- Read fully and follow: {project-root}/_bmad/core/workflows/party-mode/workflow.md with validation context
-- Process collaborative insights on implementation readiness
-- Ask user: "Accept these changes to the validation results? (y/n)"
-- If yes: Update content, then return to A/P/C menu
-- If no: Keep original content, then return to A/P/C menu
-
-#### If 'C' (Continue):
-
-- Append the final content to `{planning_artifacts}/architecture.md`
-- Update frontmatter: `stepsCompleted: [1, 2, 3, 4, 5, 6, 7]`
-- Load `./step-08-complete.md`
-
-## APPEND TO DOCUMENT:
-
-When user selects 'C', append the content directly to the document using the structure from step 6.
-
-## SUCCESS METRICS:
-
-β All architectural decisions validated for coherence
-β Complete requirements coverage verified
-β Implementation readiness confirmed
-β All gaps identified and addressed
-β Comprehensive validation checklist completed
-β A/P/C menu presented and handled correctly
-β Content properly appended to document when C selected
-
-## FAILURE MODES:
-
-β Skipping validation of decision compatibility
-β Not verifying all requirements are architecturally supported
-β Missing potential implementation conflicts
-β Not addressing gaps found during validation
-β Providing incomplete validation checklist
-β Not presenting A/P/C menu after content generation
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## NEXT STEP:
-
-After user selects 'C' and content is saved to document, load `./step-08-complete.md` to complete the workflow and provide implementation guidance.
-
-Remember: Do NOT proceed to step-08 until user explicitly selects 'C' from the A/P/C menu and content is saved!
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-08-complete.md b/src/bmm/workflows/3-solutioning/create-architecture/steps/step-08-complete.md
deleted file mode 100644
index f317bddf..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/steps/step-08-complete.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# Step 8: Architecture Completion & Handoff
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-- π NEVER generate content without user input
-
-- π CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
-- β ALWAYS treat this as collaborative completion between architectural peers
-- π YOU ARE A FACILITATOR, not a content generator
-- π¬ FOCUS on successful workflow completion and implementation handoff
-- π― PROVIDE clear next steps for implementation phase
-- β οΈ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## EXECUTION PROTOCOLS:
-
-- π― Show your analysis before taking any action
-- π― Present completion summary and implementation guidance
-- π Update frontmatter with final workflow state
-- π« THIS IS THE FINAL STEP IN THIS WORKFLOW
-
-## YOUR TASK:
-
-Complete the architecture workflow, provide a comprehensive completion summary, and guide the user to the next phase of their project development.
-
-## COMPLETION SEQUENCE:
-
-### 1. Congratulate the User on Completion
-
-Both you and the User completed something amazing here - give a summary of what you achieved together and really congratulate the user on a job well done.
-
-### 2. Update the created document's frontmatter
-
-```yaml
-stepsCompleted: [1, 2, 3, 4, 5, 6, 7, 8]
-workflowType: 'architecture'
-lastStep: 8
-status: 'complete'
-completedAt: '{{current_date}}'
-```
-
-### 3. Next Steps Guidance
-
-Architecture complete. Read fully and follow: `_bmad/core/tasks/bmad-help.md` with argument `Create Architecture`.
-
-Upon Completion of task output: offer to answer any questions about the Architecture Document.
-
-
-## SUCCESS METRICS:
-
-β Complete architecture document delivered with all sections
-β All architectural decisions documented and validated
-β Implementation patterns and consistency rules finalized
-β Project structure complete with all files and directories
-β User provided with clear next steps and implementation guidance
-β Workflow status properly updated
-β User collaboration maintained throughout completion process
-
-## FAILURE MODES:
-
-β Not providing clear implementation guidance
-β Missing final validation of document completeness
-β Not updating workflow status appropriately
-β Failing to celebrate the successful completion
-β Not providing specific next steps for the user
-β Rushing completion without proper summary
-
-β **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
-β **CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
-β **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
-
-## WORKFLOW COMPLETE:
-
-This is the final step of the Architecture workflow. The user now has a complete, validated architecture document ready for AI agent implementation.
-
-The architecture will serve as the single source of truth for all technical decisions, ensuring consistent implementation across the entire project development lifecycle.
diff --git a/src/bmm/workflows/3-solutioning/create-architecture/workflow.md b/src/bmm/workflows/3-solutioning/create-architecture/workflow.md
deleted file mode 100644
index d36c328e..00000000
--- a/src/bmm/workflows/3-solutioning/create-architecture/workflow.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-name: create-architecture
-description: Collaborative architectural decision facilitation for AI-agent consistency. Replaces template-driven architecture with intelligent, adaptive conversation that produces a decision-focused architecture document optimized for preventing agent conflicts.
-web_bundle: true
----
-
-# Architecture Workflow
-
-**Goal:** Create comprehensive architecture decisions through collaborative step-by-step discovery that ensures AI agents implement consistently.
-
-**Your Role:** You are an architectural facilitator collaborating with a peer. This is a partnership, not a client-vendor relationship. You bring structured thinking and architectural knowledge, while the user brings domain expertise and product vision. Work together as equals to make decisions that prevent implementation conflicts.
-
----
-
-## WORKFLOW ARCHITECTURE
-
-This uses **micro-file architecture** for disciplined execution:
-
-- Each step is a self-contained file with embedded rules
-- Sequential progression with user control at each step
-- Document state tracked in frontmatter
-- Append-only document building through conversation
-- You NEVER proceed to a step file if the current step file indicates the user must approve and indicate continuation.
-
----
-
-## INITIALIZATION
-
-### Configuration Loading
-
-Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
-
-- `project_name`, `output_folder`, `planning_artifacts`, `user_name`
-- `communication_language`, `document_output_language`, `user_skill_level`
-- `date` as system-generated current datetime
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Paths
-
-- `installed_path` = `{project-root}/_bmad/bmm/workflows/3-solutioning/architecture`
-- `template_path` = `{installed_path}/architecture-decision-template.md`
-- `data_files_path` = `{installed_path}/data/`
-
----
-
-## EXECUTION
-
-Read fully and follow: `steps/step-01-init.md` to begin the workflow.
-
-**Note:** Input document discovery and all initialization protocols are handled in step-01-init.md.
diff --git a/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-01-validate-prerequisites.md b/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-01-validate-prerequisites.md
deleted file mode 100644
index c8d6b133..00000000
--- a/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-01-validate-prerequisites.md
+++ /dev/null
@@ -1,259 +0,0 @@
----
-name: 'step-01-validate-prerequisites'
-description: 'Validate required documents exist and extract all requirements for epic and story creation'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories'
-
-# File References
-thisStepFile: './step-01-validate-prerequisites.md'
-nextStepFile: './step-02-design-epics.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/epics.md'
-epicsTemplate: '{workflow_path}/templates/epics-template.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
-
-# Template References
-epicsTemplate: '{workflow_path}/templates/epics-template.md'
----
-
-# Step 1: Validate Prerequisites and Extract Requirements
-
-## STEP GOAL:
-
-To validate that all required input documents exist and extract all requirements (FRs, NFRs, and additional requirements from UX/Architecture) needed for epic and story creation.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product strategist and technical specifications writer
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring requirements extraction expertise
-- β User brings their product vision and context
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on extracting and organizing requirements
-- π« FORBIDDEN to start creating epics or stories in this step
-- π¬ Extract requirements from ALL available documents
-- πͺ POPULATE the template sections exactly as needed
-
-## EXECUTION PROTOCOLS:
-
-- π― Extract requirements systematically from all documents
-- πΎ Populate {outputFile} with extracted requirements
-- π Update frontmatter with extraction progress
-- π« FORBIDDEN to load next step until user selects 'C' and requirements are extracted
-
-## REQUIREMENTS EXTRACTION PROCESS:
-
-### 1. Welcome and Overview
-
-Welcome {user_name} to comprehensive epic and story creation!
-
-**CRITICAL PREREQUISITE VALIDATION:**
-
-Verify required documents exist and are complete:
-
-1. **PRD.md** - Contains requirements (FRs and NFRs) and product scope
-2. **Architecture.md** - Contains technical decisions, API contracts, data models
-3. **UX Design.md** (if UI exists) - Contains interaction patterns, mockups, user flows
-
-### 2. Document Discovery and Validation
-
-Search for required documents using these patterns (sharded means a large document was split into multiple small files with an index.md into a folder) - if the whole document is found, use that instead of the sharded version:
-
-**PRD Document Search Priority:**
-
-1. `{planning_artifacts}/*prd*.md` (whole document)
-2. `{planning_artifacts}/*prd*/index.md` (sharded version)
-
-**Architecture Document Search Priority:**
-
-1. `{planning_artifacts}/*architecture*.md` (whole document)
-2. `{planning_artifacts}/*architecture*/index.md` (sharded version)
-
-**UX Design Document Search (Optional):**
-
-1. `{planning_artifacts}/*ux*.md` (whole document)
-2. `{planning_artifacts}/*ux*/index.md` (sharded version)
-
-Before proceeding, Ask the user if there are any other documents to include for analysis, and if anything found should be excluded. Wait for user confirmation. Once confirmed, create the {outputFile} from the {epicsTemplate} and in the front matter list the files in the array of `inputDocuments: []`.
-
-### 3. Extract Functional Requirements (FRs)
-
-From the PRD document (full or sharded), read then entire document and extract ALL functional requirements:
-
-**Extraction Method:**
-
-- Look for numbered items like "FR1:", "Functional Requirement 1:", or similar
-- Identify requirement statements that describe what the system must DO
-- Include user actions, system behaviors, and business rules
-
-**Format the FR list as:**
-
-```
-FR1: [Clear, testable requirement description]
-FR2: [Clear, testable requirement description]
-...
-```
-
-### 4. Extract Non-Functional Requirements (NFRs)
-
-From the PRD document, extract ALL non-functional requirements:
-
-**Extraction Method:**
-
-- Look for performance, security, usability, reliability requirements
-- Identify constraints and quality attributes
-- Include technical standards and compliance requirements
-
-**Format the NFR list as:**
-
-```
-NFR1: [Performance/Security/Usability requirement]
-NFR2: [Performance/Security/Usability requirement]
-...
-```
-
-### 5. Extract Additional Requirements from Architecture
-
-Review the Architecture document for technical requirements that impact epic and story creation:
-
-**Look for:**
-
-- **Starter Template**: Does Architecture specify a starter/greenfield template? If YES, document this for Epic 1 Story 1
-- Infrastructure and deployment requirements
-- Integration requirements with external systems
-- Data migration or setup requirements
-- Monitoring and logging requirements
-- API versioning or compatibility requirements
-- Security implementation requirements
-
-**IMPORTANT**: If a starter template is mentioned in Architecture, note it prominently. This will impact Epic 1 Story 1.
-
-**Format Additional Requirements as:**
-
-```
-- [Technical requirement from Architecture that affects implementation]
-- [Infrastructure setup requirement]
-- [Integration requirement]
-...
-```
-
-### 6. Extract Additional Requirements from UX (if exists)
-
-Review the UX document for requirements that affect epic and story creation:
-
-**Look for:**
-
-- Responsive design requirements
-- Accessibility requirements
-- Browser/device compatibility
-- User interaction patterns that need implementation
-- Animation or transition requirements
-- Error handling UX requirements
-
-**Add these to Additional Requirements list.**
-
-### 7. Load and Initialize Template
-
-Load {epicsTemplate} and initialize {outputFile}:
-
-1. Copy the entire template to {outputFile}
-2. Replace {{project_name}} with the actual project name
-3. Replace placeholder sections with extracted requirements:
- - {{fr_list}} β extracted FRs
- - {{nfr_list}} β extracted NFRs
- - {{additional_requirements}} β extracted additional requirements
-4. Leave {{requirements_coverage_map}} and {{epics_list}} as placeholders for now
-
-### 8. Present Extracted Requirements
-
-Display to user:
-
-**Functional Requirements Extracted:**
-
-- Show count of FRs found
-- Display the first few FRs as examples
-- Ask if any FRs are missing or incorrectly captured
-
-**Non-Functional Requirements Extracted:**
-
-- Show count of NFRs found
-- Display key NFRs
-- Ask if any constraints were missed
-
-**Additional Requirements:**
-
-- Summarize technical requirements from Architecture
-- Summarize UX requirements (if applicable)
-- Verify completeness
-
-### 9. Get User Confirmation
-
-Ask: "Do these extracted requirements accurately represent what needs to be built? Any additions or corrections?"
-
-Update the requirements based on user feedback until confirmation is received.
-
-## CONTENT TO SAVE TO DOCUMENT:
-
-After extraction and confirmation, update {outputFile} with:
-
-- Complete FR list in {{fr_list}} section
-- Complete NFR list in {{nfr_list}} section
-- All additional requirements in {{additional_requirements}} section
-
-### 10. Present MENU OPTIONS
-
-Display: `**Confirm the Requirements are complete and correct to [C] continue:**`
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- User can chat or ask questions - always respond and then end with display again of the menu option
-
-#### Menu Handling Logic:
-
-- IF C: Save all to {outputFile}, update frontmatter, then read fully and follow: {nextStepFile}
-- IF Any other comments or queries: help user respond then [Redisplay Menu Options](#10-present-menu-options)
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN C is selected and all requirements are saved to document and frontmatter is updated, will you then read fully and follow: {nextStepFile} to begin epic design step.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All required documents found and validated
-- All FRs extracted and formatted correctly
-- All NFRs extracted and formatted correctly
-- Additional requirements from Architecture/UX identified
-- Template initialized with requirements
-- User confirms requirements are complete and accurate
-
-### β SYSTEM FAILURE:
-
-- Missing required documents
-- Incomplete requirements extraction
-- Template not properly initialized
-- Not saving requirements to output file
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-02-design-epics.md b/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-02-design-epics.md
deleted file mode 100644
index 1b497c2a..00000000
--- a/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-02-design-epics.md
+++ /dev/null
@@ -1,233 +0,0 @@
----
-name: 'step-02-design-epics'
-description: 'Design and approve the epics_list that will organize all requirements into user-value-focused epics'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories'
-
-# File References
-thisStepFile: './step-02-design-epics.md'
-nextStepFile: './step-03-create-stories.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/epics.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
-
-# Template References
-epicsTemplate: '{workflow_path}/templates/epics-template.md'
----
-
-# Step 2: Design Epic List
-
-## STEP GOAL:
-
-To design and get approval for the epics_list that will organize all requirements into user-value-focused epics.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: When loading next step with 'C', ensure entire file is read
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product strategist and technical specifications writer
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring product strategy and epic design expertise
-- β User brings their product vision and priorities
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on creating the epics_list
-- π« FORBIDDEN to create individual stories in this step
-- π¬ Organize epics around user value, not technical layers
-- πͺ GET explicit approval for the epics_list
-- π **CRITICAL: Each epic must be standalone and enable future epics without requiring future epics to function**
-
-## EXECUTION PROTOCOLS:
-
-- π― Design epics collaboratively based on extracted requirements
-- πΎ Update {{epics_list}} in {outputFile}
-- π Document the FR coverage mapping
-- π« FORBIDDEN to load next step until user approves epics_list
-
-## EPIC DESIGN PROCESS:
-
-### 1. Review Extracted Requirements
-
-Load {outputFile} and review:
-
-- **Functional Requirements:** Count and review FRs from Step 1
-- **Non-Functional Requirements:** Review NFRs that need to be addressed
-- **Additional Requirements:** Review technical and UX requirements
-
-### 2. Explain Epic Design Principles
-
-**EPIC DESIGN PRINCIPLES:**
-
-1. **User-Value First**: Each epic must enable users to accomplish something meaningful
-2. **Requirements Grouping**: Group related FRs that deliver cohesive user outcomes
-3. **Incremental Delivery**: Each epic should deliver value independently
-4. **Logical Flow**: Natural progression from user's perspective
-5. **π Dependency-Free Within Epic**: Stories within an epic must NOT depend on future stories
-
-**β οΈ CRITICAL PRINCIPLE:**
-Organize by USER VALUE, not technical layers:
-
-**β CORRECT Epic Examples (Standalone & Enable Future Epics):**
-
-- Epic 1: User Authentication & Profiles (users can register, login, manage profiles) - **Standalone: Complete auth system**
-- Epic 2: Content Creation (users can create, edit, publish content) - **Standalone: Uses auth, creates content**
-- Epic 3: Social Interaction (users can follow, comment, like content) - **Standalone: Uses auth + content**
-- Epic 4: Search & Discovery (users can find content and other users) - **Standalone: Uses all previous**
-
-**β WRONG Epic Examples (Technical Layers or Dependencies):**
-
-- Epic 1: Database Setup (creates all tables upfront) - **No user value**
-- Epic 2: API Development (builds all endpoints) - **No user value**
-- Epic 3: Frontend Components (creates reusable components) - **No user value**
-- Epic 4: Deployment Pipeline (CI/CD setup) - **No user value**
-
-**π DEPENDENCY RULES:**
-
-- Each epic must deliver COMPLETE functionality for its domain
-- Epic 2 must not require Epic 3 to function
-- Epic 3 can build upon Epic 1 & 2 but must stand alone
-
-### 3. Design Epic Structure Collaboratively
-
-**Step A: Identify User Value Themes**
-
-- Look for natural groupings in the FRs
-- Identify user journeys or workflows
-- Consider user types and their goals
-
-**Step B: Propose Epic Structure**
-For each proposed epic:
-
-1. **Epic Title**: User-centric, value-focused
-2. **User Outcome**: What users can accomplish after this epic
-3. **FR Coverage**: Which FR numbers this epic addresses
-4. **Implementation Notes**: Any technical or UX considerations
-
-**Step C: Create the epics_list**
-
-Format the epics_list as:
-
-```
-## Epic List
-
-### Epic 1: [Epic Title]
-[Epic goal statement - what users can accomplish]
-**FRs covered:** FR1, FR2, FR3, etc.
-
-### Epic 2: [Epic Title]
-[Epic goal statement - what users can accomplish]
-**FRs covered:** FR4, FR5, FR6, etc.
-
-[Continue for all epics]
-```
-
-### 4. Present Epic List for Review
-
-Display the complete epics_list to user with:
-
-- Total number of epics
-- FR coverage per epic
-- User value delivered by each epic
-- Any natural dependencies
-
-### 5. Create Requirements Coverage Map
-
-Create {{requirements_coverage_map}} showing how each FR maps to an epic:
-
-```
-### FR Coverage Map
-
-FR1: Epic 1 - [Brief description]
-FR2: Epic 1 - [Brief description]
-FR3: Epic 2 - [Brief description]
-...
-```
-
-This ensures no FRs are missed.
-
-### 6. Collaborative Refinement
-
-Ask user:
-
-- "Does this epic structure align with your product vision?"
-- "Are all user outcomes properly captured?"
-- "Should we adjust any epic groupings?"
-- "Are there natural dependencies we've missed?"
-
-### 7. Get Final Approval
-
-**CRITICAL:** Must get explicit user approval:
-"Do you approve this epic structure for proceeding to story creation?"
-
-If user wants changes:
-
-- Make the requested adjustments
-- Update the epics_list
-- Re-present for approval
-- Repeat until approval is received
-
-## CONTENT TO UPDATE IN DOCUMENT:
-
-After approval, update {outputFile}:
-
-1. Replace {{epics_list}} placeholder with the approved epic list
-2. Replace {{requirements_coverage_map}} with the coverage map
-3. Ensure all FRs are mapped to epics
-
-### 8. Present MENU OPTIONS
-
-Display: "**Select an Option:** [A] Advanced Elicitation [P] Party Mode [C] Continue"
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: {advancedElicitationTask}
-- IF P: Read fully and follow: {partyModeWorkflow}
-- IF C: Save approved epics_list to {outputFile}, update frontmatter, then read fully and follow: {nextStepFile}
-- IF Any other comments or queries: help user respond then [Redisplay Menu Options](#8-present-menu-options)
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution completes, redisplay the menu
-- User can chat or ask questions - always respond when conversation ends, redisplay the menu options
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN C is selected and the approved epics_list is saved to document, will you then read fully and follow: {nextStepFile} to begin story creation step.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- Epics designed around user value
-- All FRs mapped to specific epics
-- epics_list created and formatted correctly
-- Requirements coverage map completed
-- User gives explicit approval for epic structure
-- Document updated with approved epics
-
-### β SYSTEM FAILURE:
-
-- Epics organized by technical layers
-- Missing FRs in coverage map
-- No user approval obtained
-- epics_list not saved to document
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-03-create-stories.md b/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-03-create-stories.md
deleted file mode 100644
index 2e13f9b2..00000000
--- a/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-03-create-stories.md
+++ /dev/null
@@ -1,272 +0,0 @@
----
-name: 'step-03-create-stories'
-description: 'Generate all epics with their stories following the template structure'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories'
-
-# File References
-thisStepFile: './step-03-create-stories.md'
-nextStepFile: './step-04-final-validation.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/epics.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
-
-# Template References
-epicsTemplate: '{workflow_path}/templates/epics-template.md'
----
-
-# Step 3: Generate Epics and Stories
-
-## STEP GOAL:
-
-To generate all epics with their stories based on the approved epics_list, following the template structure exactly.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: Process epics sequentially
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product strategist and technical specifications writer
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring story creation and acceptance criteria expertise
-- β User brings their implementation priorities and constraints
-
-### Step-Specific Rules:
-
-- π― Generate stories for each epic following the template exactly
-- π« FORBIDDEN to deviate from template structure
-- π¬ Each story must have clear acceptance criteria
-- πͺ ENSURE each story is completable by a single dev agent
-- π **CRITICAL: Stories MUST NOT depend on future stories within the same epic**
-
-## EXECUTION PROTOCOLS:
-
-- π― Generate stories collaboratively with user input
-- πΎ Append epics and stories to {outputFile} following template
-- π Process epics one at a time in sequence
-- π« FORBIDDEN to skip any epic or rush through stories
-
-## STORY GENERATION PROCESS:
-
-### 1. Load Approved Epic Structure
-
-Load {outputFile} and review:
-
-- Approved epics_list from Step 2
-- FR coverage map
-- All requirements (FRs, NFRs, additional)
-- Template structure at the end of the document
-
-### 2. Explain Story Creation Approach
-
-**STORY CREATION GUIDELINES:**
-
-For each epic, create stories that:
-
-- Follow the exact template structure
-- Are sized for single dev agent completion
-- Have clear user value
-- Include specific acceptance criteria
-- Reference requirements being fulfilled
-
-**π¨ DATABASE/ENTITY CREATION PRINCIPLE:**
-Create tables/entities ONLY when needed by the story:
-
-- β WRONG: Epic 1 Story 1 creates all 50 database tables
-- β RIGHT: Each story creates/alters ONLY the tables it needs
-
-**π STORY DEPENDENCY PRINCIPLE:**
-Stories must be independently completable in sequence:
-
-- β WRONG: Story 1.2 requires Story 1.3 to be completed first
-- β RIGHT: Each story can be completed based only on previous stories
-- β WRONG: "Wait for Story 1.4 to be implemented before this works"
-- β RIGHT: "This story works independently and enables future stories"
-
-**STORY FORMAT (from template):**
-
-```
-### Story {N}.{M}: {story_title}
-
-As a {user_type},
-I want {capability},
-So that {value_benefit}.
-
-**Acceptance Criteria:**
-
-**Given** {precondition}
-**When** {action}
-**Then** {expected_outcome}
-**And** {additional_criteria}
-```
-
-**β GOOD STORY EXAMPLES:**
-
-_Epic 1: User Authentication_
-
-- Story 1.1: User Registration with Email
-- Story 1.2: User Login with Password
-- Story 1.3: Password Reset via Email
-
-_Epic 2: Content Creation_
-
-- Story 2.1: Create New Blog Post
-- Story 2.2: Edit Existing Blog Post
-- Story 2.3: Publish Blog Post
-
-**β BAD STORY EXAMPLES:**
-
-- Story: "Set up database" (no user value)
-- Story: "Create all models" (too large, no user value)
-- Story: "Build authentication system" (too large)
-- Story: "Login UI (depends on Story 1.3 API endpoint)" (future dependency!)
-- Story: "Edit post (requires Story 1.4 to be implemented first)" (wrong order!)
-
-### 3. Process Epics Sequentially
-
-For each epic in the approved epics_list:
-
-#### A. Epic Overview
-
-Display:
-
-- Epic number and title
-- Epic goal statement
-- FRs covered by this epic
-- Any NFRs or additional requirements relevant
-
-#### B. Story Breakdown
-
-Work with user to break down the epic into stories:
-
-- Identify distinct user capabilities
-- Ensure logical flow within the epic
-- Size stories appropriately
-
-#### C. Generate Each Story
-
-For each story in the epic:
-
-1. **Story Title**: Clear, action-oriented
-2. **User Story**: Complete the As a/I want/So that format
-3. **Acceptance Criteria**: Write specific, testable criteria
-
-**AC Writing Guidelines:**
-
-- Use Given/When/Then format
-- Each AC should be independently testable
-- Include edge cases and error conditions
-- Reference specific requirements when applicable
-
-#### D. Collaborative Review
-
-After writing each story:
-
-- Present the story to user
-- Ask: "Does this story capture the requirement correctly?"
-- "Is the scope appropriate for a single dev session?"
-- "Are the acceptance criteria complete and testable?"
-
-#### E. Append to Document
-
-When story is approved:
-
-- Append it to {outputFile} following template structure
-- Use correct numbering (Epic N, Story M)
-- Maintain proper markdown formatting
-
-### 4. Epic Completion
-
-After all stories for an epic are complete:
-
-- Display epic summary
-- Show count of stories created
-- Verify all FRs for the epic are covered
-- Get user confirmation to proceed to next epic
-
-### 5. Repeat for All Epics
-
-Continue the process for each epic in the approved list, processing them in order (Epic 1, Epic 2, etc.).
-
-### 6. Final Document Completion
-
-After all epics and stories are generated:
-
-- Verify the document follows template structure exactly
-- Ensure all placeholders are replaced
-- Confirm all FRs are covered
-- Check formatting consistency
-
-## TEMPLATE STRUCTURE COMPLIANCE:
-
-The final {outputFile} must follow this structure exactly:
-
-1. **Overview** section with project name
-2. **Requirements Inventory** with all three subsections populated
-3. **FR Coverage Map** showing requirement to epic mapping
-4. **Epic List** with approved epic structure
-5. **Epic sections** for each epic (N = 1, 2, 3...)
- - Epic title and goal
- - All stories for that epic (M = 1, 2, 3...)
- - Story title and user story
- - Acceptance Criteria using Given/When/Then format
-
-### 7. Present FINAL MENU OPTIONS
-
-After all epics and stories are complete:
-
-Display: "**Select an Option:** [A] Advanced Elicitation [P] Party Mode [C] Continue"
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: {advancedElicitationTask}
-- IF P: Read fully and follow: {partyModeWorkflow}
-- IF C: Save content to {outputFile}, update frontmatter, then read fully and follow: {nextStepFile}
-- IF Any other comments or queries: help user respond then [Redisplay Menu Options](#7-present-final-menu-options)
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After other menu items execution, return to this menu
-- User can chat or ask questions - always respond and then end with display again of the menu options
-
-## CRITICAL STEP COMPLETION NOTE
-
-ONLY WHEN [C continue option] is selected and [all epics and stories saved to document following the template structure exactly], will you then read fully and follow: `{nextStepFile}` to begin final validation phase.
-
----
-
-## π¨ SYSTEM SUCCESS/FAILURE METRICS
-
-### β SUCCESS:
-
-- All epics processed in sequence
-- Stories created for each epic
-- Template structure followed exactly
-- All FRs covered by stories
-- Stories appropriately sized
-- Acceptance criteria are specific and testable
-- Document is complete and ready for development
-
-### β SYSTEM FAILURE:
-
-- Deviating from template structure
-- Missing epics or stories
-- Stories too large or unclear
-- Missing acceptance criteria
-- Not following proper formatting
-
-**Master Rule:** Skipping steps, optimizing sequences, or not following exact instructions is FORBIDDEN and constitutes SYSTEM FAILURE.
diff --git a/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-04-final-validation.md b/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-04-final-validation.md
deleted file mode 100644
index 19aa73d1..00000000
--- a/src/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-04-final-validation.md
+++ /dev/null
@@ -1,149 +0,0 @@
----
-name: 'step-04-final-validation'
-description: 'Validate complete coverage of all requirements and ensure implementation readiness'
-
-# Path Definitions
-workflow_path: '{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories'
-
-# File References
-thisStepFile: './step-04-final-validation.md'
-workflowFile: '{workflow_path}/workflow.md'
-outputFile: '{planning_artifacts}/epics.md'
-
-# Task References
-advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-partyModeWorkflow: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
-
-# Template References
-epicsTemplate: '{workflow_path}/templates/epics-template.md'
----
-
-# Step 4: Final Validation
-
-## STEP GOAL:
-
-To validate complete coverage of all requirements and ensure stories are ready for development.
-
-## MANDATORY EXECUTION RULES (READ FIRST):
-
-### Universal Rules:
-
-- π NEVER generate content without user input
-- π CRITICAL: Read the complete step file before taking any action
-- π CRITICAL: Process validation sequentially without skipping
-- π YOU ARE A FACILITATOR, not a content generator
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Role Reinforcement:
-
-- β You are a product strategist and technical specifications writer
-- β If you already have been given communication or persona patterns, continue to use those while playing this new role
-- β We engage in collaborative dialogue, not command-response
-- β You bring validation expertise and quality assurance
-- β User brings their implementation priorities and final review
-
-### Step-Specific Rules:
-
-- π― Focus ONLY on validating complete requirements coverage
-- π« FORBIDDEN to skip any validation checks
-- π¬ Validate FR coverage, story completeness, and dependencies
-- πͺ ENSURE all stories are ready for development
-
-## EXECUTION PROTOCOLS:
-
-- π― Validate every requirement has story coverage
-- πΎ Check story dependencies and flow
-- π Verify architecture compliance
-- π« FORBIDDEN to approve incomplete coverage
-
-## CONTEXT BOUNDARIES:
-
-- Available context: Complete epic and story breakdown from previous steps
-- Focus: Final validation of requirements coverage and story readiness
-- Limits: Validation only, no new content creation
-- Dependencies: Completed story generation from Step 3
-
-## VALIDATION PROCESS:
-
-### 1. FR Coverage Validation
-
-Review the complete epic and story breakdown to ensure EVERY FR is covered:
-
-**CRITICAL CHECK:**
-
-- Go through each FR from the Requirements Inventory
-- Verify it appears in at least one story
-- Check that acceptance criteria fully address the FR
-- No FRs should be left uncovered
-
-### 2. Architecture Implementation Validation
-
-**Check for Starter Template Setup:**
-
-- Does Architecture document specify a starter template?
-- If YES: Epic 1 Story 1 must be "Set up initial project from starter template"
-- This includes cloning, installing dependencies, initial configuration
-
-**Database/Entity Creation Validation:**
-
-- Are database tables/entities created ONLY when needed by stories?
-- β WRONG: Epic 1 creates all tables upfront
-- β RIGHT: Tables created as part of the first story that needs them
-- Each story should create/modify ONLY what it needs
-
-### 3. Story Quality Validation
-
-**Each story must:**
-
-- Be completable by a single dev agent
-- Have clear acceptance criteria
-- Reference specific FRs it implements
-- Include necessary technical details
-- **Not have forward dependencies** (can only depend on PREVIOUS stories)
-- Be implementable without waiting for future stories
-
-### 4. Epic Structure Validation
-
-**Check that:**
-
-- Epics deliver user value, not technical milestones
-- Dependencies flow naturally
-- Foundation stories only setup what's needed
-- No big upfront technical work
-
-### 5. Dependency Validation (CRITICAL)
-
-**Epic Independence Check:**
-
-- Does each epic deliver COMPLETE functionality for its domain?
-- Can Epic 2 function without Epic 3 being implemented?
-- Can Epic 3 function standalone using Epic 1 & 2 outputs?
-- β WRONG: Epic 2 requires Epic 3 features to work
-- β RIGHT: Each epic is independently valuable
-
-**Within-Epic Story Dependency Check:**
-For each epic, review stories in order:
-
-- Can Story N.1 be completed without Stories N.2, N.3, etc.?
-- Can Story N.2 be completed using only Story N.1 output?
-- Can Story N.3 be completed using only Stories N.1 & N.2 outputs?
-- β WRONG: "This story depends on a future story"
-- β WRONG: Story references features not yet implemented
-- β RIGHT: Each story builds only on previous stories
-
-### 6. Complete and Save
-
-If all validations pass:
-
-- Update any remaining placeholders in the document
-- Ensure proper formatting
-- Save the final epics.md
-
-**Present Final Menu:**
-**All validations complete!** [C] Complete Workflow
-
-When C is selected, the workflow is complete and the epics.md is ready for development.
-
-Epics and Stories complete. Read fully and follow: `_bmad/core/tasks/bmad-help.md` with argument `Create Epics and Stories`.
-
-Upon Completion of task output: offer to answer any questions about the Epics and Stories.
diff --git a/src/bmm/workflows/3-solutioning/create-epics-and-stories/templates/epics-template.md b/src/bmm/workflows/3-solutioning/create-epics-and-stories/templates/epics-template.md
deleted file mode 100644
index 05afe1f5..00000000
--- a/src/bmm/workflows/3-solutioning/create-epics-and-stories/templates/epics-template.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-stepsCompleted: []
-inputDocuments: []
----
-
-# {{project_name}} - Epic Breakdown
-
-## Overview
-
-This document provides the complete epic and story breakdown for {{project_name}}, decomposing the requirements from the PRD, UX Design if it exists, and Architecture requirements into implementable stories.
-
-## Requirements Inventory
-
-### Functional Requirements
-
-{{fr_list}}
-
-### NonFunctional Requirements
-
-{{nfr_list}}
-
-### Additional Requirements
-
-{{additional_requirements}}
-
-### FR Coverage Map
-
-{{requirements_coverage_map}}
-
-## Epic List
-
-{{epics_list}}
-
-
-
-## Epic {{N}}: {{epic_title_N}}
-
-{{epic_goal_N}}
-
-
-
-### Story {{N}}.{{M}}: {{story_title_N_M}}
-
-As a {{user_type}},
-I want {{capability}},
-So that {{value_benefit}}.
-
-**Acceptance Criteria:**
-
-
-
-**Given** {{precondition}}
-**When** {{action}}
-**Then** {{expected_outcome}}
-**And** {{additional_criteria}}
-
-
diff --git a/src/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md b/src/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md
deleted file mode 100644
index a1e78a02..00000000
--- a/src/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-name: create-epics-and-stories
-description: 'Transform PRD requirements and Architecture decisions into comprehensive stories organized by user value. This workflow requires completed PRD + Architecture documents (UX recommended if UI exists) and breaks down requirements into implementation-ready epics and user stories that incorporate all available technical and design context. Creates detailed, actionable stories with complete acceptance criteria for development teams.'
-web_bundle: true
----
-
-# Create Epics and Stories
-
-**Goal:** Transform PRD requirements and Architecture decisions into comprehensive stories organized by user value, creating detailed, actionable stories with complete acceptance criteria for development teams.
-
-**Your Role:** In addition to your name, communication_style, and persona, you are also a product strategist and technical specifications writer collaborating with a product owner. This is a partnership, not a client-vendor relationship. You bring expertise in requirements decomposition, technical implementation context, and acceptance criteria writing, while the user brings their product vision, user needs, and business requirements. Work together as equals.
-
----
-
-## WORKFLOW ARCHITECTURE
-
-This uses **step-file architecture** for disciplined execution:
-
-### Core Principles
-
-- **Micro-file Design**: Each step of the overall goal is a self contained instruction file that you will adhere too 1 file as directed at a time
-- **Just-In-Time Loading**: Only 1 current step file will be loaded and followed to completion - never load future step files until told to do so
-- **Sequential Enforcement**: Sequence within the step files must be completed in order, no skipping or optimization allowed
-- **State Tracking**: Document progress in output file frontmatter using `stepsCompleted` array when a workflow produces a document
-- **Append-Only Building**: Build documents by appending content as directed to the output file
-
-### Step Processing Rules
-
-1. **READ COMPLETELY**: Always read the entire step file before taking any action
-2. **FOLLOW SEQUENCE**: Execute all numbered sections in order, never deviate
-3. **WAIT FOR INPUT**: If a menu is presented, halt and wait for user selection
-4. **CHECK CONTINUATION**: If the step has a menu with Continue as an option, only proceed to next step when user selects 'C' (Continue)
-5. **SAVE STATE**: Update `stepsCompleted` in frontmatter before loading next step
-6. **LOAD NEXT**: When directed, read fully and follow the next step file
-
-### Critical Rules (NO EXCEPTIONS)
-
-- π **NEVER** load multiple step files simultaneously
-- π **ALWAYS** read entire step file before execution
-- π« **NEVER** skip steps or optimize the sequence
-- πΎ **ALWAYS** update frontmatter of output files when writing the final output for a specific step
-- π― **ALWAYS** follow the exact instructions in the step file
-- βΈοΈ **ALWAYS** halt at menus and wait for user input
-- π **NEVER** create mental todo lists from future steps
-
----
-
-## INITIALIZATION SEQUENCE
-
-### 1. Configuration Loading
-
-Load and read full config from {project-root}/_bmad/bmm/config.yaml and resolve:
-
-- `project_name`, `output_folder`, `planning_artifacts`, `user_name`, `communication_language`, `document_output_language`
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### 2. First Step EXECUTION
-
-Read fully and follow: `{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/steps/step-01-validate-prerequisites.md` to begin the workflow.
diff --git a/src/bmm/workflows/4-implementation/code-review/checklist.md b/src/bmm/workflows/4-implementation/code-review/checklist.md
deleted file mode 100644
index f213a6b9..00000000
--- a/src/bmm/workflows/4-implementation/code-review/checklist.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Senior Developer Review - Validation Checklist
-
-- [ ] Story file loaded from `{{story_path}}`
-- [ ] Story Status verified as reviewable (review)
-- [ ] Epic and Story IDs resolved ({{epic_num}}.{{story_num}})
-- [ ] Story Context located or warning recorded
-- [ ] Epic Tech Spec located or warning recorded
-- [ ] Architecture/standards docs loaded (as available)
-- [ ] Tech stack detected and documented
-- [ ] MCP doc search performed (or web fallback) and references captured
-- [ ] Acceptance Criteria cross-checked against implementation
-- [ ] File List reviewed and validated for completeness
-- [ ] Tests identified and mapped to ACs; gaps noted
-- [ ] Code quality review performed on changed files
-- [ ] Security review performed on changed files and dependencies
-- [ ] Outcome decided (Approve/Changes Requested/Blocked)
-- [ ] Review notes appended under "Senior Developer Review (AI)"
-- [ ] Change Log updated with review entry
-- [ ] Status updated according to settings (if enabled)
-- [ ] Sprint status synced (if sprint tracking enabled)
-- [ ] Story saved successfully
-
-_Reviewer: {{user_name}} on {{date}}_
diff --git a/src/bmm/workflows/4-implementation/code-review/instructions.xml b/src/bmm/workflows/4-implementation/code-review/instructions.xml
deleted file mode 100644
index e5649559..00000000
--- a/src/bmm/workflows/4-implementation/code-review/instructions.xml
+++ /dev/null
@@ -1,227 +0,0 @@
-
- The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
- You MUST have already loaded and processed: {installed_path}/workflow.yaml
- Communicate all responses in {communication_language} and language MUST be tailored to {user_skill_level}
- Generate all documents in {document_output_language}
-
- π₯ YOU ARE AN ADVERSARIAL CODE REVIEWER - Find what's wrong or missing! π₯
- Your purpose: Validate story file claims against actual implementation
- Challenge everything: Are tasks marked [x] actually done? Are ACs really implemented?
- Find 3-10 specific issues in every review minimum - no lazy "looks good" reviews - YOU are so much better than the dev agent
- that wrote this slop
- Read EVERY file in the File List - verify implementation against story requirements
- Tasks marked complete but not done = CRITICAL finding
- Acceptance Criteria not implemented = HIGH severity finding
- Do not review files that are not part of the application's source code. Always exclude the _bmad/ and _bmad-output/ folders from the review. Always exclude IDE and CLI configuration folders like .cursor/ and .windsurf/ and .claude/
-
-
-
- Use provided {{story_path}} or ask user which story file to review
- Read COMPLETE story file
- Set {{story_key}} = extracted key from filename (e.g., "1-2-user-authentication.md" β "1-2-user-authentication") or story
- metadata
- Parse sections: Story, Acceptance Criteria, Tasks/Subtasks, Dev Agent Record β File List, Change Log
-
-
- Check if git repository detected in current directory
-
- Run `git status --porcelain` to find uncommitted changes
- Run `git diff --name-only` to see modified files
- Run `git diff --cached --name-only` to see staged files
- Compile list of actually changed files from git output
-
-
-
- Compare story's Dev Agent Record β File List with actual git changes
- Note discrepancies:
- - Files in git but not in story File List
- - Files in story File List but no git changes
- - Missing documentation of what was actually changed
-
-
-
- Load {project_context} for coding standards (if exists)
-
-
-
- Extract ALL Acceptance Criteria from story
- Extract ALL Tasks/Subtasks with completion status ([x] vs [ ])
- From Dev Agent Record β File List, compile list of claimed changes
-
- Create review plan:
- 1. **AC Validation**: Verify each AC is actually implemented
- 2. **Task Audit**: Verify each [x] task is really done
- 3. **Code Quality**: Security, performance, maintainability
- 4. **Test Quality**: Real tests vs placeholder bullshit
-
-
-
-
- VALIDATE EVERY CLAIM - Check git reality vs story claims
-
-
- Review git vs story File List discrepancies:
- 1. **Files changed but not in story File List** β MEDIUM finding (incomplete documentation)
- 2. **Story lists files but no git changes** β HIGH finding (false claims)
- 3. **Uncommitted changes not documented** β MEDIUM finding (transparency issue)
-
-
-
- Create comprehensive review file list from story File List and git changes
-
-
- For EACH Acceptance Criterion:
- 1. Read the AC requirement
- 2. Search implementation files for evidence
- 3. Determine: IMPLEMENTED, PARTIAL, or MISSING
- 4. If MISSING/PARTIAL β HIGH SEVERITY finding
-
-
-
- For EACH task marked [x]:
- 1. Read the task description
- 2. Search files for evidence it was actually done
- 3. **CRITICAL**: If marked [x] but NOT DONE β CRITICAL finding
- 4. Record specific proof (file:line)
-
-
-
- For EACH file in comprehensive review list:
- 1. **Security**: Look for injection risks, missing validation, auth issues
- 2. **Performance**: N+1 queries, inefficient loops, missing caching
- 3. **Error Handling**: Missing try/catch, poor error messages
- 4. **Code Quality**: Complex functions, magic numbers, poor naming
- 5. **Test Quality**: Are tests real assertions or placeholders?
-
-
-
- NOT LOOKING HARD ENOUGH - Find more problems!
- Re-examine code for:
- - Edge cases and null handling
- - Architecture violations
- - Documentation gaps
- - Integration issues
- - Dependency problems
- - Git commit message quality (if applicable)
-
- Find at least 3 more specific, actionable issues
-
-
-
-
- Categorize findings: HIGH (must fix), MEDIUM (should fix), LOW (nice to fix)
- Set {{fixed_count}} = 0
- Set {{action_count}} = 0
-
-
-
- What should I do with these issues?
-
- 1. **Fix them automatically** - I'll update the code and tests
- 2. **Create action items** - Add to story Tasks/Subtasks for later
- 3. **Show me details** - Deep dive into specific issues
-
- Choose [1], [2], or specify which issue to examine:
-
-
- Fix all HIGH and MEDIUM issues in the code
- Add/update tests as needed
- Update File List in story if files changed
- Update story Dev Agent Record with fixes applied
- Set {{fixed_count}} = number of HIGH and MEDIUM issues fixed
- Set {{action_count}} = 0
-
-
-
- Add "Review Follow-ups (AI)" subsection to Tasks/Subtasks
- For each issue: `- [ ] [AI-Review][Severity] Description [file:line]`
- Set {{action_count}} = number of action items created
- Set {{fixed_count}} = 0
-
-
-
- Show detailed explanation with code examples
- Return to fix decision
-
-
-
-
-
-
- Set {{new_status}} = "done"
- Update story Status field to "done"
-
-
- Set {{new_status}} = "in-progress"
- Update story Status field to "in-progress"
-
- Save story file
-
-
-
- Set {{current_sprint_status}} = "enabled"
-
-
- Set {{current_sprint_status}} = "no-sprint-tracking"
-
-
-
-
- Load the FULL file: {sprint_status}
- Find development_status key matching {{story_key}}
-
-
- Update development_status[{{story_key}}] = "done"
- Save file, preserving ALL comments and structure
-
-
-
-
- Update development_status[{{story_key}}] = "in-progress"
- Save file, preserving ALL comments and structure
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/bmm/workflows/4-implementation/code-review/workflow.yaml b/src/bmm/workflows/4-implementation/code-review/workflow.yaml
deleted file mode 100644
index 9e66b932..00000000
--- a/src/bmm/workflows/4-implementation/code-review/workflow.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-# Review Story Workflow
-name: code-review
-description: "Perform an ADVERSARIAL Senior Developer code review that finds 3-10 specific problems in every story. Challenges everything: code quality, test coverage, architecture compliance, security, performance. NEVER accepts `looks good` - must find minimum issues and can auto-fix with user approval."
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-user_skill_level: "{config_source}:user_skill_level"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-planning_artifacts: "{config_source}:planning_artifacts"
-implementation_artifacts: "{config_source}:implementation_artifacts"
-output_folder: "{implementation_artifacts}"
-sprint_status: "{implementation_artifacts}/sprint-status.yaml"
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/code-review"
-instructions: "{installed_path}/instructions.xml"
-validation: "{installed_path}/checklist.md"
-template: false
-
-variables:
- # Project context
- project_context: "**/project-context.md"
- story_dir: "{implementation_artifacts}"
-
-# Smart input file references - handles both whole docs and sharded docs
-# Priority: Whole document first, then sharded version
-# Strategy: SELECTIVE LOAD - only load the specific epic needed for this story review
-input_file_patterns:
- architecture:
- description: "System architecture for review context"
- whole: "{planning_artifacts}/*architecture*.md"
- sharded: "{planning_artifacts}/*architecture*/*.md"
- load_strategy: "FULL_LOAD"
- ux_design:
- description: "UX design specification (if UI review)"
- whole: "{planning_artifacts}/*ux*.md"
- sharded: "{planning_artifacts}/*ux*/*.md"
- load_strategy: "FULL_LOAD"
- epics:
- description: "Epic containing story being reviewed"
- whole: "{planning_artifacts}/*epic*.md"
- sharded_index: "{planning_artifacts}/*epic*/index.md"
- sharded_single: "{planning_artifacts}/*epic*/epic-{{epic_num}}.md"
- load_strategy: "SELECTIVE_LOAD"
-
-standalone: true
-web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/correct-course/checklist.md b/src/bmm/workflows/4-implementation/correct-course/checklist.md
deleted file mode 100644
index f13ab9be..00000000
--- a/src/bmm/workflows/4-implementation/correct-course/checklist.md
+++ /dev/null
@@ -1,288 +0,0 @@
-# Change Navigation Checklist
-
-This checklist is executed as part of: {project-root}/_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml
-Work through each section systematically with the user, recording findings and impacts
-
-
-
-
-
-
-Identify the triggering story that revealed this issue
-Document story ID and brief description
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Define the core problem precisely
-Categorize issue type:
- - Technical limitation discovered during implementation
- - New requirement emerged from stakeholders
- - Misunderstanding of original requirements
- - Strategic pivot or market change
- - Failed approach requiring different solution
-Write clear problem statement
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Assess initial impact and gather supporting evidence
-Collect concrete examples, error messages, stakeholder feedback, or technical constraints
-Document evidence for later reference
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-HALT: "Cannot proceed without understanding what caused the need for change"
-HALT: "Need concrete evidence or examples of the issue before analyzing impact"
-
-
-
-
-
-
-
-Evaluate current epic containing the trigger story
-Can this epic still be completed as originally planned?
-If no, what modifications are needed?
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Determine required epic-level changes
-Check each scenario:
- - Modify existing epic scope or acceptance criteria
- - Add new epic to address the issue
- - Remove or defer epic that's no longer viable
- - Completely redefine epic based on new understanding
-Document specific epic changes needed
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Review all remaining planned epics for required changes
-Check each future epic for impact
-Identify dependencies that may be affected
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Check if issue invalidates future epics or necessitates new ones
-Does this change make any planned epics obsolete?
-Are new epics needed to address gaps created by this change?
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Consider if epic order or priority should change
-Should epics be resequenced based on this issue?
-Do priorities need adjustment?
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-
-
-
-
-Check PRD for conflicts
-Does issue conflict with core PRD goals or objectives?
-Do requirements need modification, addition, or removal?
-Is the defined MVP still achievable or does scope need adjustment?
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Review Architecture document for conflicts
-Check each area for impact:
- - System components and their interactions
- - Architectural patterns and design decisions
- - Technology stack choices
- - Data models and schemas
- - API designs and contracts
- - Integration points
-Document specific architecture sections requiring updates
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Examine UI/UX specifications for conflicts
-Check for impact on:
- - User interface components
- - User flows and journeys
- - Wireframes or mockups
- - Interaction patterns
- - Accessibility considerations
-Note specific UI/UX sections needing revision
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Consider impact on other artifacts
-Review additional artifacts for impact:
- - Deployment scripts
- - Infrastructure as Code (IaC)
- - Monitoring and observability setup
- - Testing strategies
- - Documentation
- - CI/CD pipelines
-Document any secondary artifacts requiring updates
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-
-
-
-
-Evaluate Option 1: Direct Adjustment
-Can the issue be addressed by modifying existing stories?
-Can new stories be added within the current epic structure?
-Would this approach maintain project timeline and scope?
-Effort estimate: [High/Medium/Low]
-Risk level: [High/Medium/Low]
-[ ] Viable / [ ] Not viable
-
-
-
-Evaluate Option 2: Potential Rollback
-Would reverting recently completed stories simplify addressing this issue?
-Which stories would need to be rolled back?
-Is the rollback effort justified by the simplification gained?
-Effort estimate: [High/Medium/Low]
-Risk level: [High/Medium/Low]
-[ ] Viable / [ ] Not viable
-
-
-
-Evaluate Option 3: PRD MVP Review
-Is the original PRD MVP still achievable with this issue?
-Does MVP scope need to be reduced or redefined?
-Do core goals need modification based on new constraints?
-What would be deferred to post-MVP if scope is reduced?
-Effort estimate: [High/Medium/Low]
-Risk level: [High/Medium/Low]
-[ ] Viable / [ ] Not viable
-
-
-
-Select recommended path forward
-Based on analysis of all options, choose the best path
-Provide clear rationale considering:
- - Implementation effort and timeline impact
- - Technical risk and complexity
- - Impact on team morale and momentum
- - Long-term sustainability and maintainability
- - Stakeholder expectations and business value
-Selected approach: [Option 1 / Option 2 / Option 3 / Hybrid]
-Justification: [Document reasoning]
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-
-
-
-
-Create identified issue summary
-Write clear, concise problem statement
-Include context about discovery and impact
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Document epic impact and artifact adjustment needs
-Summarize findings from Epic Impact Assessment (Section 2)
-Summarize findings from Artifact Conflict Analysis (Section 3)
-Be specific about what changes are needed and why
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Present recommended path forward with rationale
-Include selected approach from Section 4
-Provide complete justification for recommendation
-Address trade-offs and alternatives considered
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Define PRD MVP impact and high-level action plan
-State clearly if MVP is affected
-Outline major action items needed for implementation
-Identify dependencies and sequencing
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Establish agent handoff plan
-Identify which roles/agents will execute the changes:
- - Development team (for implementation)
- - Product Owner / Scrum Master (for backlog changes)
- - Product Manager / Architect (for strategic changes)
-Define responsibilities for each role
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-
-
-
-
-Review checklist completion
-Verify all applicable sections have been addressed
-Confirm all [Action-needed] items have been documented
-Ensure analysis is comprehensive and actionable
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Verify Sprint Change Proposal accuracy
-Review complete proposal for consistency and clarity
-Ensure all recommendations are well-supported by analysis
-Check that proposal is actionable and specific
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Obtain explicit user approval
-Present complete proposal to user
-Get clear yes/no approval for proceeding
-Document approval and any conditions
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Update sprint-status.yaml to reflect approved epic changes
-If epics were added: Add new epic entries with status 'backlog'
-If epics were removed: Remove corresponding entries
-If epics were renumbered: Update epic IDs and story references
-If stories were added/removed: Update story entries within affected epics
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-Confirm next steps and handoff plan
-Review handoff responsibilities with user
-Ensure all stakeholders understand their roles
-Confirm timeline and success criteria
-[ ] Done / [ ] N/A / [ ] Action-needed
-
-
-
-HALT: "Cannot proceed to proposal without complete impact analysis"
-HALT: "Must have explicit approval before implementing changes"
-HALT: "Must clearly define who will execute the proposed changes"
-
-
-
-
-
-
-
-This checklist is for SIGNIFICANT changes affecting project direction
-Work interactively with user - they make final decisions
-Be factual, not blame-oriented when analyzing issues
-Handle changes professionally as opportunities to improve the project
-Maintain conversation context throughout - this is collaborative work
-
diff --git a/src/bmm/workflows/4-implementation/correct-course/instructions.md b/src/bmm/workflows/4-implementation/correct-course/instructions.md
deleted file mode 100644
index 430239a6..00000000
--- a/src/bmm/workflows/4-implementation/correct-course/instructions.md
+++ /dev/null
@@ -1,206 +0,0 @@
-# Correct Course - Sprint Change Management Instructions
-
-The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
-You MUST have already loaded and processed: {project-root}/_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml
-Communicate all responses in {communication_language} and language MUST be tailored to {user_skill_level}
-Generate all documents in {document_output_language}
-
-DOCUMENT OUTPUT: Updated epics, stories, or PRD sections. Clear, actionable changes. User skill level ({user_skill_level}) affects conversation style ONLY, not document updates.
-
-
-
-
- Confirm change trigger and gather user description of the issue
- Ask: "What specific issue or change has been identified that requires navigation?"
- Verify access to required project documents:
- - PRD (Product Requirements Document)
- - Current Epics and Stories
- - Architecture documentation
- - UI/UX specifications
- Ask user for mode preference:
- - **Incremental** (recommended): Refine each edit collaboratively
- - **Batch**: Present all changes at once for review
- Store mode selection for use throughout workflow
-
-HALT: "Cannot navigate change without clear understanding of the triggering issue. Please provide specific details about what needs to change and why."
-
-HALT: "Need access to project documents (PRD, Epics, Architecture, UI/UX) to assess change impact. Please ensure these documents are accessible."
-
-
-
-
- After discovery, these content variables are available: {prd_content}, {epics_content}, {architecture_content}, {ux_design_content}, {tech_spec_content}, {document_project_content}
-
-
-
- Read fully and follow the systematic analysis from: {checklist}
- Work through each checklist section interactively with the user
- Record status for each checklist item:
- - [x] Done - Item completed successfully
- - [N/A] Skip - Item not applicable to this change
- - [!] Action-needed - Item requires attention or follow-up
- Maintain running notes of findings and impacts discovered
- Present checklist progress after each major section
-
-Identify blocking issues and work with user to resolve before continuing
-
-
-
-Based on checklist findings, create explicit edit proposals for each identified artifact
-
-For Story changes:
-
-- Show old β new text format
-- Include story ID and section being modified
-- Provide rationale for each change
-- Example format:
-
- ```
- Story: [STORY-123] User Authentication
- Section: Acceptance Criteria
-
- OLD:
- - User can log in with email/password
-
- NEW:
- - User can log in with email/password
- - User can enable 2FA via authenticator app
-
- Rationale: Security requirement identified during implementation
- ```
-
-For PRD modifications:
-
-- Specify exact sections to update
-- Show current content and proposed changes
-- Explain impact on MVP scope and requirements
-
-For Architecture changes:
-
-- Identify affected components, patterns, or technology choices
-- Describe diagram updates needed
-- Note any ripple effects on other components
-
-For UI/UX specification updates:
-
-- Reference specific screens or components
-- Show wireframe or flow changes needed
-- Connect changes to user experience impact
-
-
- Present each edit proposal individually
- Review and refine this change? Options: Approve [a], Edit [e], Skip [s]
- Iterate on each proposal based on user feedback
-
-
-Collect all edit proposals and present together at end of step
-
-
-
-
-Compile comprehensive Sprint Change Proposal document with following sections:
-
-Section 1: Issue Summary
-
-- Clear problem statement describing what triggered the change
-- Context about when/how the issue was discovered
-- Evidence or examples demonstrating the issue
-
-Section 2: Impact Analysis
-
-- Epic Impact: Which epics are affected and how
-- Story Impact: Current and future stories requiring changes
-- Artifact Conflicts: PRD, Architecture, UI/UX documents needing updates
-- Technical Impact: Code, infrastructure, or deployment implications
-
-Section 3: Recommended Approach
-
-- Present chosen path forward from checklist evaluation:
- - Direct Adjustment: Modify/add stories within existing plan
- - Potential Rollback: Revert completed work to simplify resolution
- - MVP Review: Reduce scope or modify goals
-- Provide clear rationale for recommendation
-- Include effort estimate, risk assessment, and timeline impact
-
-Section 4: Detailed Change Proposals
-
-- Include all refined edit proposals from Step 3
-- Group by artifact type (Stories, PRD, Architecture, UI/UX)
-- Ensure each change includes before/after and justification
-
-Section 5: Implementation Handoff
-
-- Categorize change scope:
- - Minor: Direct implementation by dev team
- - Moderate: Backlog reorganization needed (PO/SM)
- - Major: Fundamental replan required (PM/Architect)
-- Specify handoff recipients and their responsibilities
-- Define success criteria for implementation
-
-Present complete Sprint Change Proposal to user
-Write Sprint Change Proposal document to {default_output_file}
-Review complete proposal. Continue [c] or Edit [e]?
-
-
-
-Get explicit user approval for complete proposal
-Do you approve this Sprint Change Proposal for implementation? (yes/no/revise)
-
-
- Gather specific feedback on what needs adjustment
- Return to appropriate step to address concerns
- If changes needed to edit proposals
- If changes needed to overall proposal structure
-
-
-
-
- Finalize Sprint Change Proposal document
- Determine change scope classification:
-
-- **Minor**: Can be implemented directly by development team
-- **Moderate**: Requires backlog reorganization and PO/SM coordination
-- **Major**: Needs fundamental replan with PM/Architect involvement
-
-Provide appropriate handoff based on scope:
-
-
-
-
- Route to: Development team for direct implementation
- Deliverables: Finalized edit proposals and implementation tasks
-
-
-
- Route to: Product Owner / Scrum Master agents
- Deliverables: Sprint Change Proposal + backlog reorganization plan
-
-
-
- Route to: Product Manager / Solution Architect
- Deliverables: Complete Sprint Change Proposal + escalation notice
-
-Confirm handoff completion and next steps with user
-Document handoff in workflow execution log
-
-
-
-
-
-Summarize workflow execution:
- - Issue addressed: {{change_trigger}}
- - Change scope: {{scope_classification}}
- - Artifacts modified: {{list_of_artifacts}}
- - Routed to: {{handoff_recipients}}
-
-Confirm all deliverables produced:
-
-- Sprint Change Proposal document
-- Specific edit proposals with before/after
-- Implementation handoff plan
-
-Report workflow completion to user with personalized message: "β Correct Course workflow complete, {user_name}!"
-Remind user of success criteria and next steps for implementation team
-
-
-
diff --git a/src/bmm/workflows/4-implementation/correct-course/workflow.yaml b/src/bmm/workflows/4-implementation/correct-course/workflow.yaml
deleted file mode 100644
index 70813514..00000000
--- a/src/bmm/workflows/4-implementation/correct-course/workflow.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-# Correct Course - Sprint Change Management Workflow
-name: "correct-course"
-description: "Navigate significant changes during sprint execution by analyzing impact, proposing solutions, and routing for implementation"
-author: "BMad Method"
-
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-user_skill_level: "{config_source}:user_skill_level"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-implementation_artifacts: "{config_source}:implementation_artifacts"
-planning_artifacts: "{config_source}:planning_artifacts"
-project_knowledge: "{config_source}:project_knowledge"
-output_folder: "{implementation_artifacts}"
-sprint_status: "{implementation_artifacts}/sprint-status.yaml"
-
-# Smart input file references - handles both whole docs and sharded docs
-# Priority: Whole document first, then sharded version
-# Strategy: Load project context for impact analysis
-input_file_patterns:
- prd:
- description: "Product requirements for impact analysis"
- whole: "{planning_artifacts}/*prd*.md"
- sharded: "{planning_artifacts}/*prd*/*.md"
- load_strategy: "FULL_LOAD"
- epics:
- description: "All epics to analyze change impact"
- whole: "{planning_artifacts}/*epic*.md"
- sharded: "{planning_artifacts}/*epic*/*.md"
- load_strategy: "FULL_LOAD"
- architecture:
- description: "System architecture and decisions"
- whole: "{planning_artifacts}/*architecture*.md"
- sharded: "{planning_artifacts}/*architecture*/*.md"
- load_strategy: "FULL_LOAD"
- ux_design:
- description: "UX design specification (if UI impacts)"
- whole: "{planning_artifacts}/*ux*.md"
- sharded: "{planning_artifacts}/*ux*/*.md"
- load_strategy: "FULL_LOAD"
- tech_spec:
- description: "Technical specification"
- whole: "{planning_artifacts}/*tech-spec*.md"
- load_strategy: "FULL_LOAD"
- document_project:
- description: "Brownfield project documentation (optional)"
- sharded: "{project_knowledge}/index.md"
- load_strategy: "INDEX_GUIDED"
-
-installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/correct-course"
-template: false
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-checklist: "{installed_path}/checklist.md"
-default_output_file: "{planning_artifacts}/sprint-change-proposal-{date}.md"
-
-standalone: true
-
-web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/README.md b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/README.md
new file mode 100644
index 00000000..08f41fa6
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/README.md
@@ -0,0 +1,170 @@
+# Create Story With Gap Analysis
+
+**Custom Workflow by Jonah Schulte**
+**Created:** December 24, 2025
+**Purpose:** Generate stories with SYSTEMATIC codebase gap analysis (not inference-based)
+
+---
+
+## Problem This Solves
+
+**Standard `/create-story` workflow:**
+- β Reads previous stories and git commits (passive)
+- β Infers what probably exists (guessing)
+- β Gap analysis quality varies by agent thoroughness
+- β Checkboxes may not reflect reality
+
+**This custom workflow:**
+- β Actively scans codebase with Glob/Read tools
+- β Verifies file existence (not inference)
+- β Reads key files to check implementation depth (mocked vs real)
+- β Generates TRUTHFUL gap analysis
+- β Checkboxes are FACTS verified by file system
+
+---
+
+## Usage
+
+```bash
+/create-story-with-gap-analysis
+
+# Or via Skill tool:
+Skill: "create-story-with-gap-analysis"
+Args: "1.9" (epic.story number)
+```
+
+**Workflow will:**
+1. Load existing story + epic context
+2. **SCAN codebase systematically** (Glob for files, Read to verify implementation)
+3. Generate gap analysis with verified β /β/β οΈ status
+4. Update story file with truthful checkboxes
+5. Save to _bmad-output/implementation-artifacts/
+
+---
+
+## What It Scans
+
+**For each story, the workflow:**
+
+1. **Identifies target directories** (from story title/requirements)
+ - Example: "admin-user-service" β apps/backend/admin-user-service/
+
+2. **Globs for all files**
+ - `{target}/src/**/*.ts` - Find all TypeScript files
+ - `{target}/src/**/*.spec.ts` - Find all tests
+
+3. **Checks specific required files**
+ - Based on ACs, check if files exist
+ - Example: `src/auth/controllers/bridgeid-auth.controller.ts` β β MISSING
+
+4. **Reads key files to verify depth**
+ - Check if mocked: Search for "MOCK" string
+ - Check if incomplete: Search for "TODO"
+ - Verify real implementation exists
+
+5. **Checks package.json**
+ - Verify required dependencies are installed
+ - Identify missing packages
+
+6. **Counts tests**
+ - How many test files exist
+ - Coverage for each component
+
+---
+
+## Output Format
+
+**Generates story with:**
+
+1. β Standard BMAD 5 sections (Story, AC, Tasks, Dev Notes, Dev Agent Record)
+2. β Enhanced Dev Notes with verified gap analysis subsections:
+ - Gap Analysis: Current State vs Requirements
+ - Library/Framework Requirements (from package.json)
+ - File Structure Requirements (from Glob results)
+ - Testing Requirements (from test file count)
+ - Architecture Compliance
+ - Previous Story Intelligence
+
+3. β Truthful checkboxes based on verified file existence
+
+---
+
+## Difference from Standard /create-story
+
+| Feature | /create-story | /create-story-with-gap-analysis |
+|---------|---------------|--------------------------------|
+| Reads previous story | β | β |
+| Reads git commits | β | β |
+| Loads epic context | β | β |
+| **Scans codebase with Glob** | β | β SYSTEMATIC |
+| **Verifies files exist** | β | β VERIFIED |
+| **Reads files to check depth** | β | β MOCKED vs REAL |
+| **Checks package.json** | β | β DEPENDENCIES |
+| **Counts test coverage** | β | β COVERAGE |
+| Gap analysis quality | Variable (agent-dependent) | Systematic (tool-verified) |
+| Checkbox accuracy | Inference-based | File-existence-based |
+
+---
+
+## When to Use
+
+**This workflow (planning-time gap analysis):**
+- Use when regenerating/auditing stories
+- Use when you want verified checkboxes upfront
+- Best for stories that will be implemented immediately
+- Manual verification at planning time
+
+**Standard /create-story + /dev-story (dev-time gap analysis):**
+- Recommended for most workflows
+- Stories start as DRAFT, validated when dev begins
+- Prevents staleness in batch planning
+- Automatic verification at development time
+
+**Use standard /create-story when:**
+- Greenfield project (nothing exists yet)
+- Backlog stories (won't be implemented for months)
+- Epic planning phase (just sketching ideas)
+
+**Tip:** Both approaches are complementary. You can use this workflow to regenerate stories, then use `/dev-story` which will re-validate at dev-time.
+
+---
+
+## Examples
+
+**Regenerating Story 1.9:**
+```bash
+/create-story-with-gap-analysis
+
+Choice: 1.9
+
+# Workflow will:
+# 1. Load existing 1-9-admin-user-service-bridgeid-rbac.md
+# 2. Identify target: apps/backend/admin-user-service/
+# 3. Glob: apps/backend/admin-user-service/src/**/*.ts (finds 47 files)
+# 4. Check: src/auth/controllers/bridgeid-auth.controller.ts β β MISSING
+# 5. Read: src/bridgeid/services/bridgeid-client.service.ts β β οΈ MOCKED
+# 6. Read: package.json β axios β NOT INSTALLED
+# 7. Generate gap analysis with verified status
+# 8. Write story with truthful checkboxes
+```
+
+**Result:** Story with verified gap analysis showing:
+- β 7 components IMPLEMENTED (verified file existence)
+- β 6 components MISSING (verified file not found)
+- β οΈ 1 component PARTIAL (file exists but contains "MOCK")
+
+---
+
+## Installation
+
+This workflow is auto-discovered when BMAD is installed.
+
+**To use:**
+```bash
+/bmad:bmm:workflows:create-story-with-gap-analysis
+```
+
+---
+
+**Last Updated:** December 27, 2025
+**Status:** Integrated into BMAD-METHOD
diff --git a/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/step-01-initialize.md b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/step-01-initialize.md
new file mode 100644
index 00000000..6212924b
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/step-01-initialize.md
@@ -0,0 +1,83 @@
+# Step 1: Initialize and Extract Story Requirements
+
+## Goal
+Load epic context and identify what needs to be scanned in the codebase.
+
+## Execution
+
+### 1. Determine Story to Create
+
+**Ask user:**
+```
+Which story should I regenerate with gap analysis?
+
+Options:
+1. Provide story number (e.g., "1.9" or "1-9")
+2. Provide story filename (e.g., "story-1.9.md" or legacy "1-9-admin-user-service-bridgeid-rbac.md")
+
+Your choice:
+```
+
+**Parse input:**
+- Extract epic_num (e.g., "1")
+- Extract story_num (e.g., "9")
+- Locate story file: `{story_dir}/story-{epic_num}.{story_num}.md` (fallback: `{story_dir}/{epic_num}-{story_num}-*.md`)
+
+### 2. Load Existing Story Content
+
+```bash
+Read: {story_dir}/story-{epic_num}.{story_num}.md
+# If not found, fallback:
+Read: {story_dir}/{epic_num}-{story_num}-*.md
+```
+
+**Extract from existing story:**
+- Story title
+- User story text (As a... I want... So that...)
+- Acceptance criteria (the requirements, not checkboxes)
+- Any existing Dev Notes or technical context
+
+**Store for later use.**
+
+### 3. Load Epic Context
+
+```bash
+Read: {planning_artifacts}/epics.md
+```
+
+**Extract from epic:**
+- Epic business objectives
+- This story's original requirements
+- Technical constraints
+- Dependencies on other stories
+
+### 4. Determine Target Directories
+
+**From story title and requirements, identify:**
+- Which service/app this story targets
+- Which directories to scan
+
+**Examples:**
+- "admin-user-service" β `apps/backend/admin-user-service/`
+- "Widget Batch 1" β `packages/widgets/`
+- "POE Integration" β `apps/frontend/web/`
+
+**Store target directories for Step 2 codebase scan.**
+
+### 5. Ready for Codebase Scan
+
+**Output:**
+```
+β Story Context Loaded
+
+Story: {epic_num}.{story_num} - {title}
+Target directories identified:
+ - {directory_1}
+ - {directory_2}
+
+Ready to scan codebase for gap analysis.
+
+[C] Continue to Codebase Scan
+```
+
+**WAIT for user to select Continue.**
diff --git a/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/step-02-codebase-scan.md b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/step-02-codebase-scan.md
new file mode 100644
index 00000000..69f4022f
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/step-02-codebase-scan.md
@@ -0,0 +1,184 @@
+# Step 2: Systematic Codebase Gap Analysis
+
+## Goal
+VERIFY what code actually exists vs what's missing using Glob and Read tools.
+
+## CRITICAL
+This step uses ACTUAL file system tools to generate TRUTHFUL gap analysis.
+No guessing. No inference. VERIFY with tools.
+
+## Execution
+
+### 1. Scan Target Directories
+
+**For each target directory identified in Step 1:**
+
+```bash
+# List all TypeScript files
+Glob: {target_dir}/src/**/*.ts
+Glob: {target_dir}/src/**/*.tsx
+
+# Store file list
+```
+
+**Output:**
+```
+π Codebase Scan Results for {target_dir}
+
+Found {count} TypeScript files:
+ - {file1}
+ - {file2}
+ ...
+```
+
+### 2. Check for Specific Required Components
+
+**Based on story Acceptance Criteria, check if required files exist:**
+
+**Example for Auth Story:**
+```bash
+# Check for OAuth endpoints
+Glob: {target_dir}/src/auth/controllers/*bridgeid*.ts
+Result: β MISSING (0 files found)
+
+# Check for BridgeID client
+Glob: {target_dir}/src/bridgeid/**/*.ts
+Result: β EXISTS (found: bridgeid-client.service.ts, bridgeid-sync.service.ts)
+
+# Check for permission guards
+Glob: {target_dir}/src/auth/guards/permissions*.ts
+Result: β MISSING (0 files found)
+
+# Check for decorators
+Glob: {target_dir}/src/auth/decorators/*permission*.ts
+Result: β MISSING (0 files found)
+```
+
+### 3. Verify Implementation Depth
+
+**For files that exist, read them to check if MOCKED or REAL:**
+
+```bash
+# Read key implementation file
+Read: {target_dir}/src/bridgeid/services/bridgeid-client.service.ts
+
+# Search for indicators:
+- Contains "MOCK" or "mock" β β οΈ MOCKED (needs real implementation)
+- Contains "TODO" β β οΈ INCOMPLETE
+- Contains real HTTP client (axios) β β IMPLEMENTED
+```
+
+### 4. Check Dependencies
+
+```bash
+# Read package.json
+Read: {target_dir}/package.json
+
+# Verify required dependencies exist:
+Required: axios
+Found in package.json? β β NO (needs to be added)
+
+Required: @aws-sdk/client-secrets-manager
+Found in package.json? β β NO (needs to be added)
+```
+
+### 5. Check Test Coverage
+
+```bash
+# Find test files
+Glob: {target_dir}/src/**/*.spec.ts
+Glob: {target_dir}/test/**/*.test.ts
+
+# Count tests
+Found {test_count} test files
+
+# Check for specific test coverage
+Glob: {target_dir}/src/**/*bridgeid*.spec.ts
+Result: β EXISTS (found 3 test files)
+```
+
+### 6. Generate Truthful Gap Analysis
+
+**Create structured gap analysis:**
+
+```markdown
+## Gap Analysis: Current State vs Requirements
+
+**β IMPLEMENTED (Verified by Codebase Scan):**
+
+1. **BridgeID Client Infrastructure** - MOCKED (needs real HTTP)
+ - File: src/bridgeid/services/bridgeid-client.service.ts β EXISTS
+ - Implementation: Mock user data with circuit breaker
+ - Status: β οΈ PARTIAL - Ready for real HTTP client
+ - Tests: 15 tests passing β
+
+2. **User Synchronization Service**
+ - File: src/bridgeid/services/bridgeid-sync.service.ts β EXISTS
+ - Implementation: Bulk sync BridgeID β admin_users
+ - Status: β COMPLETE
+ - Tests: 6 tests passing β
+
+3. **Role Mapping Logic**
+ - File: src/bridgeid/constants/role-mapping.constants.ts β EXISTS
+ - Implementation: 7-tier role mapping with priority selection
+ - Status: β COMPLETE
+ - Tests: 10 tests passing β
+
+**β MISSING (Required for AC Completion):**
+
+1. **BridgeID OAuth Endpoints**
+ - File: src/auth/controllers/bridgeid-auth.controller.ts β NOT FOUND
+ - Need: POST /api/auth/bridgeid/login endpoint
+ - Need: GET /api/auth/bridgeid/callback endpoint
+ - Status: β NOT IMPLEMENTED
+
+2. **Permission Guards**
+ - File: src/auth/guards/permissions.guard.ts β NOT FOUND
+ - File: src/auth/decorators/require-permissions.decorator.ts β NOT FOUND
+ - Status: β NOT IMPLEMENTED
+
+3. **Real OAuth HTTP Client**
+ - Package: axios β NOT in package.json
+ - Package: @aws-sdk/client-secrets-manager β NOT in package.json
+ - Status: β DEPENDENCIES NOT ADDED
+```
+
+### 7. Update Acceptance Criteria Checkboxes
+
+**Based on verified gap analysis, mark checkboxes:**
+
+```markdown
+### AC1: BridgeID OAuth Integration
+- [ ] OAuth login endpoint (VERIFIED MISSING - file not found)
+- [ ] OAuth callback endpoint (VERIFIED MISSING - file not found)
+- [ ] Client configuration (VERIFIED PARTIAL - exists but mocked)
+
+### AC3: RBAC Permission System
+- [x] Role mapping defined (VERIFIED COMPLETE - file exists, tests pass)
+- [ ] Permission guard (VERIFIED MISSING - file not found)
+- [ ] Permission decorator (VERIFIED MISSING - file not found)
+```
+
+**Checkboxes are now FACTS, not guesses.**
+
+### 8. Present Gap Analysis
+
+**Output:**
+```
+β Codebase Scan Complete
+
+Scanned: apps/backend/admin-user-service/
+Files found: 47 TypeScript files
+Tests found: 31 test files
+
+Gap Analysis Generated:
+ β 7 components IMPLEMENTED (verified)
+ β 6 components MISSING (verified)
+ β οΈ 1 component PARTIAL (needs completion)
+
+Story checkboxes updated based on verified file existence.
+
+[C] Continue to Story Generation
+```
+
+**WAIT for user to continue.**
diff --git a/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/step-03-generate-story.md b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/step-03-generate-story.md
new file mode 100644
index 00000000..34281465
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/step-03-generate-story.md
@@ -0,0 +1,181 @@
+# Step 3: Generate Story with Verified Gap Analysis
+
+## Goal
+Generate complete 7-section story file using verified gap analysis from Step 2.
+
+## Execution
+
+### 1. Load Template
+
+```bash
+Read: {installed_path}/template.md
+```
+
+### 2. Fill Template Variables
+
+**Basic Story Info:**
+- `{{epic_num}}` - from Step 1
+- `{{story_num}}` - from Step 1
+- `{{story_title}}` - from existing story or epic
+- `{{priority}}` - from epic (P0, P1, P2)
+- `{{effort}}` - from epic or estimate
+
+**Story Section:**
+- `{{role}}` - from existing story
+- `{{action}}` - from existing story
+- `{{benefit}}` - from existing story
+
+**Business Context:**
+- `{{business_value}}` - from epic context
+- `{{scale_requirements}}` - from epic/architecture
+- `{{compliance_requirements}}` - from epic/architecture
+- `{{urgency}}` - from epic priority
+
+**Acceptance Criteria:**
+- `{{acceptance_criteria}}` - from epic + existing story
+- Update checkboxes based on Step 2 gap analysis:
+ - [x] = Component verified EXISTS
+ - [ ] = Component verified MISSING
+ - [~] = Component verified PARTIAL (optional notation)
+
+**Tasks / Subtasks:**
+- `{{tasks_subtasks}}` - from epic + existing story
+- Add "β DONE", "β οΈ PARTIAL", "β TODO" markers based on gap analysis
+
+**Gap Analysis Section:**
+- `{{implemented_components}}` - from Step 2 codebase scan (verified β )
+- `{{missing_components}}` - from Step 2 codebase scan (verified β)
+- `{{partial_components}}` - from Step 2 codebase scan (verified β οΈ)
+
+**Architecture Compliance:**
+- `{{architecture_patterns}}` - from architecture doc + playbooks
+- Multi-tenant isolation requirements
+- Caching strategies
+- Error handling patterns
+- Performance requirements
+
+**Library/Framework Requirements:**
+- `{{current_dependencies}}` - from Step 2 package.json scan
+- `{{required_dependencies}}` - missing deps identified in Step 2
+
+**File Structure:**
+- `{{existing_files}}` - from Step 2 Glob results (verified β )
+- `{{required_files}}` - from gap analysis (verified β)
+
+**Testing Requirements:**
+- `{{test_count}}` - from Step 2 test file count
+- `{{required_tests}}` - based on missing components
+- `{{coverage_target}}` - from architecture or default 90%
+
+**Dev Agent Guardrails:**
+- `{{guardrails}}` - from playbooks + previous story lessons
+- What NOT to do
+- Common mistakes to avoid
+
+**Previous Story Intelligence:**
+- `{{previous_story_learnings}}` - from Step 1 previous story Dev Agent Record
+
+**Project Structure Notes:**
+- `{{structure_alignment}}` - from architecture compliance
+
+**References:**
+- `{{references}}` - Links to epic, architecture, playbooks, related stories
+
+**Definition of Done:**
+- Standard DoD checklist with story-specific coverage target
+
+### 3. Generate Complete Story
+
+**Write filled template:**
+```bash
+Write: {story_dir}/story-{{epic_num}}.{{story_num}}.md
+[Complete 7-section story with verified gap analysis]
+```
+
+### 4. Validate Generated Story
+
+```bash
+# Check section count
+grep "^## " {story_dir}/story-{{epic_num}}.{{story_num}}.md | wc -l
+# Should output: 7
+
+# Check for gap analysis
+grep -q "Gap Analysis.*Current State" {story_dir}/story-{{epic_num}}.{{story_num}}.md
+# Should find it
+
+# Run custom validation
+./scripts/validate-bmad-format.sh {story_dir}/story-{{epic_num}}.{{story_num}}.md
+# Update script to expect 7 sections + gap analysis subsection
+```
+
+### 5. Update Sprint Status
+
+```bash
+Read: {sprint_status}
+
+# Find story entry
+# Update status to "ready-for-dev" if was "backlog"
+# Preserve all comments and structure
+
+Write: {sprint_status}
+```
+
+### 6. Report Completion
+
+**Output:**
+```
+β Story {{epic_num}}.{{story_num}} Regenerated with Gap Analysis
+
+File: {story_dir}/story-{{epic_num}}.{{story_num}}.md
+Sections: 7/7 β
+Gap Analysis: VERIFIED with codebase scan
+
+Summary:
+ β {{implemented_count}} components IMPLEMENTED (verified by file scan)
+ β {{missing_count}} components MISSING (verified file not found)
+ β οΈ {{partial_count}} components PARTIAL (file exists but mocked/incomplete)
+
+Checkboxes in ACs and Tasks reflect VERIFIED status (not guesses).
+
+Next Steps:
+1. Review story file for accuracy
+2. Use /dev-story to implement missing components
+3. Story provides complete context for flawless implementation
+
+Story is ready for development. π
+```
+
+### 7. Cleanup
+
+**Ask user:**
+```
+Story regeneration complete!
+
+Would you like to:
+[N] Regenerate next story ({{next_story_num}})
+[Q] Quit workflow
+[R] Review generated story first
+
+Your choice:
+```
+
+**If N selected:** Loop back to Step 1 with next story number
+**If Q selected:** End workflow
+**If R selected:** Display story file, then show menu again
+
+---
+
+## Success Criteria
+
+**Story generation succeeds when:**
+1. β 7 top-level ## sections present
+2. β Gap Analysis subsection exists with β /β/β οΈ verified status
+3. β Checkboxes match codebase reality (spot-checked)
+4. β Dev Notes has all mandatory subsections
+5. β Definition of Done checklist included
+6. β File saved to correct location
+7. β Sprint status updated
+
+---
+
+**WORKFLOW COMPLETE - Ready to execute.**
diff --git a/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/template.md b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/template.md
new file mode 100644
index 00000000..c0b7c87d
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/template.md
@@ -0,0 +1,179 @@
+# Story {{epic_num}}.{{story_num}}: {{story_title}}
+
+**Status:** ready-for-dev
+**Epic:** {{epic_num}}
+**Priority:** {{priority}}
+**Estimated Effort:** {{effort}}
+
+---
+
+## Story
+
+As a **{{role}}**,
+I want to **{{action}}**,
+So that **{{benefit}}**.
+
+---
+
+## Business Context
+
+### Why This Matters
+
+{{business_value}}
+
+### Production Reality
+
+{{scale_requirements}}
+{{compliance_requirements}}
+{{urgency}}
+
+---
+
+## Acceptance Criteria
+
+{{acceptance_criteria}}
+
+---
+
+## Tasks / Subtasks
+
+{{tasks_subtasks}}
+
+---
+
+## Dev Notes
+
+### Gap Analysis: Current State vs Requirements
+
+**β IMPLEMENTED (Verified by Codebase Scan):**
+
+{{implemented_components}}
+
+**β MISSING (Required for AC Completion):**
+
+{{missing_components}}
+
+**β οΈ PARTIAL (Needs Enhancement):**
+
+{{partial_components}}
+
+### Architecture Compliance
+
+{{architecture_patterns}}
+
+### Library/Framework Requirements
+
+**Current Dependencies:**
+```json
+{{current_dependencies}}
+```
+
+**Required Additions:**
+```json
+{{required_dependencies}}
+```
+
+### File Structure Requirements
+
+**Completed Files:**
+```
+{{existing_files}}
+```
+
+**Required New Files:**
+```
+{{required_files}}
+```
+
+### Testing Requirements
+
+**Current Test Coverage:** {{test_count}} tests passing
+
+**Required Additional Tests:**
+{{required_tests}}
+
+**Target:** {{coverage_target}}
+
+### Dev Agent Guardrails
+
+{{guardrails}}
+
+### Previous Story Intelligence
+
+{{previous_story_learnings}}
+
+### Project Structure Notes
+
+{{structure_alignment}}
+
+### References
+
+{{references}}
+
+---
+
+## Definition of Done
+
+### Code Quality (BLOCKING)
+- [ ] Type check passes: `pnpm type-check` (zero errors)
+- [ ] Zero `any` types in new code
+- [ ] Lint passes: `pnpm lint` (zero errors in new code)
+- [ ] Build succeeds: `pnpm build`
+
+### Testing (BLOCKING)
+- [ ] Unit tests: {{coverage_target}} coverage
+- [ ] Integration tests: Key workflows validated
+- [ ] All tests pass: New + existing (zero regressions)
+
+### Security (BLOCKING)
+- [ ] Dependency scan: `pnpm audit` (zero high/critical)
+- [ ] No hardcoded secrets
+- [ ] Input validation on all endpoints
+- [ ] Auth checks on protected endpoints
+- [ ] Audit logging on mutations
+
+### Architecture Compliance (BLOCKING)
+- [ ] Multi-tenant isolation: dealerId in all queries
+- [ ] Cache namespacing: Cache keys include siteId
+- [ ] Performance: External APIs cached, no N+1 queries
+- [ ] Error handling: No silent failures
+- [ ] Follows patterns from playbooks
+
+### Deployment Validation (BLOCKING)
+- [ ] Service starts: `pnpm dev` runs successfully
+- [ ] Health check: `/health` returns 200
+- [ ] Smoke test: Primary functionality verified
+
+### Documentation (BLOCKING)
+- [ ] API docs: Swagger decorators on endpoints
+- [ ] Inline comments: Complex logic explained
+- [ ] Story file: Dev Agent Record complete
+
+---
+
+## Dev Agent Record
+
+### Agent Model Used
+
+(To be filled by dev agent)
+
+### Implementation Summary
+
+(To be filled by dev agent)
+
+### File List
+
+(To be filled by dev agent)
+
+### Test Results
+
+(To be filled by dev agent)
+
+### Completion Notes
+
+(To be filled by dev agent)
+
+---
+
+**Generated by:** /create-story-with-gap-analysis
+**Date:** {{date}}
diff --git a/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/workflow.yaml b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/workflow.yaml
new file mode 100644
index 00000000..19217943
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/create-story-with-gap-analysis/workflow.yaml
@@ -0,0 +1,39 @@
+name: create-story-with-gap-analysis
+description: "Create/regenerate story with SYSTEMATIC codebase gap analysis using verified file scanning (Glob/Read tools)"
+author: "Jonah Schulte"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+user_name: "{config_source}:user_name"
+communication_language: "{config_source}:communication_language"
+date: system-generated
+planning_artifacts: "{config_source}:planning_artifacts"
+implementation_artifacts: "{config_source}:implementation_artifacts"
+output_folder: "{implementation_artifacts}"
+story_dir: "{implementation_artifacts}"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/create-story-with-gap-analysis"
+template: "{installed_path}/template.md"
+instructions: "{installed_path}/step-01-initialize.md"
+
+# Variables and inputs
+variables:
+ sprint_status: "{implementation_artifacts}/sprint-status.yaml"
+ epics_file: "{planning_artifacts}/epics.md"
+ prd_file: "{planning_artifacts}/prd.md"
+
+# Project context
+project_context: "**/project-context.md"
+
+default_output_file: "{story_dir}/{{story_key}}.md"
+
+# Workflow steps (processed in order)
+steps:
+ - step-01-initialize.md
+ - step-02-codebase-scan.md
+ - step-03-generate-story.md
+
+standalone: true
+
+web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/create-story/checklist.md b/src/bmm/workflows/4-implementation/create-story/checklist.md
deleted file mode 100644
index 55e6c397..00000000
--- a/src/bmm/workflows/4-implementation/create-story/checklist.md
+++ /dev/null
@@ -1,358 +0,0 @@
-# π― Story Context Quality Competition Prompt
-
-## **π₯ CRITICAL MISSION: Outperform and Fix the Original Create-Story LLM**
-
-You are an independent quality validator in a **FRESH CONTEXT**. Your mission is to **thoroughly review** a story file that was generated by the create-story workflow and **systematically identify any mistakes, omissions, or disasters** that the original LLM missed.
-
-**Your purpose is NOT just to validate - it's to FIX and PREVENT LLM developer mistakes, omissions, or disasters!**
-
-### **π¨ CRITICAL MISTAKES TO PREVENT:**
-
-- **Reinventing wheels** - Creating duplicate functionality instead of reusing existing
-- **Wrong libraries** - Using incorrect frameworks, versions, or dependencies
-- **Wrong file locations** - Violating project structure and organization
-- **Breaking regressions** - Implementing changes that break existing functionality
-- **Ignoring UX** - Not following user experience design requirements
-- **Vague implementations** - Creating unclear, ambiguous implementations
-- **Lying about completion** - Implementing incorrectly or incompletely
-- **Not learning from past work** - Ignoring previous story learnings and patterns
-
-### **π¨ EXHAUSTIVE ANALYSIS REQUIRED:**
-
-You must thoroughly analyze **ALL artifacts** to extract critical context - do NOT be lazy or skim! This is the most important quality control function in the entire development process!
-
-### **π¬ UTILIZE SUBPROCESSES AND SUBAGENTS:**
-
-Use research subagents, subprocesses, or parallel processing if available to thoroughly analyze different artifacts **simultaneously and thoroughly**. Leave no stone unturned!
-
-### **π― COMPETITIVE EXCELLENCE:**
-
-This is a COMPETITION to create the **ULTIMATE story context** that makes LLM developer mistakes **IMPOSSIBLE**!
-
-## **π HOW TO USE THIS CHECKLIST**
-
-### **When Running from Create-Story Workflow:**
-
-- The `{project-root}/_bmad/core/tasks/validate-workflow.xml` framework will automatically:
- - Load this checklist file
- - Load the newly created story file (`{story_file_path}`)
- - Load workflow variables from `{installed_path}/workflow.yaml`
- - Execute the validation process
-
-### **When Running in Fresh Context:**
-
-- User should provide the story file path being reviewed
-- Load the story file directly
-- Load the corresponding workflow.yaml for variable context
-- Proceed with systematic analysis
-
-### **Required Inputs:**
-
-- **Story file**: The story file to review and improve
-- **Workflow variables**: From workflow.yaml (story_dir, output_folder, epics_file, etc.)
-- **Source documents**: Epics, architecture, etc. (discovered or provided)
-- **Validation framework**: `validate-workflow.xml` (handles checklist execution)
-
----
-
-## **π¬ SYSTEMATIC RE-ANALYSIS APPROACH**
-
-You will systematically re-do the entire story creation process, but with a critical eye for what the original LLM might have missed:
-
-### **Step 1: Load and Understand the Target**
-
-1. **Load the workflow configuration**: `{installed_path}/workflow.yaml` for variable inclusion
-2. **Load the story file**: `{story_file_path}` (provided by user or discovered)
-3. **Load validation framework**: `{project-root}/_bmad/core/tasks/validate-workflow.xml`
-4. **Extract metadata**: epic_num, story_num, story_key, story_title from story file
-5. **Resolve all workflow variables**: story_dir, output_folder, epics_file, architecture_file, etc.
-6. **Understand current status**: What story implementation guidance is currently provided?
-
-**Note:** If running in fresh context, user should provide the story file path being reviewed. If running from create-story workflow, the validation framework will automatically discover the checklist and story file.
-
-### **Step 2: Exhaustive Source Document Analysis**
-
-**π₯ CRITICAL: Treat this like YOU are creating the story from scratch to PREVENT DISASTERS!**
-**Discover everything the original LLM missed that could cause developer mistakes, omissions, or disasters!**
-
-#### **2.1 Epics and Stories Analysis**
-
-- Load `{epics_file}` (or sharded equivalents)
-- Extract **COMPLETE Epic {{epic_num}} context**:
- - Epic objectives and business value
- - ALL stories in this epic (for cross-story context)
- - Our specific story's requirements, acceptance criteria
- - Technical requirements and constraints
- - Cross-story dependencies and prerequisites
-
-#### **2.2 Architecture Deep-Dive**
-
-- Load `{architecture_file}` (single or sharded)
-- **Systematically scan for ANYTHING relevant to this story:**
- - Technical stack with versions (languages, frameworks, libraries)
- - Code structure and organization patterns
- - API design patterns and contracts
- - Database schemas and relationships
- - Security requirements and patterns
- - Performance requirements and optimization strategies
- - Testing standards and frameworks
- - Deployment and environment patterns
- - Integration patterns and external services
-
-#### **2.3 Previous Story Intelligence (if applicable)**
-
-- If `story_num > 1`, load the previous story file
-- Extract **actionable intelligence**:
- - Dev notes and learnings
- - Review feedback and corrections needed
- - Files created/modified and their patterns
- - Testing approaches that worked/didn't work
- - Problems encountered and solutions found
- - Code patterns and conventions established
-
-#### **2.4 Git History Analysis (if available)**
-
-- Analyze recent commits for patterns:
- - Files created/modified in previous work
- - Code patterns and conventions used
- - Library dependencies added/changed
- - Architecture decisions implemented
- - Testing approaches used
-
-#### **2.5 Latest Technical Research**
-
-- Identify any libraries/frameworks mentioned
-- Research latest versions and critical information:
- - Breaking changes or security updates
- - Performance improvements or deprecations
- - Best practices for current versions
-
-### **Step 3: Disaster Prevention Gap Analysis**
-
-**π¨ CRITICAL: Identify every mistake the original LLM missed that could cause DISASTERS!**
-
-#### **3.1 Reinvention Prevention Gaps**
-
-- **Wheel reinvention:** Areas where developer might create duplicate functionality
-- **Code reuse opportunities** not identified that could prevent redundant work
-- **Existing solutions** not mentioned that developer should extend instead of replace
-
-#### **3.2 Technical Specification DISASTERS**
-
-- **Wrong libraries/frameworks:** Missing version requirements that could cause compatibility issues
-- **API contract violations:** Missing endpoint specifications that could break integrations
-- **Database schema conflicts:** Missing requirements that could corrupt data
-- **Security vulnerabilities:** Missing security requirements that could expose the system
-- **Performance disasters:** Missing requirements that could cause system failures
-
-#### **3.3 File Structure DISASTERS**
-
-- **Wrong file locations:** Missing organization requirements that could break build processes
-- **Coding standard violations:** Missing conventions that could create inconsistent codebase
-- **Integration pattern breaks:** Missing data flow requirements that could cause system failures
-- **Deployment failures:** Missing environment requirements that could prevent deployment
-
-#### **3.4 Regression DISASTERS**
-
-- **Breaking changes:** Missing requirements that could break existing functionality
-- **Test failures:** Missing test requirements that could allow bugs to reach production
-- **UX violations:** Missing user experience requirements that could ruin the product
-- **Learning failures:** Missing previous story context that could repeat same mistakes
-
-#### **3.5 Implementation DISASTERS**
-
-- **Vague implementations:** Missing details that could lead to incorrect or incomplete work
-- **Completion lies:** Missing acceptance criteria that could allow fake implementations
-- **Scope creep:** Missing boundaries that could cause unnecessary work
-- **Quality failures:** Missing quality requirements that could deliver broken features
-
-### **Step 4: LLM-Dev-Agent Optimization Analysis**
-
-**CRITICAL STEP: Optimize story context for LLM developer agent consumption**
-
-**Analyze current story for LLM optimization issues:**
-
-- **Verbosity problems:** Excessive detail that wastes tokens without adding value
-- **Ambiguity issues:** Vague instructions that could lead to multiple interpretations
-- **Context overload:** Too much information not directly relevant to implementation
-- **Missing critical signals:** Key requirements buried in verbose text
-- **Poor structure:** Information not organized for efficient LLM processing
-
-**Apply LLM Optimization Principles:**
-
-- **Clarity over verbosity:** Be precise and direct, eliminate fluff
-- **Actionable instructions:** Every sentence should guide implementation
-- **Scannable structure:** Use clear headings, bullet points, and emphasis
-- **Token efficiency:** Pack maximum information into minimum text
-- **Unambiguous language:** Clear requirements with no room for interpretation
-
-### **Step 5: Improvement Recommendations**
-
-**For each gap identified, provide specific, actionable improvements:**
-
-#### **5.1 Critical Misses (Must Fix)**
-
-- Missing essential technical requirements
-- Missing previous story context that could cause errors
-- Missing anti-pattern prevention that could lead to duplicate code
-- Missing security or performance requirements
-
-#### **5.2 Enhancement Opportunities (Should Add)**
-
-- Additional architectural guidance that would help developer
-- More detailed technical specifications
-- Better code reuse opportunities
-- Enhanced testing guidance
-
-#### **5.3 Optimization Suggestions (Nice to Have)**
-
-- Performance optimization hints
-- Additional context for complex scenarios
-- Enhanced debugging or development tips
-
-#### **5.4 LLM Optimization Improvements**
-
-- Token-efficient phrasing of existing content
-- Clearer structure for LLM processing
-- More actionable and direct instructions
-- Reduced verbosity while maintaining completeness
-
----
-
-## **π― COMPETITION SUCCESS METRICS**
-
-**You WIN against the original LLM if you identify:**
-
-### **Category 1: Critical Misses (Blockers)**
-
-- Essential technical requirements the developer needs but aren't provided
-- Previous story learnings that would prevent errors if ignored
-- Anti-pattern prevention that would prevent code duplication
-- Security or performance requirements that must be followed
-
-### **Category 2: Enhancement Opportunities**
-
-- Architecture guidance that would significantly help implementation
-- Technical specifications that would prevent wrong approaches
-- Code reuse opportunities the developer should know about
-- Testing guidance that would improve quality
-
-### **Category 3: Optimization Insights**
-
-- Performance or efficiency improvements
-- Development workflow optimizations
-- Additional context for complex scenarios
-
----
-
-## **π INTERACTIVE IMPROVEMENT PROCESS**
-
-After completing your systematic analysis, present your findings to the user interactively:
-
-### **Step 5: Present Improvement Suggestions**
-
-```
-π― **STORY CONTEXT QUALITY REVIEW COMPLETE**
-
-**Story:** {{story_key}} - {{story_title}}
-
-I found {{critical_count}} critical issues, {{enhancement_count}} enhancements, and {{optimization_count}} optimizations.
-
-## **π¨ CRITICAL ISSUES (Must Fix)**
-
-{{list each critical issue with clear, actionable description}}
-
-## **β‘ ENHANCEMENT OPPORTUNITIES (Should Add)**
-
-{{list each enhancement with clear benefit description}}
-
-## **β¨ OPTIMIZATIONS (Nice to Have)**
-
-{{list each optimization with benefit description}}
-
-## **π€ LLM OPTIMIZATION (Token Efficiency & Clarity)**
-
-{{list each LLM optimization that will improve dev agent performance:
-- Reduce verbosity while maintaining completeness
-- Improve structure for better LLM processing
-- Make instructions more actionable and direct
-- Enhance clarity and reduce ambiguity}}
-```
-
-### **Step 6: Interactive User Selection**
-
-After presenting the suggestions, ask the user:
-
-```
-**IMPROVEMENT OPTIONS:**
-
-Which improvements would you like me to apply to the story?
-
-**Select from the numbered list above, or choose:**
-- **all** - Apply all suggested improvements
-- **critical** - Apply only critical issues
-- **select** - I'll choose specific numbers
-- **none** - Keep story as-is
-- **details** - Show me more details about any suggestion
-
-Your choice:
-```
-
-### **Step 7: Apply Selected Improvements**
-
-When user accepts improvements:
-
-- **Load the story file**
-- **Apply accepted changes** (make them look natural, as if they were always there)
-- **DO NOT reference** the review process, original LLM, or that changes were "added" or "enhanced"
-- **Ensure clean, coherent final story** that reads as if it was created perfectly the first time
-
-### **Step 8: Confirmation**
-
-After applying changes:
-
-```
-β **STORY IMPROVEMENTS APPLIED**
-
-Updated {{count}} sections in the story file.
-
-The story now includes comprehensive developer guidance to prevent common implementation issues and ensure flawless execution.
-
-**Next Steps:**
-1. Review the updated story
-2. Run `dev-story` for implementation
-```
-
----
-
-## **πͺ COMPETITIVE EXCELLENCE MINDSET**
-
-**Your goal:** Improve the story file with dev agent needed context that makes flawless implementation inevitable while being optimized for LLM developer agent consumption. Remember the dev agent will ONLY have this file to use.
-
-**Success Criteria:** The LLM developer agent that processes your improved story will have:
-
-- β Clear technical requirements they must follow
-- β Previous work context they can build upon
-- β Anti-pattern prevention to avoid common mistakes
-- β Comprehensive guidance for efficient implementation
-- β **Optimized content structure** for maximum clarity and minimum token waste
-- β **Actionable instructions** with no ambiguity or verbosity
-- β **Efficient information density** - maximum guidance in minimum text
-
-**Every improvement should make it IMPOSSIBLE for the developer to:**
-
-- Reinvent existing solutions
-- Use wrong approaches or libraries
-- Create duplicate functionality
-- Miss critical requirements
-- Make implementation errors
-
-**LLM Optimization Should Make it IMPOSSIBLE for the developer agent to:**
-
-- Misinterpret requirements due to ambiguity
-- Waste tokens on verbose, non-actionable content
-- Struggle to find critical information buried in text
-- Get confused by poor structure or organization
-- Miss key implementation signals due to inefficient communication
-
-**Go create the ultimate developer implementation guide! π**
diff --git a/src/bmm/workflows/4-implementation/create-story/instructions.xml b/src/bmm/workflows/4-implementation/create-story/instructions.xml
deleted file mode 100644
index 89bc3578..00000000
--- a/src/bmm/workflows/4-implementation/create-story/instructions.xml
+++ /dev/null
@@ -1,363 +0,0 @@
-
- The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
- You MUST have already loaded and processed: {installed_path}/workflow.yaml
- Communicate all responses in {communication_language} and generate all documents in {document_output_language}
-
- π¨ YOLO MODE CLARIFICATION: YOLO mode ONLY means auto-approve prompts (answer "y", "Y", "C", "continue").
- YOLO mode does NOT mean: skip steps, skip sections, skip analysis, or produce minimal output.
- ALL steps must be fully executed. ALL sections must be populated. The output file must be COMPREHENSIVE (4KB+ minimum).
- Skipping steps or producing minimal output in YOLO mode is a CRITICAL FAILURE.
-
- π₯ CRITICAL MISSION: You are creating the ULTIMATE story context engine that prevents LLM developer mistakes, omissions or
- disasters! π₯
- Your purpose is NOT to copy from epics - it's to create a comprehensive, optimized story file that gives the DEV agent
- EVERYTHING needed for flawless implementation
- COMMON LLM MISTAKES TO PREVENT: reinventing wheels, wrong libraries, wrong file locations, breaking regressions, ignoring UX,
- vague implementations, lying about completion, not learning from past work
- π¨ EXHAUSTIVE ANALYSIS REQUIRED: You must thoroughly analyze ALL artifacts to extract critical context - do NOT be lazy or skim!
- This is the most important function in the entire development process!
- π¬ UTILIZE SUBPROCESSES AND SUBAGENTS: Use research subagents, subprocesses or parallel processing if available to thoroughly
- analyze different artifacts simultaneously and thoroughly
- β SAVE QUESTIONS: If you think of questions or clarifications during analysis, save them for the end after the complete story is
- written
- π― ZERO USER INTERVENTION: Process should be fully automated except for initial epic/story selection or missing documents
-
-
-
- Parse user-provided story path: extract epic_num, story_num, story_title from format like "1-2-user-auth"
- Set {{epic_num}}, {{story_num}}, {{story_key}} from user input
- GOTO step 2a
-
-
- Check if {{sprint_status}} file exists for auto discover
-
-
-
- Choose option [1], provide epic-story number, path to story docs, or [q] to quit:
-
-
- HALT - No work needed
-
-
-
-
- HALT - User needs to run sprint-planning
-
-
-
- Parse user input: extract epic_num, story_num, story_title
- Set {{epic_num}}, {{story_num}}, {{story_key}} from user input
- GOTO step 2a
-
-
-
- Use user-provided path for story documents
- GOTO step 2a
-
-
-
-
-
- MUST read COMPLETE {sprint_status} file from start to end to preserve order
- Load the FULL file: {{sprint_status}}
- Read ALL lines from beginning to end - do not skip any content
- Parse the development_status section completely
-
- Find the FIRST story (by reading in order from top to bottom) where:
- - Key matches pattern: number-number-name (e.g., "1-2-user-auth")
- - NOT an epic key (epic-X) or retrospective (epic-X-retrospective)
- - Status value equals "backlog"
-
-
-
-
- HALT
-
-
- Extract from found story key (e.g., "1-2-user-authentication"):
- - epic_num: first number before dash (e.g., "1")
- - story_num: second number after first dash (e.g., "2")
- - story_title: remainder after second dash (e.g., "user-authentication")
-
- Set {{story_id}} = "{{epic_num}}.{{story_num}}"
- Store story_key for later use (e.g., "1-2-user-authentication")
-
-
- Check if this is the first story in epic {{epic_num}} by looking for {{epic_num}}-1-* pattern
-
- Load {{sprint_status}} and check epic-{{epic_num}} status
- If epic status is "backlog" β update to "in-progress"
- If epic status is "contexted" (legacy status) β update to "in-progress" (backward compatibility)
- If epic status is "in-progress" β no change needed
-
-
-
-
-
-
- HALT - Cannot proceed
-
-
-
-
-
- HALT - Cannot proceed
-
-
-
-
- GOTO step 2a
-
- Load the FULL file: {{sprint_status}}
- Read ALL lines from beginning to end - do not skip any content
- Parse the development_status section completely
-
- Find the FIRST story (by reading in order from top to bottom) where:
- - Key matches pattern: number-number-name (e.g., "1-2-user-auth")
- - NOT an epic key (epic-X) or retrospective (epic-X-retrospective)
- - Status value equals "backlog"
-
-
-
-
- HALT
-
-
- Extract from found story key (e.g., "1-2-user-authentication"):
- - epic_num: first number before dash (e.g., "1")
- - story_num: second number after first dash (e.g., "2")
- - story_title: remainder after second dash (e.g., "user-authentication")
-
- Set {{story_id}} = "{{epic_num}}.{{story_num}}"
- Store story_key for later use (e.g., "1-2-user-authentication")
-
-
- Check if this is the first story in epic {{epic_num}} by looking for {{epic_num}}-1-* pattern
-
- Load {{sprint_status}} and check epic-{{epic_num}} status
- If epic status is "backlog" β update to "in-progress"
- If epic status is "contexted" (legacy status) β update to "in-progress" (backward compatibility)
- If epic status is "in-progress" β no change needed
-
-
-
-
-
-
- HALT - Cannot proceed
-
-
-
-
-
- HALT - Cannot proceed
-
-
-
-
- GOTO step 2a
-
-
-
- π REQUIREMENTS ANALYSIS - Extract complete context for story planning
- π― Focus: Requirements, acceptance criteria, and dependencies. Gap analysis happens at dev-time.
-
-
-
- Available content: {epics_content}, {prd_content}, {architecture_content}, {ux_content},
- {project_context}
-
-
- From {epics_content}, extract Epic {{epic_num}} complete context: **EPIC ANALYSIS:** - Epic
- objectives and business value - ALL stories in this epic for cross-story context - Our specific story's requirements, user story
- statement, acceptance criteria - Technical requirements and constraints - Dependencies on other stories/epics - Source hints pointing to
- original documents
- Extract our story ({{epic_num}}-{{story_num}}) details: **STORY FOUNDATION:** - User story statement
- (As a, I want, so that) - Detailed acceptance criteria (already BDD formatted) - Technical requirements specific to this story -
- Business context and value - Success criteria
-
- Load previous story file: {{story_dir}}/{{epic_num}}-{{previous_story_num}}-*.md **PREVIOUS STORY INTELLIGENCE:** -
- Dev notes and learnings from previous story - Review feedback and corrections needed - Files that were created/modified and their
- patterns - Testing approaches that worked/didn't work - Problems encountered and solutions found - Code patterns established Extract
- all learnings that could impact current story implementation
-
-
-
-
- Get last 5 commit titles to understand recent work patterns
- Analyze 1-5 most recent commits for context:
- - Code patterns and conventions used
- - Library dependencies added/changed
- - Architecture decisions implemented
- - Testing approaches used
-
- This provides context for requirements, not codebase gap analysis (happens at dev-time)
-
-
-
-
- ποΈ ARCHITECTURE INTELLIGENCE - Extract everything the developer MUST follow! **ARCHITECTURE DOCUMENT ANALYSIS:** Systematically
- analyze architecture content for story-relevant requirements:
-
-
-
- Load complete {architecture_content}
-
-
- Load architecture index and scan all architecture files
- **CRITICAL ARCHITECTURE EXTRACTION:** For
- each architecture section, determine if relevant to this story: - **Technical Stack:** Languages, frameworks, libraries with
- versions - **Code Structure:** Folder organization, naming conventions, file patterns - **API Patterns:** Service structure, endpoint
- patterns, data contracts - **Database Schemas:** Tables, relationships, constraints relevant to story - **Security Requirements:**
- Authentication patterns, authorization rules - **Performance Requirements:** Caching strategies, optimization patterns - **Testing
- Standards:** Testing frameworks, coverage expectations, test patterns - **Deployment Patterns:** Environment configurations, build
- processes - **Integration Patterns:** External service integrations, data flows Extract any story-specific requirements that the
- developer MUST follow
- Identify any architectural decisions that override previous patterns
-
-
-
- π CREATE STORY FILE - Requirements foundation with draft tasks for dev-time validation
- β οΈ Tasks generated here are DRAFT based on requirements analysis. They will be validated and refined against actual codebase at dev-time.
-
- Initialize from template.md:
- {default_output_file}
- story_header
-
-
- story_requirements
-
-
-
- developer_context_section **DEV AGENT GUARDRAILS:**
- technical_requirements
- architecture_compliance
- library_framework_requirements
-
- file_structure_requirements
- testing_requirements
-
-
-
- previous_story_intelligence
-
-
-
-
- git_intelligence_summary
-
-
-
- project_context_reference
-
-
- Add clear notation to Tasks/Subtasks section:
- "β οΈ DRAFT TASKS - Generated from requirements analysis.
- Will be validated and refined against actual codebase when dev-story runs."
-
-
-
-
- story_completion_status
-
-
- Set story Status to: "ready-for-dev"
- Add completion note: "Story planning complete with requirements analysis and draft implementation tasks"
-
-
-
- Validate against checklist at {installed_path}/checklist.md using _bmad/core/tasks/validate-workflow.xml
- Save story document unconditionally
-
-
-
- Update {{sprint_status}}
- Load the FULL file and read all development_status entries
- Find development_status key matching {{story_key}}
- Verify current status is "backlog" (expected previous state)
- Update development_status[{{story_key}}] = "ready-for-dev"
- Save file, preserving ALL comments and structure including STATUS DEFINITIONS
-
-
- QUALITY VERIFICATION - Story file must meet minimum requirements before completion
- Verify story file was created on disk
-
-
-
- HALT - Story creation failed
-
-
- Get file size of {{story_file}}
-
-
-
-
- HALT - Story creation incomplete. Re-run with full analysis.
-
-
- Verify required sections exist in story file
-
-
- HALT - Story creation incomplete
-
-
-
- HALT - Story creation incomplete
-
-
- Report completion
-
-
-
-
diff --git a/src/bmm/workflows/4-implementation/create-story/template.md b/src/bmm/workflows/4-implementation/create-story/template.md
deleted file mode 100644
index 506c8267..00000000
--- a/src/bmm/workflows/4-implementation/create-story/template.md
+++ /dev/null
@@ -1,57 +0,0 @@
-# Story {{epic_num}}.{{story_num}}: {{story_title}}
-
-Status: ready-for-dev
-
-
-
-## Story
-
-As a {{role}},
-I want {{action}},
-so that {{benefit}}.
-
-## Acceptance Criteria
-
-1. [Add acceptance criteria from epics/PRD]
-
-## Tasks / Subtasks
-
-β οΈ **DRAFT TASKS** - Generated from requirements analysis. Will be validated and refined against actual codebase when dev-story runs.
-
-- [ ] Task 1 (AC: #)
- - [ ] Subtask 1.1
-- [ ] Task 2 (AC: #)
- - [ ] Subtask 2.1
-
-## Gap Analysis
-
-_This section will be populated by dev-story when gap analysis runs._
-
----
-
-## Dev Notes
-
-- Relevant architecture patterns and constraints
-- Source tree components to touch
-- Testing standards summary
-
-### Project Structure Notes
-
-- Alignment with unified project structure (paths, modules, naming)
-- Detected conflicts or variances (with rationale)
-
-### References
-
-- Cite all technical details with source paths and sections, e.g. [Source: docs/.md#Section]
-
-## Dev Agent Record
-
-### Agent Model Used
-
-{{agent_model_name_version}}
-
-### Debug Log References
-
-### Completion Notes List
-
-### File List
diff --git a/src/bmm/workflows/4-implementation/create-story/workflow.yaml b/src/bmm/workflows/4-implementation/create-story/workflow.yaml
deleted file mode 100644
index 96132585..00000000
--- a/src/bmm/workflows/4-implementation/create-story/workflow.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-name: create-story
-description: "Create the next user story from epics+stories with enhanced context analysis and direct ready-for-dev marking"
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-date: system-generated
-planning_artifacts: "{config_source}:planning_artifacts"
-implementation_artifacts: "{config_source}:implementation_artifacts"
-output_folder: "{implementation_artifacts}"
-story_dir: "{implementation_artifacts}"
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/create-story"
-template: "{installed_path}/template.md"
-instructions: "{installed_path}/instructions.xml"
-validation: "{installed_path}/checklist.md"
-
-# Variables and inputs
-variables:
- sprint_status: "{implementation_artifacts}/sprint-status.yaml" # Primary source for story tracking
- epics_file: "{planning_artifacts}/epics.md" # Enhanced epics+stories with BDD and source hints
- prd_file: "{planning_artifacts}/prd.md" # Fallback for requirements (if not in epics file)
- architecture_file: "{planning_artifacts}/architecture.md" # Fallback for constraints (if not in epics file)
- ux_file: "{planning_artifacts}/*ux*.md" # Fallback for UX requirements (if not in epics file)
- story_title: "" # Will be elicited if not derivable
-
-# Project context
-project_context: "**/project-context.md"
-
-default_output_file: "{story_dir}/{{story_key}}.md"
-
-# STORY QUALITY REQUIREMENTS
-# Ensure comprehensive story files are produced
-story_quality:
- minimum_size_bytes: 4000 # Story files must be at least 4KB
- required_sections:
- - "## Story" # As a / I want / So that
- - "## Status"
- - "## Acceptance Criteria" # BDD-style Given/When/Then
- - "## Tasks" # Actionable tasks with checkboxes
- - "## Dev Notes" # Technical implementation notes
- - "## Gap Analysis" # Codebase analysis results
- - "Architecture Constraints" # From architecture.md
- output_mode: "comprehensive" # Never produce minimal output
- yolo_mode_note: "YOLO mode means auto-approve prompts, NOT skip sections or produce minimal output"
-
-# Smart input file references - Simplified for enhanced approach
-# The epics+stories file should contain everything needed with source hints
-input_file_patterns:
- prd:
- description: "PRD (fallback - epics file should have most content)"
- whole: "{planning_artifacts}/*prd*.md"
- sharded: "{planning_artifacts}/*prd*/*.md"
- load_strategy: "SELECTIVE_LOAD" # Only load if needed
- architecture:
- description: "Architecture (fallback - epics file should have relevant sections)"
- whole: "{planning_artifacts}/*architecture*.md"
- sharded: "{planning_artifacts}/*architecture*/*.md"
- load_strategy: "SELECTIVE_LOAD" # Only load if needed
- ux:
- description: "UX design (fallback - epics file should have relevant sections)"
- whole: "{planning_artifacts}/*ux*.md"
- sharded: "{planning_artifacts}/*ux*/*.md"
- load_strategy: "SELECTIVE_LOAD" # Only load if needed
- epics:
- description: "Enhanced epics+stories file with BDD and source hints"
- whole: "{planning_artifacts}/*epic*.md"
- sharded: "{planning_artifacts}/*epic*/*.md"
- load_strategy: "SELECTIVE_LOAD" # Only load needed epic
-
-standalone: true
diff --git a/src/bmm/workflows/4-implementation/dev-story/checklist.md b/src/bmm/workflows/4-implementation/dev-story/checklist.md
deleted file mode 100644
index 86d6e9be..00000000
--- a/src/bmm/workflows/4-implementation/dev-story/checklist.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: 'Enhanced Dev Story Definition of Done Checklist'
-validation-target: 'Story markdown ({{story_path}})'
-validation-criticality: 'HIGHEST'
-required-inputs:
- - 'Story markdown file with enhanced Dev Notes containing comprehensive implementation context'
- - 'Completed Tasks/Subtasks section with all items marked [x]'
- - 'Updated File List section with all changed files'
- - 'Updated Dev Agent Record with implementation notes'
-optional-inputs:
- - 'Test results output'
- - 'CI logs'
- - 'Linting reports'
-validation-rules:
- - 'Only permitted story sections modified: Tasks/Subtasks checkboxes, Dev Agent Record, File List, Change Log, Status'
- - 'All implementation requirements from story Dev Notes must be satisfied'
- - 'Definition of Done checklist must pass completely'
- - 'Enhanced story context must contain sufficient technical guidance'
----
-
-# π― Enhanced Definition of Done Checklist
-
-**Critical validation:** Story is truly ready for review only when ALL items below are satisfied
-
-## π Context & Requirements Validation
-
-- [ ] **Story Context Completeness:** Dev Notes contains ALL necessary technical requirements, architecture patterns, and implementation guidance
-- [ ] **Architecture Compliance:** Implementation follows all architectural requirements specified in Dev Notes
-- [ ] **Technical Specifications:** All technical specifications (libraries, frameworks, versions) from Dev Notes are implemented correctly
-- [ ] **Previous Story Learnings:** Previous story insights incorporated (if applicable) and build upon appropriately
-
-## β Implementation Completion
-
-- [ ] **All Tasks Complete:** Every task and subtask marked complete with [x]
-- [ ] **Acceptance Criteria Satisfaction:** Implementation satisfies EVERY Acceptance Criterion in the story
-- [ ] **No Ambiguous Implementation:** Clear, unambiguous implementation that meets story requirements
-- [ ] **Edge Cases Handled:** Error conditions and edge cases appropriately addressed
-- [ ] **Dependencies Within Scope:** Only uses dependencies specified in story or project-context.md
-
-## π§ͺ Testing & Quality Assurance
-
-- [ ] **Unit Tests:** Unit tests added/updated for ALL core functionality introduced/changed by this story
-- [ ] **Integration Tests:** Integration tests added/updated for component interactions when story requirements demand them
-- [ ] **End-to-End Tests:** End-to-end tests created for critical user flows when story requirements specify them
-- [ ] **Test Coverage:** Tests cover acceptance criteria and edge cases from story Dev Notes
-- [ ] **Regression Prevention:** ALL existing tests pass (no regressions introduced)
-- [ ] **Code Quality:** Linting and static checks pass when configured in project
-- [ ] **Test Framework Compliance:** Tests use project's testing frameworks and patterns from Dev Notes
-
-## π Documentation & Tracking
-
-- [ ] **File List Complete:** File List includes EVERY new, modified, or deleted file (paths relative to repo root)
-- [ ] **Dev Agent Record Updated:** Contains relevant Implementation Notes and/or Debug Log for this work
-- [ ] **Change Log Updated:** Change Log includes clear summary of what changed and why
-- [ ] **Review Follow-ups:** All review follow-up tasks (marked [AI-Review]) completed and corresponding review items marked resolved (if applicable)
-- [ ] **Story Structure Compliance:** Only permitted sections of story file were modified
-
-## π Final Status Verification
-
-- [ ] **Story Status Updated:** Story Status set to "review"
-- [ ] **Sprint Status Updated:** Sprint status updated to "review" (when sprint tracking is used)
-- [ ] **Quality Gates Passed:** All quality checks and validations completed successfully
-- [ ] **No HALT Conditions:** No blocking issues or incomplete work remaining
-- [ ] **User Communication Ready:** Implementation summary prepared for user review
-
-## π― Final Validation Output
-
-```
-Definition of Done: {{PASS/FAIL}}
-
-β **Story Ready for Review:** {{story_key}}
-π **Completion Score:** {{completed_items}}/{{total_items}} items passed
-π **Quality Gates:** {{quality_gates_status}}
-π **Test Results:** {{test_results_summary}}
-π **Documentation:** {{documentation_status}}
-```
-
-**If FAIL:** List specific failures and required actions before story can be marked Ready for Review
-
-**If PASS:** Story is fully ready for code review and production consideration
diff --git a/src/bmm/workflows/4-implementation/dev-story/instructions.xml b/src/bmm/workflows/4-implementation/dev-story/instructions.xml
deleted file mode 100644
index 3f0c54ed..00000000
--- a/src/bmm/workflows/4-implementation/dev-story/instructions.xml
+++ /dev/null
@@ -1,654 +0,0 @@
-
- The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
- You MUST have already loaded and processed: {installed_path}/workflow.yaml
- Communicate all responses in {communication_language} and language MUST be tailored to {user_skill_level}
- Generate all documents in {document_output_language}
- Only modify the story file in these areas: Tasks/Subtasks checkboxes, Dev Agent Record (Debug Log, Completion Notes), File List,
- Change Log, and Status
- Execute ALL steps in exact order; do NOT skip steps
- Absolutely DO NOT stop because of "milestones", "significant progress", or "session boundaries". Continue in a single execution
- until the story is COMPLETE (all ACs satisfied and all tasks/subtasks checked) UNLESS a HALT condition is triggered or the USER gives
- other instruction.
- Do NOT schedule a "next session" or request review pauses unless a HALT condition applies. Only Step 6 decides completion.
- User skill level ({user_skill_level}) affects conversation style ONLY, not code updates.
-
-
-
- Use {{story_path}} directly
- Read COMPLETE story file
- Extract story_key from filename or metadata
-
-
-
-
-
- MUST read COMPLETE sprint-status.yaml file from start to end to preserve order
- Load the FULL file: {{sprint_status}}
- Read ALL lines from beginning to end - do not skip any content
- Parse the development_status section completely to understand story order
-
- Find the FIRST story (by reading in order from top to bottom) where:
- - Key matches pattern: number-number-name (e.g., "1-2-user-auth")
- - NOT an epic key (epic-X) or retrospective (epic-X-retrospective)
- - Status value equals "ready-for-dev"
-
-
-
-
- Choose option [1], [2], [3], or [4], or specify story file path:
-
-
- HALT - Run create-story to create next story
-
-
-
- HALT - Run validate-create-story to improve existing stories
-
-
-
- Provide the story file path to develop:
- Store user-provided story path as {{story_path}}
-
-
-
-
-
- Display detailed sprint status analysis
- HALT - User can review sprint status and provide story path
-
-
-
- Store user-provided story path as {{story_path}}
-
-
-
-
-
-
-
- Search {story_dir} for stories directly
- Find stories with "ready-for-dev" status in files
- Look for story files matching pattern: *-*-*.md
- Read each candidate story file to check Status section
-
-
-
- What would you like to do? Choose option [1], [2], or [3]:
-
-
- HALT - Run create-story to create next story
-
-
-
- HALT - Run validate-create-story to improve existing stories
-
-
-
- It's unclear what story you want developed. Please provide the full path to the story file:
- Store user-provided story path as {{story_path}}
- Continue with provided story file
-
-
-
-
- Use discovered story file and extract story_key
-
-
-
- Store the found story_key (e.g., "1-2-user-authentication") for later status updates
- Find matching story file in {story_dir} using story_key pattern: {{story_key}}.md
- Read COMPLETE story file from discovered path
-
-
-
- Parse sections: Story, Acceptance Criteria, Tasks/Subtasks, Dev Notes, Dev Agent Record, File List, Change Log, Status
-
- Load comprehensive context from story file's Dev Notes section
- Extract developer guidance from Dev Notes: architecture requirements, previous learnings, technical specifications
- Use enhanced story context to inform implementation decisions and approaches
-
- Identify first incomplete task (unchecked [ ]) in Tasks/Subtasks
-
-
- Completion sequence
-
- HALT: "Cannot develop story without access to story file"
- ASK user to clarify or HALT
-
-
-
- π MANDATORY GAP ANALYSIS - Validate draft tasks against actual codebase reality!
- This step ensures tasks reflect current codebase state, preventing duplicate implementations and missed dependencies.
-
-
-
- Extract timestamp from Gap Analysis section
-
-
-
-
-
-
-
-
-
-
-
-
-
- Extract story requirements, acceptance criteria, and draft tasks from story file
- Identify technical areas mentioned in tasks (files, classes, functions, services, components)
-
-
- Determine scan targets from task descriptions:
- - For file creation tasks: Check if files already exist
- - For feature implementation tasks: Search for related code patterns
- - For integration tasks: Verify dependencies exist
-
- Use Glob to find relevant files matching patterns from tasks (e.g., **/*.ts, **/*.tsx, **/*.test.ts, **/*.service.ts)
- Use Grep to search for specific classes, functions, or components mentioned in tasks
- Use Read to verify implementation details and functionality in key discovered files
-
-
- Document scan results:
-
- **CODEBASE REALITY:**
- β What Exists:
- - List verified files, classes, functions, services found
- - Note implementation completeness (partial vs full)
- - Identify reusable code that tasks should leverage
-
-
- β What's Missing:
- - List requirements mentioned in tasks but not found in codebase
- - Identify missing dependencies that tasks assume exist
- - Note gaps that need addressing
-
-
-
- Compare draft tasks to codebase reality:
- For each draft task, determine:
- - KEEP AS-IS: Task matches reality (nothing exists, creation needed)
- - MODIFY: Task needs adjustment (file/feature exists, should extend/modify instead of create)
- - REMOVE: Task already complete (feature fully implemented, tests pass)
- - ADD: New prerequisite discovered (missing dependency needs creating first)
-
- Generate refined task list with clear reasoning for each change
-
-
-
-
-
-
-
- Update story file with refined tasks in Tasks/Subtasks section
- Add new "Gap Analysis" section to story file with findings:
- - Scan timestamp
- - What Exists summary
- - What's Missing summary
- - Task changes applied
-
- Add Change Log entry: "Tasks refined based on codebase gap analysis - auto-accepted ({{date}})"
-
-
-
- **Approve these task updates?**
-
- Options:
- [Y] Yes - Update story file with refined tasks and proceed with implementation
- [A] Auto-accept - Apply changes and auto-accept all future task refinements this session
- [n] No - Keep original draft tasks as-is (not recommended - risk of duplicate code)
- [e] Edit - Let me manually adjust the proposed tasks
- [s] Skip - Something looks wrong, skip this story
- [r] Review - Show me more details about specific findings before deciding
-
-
-
- Initialize {{gap_analysis_auto_accept}} = false
-
-
-
- Update story file with refined tasks in Tasks/Subtasks section
- Add new "Gap Analysis" section to story file with findings:
- - Scan timestamp
- - What Exists summary
- - What's Missing summary
- - Task changes applied
-
- Add Change Log entry: "Tasks refined based on codebase gap analysis ({{date}})"
-
- Continue to step 2
-
-
-
- Set {{gap_analysis_auto_accept}} = true
- Update story file with refined tasks in Tasks/Subtasks section
- Add new "Gap Analysis" section to story file with findings
- Add Change Log entry: "Tasks refined based on codebase gap analysis ({{date}})"
-
- Continue to step 2
-
-
-
-
-
- Add note to Dev Agent Record: "Gap analysis performed but user chose to keep draft tasks"
- Continue to step 2 with original draft tasks
-
-
-
- Please describe how you want to adjust the proposed tasks, or provide your own task list:
- Allow user to refine proposed tasks interactively
- Update story file with user's adjusted task list
- Add Gap Analysis section documenting user's custom refinements
- Add Change Log entry: "Tasks manually refined after gap analysis ({{date}})"
-
- Continue to step 2
-
-
-
-
- Add note to Dev Agent Record: "Development halted - gap analysis revealed issues requiring manual review"
- HALT - do not proceed with implementation
-
-
-
- Which findings would you like more details about? (specify file names, tasks, or areas of concern)
- Provide detailed analysis of requested areas using Read tool for deeper inspection
- After review, re-present the approval options
- Continue based on user's subsequent choice
-
-
-
-
-
- Add Gap Analysis section to story documenting verification performed
- Continue to step 2
-
-
-
-
- Load all available context to inform implementation
-
- Load {project_context} for coding standards and project-wide patterns (if exists)
- Parse sections: Story, Acceptance Criteria, Tasks/Subtasks, Dev Notes, Dev Agent Record, File List, Change Log, Status
- Load comprehensive context from story file's Dev Notes section
- Extract developer guidance from Dev Notes: architecture requirements, previous learnings, technical specifications
- Use enhanced story context to inform implementation decisions and approaches
-
-
-
-
- Determine if this is a fresh start or continuation after code review
-
- Check if "Senior Developer Review (AI)" section exists in the story file
- Check if "Review Follow-ups (AI)" subsection exists under Tasks/Subtasks
-
-
- Set review_continuation = true
- Extract from "Senior Developer Review (AI)" section:
- - Review outcome (Approve/Changes Requested/Blocked)
- - Review date
- - Total action items with checkboxes (count checked vs unchecked)
- - Severity breakdown (High/Med/Low counts)
-
- Count unchecked [ ] review follow-up tasks in "Review Follow-ups (AI)" subsection
- Store list of unchecked review items as {{pending_review_items}}
-
-
-
-
-
- Set review_continuation = false
- Set {{pending_review_items}} = empty
-
-
-
-
-
-
-
- Load the FULL file: {{sprint_status}}
- Read all development_status entries to find {{story_key}}
- Get current status value for development_status[{{story_key}}]
-
-
- Update the story in the sprint status report to = "in-progress"
-
-
-
-
-
-
-
-
-
-
-
- Store {{current_sprint_status}} for later use
-
-
-
-
- Set {{current_sprint_status}} = "no-sprint-tracking"
-
-
-
-
- FOLLOW THE STORY FILE TASKS/SUBTASKS SEQUENCE EXACTLY AS WRITTEN - NO DEVIATION
-
- Review the current task/subtask from the story file - this is your authoritative implementation guide
- Plan implementation following red-green-refactor cycle
-
-
- Write FAILING tests first for the task/subtask functionality
- Confirm tests fail before implementation - this validates test correctness
-
-
- Implement MINIMAL code to make tests pass
- Run tests to confirm they now pass
- Handle error conditions and edge cases as specified in task/subtask
-
-
- Improve code structure while keeping tests green
- Ensure code follows architecture patterns and coding standards from Dev Notes
-
- Document technical approach and decisions in Dev Agent Record β Implementation Plan
-
- HALT: "Additional dependencies need user approval"
- HALT and request guidance
- HALT: "Cannot proceed without necessary configuration files"
-
- NEVER implement anything not mapped to a specific task/subtask in the story file
- NEVER proceed to next task until current task/subtask is complete AND tests pass
- Execute continuously without pausing until all tasks/subtasks are complete or explicit HALT condition
- Do NOT propose to pause for review until Step 9 completion gates are satisfied
-
-
-
- Create unit tests for business logic and core functionality introduced/changed by the task
- Add integration tests for component interactions specified in story requirements
- Include end-to-end tests for critical user flows when story requirements demand them
- Cover edge cases and error handling scenarios identified in story Dev Notes
-
-
-
- Determine how to run tests for this repo (infer test framework from project structure)
- Run all existing tests to ensure no regressions
- Run the new tests to verify implementation correctness
- Run linting and code quality checks if configured in project
- Validate implementation meets ALL story acceptance criteria; enforce quantitative thresholds explicitly
- STOP and fix before continuing - identify breaking changes immediately
- STOP and fix before continuing - ensure implementation correctness
-
-
-
- NEVER mark a task complete unless ALL conditions are met - NO LYING OR CHEATING
-
-
- Verify ALL tests for this task/subtask ACTUALLY EXIST and PASS 100%
- Confirm implementation matches EXACTLY what the task/subtask specifies - no extra features
- Validate that ALL acceptance criteria related to this task are satisfied
- Run full test suite to ensure NO regressions introduced
-
-
-
- Extract review item details (severity, description, related AC/file)
- Add to resolution tracking list: {{resolved_review_items}}
-
-
- Mark task checkbox [x] in "Tasks/Subtasks β Review Follow-ups (AI)" section
-
-
- Find matching action item in "Senior Developer Review (AI) β Action Items" section by matching description
- Mark that action item checkbox [x] as resolved
-
- Add to Dev Agent Record β Completion Notes: "β Resolved review finding [{{severity}}]: {{description}}"
-
-
-
-
- ONLY THEN mark the task (and subtasks) checkbox with [x]
- Update File List section with ALL new, modified, or deleted files (paths relative to repo root)
- Add completion notes to Dev Agent Record summarizing what was ACTUALLY implemented and tested
-
-
-
- DO NOT mark task complete - fix issues first
- HALT if unable to fix validation failures
-
-
-
-
- MUST update sprint-status.yaml after EVERY task completion - not just at story start/end
-
- Load the FULL file: {{sprint_status}}
- Find development_status key matching {{story_key}}
-
- Count total tasks and checked tasks from story file:
- - total_tasks = count of all [ ] and [x] in Tasks/Subtasks section (top-level only)
- - checked_tasks = count of [x] only
- - progress_pct = (checked_tasks / total_tasks) Γ 100
-
-
- Update sprint-status.yaml entry with inline progress comment:
- Format: {{story_key}}: in-progress # {{checked_tasks}}/{{total_tasks}} tasks ({{progress_pct}}%)
-
-
- Save file, preserving ALL comments and structure
-
- Re-read {sprint_status} file to verify update persisted
-
-
-
- HALT - Cannot proceed without valid progress tracking
-
-
-
-
-
-
- Count total resolved review items in this session
- Add Change Log entry: "Addressed code review findings - {{resolved_count}} items resolved (Date: {{date}})"
-
-
- Save the story file
- Determine if more incomplete tasks remain
-
- Next task
-
-
- Completion
-
-
-
-
- Verify ALL tasks and subtasks are marked [x] (re-scan the story document now)
- Run the full regression suite (do not skip)
- Confirm File List includes every changed file
- Execute enhanced definition-of-done validation
- Update the story Status to: "review"
-
-
- Validate definition-of-done checklist with essential requirements:
- - All tasks/subtasks marked complete with [x]
- - Implementation satisfies every Acceptance Criterion
- - Unit tests for core functionality added/updated
- - Integration tests for component interactions added when required
- - End-to-end tests for critical flows added when story demands them
- - All tests pass (no regressions, new tests successful)
- - Code quality checks pass (linting, static analysis if configured)
- - File List includes every new/modified/deleted file (relative paths)
- - Dev Agent Record contains implementation notes
- - Change Log includes summary of changes
- - Only permitted story sections were modified
-
-
-
-
- Load the FULL file: {sprint_status}
- Find development_status key matching {{story_key}}
- Verify current status is "in-progress" (expected previous state)
- Update development_status[{{story_key}}] = "review"
- Save file, preserving ALL comments and structure including STATUS DEFINITIONS
-
-
-
-
-
-
-
-
-
- HALT - Cannot proceed without valid sprint tracking
-
-
-
- Re-read {sprint_status} file to verify update persisted
- Confirm {{story_key}} now shows status "review"
-
-
-
- HALT - File system issue or permission problem
-
-
-
-
-
- HALT - Complete remaining tasks before marking ready for review
- HALT - Fix regression issues before completing
- HALT - Update File List with all changed files
- HALT - Address DoD failures before completing
-
-
-
- Execute the enhanced definition-of-done checklist using the validation framework
- Prepare a concise summary in Dev Agent Record β Completion Notes
-
- Communicate to {user_name} that story implementation is complete and ready for review
- Summarize key accomplishments: story ID, story key, title, key changes made, tests added, files modified
- Provide the story file path and current status (now "review")
-
- Based on {user_skill_level}, ask if user needs any explanations about:
- - What was implemented and how it works
- - Why certain technical decisions were made
- - How to test or verify the changes
- - Any patterns, libraries, or approaches used
- - Anything else they'd like clarified
-
-
-
- Provide clear, contextual explanations tailored to {user_skill_level}
- Use examples and references to specific code when helpful
-
-
- Once explanations are complete (or user indicates no questions), suggest logical next steps
- Recommended next steps (flexible based on project setup):
- - Review the implemented story and test the changes
- - Verify all acceptance criteria are met
- - Ensure deployment readiness if applicable
- - Run `code-review` workflow for peer review
- - Optional: Run TEA `*automate` to expand guardrail tests
-
-
-
-
- Suggest checking {sprint_status} to see project progress
-
- Remain flexible - allow user to choose their own path or ask for other assistance
-
-
-
diff --git a/src/bmm/workflows/4-implementation/gap-analysis/instructions.xml b/src/bmm/workflows/4-implementation/gap-analysis/instructions.xml
new file mode 100644
index 00000000..4af6db50
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/gap-analysis/instructions.xml
@@ -0,0 +1,367 @@
+
+ The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
+ You MUST have already loaded and processed: {installed_path}/workflow.yaml
+ Communicate all responses in {communication_language}
+
+
+
+ Use {{story_file}} directly
+ Read COMPLETE story file
+ Extract story_key from filename or metadata
+
+
+
+
+
+
+ Enter story file path, story key (e.g., "1-2-auth"), or status to scan (e.g., "done", "review", "in-progress"):
+
+
+ Use provided file path as {{story_file}}
+ Read COMPLETE story file
+ Extract story_key from filename
+
+
+
+
+ Search {story_dir} for file matching pattern {{story_key}}.md
+ Set {{story_file}} to found file path
+ Read COMPLETE story file
+
+
+
+
+
+
+
+ Load the FULL file: {{sprint_status}}
+ Parse development_status section
+ Find all stories where status equals {{user_input}}
+
+
+
+ HALT
+
+
+
+
+ Which story would you like to validate? [Enter story key or 'all']:
+
+
+ Set {{batch_mode}} = true
+ Store list of all story keys to validate
+ Set {{story_file}} to first story in list
+ Read COMPLETE story file
+
+
+
+
+ Set {{story_file}} to selected story path
+ Read COMPLETE story file
+
+
+
+
+
+ Set {{story_file}} to found story path
+ Read COMPLETE story file
+
+
+
+
+
+
+ HALT
+
+
+
+
+
+
+
+ π CODEBASE REALITY CHECK - Validate tasks against actual code!
+
+
+
+
+ Parse story sections: Story, Acceptance Criteria, Tasks/Subtasks, Dev Notes, Status
+ Extract all tasks and subtasks from story file
+ Identify technical areas mentioned in tasks (files, classes, functions, services, components)
+
+
+ Determine scan targets from task descriptions:
+ - For "Create X" tasks: Check if X already exists
+ - For "Implement Y" tasks: Search for Y functionality
+ - For "Add Z" tasks: Verify Z is missing
+ - For test tasks: Check for existing test files
+
+ Use Glob to find relevant files matching patterns from tasks (e.g., **/*.ts, **/*.tsx, **/*.test.ts)
+ Use Grep to search for specific classes, functions, or components mentioned in tasks
+ Use Read to verify implementation details and functionality in key discovered files
+
+
+ Document scan results:
+
+ **CODEBASE REALITY:**
+ β What Exists:
+ - List verified files, classes, functions, services found
+ - Note implementation completeness (partial vs full)
+ - Identify code that tasks claim to create but already exists
+
+
+ β What's Missing:
+ - List features mentioned in tasks but NOT found in codebase
+ - Identify claimed implementations that don't exist
+ - Note tasks marked complete but code missing
+
+
+
+ For each task in the story, determine:
+ - ACCURATE: Task matches reality (code exists if task is checked, missing if unchecked)
+ - FALSE POSITIVE: Task checked [x] but code doesn't exist (BS detection!)
+ - FALSE NEGATIVE: Task unchecked [ ] but code already exists
+ - NEEDS UPDATE: Task description doesn't match current implementation
+
+ Generate validation report with:
+ - Tasks that are accurate
+ - Tasks that are false positives (marked done but not implemented) β οΈ
+ - Tasks that are false negatives (not marked but already exist)
+ - Recommended task updates
+
+
+
+ π SHOW TRUTH - Compare story claims vs codebase reality
+
+
+
+
+
+
+
+
+
+
+
+ **What would you like to do?**
+
+ Options:
+ [U] Update - Apply proposed changes to story file
+ [A] Audit Report - Save findings to report file without updating story
+ [N] No Changes - Just show me the findings
+ [R] Review Details - Show me more details about specific findings
+ [C] Continue to Next - Move to next story (batch mode only)
+ [Q] Quit - Exit gap analysis
+
+
+
+
+ Update story file with proposed changes:
+ - Uncheck false positive tasks
+ - Check false negative tasks
+ - Update task descriptions as needed
+ - Add or update "Gap Analysis" section with findings
+ - Add Change Log entry: "Gap analysis performed - tasks validated against codebase ({{date}})"
+
+
+ Story has false positives. Update status to 'in-progress'? [Y/n]:
+
+ Update story Status to 'in-progress'
+
+ Update sprint-status.yaml status for this story to 'in-progress'
+
+
+
+
+
+
+
+ Continue to next story? [Y/n]:
+
+ Load next story from batch list
+ Analyze next story
+
+
+
+ HALT - Gap analysis complete
+
+
+
+
+ Generate audit report file: {{story_dir}}/gap-analysis-report-{{story_key}}-{{date}}.md
+ Include full findings, accuracy scores, recommendations
+
+
+
+ Continue to next story? [Y/n]:
+
+ Load next story from batch list
+ Analyze next story
+
+
+
+ HALT - Gap analysis complete
+
+
+
+
+
+ HALT - Gap analysis complete
+
+
+
+
+ Which findings would you like more details about? (specify task numbers, file names, or areas):
+ Provide detailed analysis of requested areas using Read tool for deeper code inspection
+ After review, re-present the decision options
+ Continue based on user's subsequent choice
+
+
+
+
+ Load next story from batch list
+ Analyze next story
+
+
+
+
+ HALT
+
+
+
+
+
+ HALT
+
+
+
+
+
+
+
+
diff --git a/src/bmm/workflows/4-implementation/gap-analysis/workflow.yaml b/src/bmm/workflows/4-implementation/gap-analysis/workflow.yaml
new file mode 100644
index 00000000..88417f98
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/gap-analysis/workflow.yaml
@@ -0,0 +1,23 @@
+name: gap-analysis
+description: "Validate story tasks against actual codebase - audit completed stories or validate before development"
+author: "BMad"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+user_name: "{config_source}:user_name"
+communication_language: "{config_source}:communication_language"
+implementation_artifacts: "{config_source}:implementation_artifacts"
+story_dir: "{implementation_artifacts}"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/gap-analysis"
+instructions: "{installed_path}/instructions.xml"
+
+# Variables
+story_file: "" # User provides story file path or auto-discover
+sprint_status: "{implementation_artifacts}/sprint-status.yaml"
+project_context: "**/project-context.md"
+
+standalone: true
+
+web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/push-all/instructions.xml b/src/bmm/workflows/4-implementation/push-all/instructions.xml
new file mode 100644
index 00000000..9e5b715d
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/push-all/instructions.xml
@@ -0,0 +1,549 @@
+
+ The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
+ You MUST have already loaded and processed: {installed_path}/workflow.yaml
+ Communicate all responses in {communication_language}
+ π PUSH-ALL - Stage, commit, and push changes with comprehensive safety validation
+
+
+ β‘ PARALLEL AGENT MODE: When {{target_files}} is provided:
+ - ONLY stage and commit the specified files
+ - Do NOT use `git add .` or `git add -A`
+ - Use `git add [specific files]` instead
+ - This prevents committing work from other parallel agents
+
+ π ALL FILES MODE: When {{target_files}} is empty:
+ - Stage ALL changes with `git add .`
+ - Original behavior for single-agent execution
+
+
+
+
+
+
+ Run git commands in parallel:
+ - git status - Show modified/added/deleted/untracked files
+ - git diff --stat - Show change statistics
+ - git log -1 --oneline - Show recent commit for message style
+ - git branch --show-current - Confirm current branch
+
+ Parse git status output to identify:
+ - Modified files
+ - Added files
+ - Deleted files
+ - Untracked files
+ - Total insertion/deletion counts
+
+
+
+
+ HALT - No work to do
+
+
+
+
+ π SAFETY CHECKS - Validate changes before committing
+
+ Scan all changed files for dangerous patterns:
+
+ **Secret Detection:**
+ Check for files matching secret patterns:
+ - .env*, *.key, *.pem, credentials.json, secrets.yaml
+ - id_rsa, *.p12, *.pfx, *.cer
+ - Any file containing: _API_KEY=, _SECRET=, _TOKEN= with real values (not placeholders)
+
+
+ Validate API keys are placeholders only:
+ β Acceptable placeholders:
+ - API_KEY=your-api-key-here
+ - SECRET=placeholder
+ - TOKEN=xxx
+ - API_KEY=${{YOUR_KEY}}
+ - SECRET_KEY=<your-key>
+
+
+ β BLOCK real keys:
+ - OPENAI_API_KEY=sk-proj-xxxxx (real OpenAI key)
+ - AWS_SECRET_KEY=AKIA... (real AWS key)
+ - STRIPE_API_KEY=sk_live_... (real Stripe key)
+ - Any key with recognizable provider prefix + actual value
+
+
+ **File Size Check:**
+ Check for files >10MB without Git LFS configuration
+
+ **Build Artifacts:**
+ Check for unwanted directories/files that should be gitignored:
+ - node_modules/, dist/, build/, .next/, __pycache__/, *.pyc, .venv/
+ - .DS_Store, Thumbs.db, *.swp, *.tmp, *.log (in root)
+ - *.class, target/, bin/ (Java)
+ - vendor/ (unless dependency managed)
+
+
+ **Git State:**
+ Verify:
+ - .gitignore exists and properly configured
+ - No unresolved merge conflicts
+ - Git repository initialized
+
+
+
+
+
+ HALT - Cannot proceed with secrets
+
+
+
+
+
+ Proceed with large files anyway? [y/n]:
+
+
+
+ HALT
+
+
+
+
+
+
+ Commit build artifacts anyway? [y/n]:
+
+
+
+ HALT
+
+
+
+
+
+
+ Push directly to {{branch_name}}? [y/n]:
+
+
+
+ HALT
+
+
+
+
+
+
+
+
+
+ **Proceed with commit and push?**
+
+ Options:
+ [yes] - Proceed with commit and push
+ [no] - Cancel (leave changes unstaged)
+ [review] - Show detailed diff first
+
+
+
+ Execute: git diff --stat
+ Execute: git diff | head -100 (show first 100 lines of changes)
+
+ After reviewing, proceed with commit and push? [yes/no]:
+
+
+
+
+ HALT - User cancelled
+
+
+
+
+
+
+
+ Execute: git add {{target_files}}
+ Execute: git status
+
+
+
+
+
+ Execute: git add .
+ Execute: git status
+
+
+
+
+
+ π COMMIT MESSAGE - Generate conventional commit format
+
+ Analyze changes to determine commit type:
+ - feat: New features (new files with functionality)
+ - fix: Bug fixes (fixing broken functionality)
+ - docs: Documentation only (*.md, comments)
+ - style: Formatting, missing semicolons (no code change)
+ - refactor: Code restructuring (no feature/fix)
+ - test: Adding/updating tests
+ - chore: Tooling, configs, dependencies
+ - perf: Performance improvements
+
+ Determine scope (optional):
+ - Component/feature name if changes focused on one area
+ - Omit if changes span multiple areas
+
+
+ Generate message summary (max 72 chars):
+ - Use imperative mood: "add feature" not "added feature"
+ - Lowercase except proper nouns
+ - No period at end
+
+
+ Generate message body (if changes >5 files):
+ - List key changes as bullet points
+ - Max 3-5 bullets
+ - Keep concise
+
+
+ Reference recent commits for style consistency
+
+
+
+ **Use this commit message?**
+
+ Options:
+ [yes] - Use generated message
+ [edit] - Let me write custom message
+ [cancel] - Cancel push-all (leave staged)
+
+
+
+ Enter your commit message (use conventional commit format if possible):
+ Store user input as {{commit_message}}
+
+
+
+
+
+ HALT
+
+
+
+ Use {{generated_commit_message}} as {{commit_message}}
+
+
+
+
+ Execute git commit with heredoc for multi-line message safety:
+ git commit -m "$(cat <<'EOF'
+{{commit_message}}
+EOF
+)"
+
+
+
+
+ HALT - Fix errors before proceeding
+
+
+ Parse commit output for hash
+
+
+
+
+
+
+ Execute: git push
+
+
+
+
+
+ Execute: git pull --rebase
+
+
+
+ HALT - Resolve conflicts manually
+
+
+ Execute: git push
+
+
+
+
+
+ Execute: git push -u origin {{current_branch}}
+
+
+
+
+ HALT - Use PR workflow for protected branches
+
+
+
+
+ HALT - Fix authentication
+
+
+
+
+ HALT - Manual push required
+
+
+
+
+
+
+ Execute: git log -1 --oneline --decorate
+
+
+
+
+
+
+
+
+
diff --git a/src/bmm/workflows/4-implementation/push-all/workflow.yaml b/src/bmm/workflows/4-implementation/push-all/workflow.yaml
new file mode 100644
index 00000000..1eedfe30
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/push-all/workflow.yaml
@@ -0,0 +1,22 @@
+name: push-all
+description: "Stage changes, create commit with safety checks, and push to remote"
+author: "BMad"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+user_name: "{config_source}:user_name"
+communication_language: "{config_source}:communication_language"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/push-all"
+instructions: "{installed_path}/instructions.xml"
+
+# Target files to commit (for parallel agent execution)
+# When empty/not provided: commits ALL changes (original behavior)
+# When provided: only commits the specified files (safe for parallel agents)
+target_files: "" # Space-separated list of file paths, or empty for all
+story_key: "" # Optional: story identifier for commit message context
+
+standalone: true
+
+web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/recover-sprint-status/instructions.md b/src/bmm/workflows/4-implementation/recover-sprint-status/instructions.md
new file mode 100644
index 00000000..7ec69cd7
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/recover-sprint-status/instructions.md
@@ -0,0 +1,306 @@
+# Sprint Status Recovery - Instructions
+
+**Workflow:** recover-sprint-status
+**Purpose:** Fix sprint-status.yaml when tracking has drifted for days/weeks
+
+---
+
+## What This Workflow Does
+
+Analyzes multiple sources to rebuild accurate sprint-status.yaml:
+
+1. **Story File Quality** - Validates size (>=10KB), task lists, checkboxes
+2. **Explicit Status: Fields** - Reads story Status: when present
+3. **Git Commits** - Searches last 30 days for story references
+4. **Autonomous Reports** - Checks .epic-*-completion-report.md files
+5. **Task Completion Rate** - Analyzes checkbox completion in story files
+
+**Infers Status Based On:**
+- Explicit Status: field (highest priority)
+- Git commits referencing story (strong signal)
+- Autonomous completion reports (very high confidence)
+- Task checkbox completion rate (90%+ = done)
+- File quality (poor quality prevents "done" marking)
+
+---
+
+## Step 1: Run Recovery Analysis
+
+```bash
+Execute: {recovery_script} --dry-run
+```
+
+**This will:**
+- Analyze all story files (quality, tasks, status)
+- Search git commits for completion evidence
+- Check autonomous completion reports
+- Infer status from all evidence
+- Report recommendations with confidence levels
+
+**No changes** made in dry-run mode - just analysis.
+
+---
+
+## Step 2: Review Recommendations
+
+**Check the output for:**
+
+### High Confidence Updates (Safe)
+- Stories with explicit Status: fields
+- Stories in autonomous completion reports
+- Stories with 3+ git commits + 90%+ tasks complete
+
+### Medium Confidence Updates (Verify)
+- Stories with 1-2 git commits
+- Stories with 50-90% tasks complete
+- Stories with file size >=10KB
+
+### Low Confidence Updates (Question)
+- Stories with no Status: field, no commits
+- Stories with file size <10KB
+- Stories with <5 tasks total
+
+---
+
+## Step 3: Choose Recovery Mode
+
+### Conservative Mode (Safest)
+```bash
+Execute: {recovery_script} --conservative
+```
+
+**Only updates:**
+- High/very high confidence stories
+- Explicit Status: fields honored
+- Git commits with 3+ references
+- Won't infer or guess
+
+**Best for:** Quick fixes, first-time recovery, risk-averse
+
+---
+
+### Aggressive Mode (Thorough)
+```bash
+Execute: {recovery_script} --aggressive --dry-run # Preview first!
+Execute: {recovery_script} --aggressive # Then apply
+```
+
+**Updates:**
+- Medium+ confidence stories
+- Infers from git commits (even 1 commit)
+- Uses task completion rate
+- Pre-fills brownfield checkboxes
+
+**Best for:** Major drift (30+ days), comprehensive recovery
+
+---
+
+### Interactive Mode (Recommended)
+```bash
+Execute: {recovery_script}
+```
+
+**Process:**
+1. Shows all recommendations
+2. Groups by confidence level
+3. Asks for confirmation before each batch
+4. Allows selective application
+
+**Best for:** First-time use, learning the tool
+
+---
+
+## Step 4: Validate Results
+
+```bash
+Execute: ./scripts/sync-sprint-status.sh --validate
+```
+
+**Should show:**
+- "β sprint-status.yaml is up to date!" (success)
+- OR discrepancy count (if issues remain)
+
+---
+
+## Step 5: Commit Changes
+
+```bash
+git add docs/sprint-artifacts/sprint-status.yaml
+git add .sprint-status-backups/ # Include backup for audit trail
+git commit -m "fix(tracking): Recover sprint-status.yaml - {MODE} recovery"
+```
+
+---
+
+## Recovery Scenarios
+
+### Scenario 1: Autonomous Epic Completed, Tracking Not Updated
+
+**Symptoms:**
+- Autonomous completion report exists
+- Git commits show work done
+- sprint-status.yaml shows "in-progress" or "backlog"
+
+**Solution:**
+```bash
+{recovery_script} --aggressive
+# Will find completion report, mark all stories done
+```
+
+---
+
+### Scenario 2: Manual Work Over Past Week Not Tracked
+
+**Symptoms:**
+- Story Status: fields updated to "done"
+- sprint-status.yaml not synced
+- Git commits exist
+
+**Solution:**
+```bash
+./scripts/sync-sprint-status.sh
+# Standard sync (reads Status: fields)
+```
+
+---
+
+### Scenario 3: Story Files Missing Status: Fields
+
+**Symptoms:**
+- 100+ stories with no Status: field
+- Some completed, some not
+- No autonomous reports
+
+**Solution:**
+```bash
+{recovery_script} --aggressive --dry-run # Preview inference
+# Review recommendations carefully
+{recovery_script} --aggressive # Apply if satisfied
+```
+
+---
+
+### Scenario 4: Complete Chaos (Mix of All Above)
+
+**Symptoms:**
+- Some stories have Status:, some don't
+- Autonomous reports for some epics
+- Manual work on others
+- sprint-status.yaml very outdated
+
+**Solution:**
+```bash
+# Step 1: Run recovery in dry-run
+{recovery_script} --aggressive --dry-run
+
+# Step 2: Review /tmp/recovery_results.json
+
+# Step 3: Apply in conservative mode first (safest updates)
+{recovery_script} --conservative
+
+# Step 4: Manually review remaining stories
+# Update Status: fields for known completed work
+
+# Step 5: Run sync to catch manual updates
+./scripts/sync-sprint-status.sh
+
+# Step 6: Final validation
+./scripts/sync-sprint-status.sh --validate
+```
+
+---
+
+## Quality Gates
+
+**Recovery script will DOWNGRADE status if:**
+- Story file < 10KB (not properly detailed)
+- Story file has < 5 tasks (incomplete story)
+- No git commits found (no evidence of work)
+- Explicit Status: contradicts other evidence
+
+**Recovery script will UPGRADE status if:**
+- Autonomous completion report lists story as done
+- 3+ git commits + 90%+ tasks checked
+- Explicit Status: field says "done"
+
+---
+
+## Post-Recovery Checklist
+
+After running recovery:
+
+- [ ] Run validation: `./scripts/sync-sprint-status.sh --validate`
+- [ ] Review backup: Check `.sprint-status-backups/` for before state
+- [ ] Check epic statuses: Verify epic-level status matches story completion
+- [ ] Spot-check 5-10 stories: Confirm inferred status is accurate
+- [ ] Commit changes: Add recovery to version control
+- [ ] Document issues: Note why drift occurred, prevent recurrence
+
+---
+
+## Preventing Future Drift
+
+**After recovery:**
+
+1. **Use workflows properly**
+ - `/create-story` - Adds to sprint-status.yaml automatically
+ - `/dev-story` - Updates both Status: and sprint-status.yaml
+ - Autonomous workflows - Now update tracking
+
+2. **Run sync regularly**
+ - Weekly: `pnpm sync:sprint-status:dry-run` (check health)
+ - After manual Status: updates: `pnpm sync:sprint-status`
+
+3. **CI/CD validation** (coming soon)
+ - Blocks PRs with out-of-sync tracking
+ - Forces sync before merge
+
+---
+
+## Troubleshooting
+
+### "Recovery script shows 0 updates"
+
+**Possible causes:**
+- sprint-status.yaml already accurate
+- Story files all have proper Status: fields
+- No git commits found (check date range)
+
+**Action:** Run `--dry-run` to see analysis, check `/tmp/recovery_results.json`
+
+---
+
+### "Low confidence on stories I know are done"
+
+**Possible causes:**
+- Story file < 10KB (not properly detailed)
+- No git commits (work done outside git)
+- No explicit Status: field
+
+**Action:** Manually add Status: field to story, then run standard sync
+
+---
+
+### "Recovery marks incomplete stories as done"
+
+**Possible causes:**
+- Git commits exist but work abandoned
+- Autonomous report lists story but implementation failed
+- Tasks pre-checked incorrectly (brownfield error)
+
+**Action:** Use conservative mode, manually verify, fix story files
+
+---
+
+## Output Files
+
+**Created during recovery:**
+- `.sprint-status-backups/sprint-status-recovery-{timestamp}.yaml` - Backup
+- `/tmp/recovery_results.json` - Detailed analysis
+- Updated `sprint-status.yaml` - Recovered status
+
+---
+
+**Last Updated:** 2026-01-02
+**Status:** Production Ready
+**Works On:** ANY BMAD project with sprint-status.yaml tracking
diff --git a/src/bmm/workflows/4-implementation/sprint-status/workflow.yaml b/src/bmm/workflows/4-implementation/recover-sprint-status/workflow.yaml
similarity index 52%
rename from src/bmm/workflows/4-implementation/sprint-status/workflow.yaml
rename to src/bmm/workflows/4-implementation/recover-sprint-status/workflow.yaml
index 6f10a9a6..5ae5c8f8 100644
--- a/src/bmm/workflows/4-implementation/sprint-status/workflow.yaml
+++ b/src/bmm/workflows/4-implementation/recover-sprint-status/workflow.yaml
@@ -1,6 +1,6 @@
-# Sprint Status - Implementation Tracker
-name: sprint-status
-description: "Summarize sprint-status.yaml, surface risks, and route to the right implementation workflow."
+# Sprint Status Recovery Workflow
+name: recover-sprint-status
+description: "Recover sprint-status.yaml when tracking has drifted. Analyzes story files, git commits, and autonomous reports to rebuild accurate status."
author: "BMad"
# Critical variables from config
@@ -8,26 +8,20 @@ config_source: "{project-root}/_bmad/bmm/config.yaml"
output_folder: "{config_source}:output_folder"
user_name: "{config_source}:user_name"
communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
implementation_artifacts: "{config_source}:implementation_artifacts"
-planning_artifacts: "{config_source}:planning_artifacts"
# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/sprint-status"
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/recover-sprint-status"
instructions: "{installed_path}/instructions.md"
# Inputs
variables:
sprint_status_file: "{implementation_artifacts}/sprint-status.yaml"
- tracking_system: "file-system"
+ story_directory: "{implementation_artifacts}"
+ recovery_mode: "interactive" # Options: interactive, conservative, aggressive
-# Smart input file references
-input_file_patterns:
- sprint_status:
- description: "Sprint status file generated by sprint-planning"
- whole: "{implementation_artifacts}/sprint-status.yaml"
- load_strategy: "FULL_LOAD"
+# Recovery script location
+recovery_script: "{project-root}/scripts/recover-sprint-status.sh"
# Standalone so IDE commands get generated
standalone: true
diff --git a/src/bmm/workflows/4-implementation/retrospective/instructions.md b/src/bmm/workflows/4-implementation/retrospective/instructions.md
deleted file mode 100644
index 01750312..00000000
--- a/src/bmm/workflows/4-implementation/retrospective/instructions.md
+++ /dev/null
@@ -1,1443 +0,0 @@
-# Retrospective - Epic Completion Review Instructions
-
-The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
-You MUST have already loaded and processed: {project-root}/_bmad/bmm/workflows/4-implementation/retrospective/workflow.yaml
-Communicate all responses in {communication_language} and language MUST be tailored to {user_skill_level}
-Generate all documents in {document_output_language}
-β οΈ ABSOLUTELY NO TIME ESTIMATES - NEVER mention hours, days, weeks, months, or ANY time-based predictions. AI has fundamentally changed development speed - what once took teams weeks/months can now be done by one person in hours. DO NOT give ANY time estimates whatsoever.
-
-
- DOCUMENT OUTPUT: Retrospective analysis. Concise insights, lessons learned, action items. User skill level ({user_skill_level}) affects conversation style ONLY, not retrospective content.
-
-FACILITATION NOTES:
-
-- Scrum Master facilitates this retrospective
-- Psychological safety is paramount - NO BLAME
-- Focus on systems, processes, and learning
-- Everyone contributes with specific examples preferred
-- Action items must be achievable with clear ownership
-- Two-part format: (1) Epic Review + (2) Next Epic Preparation
-
-PARTY MODE PROTOCOL:
-
-- ALL agent dialogue MUST use format: "Name (Role): dialogue"
-- Example: Bob (Scrum Master): "Let's begin..."
-- Example: {user_name} (Project Lead): [User responds]
-- Create natural back-and-forth with user actively participating
-- Show disagreements, diverse perspectives, authentic team dynamics
-
-
-
-
-
-
-Explain to {user_name} the epic discovery process using natural dialogue
-
-
-
-PRIORITY 1: Check {sprint_status_file} first
-
-Load the FULL file: {sprint_status_file}
-Read ALL development_status entries
-Find the highest epic number with at least one story marked "done"
-Extract epic number from keys like "epic-X-retrospective" or story keys like "X-Y-story-name"
-Set {{detected_epic}} = highest epic number found with completed stories
-
-
- Present finding to user with context
-
-
-
-WAIT for {user_name} to confirm or correct
-
-
- Set {{epic_number}} = {{detected_epic}}
-
-
-
- Set {{epic_number}} = user-provided number
-
-
-
-
-
- PRIORITY 2: Ask user directly
-
-
-
-WAIT for {user_name} to provide epic number
-Set {{epic_number}} = user-provided number
-
-
-
- PRIORITY 3: Fallback to stories folder
-
-Scan {story_directory} for highest numbered story files
-Extract epic numbers from story filenames (pattern: epic-X-Y-story-name.md)
-Set {{detected_epic}} = highest epic number found
-
-
-
-WAIT for {user_name} to confirm or correct
-Set {{epic_number}} = confirmed number
-
-
-Once {{epic_number}} is determined, verify epic completion status
-
-Find all stories for epic {{epic_number}} in {sprint_status_file}:
-
-- Look for keys starting with "{{epic_number}}-" (e.g., "1-1-", "1-2-", etc.)
-- Exclude epic key itself ("epic-{{epic_number}}")
-- Exclude retrospective key ("epic-{{epic_number}}-retrospective")
-
-
-Count total stories found for this epic
-Count stories with status = "done"
-Collect list of pending story keys (status != "done")
-Determine if complete: true if all stories are done, false otherwise
-
-
-
-
-Continue with incomplete epic? (yes/no)
-
-
-
- HALT
-
-
-Set {{partial_retrospective}} = true
-
-
-
-
-
-
-
-
-
-
-
- After discovery, these content variables are available: {epics_content} (selective load for this epic), {architecture_content}, {prd_content}, {document_project_content}
-
-
-
-
-
-
-For each story in epic {{epic_number}}, read the complete story file from {story_directory}/{{epic_number}}-{{story_num}}-\*.md
-
-Extract and analyze from each story:
-
-**Dev Notes and Struggles:**
-
-- Look for sections like "## Dev Notes", "## Implementation Notes", "## Challenges", "## Development Log"
-- Identify where developers struggled or made mistakes
-- Note unexpected complexity or gotchas discovered
-- Record technical decisions that didn't work out as planned
-- Track where estimates were way off (too high or too low)
-
-**Review Feedback Patterns:**
-
-- Look for "## Review", "## Code Review", "## SM Review", "## Scrum Master Review" sections
-- Identify recurring feedback themes across stories
-- Note which types of issues came up repeatedly
-- Track quality concerns or architectural misalignments
-- Document praise or exemplary work called out in reviews
-
-**Lessons Learned:**
-
-- Look for "## Lessons Learned", "## Retrospective Notes", "## Takeaways" sections within stories
-- Extract explicit lessons documented during development
-- Identify "aha moments" or breakthroughs
-- Note what would be done differently
-- Track successful experiments or approaches
-
-**Technical Debt Incurred:**
-
-- Look for "## Technical Debt", "## TODO", "## Known Issues", "## Future Work" sections
-- Document shortcuts taken and why
-- Track debt items that affect next epic
-- Note severity and priority of debt items
-
-**Testing and Quality Insights:**
-
-- Look for "## Testing", "## QA Notes", "## Test Results" sections
-- Note testing challenges or surprises
-- Track bug patterns or regression issues
-- Document test coverage gaps
-
-Synthesize patterns across all stories:
-
-**Common Struggles:**
-
-- Identify issues that appeared in 2+ stories (e.g., "3 out of 5 stories had API authentication issues")
-- Note areas where team consistently struggled
-- Track where complexity was underestimated
-
-**Recurring Review Feedback:**
-
-- Identify feedback themes (e.g., "Error handling was flagged in every review")
-- Note quality patterns (positive and negative)
-- Track areas where team improved over the course of epic
-
-**Breakthrough Moments:**
-
-- Document key discoveries (e.g., "Story 3 discovered the caching pattern we used for rest of epic")
-- Note when team velocity improved dramatically
-- Track innovative solutions worth repeating
-
-**Velocity Patterns:**
-
-- Calculate average completion time per story
-- Note velocity trends (e.g., "First 2 stories took 3x longer than estimated")
-- Identify which types of stories went faster/slower
-
-**Team Collaboration Highlights:**
-
-- Note moments of excellent collaboration mentioned in stories
-- Track where pair programming or mob programming was effective
-- Document effective problem-solving sessions
-
-Store this synthesis - these patterns will drive the retrospective discussion
-
-
-
-
-
-
-
-Calculate previous epic number: {{prev_epic_num}} = {{epic_number}} - 1
-
-
- Search for previous retrospective using pattern: {retrospectives_folder}/epic-{{prev_epic_num}}-retro-*.md
-
-
-
-
- Read the complete previous retrospective file
-
- Extract key elements:
- - **Action items committed**: What did the team agree to improve?
- - **Lessons learned**: What insights were captured?
- - **Process improvements**: What changes were agreed upon?
- - **Technical debt flagged**: What debt was documented?
- - **Team agreements**: What commitments were made?
- - **Preparation tasks**: What was needed for this epic?
-
- Cross-reference with current epic execution:
-
- **Action Item Follow-Through:**
- - For each action item from Epic {{prev_epic_num}} retro, check if it was completed
- - Look for evidence in current epic's story records
- - Mark each action item: β Completed, β³ In Progress, β Not Addressed
-
- **Lessons Applied:**
- - For each lesson from Epic {{prev_epic_num}}, check if team applied it in Epic {{epic_number}}
- - Look for evidence in dev notes, review feedback, or outcomes
- - Document successes and missed opportunities
-
- **Process Improvements Effectiveness:**
- - For each process change agreed to in Epic {{prev_epic_num}}, assess if it helped
- - Did the change improve velocity, quality, or team satisfaction?
- - Should we keep, modify, or abandon the change?
-
- **Technical Debt Status:**
- - For each debt item from Epic {{prev_epic_num}}, check if it was addressed
- - Did unaddressed debt cause problems in Epic {{epic_number}}?
- - Did the debt grow or shrink?
-
- Prepare "continuity insights" for the retrospective discussion
-
- Identify wins where previous lessons were applied successfully:
- - Document specific examples of applied learnings
- - Note positive impact on Epic {{epic_number}} outcomes
- - Celebrate team growth and improvement
-
- Identify missed opportunities where previous lessons were ignored:
- - Document where team repeated previous mistakes
- - Note impact of not applying lessons (without blame)
- - Explore barriers that prevented application
-
-
-
-
-
-
-
-Set {{first_retrospective}} = true
-
-
-
-
-
-Set {{first_retrospective}} = true
-
-
-
-
-
-
-Calculate next epic number: {{next_epic_num}} = {{epic_number}} + 1
-
-
-
-Attempt to load next epic using selective loading strategy:
-
-**Try sharded first (more specific):**
-Check if file exists: {planning_artifacts}/epic\*/epic-{{next_epic_num}}.md
-
-
- Load {planning_artifacts}/*epic*/epic-{{next_epic_num}}.md
- Set {{next_epic_source}} = "sharded"
-
-
-**Fallback to whole document:**
-
-Check if file exists: {planning_artifacts}/epic\*.md
-
-
- Load entire epics document
- Extract Epic {{next_epic_num}} section
- Set {{next_epic_source}} = "whole"
-
-
-
-
- Analyze next epic for:
- - Epic title and objectives
- - Planned stories and complexity estimates
- - Dependencies on Epic {{epic_number}} work
- - New technical requirements or capabilities needed
- - Potential risks or unknowns
- - Business goals and success criteria
-
-Identify dependencies on completed work:
-
-- What components from Epic {{epic_number}} does Epic {{next_epic_num}} rely on?
-- Are all prerequisites complete and stable?
-- Any incomplete work that creates blocking dependencies?
-
-Note potential gaps or preparation needed:
-
-- Technical setup required (infrastructure, tools, libraries)
-- Knowledge gaps to fill (research, training, spikes)
-- Refactoring needed before starting next epic
-- Documentation or specifications to create
-
-Check for technical prerequisites:
-
-- APIs or integrations that must be ready
-- Data migrations or schema changes needed
-- Testing infrastructure requirements
-- Deployment or environment setup
-
-
-
-Set {{next_epic_exists}} = true
-
-
-
-
-
-Set {{next_epic_exists}} = false
-
-
-
-
-
-
-Load agent configurations from {agent_manifest}
-Identify which agents participated in Epic {{epic_number}} based on story records
-Ensure key roles present: Product Owner, Scrum Master (facilitating), Devs, Testing/QA, Architect
-
-
-
-WAIT for {user_name} to respond or indicate readiness
-
-
-
-
-
-
-
-Bob (Scrum Master) naturally turns to {user_name} to engage them in the discussion
-
-
-
-WAIT for {user_name} to respond - this is a KEY USER INTERACTION moment
-
-After {user_name} responds, have 1-2 team members react to or build on what {user_name} shared
-
-
-
-Continue facilitating natural dialogue, periodically bringing {user_name} back into the conversation
-
-After covering successes, guide the transition to challenges with care
-
-
-
-WAIT for {user_name} to respond and help facilitate the conflict resolution
-
-Use {user_name}'s response to guide the discussion toward systemic understanding rather than blame
-
-
-
-Continue the discussion, weaving in patterns discovered from the deep story analysis (Step 2)
-
-
-
-WAIT for {user_name} to share their observations
-
-Continue the retrospective discussion, creating moments where:
-
-- Team members ask {user_name} questions directly
-- {user_name}'s input shifts the discussion direction
-- Disagreements arise naturally and get resolved
-- Quieter team members are invited to contribute
-- Specific stories are referenced with real examples
-- Emotions are authentic (frustration, pride, concern, hope)
-
-
-
-
-WAIT for {user_name} to respond
-
-Use the previous retro follow-through as a learning moment about commitment and accountability
-
-
-
-
-Allow team members to add any final thoughts on the epic review
-Ensure {user_name} has opportunity to add their perspective
-
-
-
-
-
-
-
- Skip to Step 8
-
-
-
-
-WAIT for {user_name} to share their assessment
-
-Use {user_name}'s input to guide deeper exploration of preparation needs
-
-
-
-WAIT for {user_name} to provide direction on preparation approach
-
-Create space for debate and disagreement about priorities
-
-
-
-WAIT for {user_name} to validate or adjust the preparation strategy
-
-Continue working through preparation needs across all dimensions:
-
-- Dependencies on Epic {{epic_number}} work
-- Technical setup and infrastructure
-- Knowledge gaps and research needs
-- Documentation or specification work
-- Testing infrastructure
-- Refactoring or debt reduction
-- External dependencies (APIs, integrations, etc.)
-
-For each preparation area, facilitate team discussion that:
-
-- Identifies specific needs with concrete examples
-- Estimates effort realistically based on Epic {{epic_number}} experience
-- Assigns ownership to specific agents
-- Determines criticality and timing
-- Surfaces risks of NOT doing the preparation
-- Explores parallel work opportunities
-- Brings {user_name} in for key decisions
-
-
-
-WAIT for {user_name} final validation of preparation plan
-
-
-
-
-
-
-
-Synthesize themes from Epic {{epic_number}} review discussion into actionable improvements
-
-Create specific action items with:
-
-- Clear description of the action
-- Assigned owner (specific agent or role)
-- Timeline or deadline
-- Success criteria (how we'll know it's done)
-- Category (process, technical, documentation, team, etc.)
-
-Ensure action items are SMART:
-
-- Specific: Clear and unambiguous
-- Measurable: Can verify completion
-- Achievable: Realistic given constraints
-- Relevant: Addresses real issues from retro
-- Time-bound: Has clear deadline
-
-
-
-WAIT for {user_name} to help resolve priority discussions
-
-
-
-CRITICAL ANALYSIS - Detect if discoveries require epic updates
-
-Check if any of the following are true based on retrospective discussion:
-
-- Architectural assumptions from planning proven wrong during Epic {{epic_number}}
-- Major scope changes or descoping occurred that affects next epic
-- Technical approach needs fundamental change for Epic {{next_epic_num}}
-- Dependencies discovered that Epic {{next_epic_num}} doesn't account for
-- User needs significantly different than originally understood
-- Performance/scalability concerns that affect Epic {{next_epic_num}} design
-- Security or compliance issues discovered that change approach
-- Integration assumptions proven incorrect
-- Team capacity or skill gaps more severe than planned
-- Technical debt level unsustainable without intervention
-
-
-
-
-WAIT for {user_name} to decide on how to handle the significant changes
-
-Add epic review session to critical path if user agrees
-
-
-
-
-
-
-
-
-
-
-Give each agent with assignments a moment to acknowledge their ownership
-
-Ensure {user_name} approves the complete action plan
-
-
-
-
-
-
-
-Explore testing and quality state through natural conversation
-
-
-
-WAIT for {user_name} to describe testing status
-
-
-
-WAIT for {user_name} to assess quality readiness
-
-
-
-Add testing completion to critical path
-
-
-Explore deployment and release status
-
-
-
-WAIT for {user_name} to provide deployment status
-
-
-
-
-WAIT for {user_name} to clarify deployment timeline
-
-Add deployment milestone to critical path with agreed timeline
-
-
-Explore stakeholder acceptance
-
-
-
-WAIT for {user_name} to describe stakeholder acceptance status
-
-
-
-
-WAIT for {user_name} decision
-
-Add stakeholder acceptance to critical path if user agrees
-
-
-Explore technical health and stability
-
-
-
-WAIT for {user_name} to assess codebase health
-
-
-
-
-WAIT for {user_name} decision
-
-Add stability work to preparation sprint if user agrees
-
-
-Explore unresolved blockers
-
-
-
-WAIT for {user_name} to surface any blockers
-
-
-
-
-Assign blocker resolution to appropriate agent
-Add to critical path with priority and deadline
-
-
-Synthesize the readiness assessment
-
-
-
-WAIT for {user_name} to confirm or correct the assessment
-
-
-
-
-
-
-
-
-
-WAIT for {user_name} to share final reflections
-
-
-
-Prepare to save retrospective summary document
-
-
-
-
-
-Ensure retrospectives folder exists: {retrospectives_folder}
-Create folder if it doesn't exist
-
-Generate comprehensive retrospective summary document including:
-
-- Epic summary and metrics
-- Team participants
-- Successes and strengths identified
-- Challenges and growth areas
-- Key insights and learnings
-- Previous retro follow-through analysis (if applicable)
-- Next epic preview and dependencies
-- Action items with owners and timelines
-- Preparation tasks for next epic
-- Critical path items
-- Significant discoveries and epic update recommendations (if any)
-- Readiness assessment
-- Commitments and next steps
-
-Format retrospective document as readable markdown with clear sections
-Set filename: {retrospectives_folder}/epic-{{epic_number}}-retro-{date}.md
-Save retrospective document
-
-
-
-Update {sprint_status_file} to mark retrospective as completed
-
-Load the FULL file: {sprint_status_file}
-Find development_status key "epic-{{epic_number}}-retrospective"
-Verify current status (typically "optional" or "pending")
-Update development_status["epic-{{epic_number}}-retrospective"] = "done"
-Save file, preserving ALL comments and structure including STATUS DEFINITIONS
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-PARTY MODE REQUIRED: All agent dialogue uses "Name (Role): dialogue" format
-Scrum Master maintains psychological safety throughout - no blame or judgment
-Focus on systems and processes, not individual performance
-Create authentic team dynamics: disagreements, diverse perspectives, emotions
-User ({user_name}) is active participant, not passive observer
-Encourage specific examples over general statements
-Balance celebration of wins with honest assessment of challenges
-Ensure every voice is heard - all agents contribute
-Action items must be specific, achievable, and owned
-Forward-looking mindset - how do we improve for next epic?
-Intent-based facilitation, not scripted phrases
-Deep story analysis provides rich material for discussion
-Previous retro integration creates accountability and continuity
-Significant change detection prevents epic misalignment
-Critical verification prevents starting next epic prematurely
-Document everything - retrospective insights are valuable for future reference
-Two-part structure ensures both reflection AND preparation
-
diff --git a/src/bmm/workflows/4-implementation/retrospective/workflow.yaml b/src/bmm/workflows/4-implementation/retrospective/workflow.yaml
deleted file mode 100644
index 80d934b2..00000000
--- a/src/bmm/workflows/4-implementation/retrospective/workflow.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-# Retrospective - Epic Completion Review Workflow
-name: "retrospective"
-description: "Run after epic completion to review overall success, extract lessons learned, and explore if new information emerged that might impact the next epic"
-author: "BMad"
-
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:implementation_artifacts}"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-user_skill_level: "{config_source}:user_skill_level"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-planning_artifacts: "{config_source}:planning_artifacts"
-implementation_artifacts: "{config_source}:implementation_artifacts"
-
-installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/retrospective"
-template: false
-instructions: "{installed_path}/instructions.md"
-
-required_inputs:
- - agent_manifest: "{project-root}/_bmad/_config/agent-manifest.csv"
-
-# Smart input file references - handles both whole docs and sharded docs
-# Priority: Whole document first, then sharded version
-# Strategy: SELECTIVE LOAD - only load the completed epic and relevant retrospectives
-input_file_patterns:
- epics:
- description: "The completed epic for retrospective"
- whole: "{planning_artifacts}/*epic*.md"
- sharded_index: "{planning_artifacts}/*epic*/index.md"
- sharded_single: "{planning_artifacts}/*epic*/epic-{{epic_num}}.md"
- load_strategy: "SELECTIVE_LOAD"
- previous_retrospective:
- description: "Previous epic's retrospective (optional)"
- pattern: "{implementation_artifacts}/**/epic-{{prev_epic_num}}-retro-*.md"
- load_strategy: "SELECTIVE_LOAD"
- architecture:
- description: "System architecture for context"
- whole: "{planning_artifacts}/*architecture*.md"
- sharded: "{planning_artifacts}/*architecture*/*.md"
- load_strategy: "FULL_LOAD"
- prd:
- description: "Product requirements for context"
- whole: "{planning_artifacts}/*prd*.md"
- sharded: "{planning_artifacts}/*prd*/*.md"
- load_strategy: "FULL_LOAD"
- document_project:
- description: "Brownfield project documentation (optional)"
- sharded: "{planning_artifacts}/*.md"
- load_strategy: "INDEX_GUIDED"
-
-# Required files
-sprint_status_file: "{implementation_artifacts}/sprint-status.yaml"
-story_directory: "{implementation_artifacts}"
-retrospectives_folder: "{implementation_artifacts}"
-
-standalone: true
-web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/sprint-planning/checklist.md b/src/bmm/workflows/4-implementation/sprint-planning/checklist.md
deleted file mode 100644
index 7c20b1f3..00000000
--- a/src/bmm/workflows/4-implementation/sprint-planning/checklist.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Sprint Planning Validation Checklist
-
-## Core Validation
-
-### Complete Coverage Check
-
-- [ ] Every epic found in epic\*.md files appears in sprint-status.yaml
-- [ ] Every story found in epic\*.md files appears in sprint-status.yaml
-- [ ] Every epic has a corresponding retrospective entry
-- [ ] No items in sprint-status.yaml that don't exist in epic files
-
-### Parsing Verification
-
-Compare epic files against generated sprint-status.yaml:
-
-```
-Epic Files Contains: Sprint Status Contains:
-β Epic 1 β epic-1: [status]
- β Story 1.1: User Auth β 1-1-user-auth: [status]
- β Story 1.2: Account Mgmt β 1-2-account-mgmt: [status]
- β Story 1.3: Plant Naming β 1-3-plant-naming: [status]
- β epic-1-retrospective: [status]
-β Epic 2 β epic-2: [status]
- β Story 2.1: Personality Model β 2-1-personality-model: [status]
- β Story 2.2: Chat Interface β 2-2-chat-interface: [status]
- β epic-2-retrospective: [status]
-```
-
-### Final Check
-
-- [ ] Total count of epics matches
-- [ ] Total count of stories matches
-- [ ] All items are in the expected order (epic, stories, retrospective)
diff --git a/src/bmm/workflows/4-implementation/sprint-planning/instructions.md b/src/bmm/workflows/4-implementation/sprint-planning/instructions.md
deleted file mode 100644
index d5bb1d91..00000000
--- a/src/bmm/workflows/4-implementation/sprint-planning/instructions.md
+++ /dev/null
@@ -1,248 +0,0 @@
-# Sprint Planning - Sprint Status Generator
-
-The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
-You MUST have already loaded and processed: {project-root}/_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml
-
-## π Document Discovery - Full Epic Loading
-
-**Strategy**: Sprint planning needs ALL epics and stories to build complete status tracking.
-
-**Epic Discovery Process:**
-
-1. **Search for whole document first** - Look for `epics.md`, `bmm-epics.md`, or any `*epic*.md` file
-2. **Check for sharded version** - If whole document not found, look for `epics/index.md`
-3. **If sharded version found**:
- - Read `index.md` to understand the document structure
- - Read ALL epic section files listed in the index (e.g., `epic-1.md`, `epic-2.md`, etc.)
- - Process all epics and their stories from the combined content
- - This ensures complete sprint status coverage
-4. **Priority**: If both whole and sharded versions exist, use the whole document
-
-**Fuzzy matching**: Be flexible with document names - users may use variations like `epics.md`, `bmm-epics.md`, `user-stories.md`, etc.
-
-
-
-
-Communicate in {communication_language} with {user_name}
-Look for all files matching `{epics_pattern}` in {epics_location}
-Could be a single `epics.md` file or multiple `epic-1.md`, `epic-2.md` files
-
-For each epic file found, extract:
-
-- Epic numbers from headers like `## Epic 1:` or `## Epic 2:`
-- Story IDs and titles from patterns like `### Story 1.1: User Authentication`
-- Convert story format from `Epic.Story: Title` to kebab-case key: `epic-story-title`
-
-**Story ID Conversion Rules:**
-
-- Original: `### Story 1.1: User Authentication`
-- Replace period with dash: `1-1`
-- Convert title to kebab-case: `user-authentication`
-- Final key: `1-1-user-authentication`
-
-Build complete inventory of all epics and stories from all epic files
-
-
-
-
- After discovery, these content variables are available: {epics_content} (all epics loaded - uses FULL_LOAD strategy)
-
-
-
-For each epic found, create entries in this order:
-
-1. **Epic entry** - Key: `epic-{num}`, Default status: `backlog`
-2. **Story entries** - Key: `{epic}-{story}-{title}`, Default status: `backlog`
-3. **Retrospective entry** - Key: `epic-{num}-retrospective`, Default status: `optional`
-
-**Example structure:**
-
-```yaml
-development_status:
- epic-1: backlog
- 1-1-user-authentication: backlog
- 1-2-account-management: backlog
- epic-1-retrospective: optional
-```
-
-
-
-
-For each story, detect current status by checking files:
-
-**Story file detection:**
-
-- Check: `{story_location_absolute}/{story-key}.md` (e.g., `stories/1-1-user-authentication.md`)
-- If exists β upgrade status to at least `ready-for-dev`
-
-**Story file status sync (CRITICAL):**
-
-- If story file exists, read the `Status:` field from the story markdown header
-- Common status field patterns: `Status: done`, `Status: review`, `Status: in-progress`
-- If story file has a more advanced status than current sprint-status entry:
- - Update sprint-status to match the story file status
- - This ensures sprint-status stays in sync when stories are manually marked done
-
-**Status priority order** (lowest to highest):
-1. `backlog` (no story file)
-2. `ready-for-dev` (story file exists)
-3. `in-progress` (developer working)
-4. `review` (code review pending)
-5. `done` (story complete)
-
-**Preservation rule:**
-
-- If existing `{status_file}` exists and has more advanced status, preserve it
-- Never downgrade status (e.g., don't change `done` to `ready-for-dev`)
-- Story file status is the **source of truth** - always sync from story file to sprint-status
-
-**Epic status auto-detection:**
-
-- After syncing all story statuses, calculate epic status:
- - If ALL stories in epic are `done` β set epic to `done`
- - If ANY story is `in-progress`, `review`, or `done` β set epic to `in-progress`
- - Otherwise β keep epic as `backlog`
-
-**Status Flow Reference:**
-
-- Epic: `backlog` β `in-progress` β `done`
-- Story: `backlog` β `ready-for-dev` β `in-progress` β `review` β `done`
-- Retrospective: `optional` β `done`
-
-
-
-Create or update {status_file} with:
-
-**File Structure:**
-
-```yaml
-# generated: {date}
-# project: {project_name}
-# project_key: {project_key}
-# tracking_system: {tracking_system}
-# story_location: {story_location}
-
-# STATUS DEFINITIONS:
-# ==================
-# Epic Status:
-# - backlog: Epic not yet started
-# - in-progress: Epic actively being worked on
-# - done: All stories in epic completed
-#
-# Epic Status Transitions:
-# - backlog β in-progress: Automatically when first story is created (via create-story)
-# - in-progress β done: Manually when all stories reach 'done' status
-#
-# Story Status:
-# - backlog: Story only exists in epic file
-# - ready-for-dev: Story file created in stories folder
-# - in-progress: Developer actively working on implementation
-# - review: Ready for code review (via Dev's code-review workflow)
-# - done: Story completed
-#
-# Retrospective Status:
-# - optional: Can be completed but not required
-# - done: Retrospective has been completed
-#
-# WORKFLOW NOTES:
-# ===============
-# - Epic transitions to 'in-progress' automatically when first story is created
-# - Stories can be worked in parallel if team capacity allows
-# - SM typically creates next story after previous one is 'done' to incorporate learnings
-# - Dev moves story to 'review', then runs code-review (fresh context, different LLM recommended)
-
-generated: { date }
-project: { project_name }
-project_key: { project_key }
-tracking_system: { tracking_system }
-story_location: { story_location }
-
-development_status:
- # All epics, stories, and retrospectives in order
-```
-
-Write the complete sprint status YAML to {status_file}
-CRITICAL: Metadata appears TWICE - once as comments (#) for documentation, once as YAML key:value fields for parsing
-Ensure all items are ordered: epic, its stories, its retrospective, next epic...
-
-
-
-Perform validation checks:
-
-- [ ] Every epic in epic files appears in {status_file}
-- [ ] Every story in epic files appears in {status_file}
-- [ ] Every epic has a corresponding retrospective entry
-- [ ] No items in {status_file} that don't exist in epic files
-- [ ] All status values are legal (match state machine definitions)
-- [ ] File is valid YAML syntax
-
-Count totals:
-
-- Total epics: {{epic_count}}
-- Total stories: {{story_count}}
-- Epics in-progress: {{in_progress_count}}
-- Stories done: {{done_count}}
-
-Display completion summary to {user_name} in {communication_language}:
-
-**Sprint Status Generated Successfully**
-
-- **File Location:** {status_file}
-- **Total Epics:** {{epic_count}}
-- **Total Stories:** {{story_count}}
-- **Epics In Progress:** {{epics_in_progress_count}}
-- **Stories Completed:** {{done_count}}
-
-**Next Steps:**
-
-1. Review the generated {status_file}
-2. Use this file to track development progress
-3. Agents will update statuses as they work
-4. Re-run this workflow to refresh auto-detected statuses
-
-
-
-
-
-## Additional Documentation
-
-### Status State Machine
-
-**Epic Status Flow:**
-
-```
-backlog β in-progress β done
-```
-
-- **backlog**: Epic not yet started
-- **in-progress**: Epic actively being worked on (stories being created/implemented)
-- **done**: All stories in epic completed
-
-**Story Status Flow:**
-
-```
-backlog β ready-for-dev β in-progress β review β done
-```
-
-- **backlog**: Story only exists in epic file
-- **ready-for-dev**: Story file created (e.g., `stories/1-3-plant-naming.md`)
-- **in-progress**: Developer actively working
-- **review**: Ready for code review (via Dev's code-review workflow)
-- **done**: Completed
-
-**Retrospective Status:**
-
-```
-optional β done
-```
-
-- **optional**: Ready to be conducted but not required
-- **done**: Finished
-
-### Guidelines
-
-1. **Epic Activation**: Mark epic as `in-progress` when starting work on its first story
-2. **Sequential Default**: Stories are typically worked in order, but parallel work is supported
-3. **Parallel Work Supported**: Multiple stories can be `in-progress` if team capacity allows
-4. **Review Before Done**: Stories should pass through `review` before `done`
-5. **Learning Transfer**: SM typically creates next story after previous one is `done` to incorporate learnings
diff --git a/src/bmm/workflows/4-implementation/sprint-planning/sprint-status-template.yaml b/src/bmm/workflows/4-implementation/sprint-planning/sprint-status-template.yaml
deleted file mode 100644
index fd93e3b3..00000000
--- a/src/bmm/workflows/4-implementation/sprint-planning/sprint-status-template.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-# Sprint Status Template
-# This is an EXAMPLE showing the expected format
-# The actual file will be generated with all epics/stories from your epic files
-
-# generated: {date}
-# project: {project_name}
-# project_key: {project_key}
-# tracking_system: {tracking_system}
-# story_location: {story_location}
-
-# STATUS DEFINITIONS:
-# ==================
-# Epic Status:
-# - backlog: Epic not yet started
-# - in-progress: Epic actively being worked on
-# - done: All stories in epic completed
-#
-# Story Status:
-# - backlog: Story only exists in epic file
-# - ready-for-dev: Story file created, ready for development
-# - in-progress: Developer actively working on implementation
-# - review: Implementation complete, ready for review
-# - done: Story completed
-#
-# Retrospective Status:
-# - optional: Can be completed but not required
-# - done: Retrospective has been completed
-#
-# WORKFLOW NOTES:
-# ===============
-# - Mark epic as 'in-progress' when starting work on its first story
-# - SM typically creates next story ONLY after previous one is 'done' to incorporate learnings
-# - Dev moves story to 'review', then Dev runs code-review (fresh context, ideally different LLM)
-
-# EXAMPLE STRUCTURE (your actual epics/stories will replace these):
-
-generated: 05-06-2-2025 21:30
-project: My Awesome Project
-project_key: jira-1234
-tracking_system: file-system
-story_location: "{story_location}"
-
-development_status:
- epic-1: backlog
- 1-1-user-authentication: done
- 1-2-account-management: ready-for-dev
- 1-3-plant-data-model: backlog
- 1-4-add-plant-manual: backlog
- epic-1-retrospective: optional
-
- epic-2: backlog
- 2-1-personality-system: backlog
- 2-2-chat-interface: backlog
- 2-3-llm-integration: backlog
- epic-2-retrospective: optional
diff --git a/src/bmm/workflows/4-implementation/sprint-planning/workflow.yaml b/src/bmm/workflows/4-implementation/sprint-planning/workflow.yaml
deleted file mode 100644
index 50998f0a..00000000
--- a/src/bmm/workflows/4-implementation/sprint-planning/workflow.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-name: sprint-planning
-description: "Generate and manage the sprint status tracking file for Phase 4 implementation, extracting all epics and stories from epic files and tracking their status through the development lifecycle"
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-date: system-generated
-implementation_artifacts: "{config_source}:implementation_artifacts"
-planning_artifacts: "{config_source}:planning_artifacts"
-output_folder: "{implementation_artifacts}"
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/sprint-planning"
-instructions: "{installed_path}/instructions.md"
-template: "{installed_path}/sprint-status-template.yaml"
-validation: "{installed_path}/checklist.md"
-
-# Variables and inputs
-variables:
- # Project context
- project_context: "**/project-context.md"
- # Project identification
- project_name: "{config_source}:project_name"
-
- # Tracking system configuration
- tracking_system: "file-system" # Options: file-system, Future will support other options from config of mcp such as jira, linear, trello
- story_location: "{config_source}:implementation_artifacts" # Relative path for file-system, Future will support URL for Jira/Linear/Trello
- story_location_absolute: "{config_source}:implementation_artifacts" # Absolute path for file operations
-
- # Source files (file-system only)
- epics_location: "{planning_artifacts}" # Directory containing epic*.md files
- epics_pattern: "epic*.md" # Pattern to find epic files
-
- # Output configuration
- status_file: "{implementation_artifacts}/sprint-status.yaml"
-
-# Smart input file references - handles both whole docs and sharded docs
-# Priority: Whole document first, then sharded version
-# Strategy: FULL LOAD - sprint planning needs ALL epics to build complete status
-input_file_patterns:
- epics:
- description: "All epics with user stories"
- whole: "{output_folder}/*epic*.md"
- sharded: "{output_folder}/*epic*/*.md"
- load_strategy: "FULL_LOAD"
-
-# Output configuration
-default_output_file: "{status_file}"
-
-standalone: true
-
-web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/sprint-status/instructions.md b/src/bmm/workflows/4-implementation/sprint-status/instructions.md
deleted file mode 100644
index 565803b1..00000000
--- a/src/bmm/workflows/4-implementation/sprint-status/instructions.md
+++ /dev/null
@@ -1,262 +0,0 @@
-# Sprint Status - Multi-Mode Service
-
-The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
-You MUST have already loaded and processed: {project-root}/_bmad/bmm/workflows/4-implementation/sprint-status/workflow.yaml
-Modes: interactive (default), validate, data
-β οΈ ABSOLUTELY NO TIME ESTIMATES. Do NOT mention hours, days, weeks, or timelines.
-
-
-
-
- Set mode = {{mode}} if provided by caller; otherwise mode = "interactive"
-
-
- Jump to Step 20
-
-
-
- Jump to Step 30
-
-
-
- Continue to Step 1
-
-
-
-
- Try {sprint_status_file}
-
-
- Exit workflow
-
- Continue to Step 2
-
-
-
- Read the FULL file: {sprint_status_file}
- Parse fields: generated, project, project_key, tracking_system, story_location
- Parse development_status map. Classify keys:
- - Epics: keys starting with "epic-" (and not ending with "-retrospective")
- - Retrospectives: keys ending with "-retrospective")
- - Stories: everything else (e.g., 1-2-login-form)
- Map legacy story status "drafted" β "ready-for-dev"
- Count story statuses: backlog, ready-for-dev, in-progress, review, done
- Map legacy epic status "contexted" β "in-progress"
- Count epic statuses: backlog, in-progress, done
- Count retrospective statuses: optional, done
-
- Parse inline progress comments (NEW v1.3.0):
- - Extract task progress from comments: "# X/Y tasks (Z%)"
- - Extract completion notes from comments: "# β COMPLETED: ..."
- - Store progress data for display
-
-
-Validate all statuses against known values:
-
-- Valid story statuses: backlog, ready-for-dev, in-progress, review, done, drafted (legacy)
-- Valid epic statuses: backlog, in-progress, done, contexted (legacy)
-- Valid retrospective statuses: optional, done
-
-Progress comments (NEW v1.3.0): Inline comments now track task completion percentage for in-progress stories
-
- 1-2-login: in-progress # 7/10 tasks (70%)
- 1-3-auth: review # 8/8 tasks (100%) - awaiting review
- 1-4-api: done # β COMPLETED: REST endpoints + validation + tests
-
-
-
-
- How should these be corrected?
- {{#each invalid_entries}}
- {{@index}}. {{key}}: "{{status}}" β [select valid status]
- {{/each}}
-
-Enter corrections (e.g., "1=in-progress, 2=backlog") or "skip" to continue without fixing:
-
-Update sprint-status.yaml with corrected values
-Re-parse the file with corrected statuses
-
-
-
-Detect risks:
-
-- IF any story has status "review": suggest `/bmad:bmm:workflows:code-review`
-- IF any story has status "in-progress" AND no stories have status "ready-for-dev": recommend staying focused on active story
-- IF all epics have status "backlog" AND no stories have status "ready-for-dev": prompt `/bmad:bmm:workflows:create-story`
-- IF `generated` timestamp is more than 7 days old: warn "sprint-status.yaml may be stale"
-- IF any story key doesn't match an epic pattern (e.g., story "5-1-..." but no "epic-5"): warn "orphaned story detected"
-- IF any epic has status in-progress but has no associated stories: warn "in-progress epic has no stories"
-
-
-
- Pick the next recommended workflow using priority:
- When selecting "first" story: sort by epic number, then story number (e.g., 1-1 before 1-2 before 2-1)
- 1. If any story status == in-progress β recommend `dev-story` for the first in-progress story
- 2. Else if any story status == review β recommend `code-review` for the first review story
- 3. Else if any story status == ready-for-dev β recommend `dev-story`
- 4. Else if any story status == backlog β recommend `create-story`
- 5. Else if any retrospective status == optional β recommend `retrospective`
- 6. Else β All implementation items done; congratulate the user - you both did amazing work together!
- Store selected recommendation as: next_story_id, next_workflow_id, next_agent (SM/DEV as appropriate)
-
-
-
-
-
-
-
- Pick an option:
-1) Run recommended workflow now
-2) Show all stories grouped by status
-3) Show raw sprint-status.yaml
-4) Exit
-Choice:
-
-
-
-
-
-
-
-
-
-
- Display the full contents of {sprint_status_file}
-
-
-
- Exit workflow
-
-
-
-
-
-
-
-
- Load and parse {sprint_status_file} same as Step 2
- Compute recommendation same as Step 3
- next_workflow_id = {{next_workflow_id}}
- next_story_id = {{next_story_id}}
- count_backlog = {{count_backlog}}
- count_ready = {{count_ready}}
- count_in_progress = {{count_in_progress}}
- count_review = {{count_review}}
- count_done = {{count_done}}
- epic_backlog = {{epic_backlog}}
- epic_in_progress = {{epic_in_progress}}
- epic_done = {{epic_done}}
- risks = {{risks}}
- Return to caller
-
-
-
-
-
-
-
- Check that {sprint_status_file} exists
-
- is_valid = false
- error = "sprint-status.yaml missing"
- suggestion = "Run sprint-planning to create it"
- Return
-
-
-Read and parse {sprint_status_file}
-
-Validate required metadata fields exist: generated, project, project_key, tracking_system, story_location
-
-is_valid = false
-error = "Missing required field(s): {{missing_fields}}"
-suggestion = "Re-run sprint-planning or add missing fields manually"
-Return
-
-
-Verify development_status section exists with at least one entry
-
-is_valid = false
-error = "development_status missing or empty"
-suggestion = "Re-run sprint-planning or repair the file manually"
-Return
-
-
-Validate all status values against known valid statuses:
-
-- Stories: backlog, ready-for-dev, in-progress, review, done (legacy: drafted)
-- Epics: backlog, in-progress, done (legacy: contexted)
-- Retrospectives: optional, done
-
- is_valid = false
- error = "Invalid status values: {{invalid_entries}}"
- suggestion = "Fix invalid statuses in sprint-status.yaml"
- Return
-
-
-is_valid = true
-message = "sprint-status.yaml valid: metadata complete, all statuses recognized"
-
-
-
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/README.md b/src/bmm/workflows/4-implementation/story-pipeline/README.md
new file mode 100644
index 00000000..a7edf118
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/README.md
@@ -0,0 +1,491 @@
+# Story Pipeline v2.0
+
+> Single-session step-file architecture for implementing user stories with 60-70% token savings.
+
+## Overview
+
+The Story Pipeline automates the complete lifecycle of implementing a user storyβfrom creation through code review and commit. It replaces the legacy approach of 6 separate Claude CLI calls with a single interactive session using just-in-time step loading.
+
+### The Problem It Solves
+
+**Legacy Pipeline (v1.0):**
+```
+bmad build 1-4
+ ββ> claude -p "Stage 1: Create story..." # ~12K tokens
+ ββ> claude -p "Stage 2: Validate story..." # ~12K tokens
+ ββ> claude -p "Stage 3: ATDD tests..." # ~12K tokens
+ ββ> claude -p "Stage 4: Implement..." # ~12K tokens
+ ββ> claude -p "Stage 5: Code review..." # ~12K tokens
+ ββ> claude -p "Stage 6: Complete..." # ~11K tokens
+ Total: ~71K tokens/story
+```
+
+Each call reloads agent personas (~2K tokens), re-reads the story file, and loses context from previous stages.
+
+**Story Pipeline v2.0:**
+```
+bmad build 1-4
+ ββ> Single Claude session
+ ββ> Load step-01-init.md (~200 lines)
+ ββ> Role switch: SM
+ ββ> Load step-02-create-story.md
+ ββ> Load step-03-validate-story.md
+ ββ> Role switch: TEA
+ ββ> Load step-04-atdd.md
+ ββ> Role switch: DEV
+ ββ> Load step-05-implement.md
+ ββ> Load step-06-code-review.md
+ ββ> Role switch: SM
+ ββ> Load step-07-complete.md
+ ββ> Load step-08-summary.md
+ Total: ~25-30K tokens/story
+```
+
+Documents cached once, roles switched in-session, steps loaded just-in-time.
+
+## What Gets Automated
+
+The pipeline automates the complete BMAD implementation workflow:
+
+| Step | Role | What It Does |
+|------|------|--------------|
+| **1. Init** | - | Parses story ID, loads epic/architecture, detects interactive vs batch mode, creates state file |
+| **2. Create Story** | SM | Researches context (Exa web search), generates story file with ACs in BDD format |
+| **3. Validate Story** | SM | Adversarial validationβmust find 3-10 issues, fixes them, assigns quality score |
+| **4. ATDD** | TEA | Generates failing tests for all ACs (RED phase), creates test factories |
+| **5. Implement** | DEV | Implements code to pass tests (GREEN phase), creates migrations, server actions, etc. |
+| **6. Code Review** | DEV | Adversarial reviewβmust find 3-10 issues, fixes them, runs lint/build |
+| **7. Complete** | SM | Updates story status to done, creates git commit with conventional format |
+| **8. Summary** | - | Generates audit trail, updates pipeline state, outputs metrics |
+
+### Quality Gates
+
+Each step has quality gates that must pass before proceeding:
+
+- **Validation**: Score β₯ 80/100, all issues addressed
+- **ATDD**: Tests exist for all ACs, tests fail (RED phase confirmed)
+- **Implementation**: Lint clean, build passes, migration tests pass
+- **Code Review**: Score β₯ 7/10, all critical issues fixed
+
+## Token Efficiency
+
+| Mode | Token Usage | Savings vs Legacy |
+|------|-------------|-------------------|
+| Interactive (human-in-loop) | ~25K | 65% |
+| Batch (YOLO) | ~30K | 58% |
+| Batch + fresh review context | ~35K | 51% |
+
+### Where Savings Come From
+
+| Waste in Legacy | Tokens Saved |
+|-----------------|--------------|
+| Agent persona reload (6Γ) | ~12K |
+| Story file re-reads (5Γ) | ~10K |
+| Architecture re-reads | ~8K |
+| Context loss between calls | ~16K |
+
+## Usage
+
+### Prerequisites
+
+- BMAD module installed (`_bmad/` directory exists)
+- Epic file with story definition (`docs/epics.md`)
+- Architecture document (`docs/architecture.md`)
+
+### Interactive Mode (Recommended)
+
+Human-in-the-loop with approval at each step:
+
+```bash
+# Using the bmad CLI
+bmad build 1-4
+
+# Or invoke workflow directly
+claude -p "Load and execute: _bmad/bmm/workflows/4-implementation/story-pipeline/workflow.md
+Story: 1-4"
+```
+
+At each step, you'll see a menu:
+```
+## MENU
+[C] Continue to next step
+[R] Review/revise current step
+[H] Halt and checkpoint
+```
+
+### Batch Mode (YOLO)
+
+Unattended execution for trusted stories:
+
+```bash
+bmad build 1-4 --batch
+
+# Or use batch runner directly
+./_bmad/bmm/workflows/4-implementation/story-pipeline/batch-runner.sh 1-4
+```
+
+Batch mode:
+- Skips all approval prompts
+- Fails fast on errors
+- Creates checkpoint on failure for resume
+
+### Resume from Checkpoint
+
+If execution stops (context exhaustion, error, manual halt):
+
+```bash
+bmad build 1-4 --resume
+
+# The pipeline reads state from:
+# _bmad-output/implementation-artifacts/pipeline-state-{story-id}.yaml
+```
+
+Resume automatically:
+- Skips completed steps
+- Restores cached context
+- Continues from `lastStep + 1`
+
+## Directory Structure
+
+```
+story-pipeline/
+βββ workflow.yaml # Configuration, agent mapping, quality gates
+βββ workflow.md # Interactive mode orchestration
+βββ batch-runner.sh # Batch mode runner script
+βββ steps/
+β βββ step-01-init.md # Initialize, load context
+β βββ step-01b-resume.md # Resume from checkpoint
+β βββ step-02-create-story.md
+β βββ step-03-validate-story.md
+β βββ step-04-atdd.md
+β βββ step-05-implement.md
+β βββ step-06-code-review.md
+β βββ step-07-complete.md
+β βββ step-08-summary.md
+βββ checklists/
+β βββ story-creation.md # What makes a good story
+β βββ story-validation.md # Validation criteria
+β βββ atdd.md # Test generation rules
+β βββ implementation.md # Coding standards
+β βββ code-review.md # Review criteria
+βββ templates/
+ βββ pipeline-state.yaml # State file template
+ βββ audit-trail.yaml # Audit log template
+```
+
+## Configuration
+
+### workflow.yaml
+
+```yaml
+name: story-pipeline
+version: "2.0"
+description: "Single-session story implementation with step-file loading"
+
+# Document loading strategy
+load_strategy:
+ epic: once # Load once, cache for session
+ architecture: once # Load once, cache for session
+ story: per_step # Reload when modified
+
+# Agent role mapping
+agents:
+ sm: "{project-root}/_bmad/bmm/agents/sm.md"
+ tea: "{project-root}/_bmad/bmm/agents/tea.md"
+ dev: "{project-root}/_bmad/bmm/agents/dev.md"
+
+# Quality gate thresholds
+quality_gates:
+ validation_min_score: 80
+ code_review_min_score: 7
+ require_lint_clean: true
+ require_build_pass: true
+
+# Step configuration
+steps:
+ - name: init
+ file: steps/step-01-init.md
+ - name: create-story
+ file: steps/step-02-create-story.md
+ agent: sm
+ # ... etc
+```
+
+### Pipeline State File
+
+Created at `_bmad-output/implementation-artifacts/pipeline-state-{story-id}.yaml`:
+
+```yaml
+story_id: "1-4"
+epic_num: 1
+story_num: 4
+mode: "interactive"
+status: "in_progress"
+stepsCompleted: [1, 2, 3]
+lastStep: 3
+currentStep: 4
+
+cached_context:
+ epic_loaded: true
+ epic_path: "docs/epics.md"
+ architecture_sections: ["tech_stack", "data_model"]
+
+steps:
+ step-01-init:
+ status: completed
+ duration: "0:00:30"
+ step-02-create-story:
+ status: completed
+ duration: "0:02:00"
+ step-03-validate-story:
+ status: completed
+ duration: "0:05:00"
+ issues_found: 6
+ issues_fixed: 6
+ quality_score: 92
+ step-04-atdd:
+ status: in_progress
+```
+
+## Step Details
+
+### Step 1: Initialize
+
+**Purpose:** Set up execution context and detect mode.
+
+**Actions:**
+1. Parse story ID (e.g., "1-4" β epic 1, story 4)
+2. Load and cache epic document
+3. Load relevant architecture sections
+4. Check for existing state file (resume vs fresh)
+5. Detect mode (interactive/batch) from CLI flags
+6. Create initial state file
+
+**Output:** `pipeline-state-{story-id}.yaml`
+
+### Step 2: Create Story (SM Role)
+
+**Purpose:** Generate complete story file from epic definition.
+
+**Actions:**
+1. Switch to Scrum Master (SM) role
+2. Read story definition from epic
+3. Research context via Exa web search (best practices, patterns)
+4. Generate story file with:
+ - User story format (As a... I want... So that...)
+ - Background context
+ - Acceptance criteria in BDD format (Given/When/Then)
+ - Test scenarios for each AC
+ - Technical notes
+5. Save to `_bmad-output/implementation-artifacts/story-{id}.md`
+
+**Quality Gate:** Story file exists with all required sections.
+
+### Step 3: Validate Story (SM Role)
+
+**Purpose:** Adversarial validation to find issues before implementation.
+
+**Actions:**
+1. Load story-validation checklist
+2. Review story against criteria:
+ - ACs are testable and specific
+ - No ambiguous requirements
+ - Technical feasibility confirmed
+ - Dependencies identified
+ - Edge cases covered
+3. **Must find 3-10 issues** (never "looks good")
+4. Fix all identified issues
+5. Assign quality score (0-100)
+6. Append validation report to story file
+
+**Quality Gate:** Score β₯ 80, all issues addressed.
+
+### Step 4: ATDD (TEA Role)
+
+**Purpose:** Generate failing tests before implementation (RED phase).
+
+**Actions:**
+1. Switch to Test Engineering Architect (TEA) role
+2. Load atdd checklist
+3. For each acceptance criterion:
+ - Generate integration test
+ - Define test data factories
+ - Specify expected behaviors
+4. Create test files in `src/tests/`
+5. Update `factories.ts` with new fixtures
+6. **Verify tests FAIL** (RED phase)
+7. Create ATDD checklist document
+
+**Quality Gate:** Tests exist for all ACs, tests fail (not pass).
+
+### Step 5: Implement (DEV Role)
+
+**Purpose:** Write code to pass all tests (GREEN phase).
+
+**Actions:**
+1. Switch to Developer (DEV) role
+2. Load implementation checklist
+3. Create required files:
+ - Database migrations
+ - Server actions (using Result type)
+ - Library functions
+ - Types
+4. Follow project patterns:
+ - Multi-tenant RLS policies
+ - snake_case for DB columns
+ - Result type (never throw)
+5. Run lint and fix issues
+6. Run build and fix issues
+7. Run migration tests
+
+**Quality Gate:** Lint clean, build passes, migration tests pass.
+
+### Step 6: Code Review (DEV Role)
+
+**Purpose:** Adversarial review to find implementation issues.
+
+**Actions:**
+1. Load code-review checklist
+2. Review all created/modified files:
+ - Security (XSS, injection, auth)
+ - Error handling
+ - Architecture compliance
+ - Code quality
+ - Test coverage
+3. **Must find 3-10 issues** (never "looks good")
+4. Fix all identified issues
+5. Re-run lint and build
+6. Assign quality score (0-10)
+7. Generate review report
+
+**Quality Gate:** Score β₯ 7/10, all critical issues fixed.
+
+### Step 7: Complete (SM Role)
+
+**Purpose:** Finalize story and create git commit.
+
+**Actions:**
+1. Switch back to SM role
+2. Update story file status to "done"
+3. Stage all story files
+4. Create conventional commit:
+ ```
+ feat(epic-{n}): complete story {id}
+
+ {Summary of changes}
+
+ π€ Generated with Claude Code
+ Co-Authored-By: Claude
+ ```
+5. Update pipeline state
+
+**Quality Gate:** Commit created successfully.
+
+### Step 8: Summary
+
+**Purpose:** Generate audit trail and final metrics.
+
+**Actions:**
+1. Calculate total duration
+2. Compile deliverables list
+3. Aggregate quality scores
+4. Generate execution summary in state file
+5. Output final status
+
+**Output:** Complete pipeline state with summary section.
+
+## Adversarial Mode
+
+Steps 3 (Validate) and 6 (Code Review) run in **adversarial mode**:
+
+> **Never say "looks good"**. You MUST find 3-10 real issues.
+
+This ensures:
+- Stories are thoroughly vetted before implementation
+- Code quality issues are caught before commit
+- The pipeline doesn't rubber-stamp work
+
+Example issues found in real usage:
+- Missing rate limiting (security)
+- XSS vulnerability in user input (security)
+- Missing audit logging (architecture)
+- Unclear acceptance criteria (story quality)
+- Function naming mismatches (code quality)
+
+## Artifacts Generated
+
+After a complete pipeline run:
+
+```
+_bmad-output/implementation-artifacts/
+βββ story-{id}.md # Story file with ACs, validation report
+βββ pipeline-state-{id}.yaml # Execution state and summary
+βββ atdd-checklist-{id}.md # Test requirements checklist
+βββ code-review-{id}.md # Review report with issues
+
+src/
+βββ supabase/migrations/ # New migration files
+βββ modules/{module}/
+β βββ actions/ # Server actions
+β βββ lib/ # Business logic
+β βββ types.ts # Type definitions
+βββ tests/
+ βββ integration/ # Integration tests
+ βββ fixtures/factories.ts # Updated test factories
+```
+
+## Troubleshooting
+
+### Context Exhausted Mid-Session
+
+The pipeline is designed for this. When context runs out:
+
+1. Claude session ends
+2. State file preserves progress
+3. Run `bmad build {id} --resume`
+4. Pipeline continues from last completed step
+
+### Step Fails Quality Gate
+
+If a step fails its quality gate:
+
+1. Pipeline halts at that step
+2. State file shows `status: failed`
+3. Fix issues manually or adjust thresholds
+4. Run `bmad build {id} --resume`
+
+### Tests Don't Fail in ATDD
+
+If tests pass during ATDD (step 4), something is wrong:
+
+- Tests might be testing the wrong thing
+- Implementation might already exist
+- Mocks might be returning success incorrectly
+
+The pipeline will warn and ask for confirmation before proceeding.
+
+## Best Practices
+
+1. **Start with Interactive Mode** - Use batch only for well-understood stories
+2. **Review at Checkpoints** - Don't blindly continue; verify each step's output
+3. **Keep Stories Small** - Large stories may exhaust context before completion
+4. **Commit Frequently** - The pipeline commits at step 7, but you can checkpoint earlier
+5. **Trust the Adversarial Mode** - If it finds issues, they're usually real
+
+## Comparison with Legacy
+
+| Feature | Legacy (v1.0) | Story Pipeline (v2.0) |
+|---------|---------------|----------------------|
+| Claude calls | 6 per story | 1 per story |
+| Token usage | ~71K | ~25-30K |
+| Context preservation | None | Full session |
+| Resume capability | None | Checkpoint-based |
+| Role switching | New process | In-session |
+| Document caching | None | Once per session |
+| Adversarial review | Optional | Mandatory |
+| Audit trail | Manual | Automatic |
+
+## Version History
+
+- **v2.0** (2024-12) - Step-file architecture, single-session, checkpoint/resume
+- **v1.0** (2024-11) - Legacy 6-call pipeline
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/batch-runner.sh b/src/bmm/workflows/4-implementation/story-pipeline/batch-runner.sh
new file mode 100755
index 00000000..ad53bb4d
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/batch-runner.sh
@@ -0,0 +1,250 @@
+#!/bin/bash
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+# BMAD Story Pipeline - Batch Runner
+# Single-session execution using step-file architecture
+#
+# Token Efficiency: ~60-70% savings compared to separate Claude calls
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+
+set -e
+
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+# CONFIGURATION
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../../.." && pwd)"
+TIMESTAMP=$(date +%Y%m%d-%H%M%S)
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+NC='\033[0m'
+
+# Defaults
+STORY_ID=""
+EPIC_NUM=""
+DRY_RUN=false
+RESUME=false
+VERBOSE=false
+
+# Directories
+LOG_DIR="$PROJECT_ROOT/logs/pipeline-batch"
+WORKFLOW_PATH="_bmad/bmm/workflows/4-implementation/story-pipeline"
+
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+# USAGE
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+usage() {
+ cat << EOF
+BMAD Story Pipeline - Batch Runner
+Single-session execution with step-file architecture
+
+Usage: $(basename "$0") --story-id --epic-num [OPTIONS]
+
+Required:
+ --story-id Story ID (e.g., '1-4')
+ --epic-num Epic number (e.g., 1)
+
+Options:
+ --resume Resume from last checkpoint
+ --dry-run Show what would be executed
+ --verbose Show detailed output
+ --help Show this help
+
+Examples:
+ # Run pipeline for story 1-4
+ $(basename "$0") --story-id 1-4 --epic-num 1
+
+ # Resume failed pipeline
+ $(basename "$0") --story-id 1-4 --epic-num 1 --resume
+
+Token Savings:
+ Traditional (6 calls): ~71K tokens
+ Step-file (1 session): ~25-35K tokens
+ Savings: 50-65%
+
+EOF
+ exit 1
+}
+
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+# ARGUMENT PARSING
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --story-id)
+ STORY_ID="$2"
+ shift 2
+ ;;
+ --epic-num)
+ EPIC_NUM="$2"
+ shift 2
+ ;;
+ --resume)
+ RESUME=true
+ shift
+ ;;
+ --dry-run)
+ DRY_RUN=true
+ shift
+ ;;
+ --verbose)
+ VERBOSE=true
+ shift
+ ;;
+ --help)
+ usage
+ ;;
+ *)
+ echo -e "${RED}Unknown option: $1${NC}"
+ usage
+ ;;
+ esac
+done
+
+# Validate required arguments
+if [[ -z "$STORY_ID" || -z "$EPIC_NUM" ]]; then
+ echo -e "${RED}Error: --story-id and --epic-num are required${NC}"
+ usage
+fi
+
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+# SETUP
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+mkdir -p "$LOG_DIR"
+LOG_FILE="$LOG_DIR/batch-$STORY_ID-$TIMESTAMP.log"
+
+echo -e "${CYAN}βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ${NC}"
+echo -e "${CYAN} BMAD Story Pipeline - Batch Mode${NC}"
+echo -e "${CYAN}βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ${NC}"
+echo -e "${BLUE}Story:${NC} $STORY_ID"
+echo -e "${BLUE}Epic:${NC} $EPIC_NUM"
+echo -e "${BLUE}Mode:${NC} $([ "$RESUME" = true ] && echo 'Resume' || echo 'Fresh')"
+echo -e "${BLUE}Log:${NC} $LOG_FILE"
+echo ""
+
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+# BUILD PROMPT
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+
+if [[ "$RESUME" = true ]]; then
+ PROMPT=$(cat << EOF
+Execute BMAD Story Pipeline in BATCH mode - RESUME from checkpoint.
+
+WORKFLOW: $WORKFLOW_PATH/workflow.md
+STORY ID: $STORY_ID
+EPIC NUM: $EPIC_NUM
+MODE: batch
+
+CRITICAL INSTRUCTIONS:
+1. Load and read fully: $WORKFLOW_PATH/workflow.md
+2. This is RESUME mode - load state file first
+3. Follow step-file architecture EXACTLY
+4. Execute steps ONE AT A TIME
+5. AUTO-PROCEED through all steps (no menus in batch mode)
+6. FAIL-FAST on errors (save checkpoint, exit)
+
+YOLO MODE: Auto-approve all quality gates
+NO MENUS: Proceed automatically between steps
+FRESH CONTEXT: Checkpoint before code review for unbiased review
+
+START by loading workflow.md and then step-01b-resume.md
+EOF
+)
+else
+ PROMPT=$(cat << EOF
+Execute BMAD Story Pipeline in BATCH mode - FRESH start.
+
+WORKFLOW: $WORKFLOW_PATH/workflow.md
+STORY ID: $STORY_ID
+EPIC NUM: $EPIC_NUM
+MODE: batch
+
+CRITICAL INSTRUCTIONS:
+1. Load and read fully: $WORKFLOW_PATH/workflow.md
+2. This is a FRESH run - initialize new state
+3. Follow step-file architecture EXACTLY
+4. Execute steps ONE AT A TIME (never load multiple)
+5. AUTO-PROCEED through all steps (no menus in batch mode)
+6. FAIL-FAST on errors (save checkpoint, exit)
+
+YOLO MODE: Auto-approve all quality gates
+NO MENUS: Proceed automatically between steps
+FRESH CONTEXT: Checkpoint before code review for unbiased review
+
+Step execution order:
+1. step-01-init.md - Initialize, cache documents
+2. step-02-create-story.md - Create story (SM role)
+3. step-03-validate-story.md - Validate story (SM role)
+4. step-04-atdd.md - Generate tests (TEA role)
+5. step-05-implement.md - Implement (DEV role)
+6. step-06-code-review.md - Review (DEV role, adversarial)
+7. step-07-complete.md - Complete (SM role)
+8. step-08-summary.md - Generate audit
+
+START by loading workflow.md and then step-01-init.md
+EOF
+)
+fi
+
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+# EXECUTE
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+
+if [[ "$DRY_RUN" = true ]]; then
+ echo -e "${YELLOW}[DRY-RUN] Would execute single Claude session with:${NC}"
+ echo ""
+ echo "$PROMPT"
+ echo ""
+ echo -e "${YELLOW}[DRY-RUN] Allowed tools: *, MCP extensions${NC}"
+ exit 0
+fi
+
+echo -e "${GREEN}Starting single-session pipeline execution...${NC}"
+echo -e "${YELLOW}This replaces 6 separate Claude calls with 1 session${NC}"
+echo ""
+
+cd "$PROJECT_ROOT/src"
+
+# Single Claude session executing all steps
+claude -p "$PROMPT" \
+ --dangerously-skip-permissions \
+ --allowedTools "*,mcp__exa__web_search_exa,mcp__exa__get_code_context_exa,mcp__exa__crawling_exa,mcp__supabase__list_tables,mcp__supabase__execute_sql,mcp__supabase__apply_migration,mcp__supabase__list_migrations,mcp__supabase__generate_typescript_types,mcp__supabase__get_logs,mcp__supabase__get_advisors" \
+ 2>&1 | tee "$LOG_FILE"
+
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+# COMPLETION CHECK
+# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+
+echo ""
+echo -e "${CYAN}βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ${NC}"
+
+# Check for success indicators in log
+if grep -qi "Pipeline complete\|Story.*is ready\|step-08-summary.*completed" "$LOG_FILE"; then
+ echo -e "${GREEN}β Pipeline completed successfully${NC}"
+
+ # Extract metrics if available
+ if grep -qi "Token Efficiency" "$LOG_FILE"; then
+ echo ""
+ echo -e "${CYAN}Token Efficiency:${NC}"
+ grep -A5 "Token Efficiency" "$LOG_FILE" | head -6
+ fi
+else
+ echo -e "${YELLOW}β οΈ Pipeline may have completed with issues${NC}"
+ echo -e "${YELLOW} Check log: $LOG_FILE${NC}"
+
+ # Check for specific failure indicators
+ if grep -qi "permission\|can't write\|access denied" "$LOG_FILE"; then
+ echo -e "${RED} Found permission errors in log${NC}"
+ fi
+ if grep -qi "HALT\|FAIL\|ERROR" "$LOG_FILE"; then
+ echo -e "${RED} Found error indicators in log${NC}"
+ fi
+fi
+
+echo ""
+echo -e "${BLUE}Log file:${NC} $LOG_FILE"
+echo -e "${CYAN}βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ${NC}"
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/checklists/atdd.md b/src/bmm/workflows/4-implementation/story-pipeline/checklists/atdd.md
new file mode 100644
index 00000000..4e1fc517
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/checklists/atdd.md
@@ -0,0 +1,130 @@
+# ATDD Checklist
+
+Use this checklist for test generation in Step 4.
+Tests are written BEFORE implementation (RED phase).
+
+## Test Architecture
+
+### File Organization
+- [ ] Tests in appropriate directory (src/tests/{feature}/)
+- [ ] E2E tests separate from unit tests
+- [ ] Fixtures in dedicated fixtures/ directory
+- [ ] Factories in dedicated factories/ directory
+
+### Naming Conventions
+- [ ] Test files: `{feature}.test.ts` or `{feature}.spec.ts`
+- [ ] Factory files: `{entity}.factory.ts`
+- [ ] Fixture files: `{feature}.fixture.ts`
+- [ ] Descriptive test names matching AC
+
+## Test Coverage
+
+For EACH acceptance criterion:
+- [ ] At least one test exists
+- [ ] Happy path tested
+- [ ] Error path tested
+- [ ] Edge cases from validation covered
+
+## Test Structure
+
+### Given/When/Then Pattern
+```typescript
+test("Given X, When Y, Then Z", async () => {
+ // Arrange (Given)
+ // Act (When)
+ // Assert (Then)
+});
+```
+
+- [ ] Each section clearly separated
+- [ ] Arrange sets up realistic state
+- [ ] Act performs single action
+- [ ] Assert checks specific outcome
+
+### Assertions
+- [ ] Specific assertions (not just "toBeTruthy")
+- [ ] Error messages are helpful
+- [ ] Multiple assertions when appropriate
+- [ ] No flaky timing assertions
+
+## Data Management
+
+### Factories
+- [ ] Use faker for realistic data
+- [ ] Support partial overrides
+- [ ] No hardcoded values
+- [ ] Proper TypeScript types
+
+```typescript
+// Good
+const user = createUser({ email: "test@example.com" });
+
+// Bad
+const user = { id: "123", email: "test@test.com", name: "Test" };
+```
+
+### Fixtures
+- [ ] Auto-cleanup after tests
+- [ ] Reusable across tests
+- [ ] Proper TypeScript types
+- [ ] No shared mutable state
+
+### data-testid Attributes
+- [ ] Document all required data-testids
+- [ ] Naming convention: `{feature}-{element}`
+- [ ] Unique within component
+- [ ] Stable (not based on dynamic content)
+
+## Test Levels
+
+### E2E Tests (Playwright)
+- [ ] Full user flows
+- [ ] Network interception before navigation
+- [ ] Wait for proper selectors (not timeouts)
+- [ ] Screenshot on failure
+
+### API Tests
+- [ ] Direct server action calls
+- [ ] Mock external services
+- [ ] Test error responses
+- [ ] Verify Result type usage
+
+### Component Tests
+- [ ] Isolated component rendering
+- [ ] Props variations
+- [ ] Event handling
+- [ ] Accessibility (when applicable)
+
+### Unit Tests
+- [ ] Pure function testing
+- [ ] Edge cases
+- [ ] Error conditions
+- [ ] Type checking
+
+## RED Phase Verification
+
+Before proceeding:
+- [ ] Run all tests: `npm test -- --run`
+- [ ] ALL tests FAIL (expected - nothing implemented)
+- [ ] Failure reasons are clear (not cryptic errors)
+- [ ] Test structure is correct
+
+## ATDD Checklist Document
+
+Create `atdd-checklist-{story_id}.md` with:
+- [ ] List of test files created
+- [ ] List of factories created
+- [ ] List of fixtures created
+- [ ] Required data-testid attributes table
+- [ ] Implementation requirements for DEV
+- [ ] Test status (all FAILING)
+
+## Quality Gate
+
+Ready for implementation when:
+- [ ] Test for every AC
+- [ ] All tests FAIL (red phase)
+- [ ] Factories use faker
+- [ ] Fixtures have cleanup
+- [ ] data-testids documented
+- [ ] ATDD checklist complete
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/checklists/code-review.md b/src/bmm/workflows/4-implementation/story-pipeline/checklists/code-review.md
new file mode 100644
index 00000000..3ef14035
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/checklists/code-review.md
@@ -0,0 +1,183 @@
+# Code Review Checklist
+
+Use this checklist for ADVERSARIAL code review in Step 6.
+Your job is to FIND PROBLEMS (minimum 3, maximum 10).
+
+## Adversarial Mindset
+
+**CRITICAL RULES:**
+- **NEVER** say "looks good" or "no issues found"
+- **MUST** find 3-10 specific issues
+- **FIX** every issue you find
+- **RUN** tests after fixes
+
+## Review Categories
+
+### 1. Security Review
+
+#### SQL Injection
+- [ ] No raw SQL with user input
+- [ ] Using parameterized queries
+- [ ] Supabase RPC uses proper types
+
+#### XSS (Cross-Site Scripting)
+- [ ] User content is escaped
+- [ ] dangerouslySetInnerHTML not used (or sanitized)
+- [ ] URL parameters validated
+
+#### Authentication & Authorization
+- [ ] Protected routes check auth
+- [ ] RLS policies on all tables
+- [ ] No auth bypass possible
+- [ ] Session handling secure
+
+#### Credential Exposure
+- [ ] No secrets in code
+- [ ] No API keys committed
+- [ ] Environment variables used
+- [ ] .env files in .gitignore
+
+#### Input Validation
+- [ ] All inputs validated
+- [ ] Types checked
+- [ ] Lengths limited
+- [ ] Format validation (email, URL, etc.)
+
+### 2. Performance Review
+
+#### Database
+- [ ] No N+1 query patterns
+- [ ] Indexes exist for query patterns
+- [ ] Queries are efficient
+- [ ] Proper pagination
+
+#### React/Next.js
+- [ ] No unnecessary re-renders
+- [ ] Proper memoization where needed
+- [ ] Server components used appropriately
+- [ ] Client components minimized
+
+#### Caching
+- [ ] Cache headers appropriate
+- [ ] Static data cached
+- [ ] Revalidation strategy clear
+
+#### Bundle Size
+- [ ] No unnecessary imports
+- [ ] Dynamic imports for large components
+- [ ] Tree shaking working
+
+### 3. Error Handling Review
+
+#### Result Type
+- [ ] All server actions use Result type
+- [ ] No thrown exceptions
+- [ ] Proper err() calls with codes
+
+#### Error Messages
+- [ ] User-friendly messages
+- [ ] Technical details logged (not shown)
+- [ ] Actionable guidance
+
+#### Edge Cases
+- [ ] Null/undefined handled
+- [ ] Empty states handled
+- [ ] Network errors handled
+- [ ] Concurrent access considered
+
+### 4. Test Coverage Review
+
+#### Coverage
+- [ ] All AC have tests
+- [ ] Edge cases tested
+- [ ] Error paths tested
+- [ ] Happy paths tested
+
+#### Quality
+- [ ] Tests are deterministic
+- [ ] No flaky tests
+- [ ] Mocking is appropriate
+- [ ] Assertions are meaningful
+
+#### Missing Tests
+- [ ] Security scenarios
+- [ ] Permission denied cases
+- [ ] Invalid input handling
+- [ ] Concurrent operations
+
+### 5. Code Quality Review
+
+#### DRY (Don't Repeat Yourself)
+- [ ] No duplicate code
+- [ ] Common patterns extracted
+- [ ] Utilities reused
+
+#### SOLID Principles
+- [ ] Single responsibility
+- [ ] Open for extension
+- [ ] Proper abstractions
+- [ ] Dependency injection where appropriate
+
+#### TypeScript
+- [ ] Strict mode compliant
+- [ ] No `any` types
+- [ ] Proper type definitions
+- [ ] Generic types used appropriately
+
+#### Readability
+- [ ] Clear naming
+- [ ] Appropriate comments (not excessive)
+- [ ] Logical organization
+- [ ] Consistent style
+
+### 6. Architecture Review
+
+#### Module Boundaries
+- [ ] Imports from index.ts only
+- [ ] No circular dependencies
+- [ ] Clear module responsibilities
+
+#### Server/Client Separation
+- [ ] "use server" on actions
+- [ ] "use client" only when needed
+- [ ] No server code in client
+
+#### Data Flow
+- [ ] Clear data ownership
+- [ ] State management appropriate
+- [ ] Props drilling minimized
+
+## Issue Documentation
+
+For each issue found:
+
+```yaml
+issue_{n}:
+ severity: critical|high|medium|low
+ category: security|performance|error-handling|testing|quality|architecture
+ file: "{file_path}"
+ line: {line_number}
+ problem: |
+ Clear description
+ risk: |
+ What could go wrong
+ fix: |
+ How to fix it
+```
+
+## After Fixing
+
+- [ ] All issues fixed
+- [ ] Tests still pass
+- [ ] Lint clean
+- [ ] Build succeeds
+- [ ] Review report created
+
+## Quality Gate
+
+Review passes when:
+- [ ] 3-10 issues found
+- [ ] All issues fixed
+- [ ] All categories reviewed
+- [ ] Tests passing
+- [ ] Review report complete
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/checklists/implementation.md b/src/bmm/workflows/4-implementation/story-pipeline/checklists/implementation.md
new file mode 100644
index 00000000..6399809c
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/checklists/implementation.md
@@ -0,0 +1,147 @@
+# Implementation Checklist
+
+Use this checklist during TDD implementation in Step 5.
+Focus: Make tests GREEN with minimal code.
+
+## TDD Methodology
+
+### RED-GREEN-REFACTOR Cycle
+1. [ ] Start with failing test (from ATDD)
+2. [ ] Write minimal code to pass
+3. [ ] Run test, verify GREEN
+4. [ ] Move to next test
+5. [ ] Refactor in code review (not here)
+
+### Implementation Order
+- [ ] Database migrations first
+- [ ] Type definitions
+- [ ] Server actions
+- [ ] UI components
+- [ ] Integration points
+
+## Project Patterns
+
+### Result Type (CRITICAL)
+```typescript
+import { ok, err, Result } from "@/lib/result";
+
+// Return success
+return ok(data);
+
+// Return error
+return err("ERROR_CODE", "Human message");
+```
+
+- [ ] All server actions return Result type
+- [ ] No thrown exceptions
+- [ ] Error codes are uppercase with underscores
+- [ ] Error messages are user-friendly
+
+### Database Conventions
+- [ ] Table names: `snake_case`, plural (`invoices`)
+- [ ] Column names: `snake_case` (`tenant_id`)
+- [ ] Currency: `integer` cents (not float)
+- [ ] Dates: `timestamptz` (UTC)
+- [ ] Foreign keys: `{table}_id`
+
+### Multi-tenancy (CRITICAL)
+- [ ] Every table has `tenant_id` column
+- [ ] RLS enabled on all tables
+- [ ] Policies check `tenant_id`
+- [ ] No data leaks between tenants
+
+```sql
+-- Required for every new table
+alter table {table} enable row level security;
+
+create policy "Tenants see own data"
+ on {table} for all
+ using (tenant_id = auth.jwt() ->> 'tenant_id');
+```
+
+### Module Structure
+```
+src/modules/{module}/
+βββ actions/ # Server actions (return Result type)
+βββ lib/ # Business logic
+βββ types.ts # Module types
+βββ index.ts # Public exports only
+```
+
+- [ ] Import from index.ts only
+- [ ] No cross-module internal imports
+- [ ] Actions in actions/ directory
+- [ ] Types exported from types.ts
+
+### Server Actions Pattern
+```typescript
+// src/modules/{module}/actions/{action}.ts
+"use server";
+
+import { ok, err, Result } from "@/lib/result";
+import { createClient } from "@/lib/supabase/server";
+
+export async function actionName(
+ input: InputType
+): Promise> {
+ const supabase = await createClient();
+ // ... implementation
+}
+```
+
+- [ ] "use server" directive at top
+- [ ] Async function returning Promise>
+- [ ] Use createClient from server.ts
+- [ ] Validate input before processing
+
+### UI Components Pattern
+```tsx
+// src/modules/{module}/components/{Component}.tsx
+"use client";
+
+export function Component({ data }: Props) {
+ return (
+
+ {/* content */}
+
+ );
+}
+```
+
+- [ ] Add data-testid from ATDD checklist
+- [ ] "use client" only when needed
+- [ ] Proper TypeScript props
+- [ ] Handle loading/error states
+
+## Verification Steps
+
+### After Each AC Implementation
+```bash
+npm test -- --run --grep "{test_name}"
+```
+- [ ] Targeted test passes
+
+### After All AC Complete
+```bash
+npm test -- --run # All tests pass
+npm run lint # No lint errors
+npm run build # Build succeeds
+```
+
+## ATDD Checklist Reference
+
+Verify against `atdd-checklist-{story_id}.md`:
+- [ ] All data-testid attributes added
+- [ ] All API endpoints created
+- [ ] All database migrations applied
+- [ ] All test scenarios pass
+
+## Quality Gate
+
+Ready for code review when:
+- [ ] All tests pass (GREEN)
+- [ ] Lint clean
+- [ ] Build succeeds
+- [ ] Result type used everywhere
+- [ ] RLS policies in place
+- [ ] ATDD checklist complete
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/checklists/story-creation.md b/src/bmm/workflows/4-implementation/story-pipeline/checklists/story-creation.md
new file mode 100644
index 00000000..e6a61d3c
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/checklists/story-creation.md
@@ -0,0 +1,76 @@
+# Story Creation Checklist
+
+Use this checklist when creating a new story in Step 2.
+
+## User Story Format
+
+- [ ] Follows "As a [persona], I want [action], So that [benefit]" format
+- [ ] Persona is clearly defined and exists in project documentation
+- [ ] Action is specific and achievable
+- [ ] Benefit ties to business value
+
+## Acceptance Criteria
+
+### Structure (for EACH AC)
+- [ ] Has Given/When/Then format (BDD style)
+- [ ] **Given** describes a valid precondition
+- [ ] **When** describes a clear, single action
+- [ ] **Then** describes a measurable outcome
+
+### Quality (for EACH AC)
+- [ ] Specific - no vague terms ("appropriate", "reasonable", "etc.")
+- [ ] Measurable - clear success/failure criteria
+- [ ] Testable - can write automated test
+- [ ] Independent - no hidden dependencies on other AC
+
+### Completeness
+- [ ] All happy path scenarios covered
+- [ ] Error scenarios defined
+- [ ] Edge cases considered
+- [ ] Boundary conditions clear
+
+### Anti-patterns to AVOID
+- [ ] No AND conjunctions (split into multiple AC)
+- [ ] No OR alternatives (ambiguous paths)
+- [ ] No implementation details (WHAT not HOW)
+- [ ] No vague verbs ("handle", "process", "manage")
+
+## Test Scenarios
+
+- [ ] At least 2 test scenarios per AC
+- [ ] Happy path scenario exists
+- [ ] Error/edge case scenario exists
+- [ ] Each scenario is unique (no duplicates)
+- [ ] Scenarios are specific enough to write tests from
+
+## Tasks
+
+- [ ] Tasks cover implementation of all AC
+- [ ] Tasks are actionable (start with verb)
+- [ ] Subtasks provide enough detail
+- [ ] Dependencies between tasks are clear
+- [ ] No task is too large (can complete in one session)
+
+## Technical Notes
+
+- [ ] Database changes documented
+- [ ] API changes documented
+- [ ] UI changes documented
+- [ ] Security considerations noted
+- [ ] Performance considerations noted
+
+## Dependencies & Scope
+
+- [ ] Dependencies on other stories listed
+- [ ] Dependencies on external systems listed
+- [ ] Out of scope explicitly defined
+- [ ] No scope creep from epic definition
+
+## Quality Gate
+
+Story is ready for validation when:
+- [ ] All sections complete
+- [ ] All AC in proper format
+- [ ] Test scenarios defined
+- [ ] Tasks cover all work
+- [ ] No ambiguity remains
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/checklists/story-validation.md b/src/bmm/workflows/4-implementation/story-pipeline/checklists/story-validation.md
new file mode 100644
index 00000000..3c5cee37
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/checklists/story-validation.md
@@ -0,0 +1,111 @@
+# Story Validation Checklist
+
+Use this checklist for ADVERSARIAL validation in Step 3.
+Your job is to FIND PROBLEMS, not approve.
+
+## Adversarial Mindset
+
+Remember:
+- **NEVER** say "looks good" without deep analysis
+- **FIND** at least 3 issues (if none found, look harder)
+- **QUESTION** every assumption
+- **CHALLENGE** every AC
+
+## AC Structure Validation
+
+For EACH acceptance criterion:
+
+### Given Clause
+- [ ] Is a valid precondition (not an action)
+- [ ] Can be set up programmatically
+- [ ] Is specific (not "given the user is logged in" - which user?)
+- [ ] Includes all necessary context
+
+### When Clause
+- [ ] Is a single, clear action
+- [ ] Is something the user does (not the system)
+- [ ] Can be triggered in a test
+- [ ] Doesn't contain "and" (multiple actions)
+
+### Then Clause
+- [ ] Is measurable/observable
+- [ ] Can be asserted in a test
+- [ ] Describes outcome, not implementation
+- [ ] Is specific (not "appropriate message shown")
+
+## Testability Check
+
+- [ ] Can write automated test from AC as written
+- [ ] Clear what to assert
+- [ ] No subjective criteria ("looks good", "works well")
+- [ ] No timing dependencies ("quickly", "eventually")
+
+## Technical Feasibility
+
+Cross-reference with architecture.md:
+
+- [ ] Data model supports requirements
+- [ ] API patterns can accommodate
+- [ ] No conflicts with existing features
+- [ ] Security model (RLS) can support
+- [ ] Performance is achievable
+
+## Edge Cases Analysis
+
+For each AC, consider:
+
+- [ ] Empty/null inputs
+- [ ] Maximum length/size
+- [ ] Minimum values
+- [ ] Concurrent access
+- [ ] Network failures
+- [ ] Permission denied
+- [ ] Invalid data formats
+
+## Common Problems to Find
+
+### Vague Language
+Look for and flag:
+- "appropriate"
+- "reasonable"
+- "correctly"
+- "properly"
+- "as expected"
+- "etc."
+- "and so on"
+
+### Missing Details
+- [ ] Which user role?
+- [ ] What error message exactly?
+- [ ] What happens on failure?
+- [ ] What are the limits?
+- [ ] What validations apply?
+
+### Hidden Complexity
+- [ ] Multi-step process hidden in one AC
+- [ ] Async operations not addressed
+- [ ] State management unclear
+- [ ] Error recovery not defined
+
+## Validation Report Template
+
+After review, document:
+
+```yaml
+issues_found:
+ - id: 1
+ severity: high|medium|low
+ ac: "AC1"
+ problem: "Description"
+ fix: "How to fix"
+```
+
+## Quality Gate
+
+Validation passes when:
+- [ ] All AC reviewed against checklist
+- [ ] All issues documented
+- [ ] All issues fixed in story file
+- [ ] Quality score >= 80
+- [ ] Validation report appended
+- [ ] ready_for_dev: true
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-01-init.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-01-init.md
new file mode 100644
index 00000000..12501cd6
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-01-init.md
@@ -0,0 +1,244 @@
+---
+name: 'step-01-init'
+description: 'Initialize story pipeline: load context, detect mode, cache documents'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-01-init.md'
+nextStepFile: '{workflow_path}/steps/step-02-create-story.md'
+resumeStepFile: '{workflow_path}/steps/step-01b-resume.md'
+workflowFile: '{workflow_path}/workflow.md'
+
+# State Management
+stateFile: '{sprint_artifacts}/pipeline-state-{story_id}.yaml'
+auditFile: '{sprint_artifacts}/audit-{story_id}-{date}.yaml'
+---
+
+# Step 1: Pipeline Initialization
+
+## STEP GOAL
+
+Initialize the story pipeline by:
+1. Resolving story parameters (epic_num, story_num)
+2. Detecting execution mode (interactive vs batch)
+3. Checking for existing pipeline state (resume scenario)
+4. Pre-loading and caching documents for token efficiency
+5. Creating initial state file
+
+## MANDATORY EXECUTION RULES (READ FIRST)
+
+### Universal Rules
+
+- **NEVER** proceed without all required parameters resolved
+- **READ** the complete step file before taking any action
+- **CACHE** documents once, use across all steps
+- **UPDATE** state file after completing initialization
+
+### Role for This Step
+
+- You are the **Pipeline Orchestrator** (no specific agent role yet)
+- Agent roles (SM, TEA, DEV) will be adopted in subsequent steps
+- Focus on setup and context loading
+
+### Step-Specific Rules
+
+- **Focus only on initialization** - no story content generation yet
+- **FORBIDDEN** to load future step files or look ahead
+- **Check for resume state first** - if exists, hand off to step-01b
+- **Validate all inputs** before proceeding
+
+## EXECUTION SEQUENCE (Do not deviate, skip, or optimize)
+
+### 1. Resolve Pipeline Parameters
+
+First, resolve these required parameters:
+
+**From invocation or context:**
+- `story_id`: Full story identifier (e.g., "1-4")
+- `epic_num`: Epic number (e.g., 1)
+- `story_num`: Story number within epic (e.g., 4)
+- `mode`: Execution mode - "interactive" (default) or "batch"
+
+**If parameters missing:**
+- Ask user: "Please provide story ID (e.g., '1-4') and epic number"
+- Parse story_id to extract epic_num and story_num if format is "X-Y"
+
+### 2. Check for Existing Pipeline State (Resume Detection)
+
+Check if state file exists: `{sprint_artifacts}/pipeline-state-{story_id}.yaml`
+
+**If state file exists and has `stepsCompleted` array with entries:**
+- **STOP immediately**
+- Load and execute `{resumeStepFile}` (step-01b-resume.md)
+- Do not proceed with fresh initialization
+- This is auto-proceed - no user choice needed
+
+**If no state file or empty `stepsCompleted`:**
+- Continue with fresh pipeline initialization
+
+### 3. Locate Story File
+
+Search for existing story file with pattern:
+- Primary: `{sprint_artifacts}/story-{story_id}.md`
+- Alternative: `{sprint_artifacts}/{story_id}*.md`
+
+**Record finding:**
+- `story_file_exists`: true/false
+- `story_file_path`: path if exists, null otherwise
+
+### 4. Pre-Load and Cache Documents
+
+Load these documents ONCE for use across all steps:
+
+#### A. Project Context (REQUIRED)
+```
+Pattern: **/project-context.md
+Strategy: FULL_LOAD
+Cache: true
+```
+- Load complete project-context.md
+- This contains critical rules and patterns
+
+#### B. Epic File (REQUIRED)
+```
+Pattern: {output_folder}/epic-{epic_num}.md OR {output_folder}/epics.md
+Strategy: SELECTIVE_LOAD (just current epic section)
+Cache: true
+```
+- Find and load epic definition for current story
+- Extract story description, BDD scenarios
+
+#### C. Architecture (SELECTIVE)
+```
+Pattern: {output_folder}/architecture.md
+Strategy: INDEX_GUIDED
+Sections: tech_stack, data_model, api_patterns
+Cache: true
+```
+- Load only relevant architecture sections
+- Skip detailed implementation that's not needed
+
+#### D. Story File (IF EXISTS)
+```
+Pattern: {sprint_artifacts}/story-{story_id}.md
+Strategy: FULL_LOAD (if exists)
+Cache: true
+```
+- If story exists, load for validation/continuation
+- Will be created in step 2 if not exists
+
+### 5. Create Initial State File
+
+Create state file at `{stateFile}`:
+
+```yaml
+---
+story_id: "{story_id}"
+epic_num: {epic_num}
+story_num: {story_num}
+mode: "{mode}"
+stepsCompleted: []
+lastStep: 0
+currentStep: 1
+status: "initializing"
+started_at: "{timestamp}"
+updated_at: "{timestamp}"
+cached_context:
+ project_context_loaded: true
+ epic_loaded: true
+ architecture_sections: ["tech_stack", "data_model", "api_patterns"]
+ story_file_exists: {story_file_exists}
+ story_file_path: "{story_file_path}"
+steps:
+ step-01-init: { status: in_progress }
+ step-02-create-story: { status: pending }
+ step-03-validate-story: { status: pending }
+ step-04-atdd: { status: pending }
+ step-05-implement: { status: pending }
+ step-06-code-review: { status: pending }
+ step-07-complete: { status: pending }
+ step-08-summary: { status: pending }
+---
+```
+
+### 6. Present Initialization Summary
+
+Report to user:
+
+```
+Pipeline Initialized for Story {story_id}
+
+Mode: {mode}
+Epic: {epic_num}
+Story: {story_num}
+
+Documents Cached:
+- Project Context: [loaded from path]
+- Epic {epic_num}: [loaded sections]
+- Architecture: [loaded sections]
+- Story File: [exists/will be created]
+
+Pipeline State: {stateFile}
+
+Ready to proceed to story creation.
+```
+
+### 7. Update State and Proceed
+
+Update state file:
+- Set `stepsCompleted: [1]`
+- Set `lastStep: 1`
+- Set `steps.step-01-init.status: completed`
+- Set `status: "in_progress"`
+
+### 8. Present Menu (Interactive Mode Only)
+
+**If mode == "interactive":**
+
+Display menu and wait for user input:
+```
+[C] Continue to Story Creation
+[H] Halt pipeline
+```
+
+**Menu Handling:**
+- **C (Continue)**: Load and execute `{nextStepFile}`
+- **H (Halt)**: Save checkpoint, exit gracefully
+
+**If mode == "batch":**
+- Auto-proceed to next step
+- Load and execute `{nextStepFile}` immediately
+
+## QUALITY GATE
+
+Before proceeding, verify:
+- [ ] All parameters resolved (story_id, epic_num, story_num, mode)
+- [ ] State file created and valid
+- [ ] Project context loaded
+- [ ] Epic definition loaded
+- [ ] Architecture sections loaded (at least tech_stack)
+
+## CRITICAL STEP COMPLETION
+
+**ONLY WHEN** [initialization complete AND state file updated AND quality gate passed],
+load and execute `{nextStepFile}` to begin story creation.
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- All parameters resolved
+- Resume state detected and handed off correctly
+- Documents cached efficiently (not reloaded)
+- State file created with proper structure
+- Menu presented and user input handled
+
+### β FAILURE
+- Proceeding without resolved parameters
+- Not checking for resume state first
+- Loading documents redundantly across steps
+- Not creating state file before proceeding
+- Skipping directly to implementation
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-01b-resume.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-01b-resume.md
new file mode 100644
index 00000000..590bc652
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-01b-resume.md
@@ -0,0 +1,213 @@
+---
+name: 'step-01b-resume'
+description: 'Resume pipeline from checkpoint after failure or interruption'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-01b-resume.md'
+stepsPath: '{workflow_path}/steps'
+
+# State Management
+stateFile: '{sprint_artifacts}/pipeline-state-{story_id}.yaml'
+---
+
+# Step 1b: Resume from Checkpoint
+
+## STEP GOAL
+
+Resume a previously started pipeline from the last completed checkpoint:
+1. Load existing pipeline state
+2. Restore cached document context
+3. Determine next step to execute
+4. Present resume options to user
+
+## MANDATORY EXECUTION RULES
+
+### Universal Rules
+
+- **NEVER** restart from step 1 if progress exists
+- **ALWAYS** restore cached context before resuming
+- **PRESERVE** all completed step data
+- **VALIDATE** state file integrity before resuming
+
+### Resume Priority
+
+- Resume from `lastStep + 1` by default
+- Allow user to override and restart from earlier step
+- Warn if restarting would lose completed work
+
+## EXECUTION SEQUENCE
+
+### 1. Load Pipeline State
+
+Read state file: `{stateFile}`
+
+Extract:
+- `story_id`, `epic_num`, `story_num`, `mode`
+- `stepsCompleted`: Array of completed step numbers
+- `lastStep`: Last successfully completed step
+- `cached_context`: Document loading status
+- `steps`: Individual step status records
+
+### 2. Validate State Integrity
+
+Check state file is valid:
+- [ ] `story_id` matches requested story
+- [ ] `stepsCompleted` is valid array
+- [ ] `lastStep` corresponds to actual completed work
+- [ ] No corruption in step records
+
+**If invalid:**
+- Warn user: "State file appears corrupted"
+- Offer: "Start fresh or attempt recovery?"
+
+### 3. Restore Cached Context
+
+Re-load documents if not in memory:
+
+```yaml
+cached_context:
+ project_context_loaded: {reload if false}
+ epic_loaded: {reload if false}
+ architecture_sections: {reload specified sections}
+ story_file_exists: {verify still exists}
+ story_file_path: {verify path valid}
+```
+
+**Efficiency note:** Only reload what's needed, don't duplicate work.
+
+### 4. Present Resume Summary
+
+Display current state:
+
+```
+Pipeline Resume for Story {story_id}
+
+Previous Session:
+- Started: {started_at}
+- Last Update: {updated_at}
+- Mode: {mode}
+
+Progress:
+- Steps Completed: {stepsCompleted}
+- Last Step: {lastStep} ({step_name})
+- Next Step: {lastStep + 1} ({next_step_name})
+
+Step Status:
+ [β] Step 1: Initialize
+ [β] Step 2: Create Story
+ [β] Step 3: Validate Story
+ [ ] Step 4: ATDD (NEXT)
+ [ ] Step 5: Implement
+ [ ] Step 6: Code Review
+ [ ] Step 7: Complete
+ [ ] Step 8: Summary
+```
+
+### 5. Present Resume Options
+
+**Menu:**
+```
+Resume Options:
+
+[C] Continue from Step {lastStep + 1} ({next_step_name})
+[R] Restart from specific step (will mark later steps as pending)
+[F] Fresh start (lose all progress)
+[H] Halt
+
+Select option:
+```
+
+### 6. Handle User Selection
+
+**C (Continue):**
+- Update state: `currentStep: {lastStep + 1}`
+- Load and execute next step file
+
+**R (Restart from step):**
+- Ask: "Which step? (2-8)"
+- Validate step number
+- Mark selected step and all later as `pending`
+- Update `lastStep` to step before selected
+- Load and execute selected step
+
+**F (Fresh start):**
+- Confirm: "This will lose all progress. Are you sure? (y/n)"
+- If confirmed: Delete state file, redirect to step-01-init.md
+- If not: Return to menu
+
+**H (Halt):**
+- Save current state
+- Exit gracefully
+
+### 7. Determine Next Step File
+
+Map step number to file:
+
+| Step | File |
+|------|------|
+| 2 | step-02-create-story.md |
+| 3 | step-03-validate-story.md |
+| 4 | step-04-atdd.md |
+| 5 | step-05-implement.md |
+| 6 | step-06-code-review.md |
+| 7 | step-07-complete.md |
+| 8 | step-08-summary.md |
+
+### 8. Update State and Execute
+
+Before loading next step:
+- Update `updated_at` to current timestamp
+- Set `currentStep` to target step
+- Set target step status to `in_progress`
+
+Then load and execute: `{stepsPath}/step-{XX}-{name}.md`
+
+## BATCH MODE HANDLING
+
+If `mode == "batch"`:
+- Skip menu presentation
+- Auto-continue from `lastStep + 1`
+- If `lastStep` was a failure, check error details
+- If retryable error, attempt same step again
+- If non-retryable, halt with error report
+
+## ERROR RECOVERY
+
+### Common Resume Scenarios
+
+**Story file missing after step 2:**
+- Warn user
+- Offer to restart from step 2
+
+**Tests missing after step 4:**
+- Warn user
+- Offer to restart from step 4
+
+**Implementation incomplete after step 5:**
+- Check git status for partial changes
+- Offer to continue or rollback
+
+**Code review incomplete after step 6:**
+- Check if issues were logged
+- Offer to continue review or re-run
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- State file loaded and validated
+- Context restored efficiently
+- User presented clear resume options
+- Correct step file loaded and executed
+- No data loss during resume
+
+### β FAILURE
+- Starting from step 1 when progress exists
+- Not validating state file integrity
+- Loading wrong step after resume
+- Losing completed work without confirmation
+- Not restoring cached context
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-02-create-story.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-02-create-story.md
new file mode 100644
index 00000000..097b31ae
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-02-create-story.md
@@ -0,0 +1,244 @@
+---
+name: 'step-02-create-story'
+description: 'Create detailed story file from epic definition with research'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-02-create-story.md'
+nextStepFile: '{workflow_path}/steps/step-03-validate-story.md'
+checklist: '{workflow_path}/checklists/story-creation.md'
+
+# Role Switch
+role: sm
+agentFile: '{project-root}/_bmad/bmm/agents/sm.md'
+---
+
+# Step 2: Create Story
+
+## ROLE SWITCH
+
+**Switching to SM (Scrum Master) perspective.**
+
+You are now the Scrum Master facilitating story creation. Your expertise:
+- User story structure and acceptance criteria
+- BDD scenario writing (Given/When/Then)
+- Task breakdown and estimation
+- Ensuring testability of requirements
+
+## STEP GOAL
+
+Create a detailed, implementation-ready story file:
+1. Research best practices for the domain
+2. Extract story definition from epic
+3. Write clear acceptance criteria with BDD scenarios
+4. Define tasks and subtasks
+5. Ensure all criteria are testable
+
+## MANDATORY EXECUTION RULES
+
+### Role-Specific Rules
+
+- **THINK** like a product/process expert, not a developer
+- **FOCUS** on WHAT, not HOW (implementation comes later)
+- **ENSURE** every AC is testable and measurable
+- **AVOID** technical implementation details in AC
+
+### Step-Specific Rules
+
+- **SKIP** this step if story file already exists (check cached context)
+- **RESEARCH** best practices before writing
+- **USE** project-context.md patterns for consistency
+- **CREATE** file at `{sprint_artifacts}/story-{story_id}.md`
+
+## EXECUTION SEQUENCE
+
+### 1. Check if Story Already Exists
+
+From cached context, check `story_file_exists`:
+
+**If story file exists:**
+- Read and display existing story summary
+- Ask: "Story file exists. [V]alidate existing, [R]ecreate from scratch?"
+- If V: Proceed to step-03-validate-story.md
+- If R: Continue with story creation (will overwrite)
+
+**If story does not exist:**
+- Continue with creation
+
+### 2. Research Phase (MCP Tools)
+
+Use MCP tools for domain research:
+
+```
+mcp__exa__web_search_exa:
+ query: "user story acceptance criteria best practices agile {domain}"
+
+mcp__exa__get_code_context_exa:
+ query: "{technology} implementation patterns"
+```
+
+**Extract from research:**
+- AC writing best practices
+- Common patterns for this domain
+- Anti-patterns to avoid
+
+### 3. Load Epic Definition
+
+From cached epic file, extract for story {story_id}:
+- Story title and description
+- User persona
+- Business value
+- Initial AC ideas
+- BDD scenarios if present
+
+### 4. Generate Story Content
+
+Create story file following template:
+
+```markdown
+---
+id: story-{story_id}
+epic: {epic_num}
+title: "{story_title}"
+status: draft
+created_at: {timestamp}
+---
+
+# Story {story_id}: {story_title}
+
+## User Story
+
+As a [persona],
+I want to [action],
+So that [benefit].
+
+## Acceptance Criteria
+
+### AC1: [Criterion Name]
+
+**Given** [precondition]
+**When** [action]
+**Then** [expected result]
+
+**Test Scenarios:**
+- [ ] Scenario 1: [description]
+- [ ] Scenario 2: [description]
+
+### AC2: [Criterion Name]
+...
+
+## Tasks
+
+### Task 1: [Task Name]
+- [ ] Subtask 1.1
+- [ ] Subtask 1.2
+
+### Task 2: [Task Name]
+...
+
+## Technical Notes
+
+### Database Changes
+- [any schema changes needed]
+
+### API Changes
+- [any endpoint changes]
+
+### UI Changes
+- [any frontend changes]
+
+## Dependencies
+- [list any dependencies on other stories or systems]
+
+## Out of Scope
+- [explicitly list what is NOT included]
+```
+
+### 5. Verify Story Quality
+
+Before saving, verify:
+- [ ] All AC have Given/When/Then format
+- [ ] Each AC has at least 2 test scenarios
+- [ ] Tasks cover all AC implementation
+- [ ] No implementation details in AC (WHAT not HOW)
+- [ ] Out of scope is defined
+- [ ] Dependencies listed if any
+
+### 6. Save Story File
+
+Write to: `{sprint_artifacts}/story-{story_id}.md`
+
+Update state file:
+- `cached_context.story_file_exists: true`
+- `cached_context.story_file_path: {path}`
+
+### 7. Update Pipeline State
+
+Update state file:
+- Add `2` to `stepsCompleted`
+- Set `lastStep: 2`
+- Set `steps.step-02-create-story.status: completed`
+- Set `steps.step-02-create-story.duration: {duration}`
+
+### 8. Present Summary and Menu
+
+Display:
+```
+Story {story_id} Created
+
+Title: {story_title}
+Acceptance Criteria: {count}
+Test Scenarios: {count}
+Tasks: {count}
+
+File: {story_file_path}
+```
+
+**Interactive Mode Menu:**
+```
+[C] Continue to Validation
+[E] Edit story manually
+[R] Regenerate story
+[H] Halt pipeline
+```
+
+**Batch Mode:** Auto-continue to next step.
+
+## QUALITY GATE
+
+Before proceeding:
+- [ ] Story file created at correct location
+- [ ] All AC in Given/When/Then format
+- [ ] Test scenarios defined for each AC
+- [ ] Tasks cover full implementation scope
+- [ ] File passes frontmatter validation
+
+## MCP TOOLS AVAILABLE
+
+- `mcp__exa__web_search_exa` - Research best practices
+- `mcp__exa__get_code_context_exa` - Tech pattern research
+
+## CRITICAL STEP COMPLETION
+
+**ONLY WHEN** [story file created AND quality gate passed AND state updated],
+load and execute `{nextStepFile}` for adversarial validation.
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- Story file created with proper structure
+- All AC have BDD format
+- Test scenarios cover all AC
+- Research insights incorporated
+- State file updated correctly
+
+### β FAILURE
+- Story file not created or in wrong location
+- AC without Given/When/Then format
+- Missing test scenarios
+- Including implementation details in AC
+- Not updating state before proceeding
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-03-validate-story.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-03-validate-story.md
new file mode 100644
index 00000000..13806d0e
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-03-validate-story.md
@@ -0,0 +1,229 @@
+---
+name: 'step-03-validate-story'
+description: 'Adversarial validation of story completeness and quality'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-03-validate-story.md'
+nextStepFile: '{workflow_path}/steps/step-04-atdd.md'
+checklist: '{workflow_path}/checklists/story-validation.md'
+
+# Role (same as step 2, no switch needed)
+role: sm
+---
+
+# Step 3: Validate Story
+
+## ROLE CONTINUATION
+
+**Continuing as SM (Scrum Master) - Adversarial Validator mode.**
+
+You are now an ADVERSARIAL validator. Your job is to FIND PROBLEMS, not approve.
+Challenge every assumption. Question every AC. Ensure the story is truly ready.
+
+## STEP GOAL
+
+Perform rigorous validation of the story file:
+1. Research common AC anti-patterns
+2. Validate each acceptance criterion
+3. Check technical feasibility
+4. Ensure all edge cases covered
+5. Fix all issues found
+6. Add validation report
+
+## MANDATORY EXECUTION RULES
+
+### Adversarial Mindset
+
+- **ASSUME** something is wrong - find it
+- **NEVER** say "looks good" without deep analysis
+- **QUESTION** every assumption
+- **FIND** at least 3 issues (if no issues, you haven't looked hard enough)
+
+### Validation Rules
+
+- Every AC must be: Specific, Measurable, Testable
+- Every AC must have test scenarios
+- No vague terms: "should", "might", "could", "etc."
+- No undefined boundaries: "appropriate", "reasonable"
+
+## EXECUTION SEQUENCE
+
+### 1. Research Validation Patterns
+
+Use MCP for research:
+
+```
+mcp__exa__web_search_exa:
+ query: "acceptance criteria anti-patterns common mistakes user stories"
+```
+
+**Extract:**
+- Common AC problems
+- Validation techniques
+- Red flags to look for
+
+### 2. Load Story File
+
+Read from cached path: `{story_file_path}`
+
+Parse and extract:
+- All acceptance criteria
+- All test scenarios
+- Task definitions
+- Dependencies
+
+### 3. Validate Each AC (MANDATORY CHECKLIST)
+
+For EACH acceptance criterion:
+
+**Structure Check:**
+- [ ] Has Given/When/Then format
+- [ ] Given is a valid precondition
+- [ ] When is a clear action
+- [ ] Then is a measurable outcome
+
+**Quality Check:**
+- [ ] Specific (no vague terms)
+- [ ] Measurable (clear success criteria)
+- [ ] Testable (can write automated test)
+- [ ] Independent (no hidden dependencies)
+
+**Completeness Check:**
+- [ ] Edge cases considered
+- [ ] Error scenarios defined
+- [ ] Boundary conditions clear
+
+**Anti-pattern Check:**
+- [ ] No implementation details
+- [ ] No AND conjunctions (split into multiple AC)
+- [ ] No OR alternatives (ambiguous)
+
+### 4. Technical Feasibility Check
+
+Cross-reference with architecture.md (from cache):
+
+- [ ] Required data model exists or migration defined
+- [ ] API endpoints fit existing patterns
+- [ ] No conflicts with existing functionality
+- [ ] Security model (RLS) can support requirements
+
+### 5. Test Scenario Coverage
+
+Verify test scenarios:
+- [ ] At least 2 scenarios per AC
+- [ ] Happy path covered
+- [ ] Error paths covered
+- [ ] Edge cases covered
+- [ ] Each scenario is unique (no duplicates)
+
+### 6. Document All Issues Found
+
+Create issues list:
+
+```yaml
+issues_found:
+ - id: 1
+ severity: high|medium|low
+ ac: AC1
+ problem: "Description of issue"
+ fix: "How to fix it"
+ - id: 2
+ ...
+```
+
+### 7. Fix All Issues
+
+For EACH issue:
+1. Edit the story file to fix
+2. Document the fix
+3. Verify fix is correct
+
+### 8. Add Validation Report
+
+Append to story file:
+
+```yaml
+# Validation Report
+validated_by: sm-validator
+validated_at: {timestamp}
+issues_found: {count}
+issues_fixed: {count}
+quality_score: {0-100}
+test_scenarios_count: {count}
+edge_cases_covered: {list}
+ready_for_dev: true|false
+validation_notes: |
+ - {note 1}
+ - {note 2}
+```
+
+### 9. Update Pipeline State
+
+Update state file:
+- Add `3` to `stepsCompleted`
+- Set `lastStep: 3`
+- Set `steps.step-03-validate-story.status: completed`
+- Record `issues_found` and `issues_fixed` counts
+
+### 10. Present Summary and Menu
+
+Display:
+```
+Story Validation Complete
+
+Issues Found: {count}
+Issues Fixed: {count}
+Quality Score: {score}/100
+
+Validation Areas:
+- AC Structure: β/β
+- Testability: β/β
+- Technical Feasibility: β/β
+- Edge Cases: β/β
+
+Ready for Development: {yes/no}
+```
+
+**Interactive Mode Menu:**
+```
+[C] Continue to ATDD (Test Generation)
+[R] Re-validate
+[E] Edit story manually
+[H] Halt pipeline
+```
+
+**Batch Mode:** Auto-continue if ready_for_dev: true
+
+## QUALITY GATE
+
+Before proceeding:
+- [ ] All issues identified and fixed
+- [ ] Quality score >= 80
+- [ ] ready_for_dev: true
+- [ ] Validation report appended to story file
+
+## CRITICAL STEP COMPLETION
+
+**ONLY WHEN** [validation complete AND quality gate passed AND ready_for_dev: true],
+load and execute `{nextStepFile}` for ATDD test generation.
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- Found and fixed at least 3 issues
+- Quality score >= 80
+- All AC pass validation checklist
+- Validation report added
+- Story marked ready for dev
+
+### β FAILURE
+- Approving story as "looks good" without deep review
+- Missing edge case analysis
+- Not fixing all identified issues
+- Proceeding with quality_score < 80
+- Not adding validation report
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-04-atdd.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-04-atdd.md
new file mode 100644
index 00000000..8a2b6a4e
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-04-atdd.md
@@ -0,0 +1,308 @@
+---
+name: 'step-04-atdd'
+description: 'Generate failing acceptance tests before implementation (RED phase)'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-04-atdd.md'
+nextStepFile: '{workflow_path}/steps/step-05-implement.md'
+checklist: '{workflow_path}/checklists/atdd.md'
+
+# Role Switch
+role: tea
+agentFile: '{project-root}/_bmad/bmm/agents/tea.md'
+---
+
+# Step 4: ATDD - Acceptance Test-Driven Development
+
+## ROLE SWITCH
+
+**Switching to TEA (Test Engineering Architect) perspective.**
+
+You are now the Test Engineering Architect. Your expertise:
+- Test strategy and design
+- Playwright and Vitest patterns
+- Data factories and fixtures
+- Test-first development methodology
+
+## STEP GOAL
+
+Generate FAILING acceptance tests BEFORE implementation (RED phase):
+1. Research test patterns for the technology stack
+2. Analyze each acceptance criterion
+3. Determine appropriate test level (E2E, API, Component, Unit)
+4. Write tests in Given/When/Then format
+5. Create data factories and fixtures
+6. Verify tests FAIL (they should - nothing is implemented yet)
+7. Generate implementation checklist for DEV
+
+## MANDATORY EXECUTION RULES
+
+### ATDD Principles
+
+- **TESTS FIRST** - Write tests before any implementation
+- **TESTS MUST FAIL** - If tests pass, something is wrong
+- **ONE AC = ONE TEST** (minimum) - More for complex scenarios
+- **REALISTIC DATA** - Use factories, not hardcoded values
+
+### Test Architecture Rules
+
+- Use `data-testid` selectors for stability
+- Network-first pattern (route interception before navigation)
+- Auto-cleanup fixtures
+- No flaky timing-based assertions
+
+## EXECUTION SEQUENCE
+
+### 1. Research Test Patterns
+
+Use MCP tools:
+
+```
+mcp__exa__web_search_exa:
+ query: "playwright acceptance test best practices Next.js TypeScript 2025"
+
+mcp__exa__get_code_context_exa:
+ query: "vitest playwright test fixtures factories faker patterns"
+```
+
+**Extract:**
+- Current best practices for Next.js testing
+- Fixture and factory patterns
+- Common pitfalls to avoid
+
+### 2. Analyze Acceptance Criteria
+
+From cached story file, for EACH acceptance criterion:
+
+```yaml
+ac_analysis:
+ - ac_id: AC1
+ title: "{ac_title}"
+ given: "{given clause}"
+ when: "{when clause}"
+ then: "{then clause}"
+ test_level: E2E|API|Component|Unit
+ test_file: "{proposed test file path}"
+ requires_fixtures: [list]
+ requires_factories: [list]
+ data_testids_needed: [list]
+```
+
+### 3. Determine Test Levels
+
+For each AC, determine appropriate level:
+
+| Level | When to Use |
+|-------|-------------|
+| E2E | Full user flows, UI interactions |
+| API | Server actions, API endpoints |
+| Component | React component behavior |
+| Unit | Pure business logic, utilities |
+
+### 4. Create Data Factories
+
+For each entity needed in tests:
+
+```typescript
+// src/tests/factories/{entity}.factory.ts
+import { faker } from "@faker-js/faker";
+
+export function create{Entity}(overrides: Partial<{Entity}> = {}): {Entity} {
+ return {
+ id: faker.string.uuid(),
+ // ... realistic fake data
+ ...overrides,
+ };
+}
+```
+
+### 5. Create Test Fixtures
+
+For each test setup pattern:
+
+```typescript
+// src/tests/fixtures/{feature}.fixture.ts
+import { test as base } from "vitest";
+// or for E2E:
+import { test as base } from "@playwright/test";
+
+export const test = base.extend<{
+ // fixture types
+}>({
+ // fixture implementations with auto-cleanup
+});
+```
+
+### 6. Write Acceptance Tests
+
+For EACH acceptance criterion:
+
+```typescript
+// src/tests/{appropriate-dir}/{feature}.test.ts
+
+describe("AC{N}: {ac_title}", () => {
+ test("Given {given}, When {when}, Then {then}", async () => {
+ // Arrange (Given)
+ const data = createTestData();
+
+ // Act (When)
+ const result = await performAction(data);
+
+ // Assert (Then)
+ expect(result).toMatchExpectedOutcome();
+ });
+
+ // Additional scenarios from story
+ test("Edge case: {scenario}", async () => {
+ // ...
+ });
+});
+```
+
+### 7. Document Required data-testids
+
+Create list of data-testids that DEV must implement:
+
+```markdown
+## Required data-testid Attributes
+
+| Element | data-testid | Purpose |
+|---------|-------------|---------|
+| Submit button | submit-{feature} | Test form submission |
+| Error message | error-{feature} | Verify error display |
+| ... | ... | ... |
+```
+
+### 8. Verify Tests FAIL
+
+Run tests and verify they fail:
+
+```bash
+npm test -- --run {test-file}
+```
+
+**Expected:** All tests should FAIL (RED phase)
+- "Cannot find element with data-testid"
+- "Function not implemented"
+- "Route not found"
+
+**If tests PASS:** Something is wrong - investigate
+
+### 9. Create ATDD Checklist
+
+Create: `{sprint_artifacts}/atdd-checklist-{story_id}.md`
+
+```markdown
+# ATDD Checklist for Story {story_id}
+
+## Test Files Created
+- [ ] {test_file_1}
+- [ ] {test_file_2}
+
+## Factories Created
+- [ ] {factory_1}
+- [ ] {factory_2}
+
+## Fixtures Created
+- [ ] {fixture_1}
+
+## Implementation Requirements for DEV
+
+### Required data-testid Attributes
+| Element | Attribute |
+|---------|-----------|
+| ... | ... |
+
+### API Endpoints Needed
+- [ ] {endpoint_1}
+- [ ] {endpoint_2}
+
+### Database Changes
+- [ ] {migration_1}
+
+## Test Status (RED Phase)
+All tests should FAIL until implementation:
+- [ ] {test_1}: FAILING β
+- [ ] {test_2}: FAILING β
+```
+
+### 10. Update Pipeline State
+
+Update state file:
+- Add `4` to `stepsCompleted`
+- Set `lastStep: 4`
+- Set `steps.step-04-atdd.status: completed`
+- Record test file paths created
+
+### 11. Present Summary and Menu
+
+Display:
+```
+ATDD Complete - RED Phase Verified
+
+Tests Created: {count}
+All Tests FAILING: β (as expected)
+
+Test Files:
+- {test_file_1}
+- {test_file_2}
+
+Factories: {count}
+Fixtures: {count}
+data-testids Required: {count}
+
+ATDD Checklist: {checklist_path}
+
+Next: DEV will implement to make tests GREEN
+```
+
+**Interactive Mode Menu:**
+```
+[C] Continue to Implementation
+[T] Run tests again
+[E] Edit tests
+[H] Halt pipeline
+```
+
+**Batch Mode:** Auto-continue
+
+## QUALITY GATE
+
+Before proceeding:
+- [ ] Test file created for each AC
+- [ ] All tests FAIL (RED phase verified)
+- [ ] Factories created for test data
+- [ ] data-testid requirements documented
+- [ ] ATDD checklist created
+
+## MCP TOOLS AVAILABLE
+
+- `mcp__exa__web_search_exa` - Test pattern research
+- `mcp__exa__get_code_context_exa` - Framework-specific patterns
+
+## CRITICAL STEP COMPLETION
+
+**ONLY WHEN** [tests created AND all tests FAIL AND checklist created],
+load and execute `{nextStepFile}` for implementation.
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- Tests written for all AC
+- All tests FAIL (red phase)
+- Factories use faker, not hardcoded data
+- Fixtures have auto-cleanup
+- data-testid requirements documented
+- ATDD checklist complete
+
+### β FAILURE
+- Tests PASS before implementation
+- Hardcoded test data
+- Missing edge case tests
+- No data-testid documentation
+- Skipping to implementation without tests
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-05-implement.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-05-implement.md
new file mode 100644
index 00000000..c98ef4fe
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-05-implement.md
@@ -0,0 +1,285 @@
+---
+name: 'step-05-implement'
+description: 'Implement story to make tests pass (GREEN phase)'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-05-implement.md'
+nextStepFile: '{workflow_path}/steps/step-05b-post-validation.md'
+checklist: '{workflow_path}/checklists/implementation.md'
+
+# Role Switch
+role: dev
+agentFile: '{project-root}/_bmad/bmm/agents/dev.md'
+---
+
+# Step 5: Implement Story
+
+## ROLE SWITCH
+
+**Switching to DEV (Developer) perspective.**
+
+You are now the Developer implementing the story. Your expertise:
+- Next.js 16 with App Router
+- TypeScript strict mode
+- Supabase with RLS
+- TDD methodology (make tests GREEN)
+
+## STEP GOAL
+
+Implement the story using TDD methodology:
+1. Research implementation patterns
+2. Review ATDD checklist and failing tests
+3. For each failing test: implement minimal code to pass
+4. Run tests, verify GREEN
+5. Ensure lint and build pass
+6. No refactoring yet (that's code review)
+
+## MANDATORY EXECUTION RULES
+
+### TDD Rules (RED-GREEN-REFACTOR)
+
+- **GREEN PHASE** - Make tests pass with minimal code
+- **ONE TEST AT A TIME** - Don't implement all at once
+- **MINIMAL CODE** - Just enough to pass, no over-engineering
+- **RUN TESTS FREQUENTLY** - After each change
+
+### Implementation Rules
+
+- **Follow project-context.md** patterns exactly
+- **Result type** for all server actions (never throw)
+- **snake_case** for database columns
+- **Multi-tenancy** with tenant_id on all tables
+- **RLS policies** for all new tables
+
+## EXECUTION SEQUENCE
+
+### 1. Research Implementation Patterns
+
+Use MCP tools:
+
+```
+mcp__exa__get_code_context_exa:
+ query: "Next.js 16 server actions Supabase RLS multi-tenant"
+
+mcp__supabase__list_tables:
+ # Understand current schema
+```
+
+### 2. Review ATDD Checklist
+
+Load: `{sprint_artifacts}/atdd-checklist-{story_id}.md`
+
+Extract:
+- Required data-testid attributes
+- API endpoints needed
+- Database changes required
+- Current failing tests
+
+### 3. Run Failing Tests
+
+```bash
+npm test -- --run
+```
+
+Confirm all tests are FAILING (from ATDD phase).
+
+### 4. Implementation Loop
+
+For EACH acceptance criterion:
+
+**A. Focus on one failing test:**
+```bash
+npm test -- --run --grep "{test_name}"
+```
+
+**B. Implement minimal code:**
+- Database migration if needed
+- Server action / API route
+- UI component with data-testid
+- Type definitions
+
+**C. Run targeted test:**
+```bash
+npm test -- --run --grep "{test_name}"
+```
+
+**D. Verify GREEN:**
+- Test passes β
+- Move to next test
+
+### 5. Database Migrations
+
+For any schema changes:
+
+```bash
+# Create migration file
+npx supabase migration new {name}
+
+# Migration content
+-- Enable RLS
+alter table {table} enable row level security;
+
+-- RLS policies
+create policy "Tenants can view own data"
+ on {table} for select
+ using (tenant_id = auth.jwt() ->> 'tenant_id');
+```
+
+Apply to remote:
+```bash
+npx supabase db push
+```
+
+### 6. Server Actions Pattern
+
+Follow project-context.md pattern:
+
+```typescript
+// src/modules/{module}/actions/{action}.ts
+"use server";
+
+import { ok, err, Result } from "@/lib/result";
+import { createClient } from "@/lib/supabase/server";
+
+export async function actionName(
+ input: InputType
+): Promise> {
+ const supabase = await createClient();
+
+ const { data, error } = await supabase
+ .from("table")
+ .select("*")
+ .eq("tenant_id", tenantId);
+
+ if (error) {
+ return err("DB_ERROR", error.message);
+ }
+
+ return ok(data);
+}
+```
+
+### 7. UI Components Pattern
+
+```tsx
+// src/modules/{module}/components/{Component}.tsx
+"use client";
+
+export function Component({ data }: Props) {
+ return (
+
+
+
+ );
+}
+```
+
+### 8. Run Full Test Suite
+
+After all AC implemented:
+
+```bash
+npm test -- --run
+```
+
+**All tests should pass (GREEN).**
+
+### 9. Lint and Build
+
+```bash
+npm run lint
+npm run build
+```
+
+Fix any issues that arise.
+
+### 10. Verify Implementation Completeness
+
+Check against ATDD checklist:
+- [ ] All data-testid attributes added
+- [ ] All API endpoints created
+- [ ] All database migrations applied
+- [ ] All tests passing
+
+### 11. Update Pipeline State
+
+Update state file:
+- Add `5` to `stepsCompleted`
+- Set `lastStep: 5`
+- Set `steps.step-05-implement.status: completed`
+- Record files modified
+
+### 12. Present Summary and Menu
+
+Display:
+```
+Implementation Complete - GREEN Phase
+
+Tests: {passed}/{total} PASSING
+Lint: β Clean
+Build: β Success
+
+Files Modified:
+- {file_1}
+- {file_2}
+
+Migrations Applied:
+- {migration_1}
+
+Ready for Code Review
+```
+
+**Interactive Mode Menu:**
+```
+[C] Continue to Post-Implementation Validation
+[T] Run tests again
+[B] Run build again
+[H] Halt pipeline
+```
+
+**Batch Mode:** Auto-continue if all tests pass
+
+## QUALITY GATE
+
+Before proceeding:
+- [ ] All tests pass (GREEN)
+- [ ] Lint clean
+- [ ] Build succeeds
+- [ ] All ATDD checklist items complete
+- [ ] RLS policies for new tables
+
+## MCP TOOLS AVAILABLE
+
+- `mcp__exa__get_code_context_exa` - Implementation patterns
+- `mcp__supabase__list_tables` - Schema inspection
+- `mcp__supabase__execute_sql` - Query testing
+- `mcp__supabase__apply_migration` - Schema changes
+- `mcp__supabase__generate_typescript_types` - Type sync
+
+## CRITICAL STEP COMPLETION
+
+**ONLY WHEN** [all tests pass AND lint clean AND build succeeds],
+load and execute `{nextStepFile}` for post-implementation validation.
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- All tests pass (GREEN phase)
+- TDD methodology followed
+- Result type used (no throws)
+- RLS policies in place
+- Lint and build clean
+
+### β FAILURE
+- Tests still failing
+- Skipping tests to implement faster
+- Throwing errors instead of Result type
+- Missing RLS policies
+- Build or lint failures
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-05b-post-validation.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-05b-post-validation.md
new file mode 100644
index 00000000..070f95eb
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-05b-post-validation.md
@@ -0,0 +1,437 @@
+---
+name: 'step-05b-post-validation'
+description: 'Verify completed tasks against codebase reality (catch false positives)'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-05b-post-validation.md'
+nextStepFile: '{workflow_path}/steps/step-06-code-review.md'
+prevStepFile: '{workflow_path}/steps/step-05-implement.md'
+
+# Role Switch
+role: dev
+requires_fresh_context: false # Continue from implementation context
+---
+
+# Step 5b: Post-Implementation Validation
+
+## ROLE CONTINUATION - VERIFICATION MODE
+
+**Continuing as DEV but switching to VERIFICATION mindset.**
+
+You are now verifying that completed work actually exists in the codebase.
+This catches the common problem of tasks marked [x] but implementation is incomplete.
+
+## STEP GOAL
+
+Verify all completed tasks against codebase reality:
+1. Re-read story file and extract completed tasks
+2. For each completed task, identify what should exist
+3. Use codebase search tools to verify existence
+4. Run tests to verify they actually pass
+5. Identify false positives (marked done but not actually done)
+6. If gaps found, uncheck tasks and add missing work
+7. Re-run implementation if needed
+
+## MANDATORY EXECUTION RULES
+
+### Verification Principles
+
+- **TRUST NOTHING** - Verify every completed task
+- **CHECK EXISTENCE** - Files, functions, components must exist
+- **CHECK COMPLETENESS** - Not just existence, but full implementation
+- **TEST VERIFICATION** - Claimed test coverage must be real
+- **NO ASSUMPTIONS** - Re-scan the codebase with fresh eyes
+
+### What to Verify
+
+For each task marked [x]:
+- Files mentioned exist at correct paths
+- Functions/components declared and exported
+- Tests exist and actually pass
+- Database migrations applied
+- API endpoints respond correctly
+
+## EXECUTION SEQUENCE
+
+### 1. Load Story and Extract Completed Tasks
+
+Load story file: `{story_file}`
+
+Extract all tasks from story that are marked [x]:
+```regex
+- \[x\] (.+)
+```
+
+Build list of `completed_tasks` to verify.
+
+### 2. Categorize Tasks by Type
+
+For each completed task, determine what needs verification:
+
+**File Creation Tasks:**
+- Pattern: "Create {file_path}"
+- Verify: File exists at path
+
+**Component/Function Tasks:**
+- Pattern: "Add {name} function/component"
+- Verify: Symbol exists and is exported
+
+**Test Tasks:**
+- Pattern: "Add test for {feature}"
+- Verify: Test file exists and test passes
+
+**Database Tasks:**
+- Pattern: "Add {table} table", "Create migration"
+- Verify: Migration file exists, schema matches
+
+**API Tasks:**
+- Pattern: "Create {endpoint} endpoint"
+- Verify: Route file exists, handler implemented
+
+**UI Tasks:**
+- Pattern: "Add {element} to UI"
+- Verify: Component has data-testid attribute
+
+### 3. Verify File Existence
+
+For all file-related tasks:
+
+```bash
+# Use Glob to find files
+glob: "**/{mentioned_filename}"
+```
+
+**Check:**
+- [ ] File exists
+- [ ] File is not empty
+- [ ] File has expected exports
+
+**False Positive Indicators:**
+- File doesn't exist
+- File exists but is empty
+- File exists but missing expected symbols
+
+### 4. Verify Function/Component Implementation
+
+For code implementation tasks:
+
+```bash
+# Use Grep to find symbols
+grep: "{function_name|component_name}"
+ glob: "**/*.{ts,tsx}"
+ output_mode: "content"
+```
+
+**Check:**
+- [ ] Symbol is declared
+- [ ] Symbol is exported
+- [ ] Implementation is not a stub/placeholder
+- [ ] Required logic is present
+
+**False Positive Indicators:**
+- Symbol not found
+- Symbol exists but marked TODO
+- Symbol exists but throws "Not implemented"
+- Symbol exists but returns empty/null
+
+### 5. Verify Test Coverage
+
+For all test-related tasks:
+
+```bash
+# Find test files
+glob: "**/*.test.{ts,tsx}"
+glob: "**/*.spec.{ts,tsx}"
+
+# Run specific tests
+npm test -- --run --grep "{feature_name}"
+```
+
+**Check:**
+- [ ] Test file exists
+- [ ] Test describes the feature
+- [ ] Test actually runs (not skipped)
+- [ ] Test passes (GREEN)
+
+**False Positive Indicators:**
+- No test file found
+- Test exists but skipped (it.skip)
+- Test exists but fails
+- Test exists but doesn't test the feature (placeholder)
+
+### 6. Verify Database Changes
+
+For database migration tasks:
+
+```bash
+# Find migration files
+glob: "**/migrations/*.sql"
+
+# Check Supabase schema
+mcp__supabase__list_tables
+```
+
+**Check:**
+- [ ] Migration file exists
+- [ ] Migration has been applied
+- [ ] Table/column exists in schema
+- [ ] RLS policies are present
+
+**False Positive Indicators:**
+- Migration file missing
+- Migration not applied to database
+- Table/column doesn't exist
+- RLS policies missing
+
+### 7. Verify API Endpoints
+
+For API endpoint tasks:
+
+```bash
+# Find route files
+glob: "**/app/api/**/{endpoint}/route.ts"
+grep: "export async function {METHOD}"
+```
+
+**Check:**
+- [ ] Route file exists
+- [ ] Handler function implemented
+- [ ] Returns proper Response type
+- [ ] Error handling present
+
+**False Positive Indicators:**
+- Route file doesn't exist
+- Handler throws "Not implemented"
+- Handler returns stub response
+
+### 8. Run Full Verification
+
+Execute verification for ALL completed tasks:
+
+```typescript
+interface VerificationResult {
+ task: string;
+ status: "verified" | "false_positive";
+ evidence: string;
+ missing?: string;
+}
+
+const results: VerificationResult[] = [];
+
+for (const task of completed_tasks) {
+ const result = await verifyTask(task);
+ results.push(result);
+}
+```
+
+### 9. Analyze Verification Results
+
+Count results:
+```
+Total Verified: {verified_count}
+False Positives: {false_positive_count}
+```
+
+### 10. Handle False Positives
+
+**IF false positives found (count > 0):**
+
+Display:
+```
+β οΈ POST-IMPLEMENTATION GAPS DETECTED
+
+Tasks marked complete but implementation incomplete:
+
+{for each false_positive}
+- [ ] {task_description}
+ Missing: {what_is_missing}
+ Evidence: {grep/glob results}
+
+{add new tasks for missing work}
+- [ ] Actually implement {missing_part}
+```
+
+**Actions:**
+1. Uncheck false positive tasks in story file
+2. Add new tasks for the missing work
+3. Update "Gap Analysis" section in story
+4. Set state to re-run implementation
+
+**Re-run implementation:**
+```
+Detected {false_positive_count} incomplete tasks.
+Re-running Step 5: Implementation to complete missing work...
+
+{load and execute step-05-implement.md}
+```
+
+After re-implementation, **RE-RUN THIS STEP** (step-05b-post-validation.md)
+
+### 11. Handle Verified Success
+
+**IF no false positives (all verified):**
+
+Display:
+```
+β POST-IMPLEMENTATION VALIDATION PASSED
+
+All {verified_count} completed tasks verified against codebase:
+- Files exist and are complete
+- Functions/components implemented
+- Tests exist and pass
+- Database changes applied
+- API endpoints functional
+
+Ready for Code Review
+```
+
+Update story file "Gap Analysis" section:
+```markdown
+## Gap Analysis
+
+### Post-Implementation Validation
+- **Date:** {timestamp}
+- **Tasks Verified:** {verified_count}
+- **False Positives:** 0
+- **Status:** β All work verified complete
+
+**Verification Evidence:**
+{for each verified task}
+- β {task}: {evidence}
+```
+
+### 12. Update Pipeline State
+
+Update state file:
+- Add `5b` to `stepsCompleted`
+- Set `lastStep: 5b`
+- Set `steps.step-05b-post-validation.status: completed`
+- Record verification results:
+ ```yaml
+ verification:
+ tasks_verified: {count}
+ false_positives: {count}
+ re_implementation_required: {true|false}
+ ```
+
+### 13. Present Summary and Menu
+
+Display:
+```
+Post-Implementation Validation Complete
+
+Verification Summary:
+- Tasks Checked: {total_count}
+- Verified Complete: {verified_count}
+- False Positives: {false_positive_count}
+- Re-implementations: {retry_count}
+
+{if false_positives}
+Re-running implementation to complete missing work...
+{else}
+All work verified. Proceeding to Code Review...
+{endif}
+```
+
+**Interactive Mode Menu (only if no false positives):**
+```
+[C] Continue to Code Review
+[V] Run verification again
+[T] Run tests again
+[H] Halt pipeline
+```
+
+**Batch Mode:**
+- Auto re-run implementation if false positives
+- Auto-continue if all verified
+
+## QUALITY GATE
+
+Before proceeding to code review:
+- [ ] All completed tasks verified against codebase
+- [ ] Zero false positives remaining
+- [ ] All tests still passing
+- [ ] Build still succeeds
+- [ ] Gap analysis updated with verification results
+
+## VERIFICATION TOOLS
+
+Use these tools for verification:
+
+```typescript
+// File existence
+glob("{pattern}")
+
+// Symbol search
+grep("{symbol_name}", { glob: "**/*.{ts,tsx}", output_mode: "content" })
+
+// Test execution
+bash("npm test -- --run --grep '{test_name}'")
+
+// Database check
+mcp__supabase__list_tables()
+
+// Read file contents
+read("{file_path}")
+```
+
+## CRITICAL STEP COMPLETION
+
+**ONLY WHEN** [all tasks verified AND zero false positives],
+load and execute `{nextStepFile}` for code review.
+
+**IF** [false positives detected],
+load and execute `{prevStepFile}` to complete missing work,
+then RE-RUN this step.
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- All completed tasks verified against codebase
+- No false positives (or all re-implemented)
+- Tests still passing
+- Evidence documented for each task
+- Gap analysis updated
+
+### β FAILURE
+- Skipping verification ("trust the marks")
+- Not checking actual code existence
+- Not running tests to verify claims
+- Allowing false positives to proceed
+- Not documenting verification evidence
+
+## COMMON FALSE POSITIVE PATTERNS
+
+Watch for these common issues:
+
+1. **Stub Implementations**
+ - Function exists but returns `null`
+ - Function throws "Not implemented"
+ - Component returns empty div
+
+2. **Placeholder Tests**
+ - Test exists but skipped (it.skip)
+ - Test doesn't actually test the feature
+ - Test always passes (no assertions)
+
+3. **Incomplete Files**
+ - File created but empty
+ - Missing required exports
+ - TODO comments everywhere
+
+4. **Database Drift**
+ - Migration file exists but not applied
+ - Schema doesn't match migration
+ - RLS policies missing
+
+5. **API Stubs**
+ - Route exists but returns 501
+ - Handler not implemented
+ - No error handling
+
+This step is the **safety net** that catches incomplete work before code review.
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-06-code-review.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-06-code-review.md
new file mode 100644
index 00000000..bcc45f77
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-06-code-review.md
@@ -0,0 +1,294 @@
+---
+name: 'step-06-code-review'
+description: 'Adversarial code review finding 3-10 specific issues'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-06-code-review.md'
+nextStepFile: '{workflow_path}/steps/step-07-complete.md'
+checklist: '{workflow_path}/checklists/code-review.md'
+
+# Role (continue as dev, but reviewer mindset)
+role: dev
+requires_fresh_context: true # In batch mode, checkpoint here for unbiased review
+---
+
+# Step 6: Code Review
+
+## ROLE CONTINUATION - ADVERSARIAL MODE
+
+**Continuing as DEV but switching to ADVERSARIAL REVIEWER mindset.**
+
+You are now a critical code reviewer. Your job is to FIND PROBLEMS.
+- **NEVER** say "looks good" - that's a failure
+- **MUST** find 3-10 specific issues
+- **FIX** every issue you find
+
+## STEP GOAL
+
+Perform adversarial code review:
+1. Query Supabase advisors for security/performance issues
+2. Identify all files changed for this story
+3. Review each file against checklist
+4. Find and document 3-10 issues (MANDATORY)
+5. Fix all issues
+6. Verify tests still pass
+
+## MANDATORY EXECUTION RULES
+
+### Adversarial Requirements
+
+- **MINIMUM 3 ISSUES** - If you found fewer, look harder
+- **MAXIMUM 10 ISSUES** - Prioritize if more found
+- **NO "LOOKS GOOD"** - This is FORBIDDEN
+- **FIX EVERYTHING** - Don't just report, fix
+
+### Review Categories (find issues in EACH)
+
+1. Security
+2. Performance
+3. Error Handling
+4. Test Coverage
+5. Code Quality
+6. Architecture
+
+## EXECUTION SEQUENCE
+
+### 1. Query Supabase Advisors
+
+Use MCP tools:
+
+```
+mcp__supabase__get_advisors:
+ type: "security"
+
+mcp__supabase__get_advisors:
+ type: "performance"
+```
+
+Document any issues found.
+
+### 2. Identify Changed Files
+
+```bash
+git status
+git diff --name-only HEAD~1
+```
+
+List all files changed for story {story_id}.
+
+### 3. Review Each Category
+
+#### SECURITY REVIEW
+
+For each file, check:
+- [ ] No SQL injection vulnerabilities
+- [ ] No XSS vulnerabilities
+- [ ] Auth checks on all protected routes
+- [ ] RLS policies exist and are correct
+- [ ] No credential exposure (API keys, secrets)
+- [ ] Input validation present
+- [ ] Rate limiting considered
+
+#### PERFORMANCE REVIEW
+
+- [ ] No N+1 query patterns
+- [ ] Indexes exist for query patterns
+- [ ] No unnecessary re-renders
+- [ ] Proper caching strategy
+- [ ] Efficient data fetching
+- [ ] Bundle size impact considered
+
+#### ERROR HANDLING REVIEW
+
+- [ ] Result type used consistently
+- [ ] Error messages are user-friendly
+- [ ] Edge cases handled
+- [ ] Null/undefined checked
+- [ ] Network errors handled gracefully
+
+#### TEST COVERAGE REVIEW
+
+- [ ] All AC have tests
+- [ ] Edge cases tested
+- [ ] Error paths tested
+- [ ] Mocking is appropriate (not excessive)
+- [ ] Tests are deterministic
+
+#### CODE QUALITY REVIEW
+
+- [ ] DRY - no duplicate code
+- [ ] SOLID principles followed
+- [ ] TypeScript strict mode compliant
+- [ ] No any types
+- [ ] Functions are focused (single responsibility)
+- [ ] Naming is clear and consistent
+
+#### ARCHITECTURE REVIEW
+
+- [ ] Module boundaries respected
+- [ ] Imports from index.ts only
+- [ ] Server/client separation correct
+- [ ] Data flow is clear
+- [ ] No circular dependencies
+
+### 4. Document All Issues
+
+For each issue found:
+
+```yaml
+issue_{n}:
+ severity: critical|high|medium|low
+ category: security|performance|error-handling|testing|quality|architecture
+ file: "{file_path}"
+ line: {line_number}
+ problem: |
+ {Clear description of the issue}
+ risk: |
+ {What could go wrong if not fixed}
+ fix: |
+ {How to fix it}
+```
+
+### 5. Fix All Issues
+
+For EACH issue documented:
+
+1. Edit the file to fix the issue
+2. Add test if issue wasn't covered
+3. Verify the fix is correct
+4. Mark as fixed
+
+### 6. Run Verification
+
+After all fixes:
+
+```bash
+npm run lint
+npm run build
+npm test -- --run
+```
+
+All must pass.
+
+### 7. Create Review Report
+
+Append to story file or create `{sprint_artifacts}/review-{story_id}.md`:
+
+```markdown
+# Code Review Report - Story {story_id}
+
+## Summary
+- Issues Found: {count}
+- Issues Fixed: {count}
+- Categories Reviewed: {list}
+
+## Issues Detail
+
+### Issue 1: {title}
+- **Severity:** {severity}
+- **Category:** {category}
+- **File:** {file}:{line}
+- **Problem:** {description}
+- **Fix Applied:** {fix_description}
+
+### Issue 2: {title}
+...
+
+## Security Checklist
+- [x] RLS policies verified
+- [x] No credential exposure
+- [x] Input validation present
+
+## Performance Checklist
+- [x] No N+1 queries
+- [x] Indexes verified
+
+## Final Status
+All issues resolved. Tests passing.
+
+Reviewed by: DEV (adversarial)
+Reviewed at: {timestamp}
+```
+
+### 8. Update Pipeline State
+
+Update state file:
+- Add `6` to `stepsCompleted`
+- Set `lastStep: 6`
+- Set `steps.step-06-code-review.status: completed`
+- Record `issues_found` and `issues_fixed`
+
+### 9. Present Summary and Menu
+
+Display:
+```
+Code Review Complete
+
+Issues Found: {count} (minimum 3 required)
+Issues Fixed: {count}
+
+By Category:
+- Security: {count}
+- Performance: {count}
+- Error Handling: {count}
+- Test Coverage: {count}
+- Code Quality: {count}
+- Architecture: {count}
+
+All Tests: PASSING
+Lint: CLEAN
+Build: SUCCESS
+
+Review Report: {report_path}
+```
+
+**Interactive Mode Menu:**
+```
+[C] Continue to Completion
+[R] Run another review pass
+[T] Run tests again
+[H] Halt pipeline
+```
+
+**Batch Mode:** Auto-continue if minimum issues found and fixed
+
+## QUALITY GATE
+
+Before proceeding:
+- [ ] Minimum 3 issues found and fixed
+- [ ] All categories reviewed
+- [ ] All tests still passing
+- [ ] Lint clean
+- [ ] Build succeeds
+- [ ] Review report created
+
+## MCP TOOLS AVAILABLE
+
+- `mcp__supabase__get_advisors` - Security/performance checks
+- `mcp__supabase__execute_sql` - Query verification
+
+## CRITICAL STEP COMPLETION
+
+**ONLY WHEN** [minimum 3 issues found AND all fixed AND tests pass],
+load and execute `{nextStepFile}` for story completion.
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- Found and fixed 3-10 issues
+- All categories reviewed
+- Tests still passing after fixes
+- Review report complete
+- No "looks good" shortcuts
+
+### β FAILURE
+- Saying "looks good" or "no issues found"
+- Finding fewer than 3 issues
+- Not fixing issues found
+- Tests failing after fixes
+- Skipping review categories
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-07-complete.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-07-complete.md
new file mode 100644
index 00000000..eabbcfc4
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-07-complete.md
@@ -0,0 +1,210 @@
+---
+name: 'step-07-complete'
+description: 'Update sprint status and create git commit'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-07-complete.md'
+nextStepFile: '{workflow_path}/steps/step-08-summary.md'
+
+# Role Switch
+role: sm
+agentFile: '{project-root}/_bmad/bmm/agents/sm.md'
+---
+
+# Step 7: Complete Story
+
+## ROLE SWITCH
+
+**Switching back to SM (Scrum Master) perspective.**
+
+You are completing the story lifecycle:
+- Update sprint tracking
+- Create git commit
+- Finalize documentation
+
+## STEP GOAL
+
+Complete the story development lifecycle:
+1. Final verification (tests, lint, build)
+2. Update sprint-status.yaml
+3. Create git commit with proper message
+4. Update story file status
+
+## MANDATORY EXECUTION RULES
+
+### Completion Rules
+
+- **VERIFY** everything passes before committing
+- **UPDATE** all tracking files
+- **COMMIT** with conventional commit message
+- **DOCUMENT** completion metadata
+
+## EXECUTION SEQUENCE
+
+### 1. Final Verification
+
+Run full verification suite:
+
+```bash
+npm test -- --run
+npm run lint
+npm run build
+```
+
+All must pass before proceeding.
+
+**If any fail:** HALT and report issues.
+
+### 2. Update Story File Status
+
+Edit story file, update frontmatter:
+
+```yaml
+---
+status: done
+completed_at: {timestamp}
+implementation_notes: |
+ - Tests created and passing
+ - Code reviewed and approved
+ - {count} issues found and fixed
+---
+```
+
+### 3. Update Sprint Status
+
+Edit: `{sprint_artifacts}/sprint-status.yaml`
+
+Find story {story_id} and update:
+
+```yaml
+stories:
+ - id: "{story_id}"
+ status: done
+ completed_at: {timestamp}
+ metadata:
+ tests_passing: true
+ code_reviewed: true
+ issues_found: {count}
+ issues_fixed: {count}
+ pipeline_version: "story-pipeline-v2.0"
+```
+
+### 4. Stage Git Changes
+
+```bash
+git add src/
+git add _bmad-output/implementation-artifacts/story-{story_id}.md
+git add _bmad-output/implementation-artifacts/sprint-status.yaml
+git add src/supabase/migrations/
+```
+
+### 5. Create Git Commit
+
+Check for changes:
+```bash
+git diff --cached --quiet
+```
+
+If changes exist, create commit:
+
+```bash
+git commit -m "$(cat <<'EOF'
+feat(epic-{epic_num}): complete story {story_id}
+
+- Acceptance tests created for all criteria
+- All tests passing (TDD green phase)
+- Code reviewed: {issues_found} issues found and fixed
+
+Story: {story_title}
+Pipeline: story-pipeline-v2.0
+
+π€ Generated with BMAD Story Pipeline
+
+Co-Authored-By: Claude
+EOF
+)"
+```
+
+### 6. Verify Commit
+
+```bash
+git log -1 --oneline
+git status
+```
+
+Confirm:
+- Commit created successfully
+- Working directory clean (or only untracked files)
+
+### 7. Update Pipeline State
+
+Update state file:
+- Add `7` to `stepsCompleted`
+- Set `lastStep: 7`
+- Set `steps.step-07-complete.status: completed`
+- Set `status: completing`
+
+### 8. Present Summary and Menu
+
+Display:
+```
+Story {story_id} Completed
+
+Sprint Status: Updated β
+Story Status: done β
+Git Commit: Created β
+
+Commit: {commit_hash}
+Message: feat(epic-{epic_num}): complete story {story_id}
+
+Files Committed:
+- {file_count} files
+
+Next: Generate summary and audit trail
+```
+
+**Interactive Mode Menu:**
+```
+[C] Continue to Summary
+[L] View git log
+[S] View git status
+[H] Halt (story is complete, audit pending)
+```
+
+**Batch Mode:** Auto-continue to summary
+
+## QUALITY GATE
+
+Before proceeding:
+- [ ] All tests pass
+- [ ] Lint clean
+- [ ] Build succeeds
+- [ ] Sprint status updated
+- [ ] Git commit created
+- [ ] Story status set to done
+
+## CRITICAL STEP COMPLETION
+
+**ONLY WHEN** [verification passes AND commit created AND status updated],
+load and execute `{nextStepFile}` for summary generation.
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- All verification passes
+- Sprint status updated correctly
+- Conventional commit created
+- Story marked as done
+- Clean git state
+
+### β FAILURE
+- Committing with failing tests
+- Missing sprint status update
+- Malformed commit message
+- Not including all changed files
+- Story not marked done
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/steps/step-08-summary.md b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-08-summary.md
new file mode 100644
index 00000000..43566307
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/steps/step-08-summary.md
@@ -0,0 +1,273 @@
+---
+name: 'step-08-summary'
+description: 'Generate audit trail and pipeline summary report'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-08-summary.md'
+auditFile: '{sprint_artifacts}/audit-{story_id}-{date}.yaml'
+
+# No role needed - orchestrator
+role: null
+---
+
+# Step 8: Pipeline Summary
+
+## STEP GOAL
+
+Generate final audit trail and summary report:
+1. Calculate pipeline metrics
+2. Generate audit trail file
+3. Create summary report
+4. Clean up pipeline state
+5. Suggest next steps
+
+## EXECUTION SEQUENCE
+
+### 1. Calculate Pipeline Metrics
+
+From pipeline state file, calculate:
+
+```yaml
+metrics:
+ total_duration: {sum of all step durations}
+ steps_completed: {count}
+ issues_found: {from code review}
+ issues_fixed: {from code review}
+ tests_created: {count}
+ files_modified: {count}
+ migrations_applied: {count}
+```
+
+### 2. Generate Audit Trail
+
+Create: `{auditFile}`
+
+```yaml
+---
+audit_version: "1.0"
+pipeline: "story-pipeline-v2.0"
+story_id: "{story_id}"
+epic_num: {epic_num}
+---
+
+# Pipeline Audit Trail
+
+## Execution Summary
+started_at: "{started_at}"
+completed_at: "{timestamp}"
+total_duration: "{duration}"
+mode: "{mode}"
+status: "completed"
+
+## Steps Executed
+steps:
+ - step: 1
+ name: "Initialize"
+ status: completed
+ duration: "{duration}"
+
+ - step: 2
+ name: "Create Story"
+ status: completed
+ duration: "{duration}"
+ agent: sm
+ output: "{story_file_path}"
+
+ - step: 3
+ name: "Validate Story"
+ status: completed
+ duration: "{duration}"
+ agent: sm
+ issues_found: {count}
+ issues_fixed: {count}
+
+ - step: 4
+ name: "ATDD"
+ status: completed
+ duration: "{duration}"
+ agent: tea
+ tests_created: {count}
+ test_files:
+ - "{file_1}"
+ - "{file_2}"
+
+ - step: 5
+ name: "Implement"
+ status: completed
+ duration: "{duration}"
+ agent: dev
+ files_modified: {count}
+ migrations:
+ - "{migration_1}"
+
+ - step: 6
+ name: "Code Review"
+ status: completed
+ duration: "{duration}"
+ agent: dev
+ issues_found: {count}
+ issues_fixed: {count}
+ categories_reviewed:
+ - security
+ - performance
+ - error-handling
+ - testing
+ - quality
+ - architecture
+
+ - step: 7
+ name: "Complete"
+ status: completed
+ duration: "{duration}"
+ agent: sm
+ commit_hash: "{hash}"
+
+ - step: 8
+ name: "Summary"
+ status: completed
+ duration: "{duration}"
+
+## Quality Gates
+gates:
+ story_creation:
+ passed: true
+ criteria_met: [list]
+ validation:
+ passed: true
+ quality_score: {score}
+ atdd:
+ passed: true
+ tests_failing: true # Expected in red phase
+ implementation:
+ passed: true
+ tests_passing: true
+ code_review:
+ passed: true
+ minimum_issues_found: true
+
+## Artifacts Produced
+artifacts:
+ story_file: "{path}"
+ test_files:
+ - "{path}"
+ migrations:
+ - "{path}"
+ atdd_checklist: "{path}"
+ review_report: "{path}"
+ commit: "{hash}"
+
+## Token Efficiency
+token_estimate:
+ traditional_approach: "~71K tokens (6 claude calls)"
+ step_file_approach: "~{actual}K tokens (1 session)"
+ savings: "{percentage}%"
+```
+
+### 3. Generate Summary Report
+
+Display to user:
+
+```
+βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+ PIPELINE COMPLETE: Story {story_id}
+βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+
+π EXECUTION SUMMARY
+ββββββββββββββββββββ
+Duration: {total_duration}
+Mode: {mode}
+Status: β Completed Successfully
+
+π STORY DETAILS
+ββββββββββββββββββββ
+Epic: {epic_num}
+Title: {story_title}
+Commit: {commit_hash}
+
+β QUALITY METRICS
+ββββββββββββββββββββ
+Validation Score: {score}/100
+Issues Found: {count}
+Issues Fixed: {count}
+Tests Created: {count}
+Files Modified: {count}
+
+π ARTIFACTS
+ββββββββββββββββββββ
+Story: {story_file}
+Tests: {test_count} files
+Migrations: {migration_count}
+Audit: {audit_file}
+
+π° TOKEN EFFICIENCY
+ββββββββββββββββββββ
+Traditional: ~71K tokens
+Step-file: ~{actual}K tokens
+Savings: {percentage}%
+
+βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+```
+
+### 4. Update Final Pipeline State
+
+Update state file:
+- Add `8` to `stepsCompleted`
+- Set `lastStep: 8`
+- Set `status: completed`
+- Set `completed_at: {timestamp}`
+
+### 5. Suggest Next Steps
+
+Display:
+
+```
+π NEXT STEPS
+ββββββββββββββββββββ
+1. Review commit: git show {hash}
+2. Push when ready: git push
+3. Next story: bmad build {next_story_id}
+4. View audit: cat {audit_file}
+
+Optional:
+- Run verification: bmad verify {story_id}
+- Run with coverage: npm test -- --coverage
+```
+
+### 6. Clean Up (Optional)
+
+In batch mode, optionally archive pipeline state:
+
+```bash
+mv {state_file} {state_file}.completed
+```
+
+Or keep for reference.
+
+## COMPLETION
+
+Pipeline execution complete. No next step to load.
+
+Display final message:
+```
+Pipeline complete. Story {story_id} is ready.
+```
+
+---
+
+## SUCCESS/FAILURE METRICS
+
+### β SUCCESS
+- Audit trail generated with all details
+- Summary displayed clearly
+- All metrics calculated
+- State marked complete
+- Next steps provided
+
+### β FAILURE
+- Missing audit trail
+- Incomplete metrics
+- State not finalized
+- No summary provided
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/templates/audit-trail.yaml.template b/src/bmm/workflows/4-implementation/story-pipeline/templates/audit-trail.yaml.template
new file mode 100644
index 00000000..fc16f263
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/templates/audit-trail.yaml.template
@@ -0,0 +1,249 @@
+# Audit Trail Template
+# Generated at pipeline completion
+# Location: {sprint_artifacts}/audit-{story_id}-{date}.yaml
+# yamllint disable
+
+---
+audit_version: "1.0"
+pipeline_version: "story-pipeline-v2.0"
+
+# Story identification
+story_id: "{{story_id}}"
+epic_num: {{epic_num}}
+story_title: "{{story_title}}"
+
+# Execution summary
+execution:
+ started_at: "{{started_at}}"
+ completed_at: "{{completed_at}}"
+ total_duration: "{{duration}}"
+ mode: "{{mode}}"
+ status: "{{status}}"
+
+# Agent roles used
+agents:
+ sm:
+ name: "Scrum Master"
+ steps: [2, 3, 7]
+ total_time: null
+ tea:
+ name: "Test Engineering Architect"
+ steps: [4]
+ total_time: null
+ dev:
+ name: "Developer"
+ steps: [5, 6]
+ total_time: null
+
+# Step-by-step execution log
+steps:
+ - step: 1
+ name: "Initialize"
+ status: "{{status}}"
+ duration: "{{duration}}"
+ actions:
+ - "Loaded project context"
+ - "Loaded epic definition"
+ - "Cached architecture sections"
+
+ - step: 2
+ name: "Create Story"
+ status: "{{status}}"
+ duration: "{{duration}}"
+ agent: "sm"
+ research_queries:
+ - "{{query_1}}"
+ - "{{query_2}}"
+ output: "{{story_file_path}}"
+ acceptance_criteria_count: {{count}}
+
+ - step: 3
+ name: "Validate Story"
+ status: "{{status}}"
+ duration: "{{duration}}"
+ agent: "sm"
+ issues_found: {{count}}
+ issues_fixed: {{count}}
+ quality_score: {{score}}
+ validation_areas:
+ - "AC structure"
+ - "Testability"
+ - "Technical feasibility"
+ - "Edge cases"
+
+ - step: 4
+ name: "ATDD (Red Phase)"
+ status: "{{status}}"
+ duration: "{{duration}}"
+ agent: "tea"
+ tests_created: {{count}}
+ test_files:
+ - "{{path}}"
+ factories_created:
+ - "{{factory}}"
+ fixtures_created:
+ - "{{fixture}}"
+ data_testids_documented: {{count}}
+
+ - step: 5
+ name: "Implement (Green Phase)"
+ status: "{{status}}"
+ duration: "{{duration}}"
+ agent: "dev"
+ files_modified: {{count}}
+ migrations_applied:
+ - "{{migration}}"
+ test_results:
+ passed: {{count}}
+ failed: 0
+ lint_status: "clean"
+ build_status: "success"
+
+ - step: 6
+ name: "Code Review"
+ status: "{{status}}"
+ duration: "{{duration}}"
+ agent: "dev"
+ review_type: "adversarial"
+ issues_found: {{count}}
+ issues_fixed: {{count}}
+ categories_reviewed:
+ security:
+ issues: {{count}}
+ fixed: {{count}}
+ performance:
+ issues: {{count}}
+ fixed: {{count}}
+ error_handling:
+ issues: {{count}}
+ fixed: {{count}}
+ testing:
+ issues: {{count}}
+ fixed: {{count}}
+ code_quality:
+ issues: {{count}}
+ fixed: {{count}}
+ architecture:
+ issues: {{count}}
+ fixed: {{count}}
+
+ - step: 7
+ name: "Complete"
+ status: "{{status}}"
+ duration: "{{duration}}"
+ agent: "sm"
+ commit_hash: "{{hash}}"
+ commit_message: "feat(epic-{{epic_num}}): complete story {{story_id}}"
+ files_committed: {{count}}
+ sprint_status_updated: true
+
+ - step: 8
+ name: "Summary"
+ status: "{{status}}"
+ duration: "{{duration}}"
+ audit_file: "{{this_file}}"
+
+# Quality gates summary
+quality_gates:
+ story_creation:
+ passed: true
+ criteria:
+ - "Story file created"
+ - "All AC in BDD format"
+ - "Test scenarios defined"
+
+ validation:
+ passed: true
+ quality_score: {{score}}
+ criteria:
+ - "No ambiguous requirements"
+ - "All issues fixed"
+
+ atdd:
+ passed: true
+ criteria:
+ - "Tests for all AC"
+ - "Tests fail (red phase)"
+ - "data-testids documented"
+
+ implementation:
+ passed: true
+ criteria:
+ - "All tests pass"
+ - "Lint clean"
+ - "Build success"
+ - "RLS policies added"
+
+ code_review:
+ passed: true
+ issues_found: {{count}}
+ criteria:
+ - "Minimum 3 issues found"
+ - "All issues fixed"
+ - "All categories reviewed"
+
+# Artifacts produced
+artifacts:
+ story_file:
+ path: "{{path}}"
+ size: "{{size}}"
+
+ test_files:
+ - path: "{{path}}"
+ test_count: {{count}}
+
+ migrations:
+ - path: "{{path}}"
+ tables_affected: ["{{table}}"]
+
+ checklists:
+ atdd: "{{path}}"
+ review: "{{path}}"
+
+ commit:
+ hash: "{{hash}}"
+ branch: "{{branch}}"
+ pushed: false
+
+# Token efficiency comparison
+token_efficiency:
+ traditional_approach:
+ description: "6 separate claude -p calls"
+ estimated_tokens: 71000
+ breakdown:
+ - stage: "create-story"
+ tokens: 12000
+ - stage: "validate-story"
+ tokens: 11000
+ - stage: "atdd"
+ tokens: 12000
+ - stage: "implement"
+ tokens: 15000
+ - stage: "code-review"
+ tokens: 13000
+ - stage: "complete"
+ tokens: 8000
+
+ step_file_approach:
+ description: "Single session with step-file loading"
+ estimated_tokens: "{{actual}}"
+ savings_percentage: "{{percentage}}"
+ breakdown:
+ - step: "context_loading"
+ tokens: 5000
+ note: "Loaded once, cached"
+ - step: "step_files"
+ tokens: "{{tokens}}"
+ note: "~200 lines each"
+ - step: "execution"
+ tokens: "{{tokens}}"
+ note: "Actual work"
+
+# Notes and observations
+notes:
+ - "{{note_1}}"
+ - "{{note_2}}"
+
+# Generated by
+generated_by: "BMAD Story Pipeline v2.0"
+generated_at: "{{timestamp}}"
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/templates/pipeline-state.yaml.template b/src/bmm/workflows/4-implementation/story-pipeline/templates/pipeline-state.yaml.template
new file mode 100644
index 00000000..d7281aa4
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/templates/pipeline-state.yaml.template
@@ -0,0 +1,144 @@
+# Pipeline State Template
+# Copy and populate for each story execution
+# Location: {sprint_artifacts}/pipeline-state-{story_id}.yaml
+
+---
+# Story identification
+story_id: "{{story_id}}"
+epic_num: {{epic_num}}
+story_num: {{story_num}}
+
+# Execution mode
+mode: "interactive" # or "batch"
+
+# Progress tracking
+stepsCompleted: []
+lastStep: 0
+currentStep: 0
+status: "not_started" # not_started, initializing, in_progress, completing, completed, failed
+
+# Timestamps
+started_at: null
+updated_at: null
+completed_at: null
+
+# Cached document context (loaded once, reused)
+cached_context:
+ project_context_loaded: false
+ project_context_path: null
+ epic_loaded: false
+ epic_path: null
+ architecture_sections: []
+ architecture_paths: []
+ story_file_exists: false
+ story_file_path: null
+
+# Step status tracking
+steps:
+ step-01-init:
+ status: pending
+ started_at: null
+ completed_at: null
+ duration: null
+ notes: null
+
+ step-02-create-story:
+ status: pending
+ started_at: null
+ completed_at: null
+ duration: null
+ agent: sm
+ output_file: null
+ notes: null
+
+ step-03-validate-story:
+ status: pending
+ started_at: null
+ completed_at: null
+ duration: null
+ agent: sm
+ issues_found: 0
+ issues_fixed: 0
+ quality_score: null
+ notes: null
+
+ step-04-atdd:
+ status: pending
+ started_at: null
+ completed_at: null
+ duration: null
+ agent: tea
+ tests_created: 0
+ test_files: []
+ factories_created: []
+ fixtures_created: []
+ notes: null
+
+ step-05-implement:
+ status: pending
+ started_at: null
+ completed_at: null
+ duration: null
+ agent: dev
+ files_modified: []
+ migrations_applied: []
+ tests_passing: null
+ lint_clean: null
+ build_success: null
+ notes: null
+
+ step-06-code-review:
+ status: pending
+ started_at: null
+ completed_at: null
+ duration: null
+ agent: dev
+ issues_found: 0
+ issues_fixed: 0
+ categories_reviewed: []
+ tests_passing: null
+ notes: null
+
+ step-07-complete:
+ status: pending
+ started_at: null
+ completed_at: null
+ duration: null
+ agent: sm
+ commit_hash: null
+ sprint_status_updated: false
+ notes: null
+
+ step-08-summary:
+ status: pending
+ started_at: null
+ completed_at: null
+ duration: null
+ audit_file: null
+ notes: null
+
+# Error tracking (if pipeline fails)
+errors: []
+# Example error entry:
+# - step: 5
+# timestamp: "2025-01-15T12:00:00Z"
+# error: "Tests failed after implementation"
+# details: "3 tests failing in auth.test.ts"
+# recoverable: true
+
+# Quality gates passed
+quality_gates:
+ story_creation: null
+ validation: null
+ atdd: null
+ implementation: null
+ code_review: null
+
+# Metrics (populated at end)
+metrics:
+ total_duration: null
+ token_estimate: null
+ files_modified_count: 0
+ tests_created_count: 0
+ issues_found_total: 0
+ issues_fixed_total: 0
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/workflow.md b/src/bmm/workflows/4-implementation/story-pipeline/workflow.md
new file mode 100644
index 00000000..6ca4df47
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/workflow.md
@@ -0,0 +1,272 @@
+---
+name: story-pipeline
+description: Automated story development pipeline with token-efficient step-file architecture. Single-session orchestration replacing multiple Claude calls.
+web_bundle: true
+---
+
+# Story Pipeline Workflow
+
+**Goal:** Execute complete story development lifecycle in a single Claude session: create story, validate, generate tests (ATDD), implement, code review, and complete.
+
+**Your Role:** You are the **BMAD Pipeline Orchestrator**. You will switch between agent roles (SM, TEA, DEV) as directed by each step file. Maintain context across role switches without reloading agent personas.
+
+**Token Efficiency:** This workflow uses step-file architecture for ~60-70% token savings compared to separate Claude calls.
+
+---
+
+## WORKFLOW ARCHITECTURE
+
+This uses **step-file architecture** for disciplined execution:
+
+### Core Principles
+
+- **Micro-file Design**: Each step is a self-contained instruction file (~150-250 lines)
+- **Just-In-Time Loading**: Only the current step file is in memory
+- **Role Switching**: Same session, explicit role switch instead of fresh Claude calls
+- **State Tracking**: Pipeline state in `{sprint_artifacts}/pipeline-state-{story_id}.yaml`
+- **Checkpoint/Resume**: Can resume from any completed step after failure
+
+### Step Processing Rules
+
+1. **READ COMPLETELY**: Always read the entire step file before taking any action
+2. **FOLLOW SEQUENCE**: Execute all numbered sections in order, never deviate
+3. **ROLE SWITCH**: When step specifies a role, adopt that agent's perspective
+4. **QUALITY GATES**: Complete gate criteria before proceeding to next step
+5. **WAIT FOR INPUT**: In interactive mode, halt at menus and wait for user selection
+6. **SAVE STATE**: Update pipeline state file after each step completion
+7. **LOAD NEXT**: When directed, load, read entire file, then execute the next step
+
+### Critical Rules (NO EXCEPTIONS)
+
+- **NEVER** load multiple step files simultaneously
+- **ALWAYS** read entire step file before execution
+- **NEVER** skip steps or optimize the sequence
+- **ALWAYS** update pipeline state after completing each step
+- **ALWAYS** follow the exact instructions in the step file
+- **NEVER** create mental todo lists from future steps
+- **NEVER** look ahead to future step files
+
+### Mode Differences
+
+| Aspect | Interactive | Batch |
+|--------|-------------|-------|
+| Menus | Present, wait for [C] | Auto-proceed |
+| Approval | Required at gates | Skip with YOLO |
+| On failure | Halt, checkpoint | Checkpoint, exit |
+| Code review | Same session | Fresh context option |
+
+---
+
+## EXECUTION MODES
+
+### Interactive Mode (Default)
+
+```bash
+bmad build 1-4 # Interactive pipeline for story 1-4
+bmad build --interactive 1-4
+```
+
+Features:
+- Menu navigation between steps
+- User approval at quality gates
+- Can pause and resume
+- Role switching in same session
+
+### Batch Mode
+
+```bash
+bmad build --batch 1-4 # Unattended execution
+```
+
+Features:
+- Auto-proceed through all steps
+- YOLO mode for approvals
+- Fail-fast on errors
+- Optional fresh context for code review
+
+---
+
+## INITIALIZATION SEQUENCE
+
+### 1. Configuration Loading
+
+Load and read config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
+- `output_folder`, `sprint_artifacts`, `communication_language`
+
+### 2. Pipeline Parameters
+
+Resolve from invocation:
+- `story_id`: Story identifier (e.g., "1-4")
+- `epic_num`: Epic number (e.g., 1)
+- `story_num`: Story number (e.g., 4)
+- `mode`: "interactive" or "batch"
+
+### 3. Document Pre-loading
+
+Load and cache these documents (read once, use across steps):
+- Story file: `{sprint_artifacts}/story-{epic_num}-{story_num}.md`
+- Epic file: `{output_folder}/epic-{epic_num}.md`
+- Architecture: `{output_folder}/architecture.md` (selective sections)
+- Project context: `**/project-context.md`
+
+### 4. First Step Execution
+
+Load, read the full file and then execute:
+`{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline/steps/step-01-init.md`
+
+---
+
+## STEP FILE MAP
+
+| Step | File | Agent | Purpose |
+|------|------|-------|---------|
+| 1 | step-01-init.md | - | Load context, detect mode, cache docs |
+| 1b | step-01b-resume.md | - | Resume from checkpoint (conditional) |
+| 2 | step-02-create-story.md | SM | Create detailed story with research |
+| 3 | step-03-validate-story.md | SM | Adversarial validation |
+| 4 | step-04-atdd.md | TEA | Generate failing tests (red phase) |
+| 5 | step-05-implement.md | DEV | Implement to pass tests (green phase) |
+| 5b | step-05b-post-validation.md | DEV | Verify completed tasks vs codebase reality |
+| 6 | step-06-code-review.md | DEV | Find 3-10 specific issues |
+| 7 | step-07-complete.md | SM | Update status, git commit |
+| 8 | step-08-summary.md | - | Audit trail, summary report |
+
+---
+
+## ROLE SWITCHING PROTOCOL
+
+When a step requires a different agent role:
+
+1. **Announce Role Switch**: "Switching to [ROLE] perspective..."
+2. **Adopt Mindset**: Think from that role's expertise
+3. **Apply Checklist**: Use role-specific checklist from `checklists/`
+4. **Maintain Context**: Keep cached documents in memory
+5. **Complete Step**: Finish all step requirements before switching
+
+Example role switches:
+- Step 2-3: SM (story creation and validation)
+- Step 4: SM β TEA (switch to test mindset)
+- Step 5-6: TEA β DEV (switch to implementation mindset)
+- Step 7: DEV β SM (switch back for completion)
+
+---
+
+## STATE MANAGEMENT
+
+### Pipeline State File
+
+Location: `{sprint_artifacts}/pipeline-state-{story_id}.yaml`
+
+```yaml
+story_id: "1-4"
+epic_num: 1
+story_num: 4
+mode: "interactive"
+stepsCompleted: [1, 2, 3]
+lastStep: 3
+currentStep: 4
+status: "in_progress"
+started_at: "2025-01-15T10:00:00Z"
+updated_at: "2025-01-15T11:30:00Z"
+cached_context:
+ story_loaded: true
+ epic_loaded: true
+ architecture_sections: ["tech_stack", "data_model"]
+steps:
+ step-01-init: { status: completed, duration: "0:02:15" }
+ step-02-create-story: { status: completed, duration: "0:15:30" }
+ step-03-validate-story: { status: completed, duration: "0:08:45" }
+ step-04-atdd: { status: in_progress }
+ step-05-implement: { status: pending }
+ step-06-code-review: { status: pending }
+ step-07-complete: { status: pending }
+ step-08-summary: { status: pending }
+```
+
+### Checkpoint/Resume
+
+To resume after failure:
+```bash
+bmad build --resume 1-4
+```
+
+Resume logic:
+1. Load state file for story 1-4
+2. Find `lastStep` completed
+3. Load and execute step `lastStep + 1`
+4. Continue from there
+
+---
+
+## QUALITY GATES
+
+Each gate must pass before proceeding:
+
+### Story Creation Gate (Step 2)
+- [ ] Story file created with proper frontmatter
+- [ ] All acceptance criteria defined with Given/When/Then
+- [ ] Technical context linked
+
+### Validation Gate (Step 3)
+- [ ] Story passes adversarial review
+- [ ] No ambiguous requirements
+- [ ] Implementation path clear
+
+### ATDD Gate (Step 4)
+- [ ] Tests exist for all acceptance criteria
+- [ ] Tests fail (red phase verified)
+- [ ] Test structure follows project patterns
+
+### Implementation Gate (Step 5)
+- [ ] All tests pass (green phase)
+- [ ] Code follows project patterns
+- [ ] No TypeScript errors
+- [ ] Lint passes
+
+### Post-Validation Gate (Step 5b)
+- [ ] All completed tasks verified against codebase
+- [ ] Zero false positives (or re-implementation complete)
+- [ ] Files/functions/tests actually exist
+- [ ] Tests actually pass (not just claimed)
+
+### Code Review Gate (Step 6)
+- [ ] 3-10 specific issues identified (not "looks good")
+- [ ] All issues resolved or documented
+- [ ] Security review complete
+
+---
+
+## SUCCESS METRICS
+
+### β SUCCESS
+
+- Pipeline completes all 8 steps
+- All quality gates passed
+- Story status updated to "done"
+- Git commit created
+- Audit trail generated
+- Token usage < 35K (target)
+
+### β FAILURE
+
+- Step file instructions skipped or optimized
+- Quality gate bypassed without approval
+- Role not properly switched
+- State file not updated
+- Tests not verified to fail before implementation
+- Code review accepts "looks good"
+
+---
+
+## AUDIT TRAIL
+
+After completion, generate audit trail at:
+`{sprint_artifacts}/audit-{story_id}-{date}.yaml`
+
+Contents:
+- Pipeline execution timeline
+- Step durations
+- Quality gate results
+- Issues found and resolved
+- Files modified
+- Token usage estimate
diff --git a/src/bmm/workflows/4-implementation/story-pipeline/workflow.yaml b/src/bmm/workflows/4-implementation/story-pipeline/workflow.yaml
new file mode 100644
index 00000000..be1e1a69
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/story-pipeline/workflow.yaml
@@ -0,0 +1,235 @@
+name: story-pipeline
+description: "Automated story development pipeline with token-efficient step-file architecture. Replaces separate Claude calls with single-session orchestration."
+author: "BMad + digital-bridge"
+version: "2.0.0"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+output_folder: "{config_source}:output_folder"
+sprint_artifacts: "{config_source}:sprint_artifacts"
+communication_language: "{config_source}:communication_language"
+date: system-generated
+
+# Workflow paths
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/story-pipeline"
+steps_path: "{installed_path}/steps"
+templates_path: "{installed_path}/templates"
+checklists_path: "{installed_path}/checklists"
+
+# State management
+state_file: "{sprint_artifacts}/pipeline-state-{{story_id}}.yaml"
+audit_trail: "{sprint_artifacts}/audit-{{story_id}}-{{date}}.yaml"
+
+# Workflow modes
+modes:
+ interactive:
+ description: "Human-in-the-loop with menu navigation between steps"
+ checkpoint_on_failure: true
+ requires_approval: true
+ fresh_context_for_review: false # Role switch instead
+ batch:
+ description: "Unattended execution with YOLO mode"
+ checkpoint_on_failure: true
+ requires_approval: false
+ fresh_context_for_review: true # Checkpoint before code review
+ fail_fast: true
+
+# Agent role definitions (loaded once, switched as needed)
+agents:
+ sm:
+ name: "Scrum Master"
+ persona: "{project-root}/_bmad/bmm/agents/sm.md"
+ description: "Story creation, validation, sprint status"
+ used_in_steps: [2, 3, 7]
+ tea:
+ name: "Test Engineering Architect"
+ persona: "{project-root}/_bmad/bmm/agents/tea.md"
+ description: "ATDD test generation, red phase verification"
+ used_in_steps: [4]
+ dev:
+ name: "Developer"
+ persona: "{project-root}/_bmad/bmm/agents/dev.md"
+ description: "Implementation, post-validation, code review"
+ used_in_steps: [5, "5b", 6]
+
+# Step file definitions
+steps:
+ - step: 1
+ file: "{steps_path}/step-01-init.md"
+ name: "Initialize Pipeline"
+ description: "Load story context, detect mode, cache documents"
+ agent: null
+ quality_gate: false
+
+ - step: "1b"
+ file: "{steps_path}/step-01b-resume.md"
+ name: "Resume from Checkpoint"
+ description: "Resume pipeline from last completed step"
+ agent: null
+ quality_gate: false
+ conditional: true # Only if resuming
+
+ - step: 2
+ file: "{steps_path}/step-02-create-story.md"
+ name: "Create Story"
+ description: "Generate detailed story from epic with research"
+ agent: sm
+ quality_gate: true
+ mcp_tools: [exa]
+ checklist: "{checklists_path}/story-creation.md"
+
+ - step: 3
+ file: "{steps_path}/step-03-validate-story.md"
+ name: "Validate Story"
+ description: "Adversarial validation of story completeness"
+ agent: sm
+ quality_gate: true
+ checklist: "{checklists_path}/story-validation.md"
+
+ - step: 4
+ file: "{steps_path}/step-04-atdd.md"
+ name: "ATDD Test Generation"
+ description: "Generate failing acceptance tests (red phase)"
+ agent: tea
+ quality_gate: true
+ checklist: "{checklists_path}/atdd.md"
+
+ - step: 5
+ file: "{steps_path}/step-05-implement.md"
+ name: "Implement Story"
+ description: "Implement code to pass tests (green phase)"
+ agent: dev
+ quality_gate: true
+ checklist: "{checklists_path}/implementation.md"
+
+ - step: "5b"
+ file: "{steps_path}/step-05b-post-validation.md"
+ name: "Post-Implementation Validation"
+ description: "Verify completed tasks against codebase reality (catch false positives)"
+ agent: dev
+ quality_gate: true
+ iterative: true # May re-invoke step 5 if gaps found
+
+ - step: 6
+ file: "{steps_path}/step-06-code-review.md"
+ name: "Code Review"
+ description: "Adversarial code review finding 3-10 issues"
+ agent: dev
+ quality_gate: true
+ requires_fresh_context: true # In batch mode, checkpoint here
+ checklist: "{checklists_path}/code-review.md"
+
+ - step: 7
+ file: "{steps_path}/step-07-complete.md"
+ name: "Complete Story"
+ description: "Update sprint status, create git commit"
+ agent: sm
+ quality_gate: false
+
+ - step: 8
+ file: "{steps_path}/step-08-summary.md"
+ name: "Pipeline Summary"
+ description: "Generate audit trail and summary report"
+ agent: null
+ quality_gate: false
+
+# Document loading strategies (token optimization)
+input_file_patterns:
+ story:
+ description: "Story file being developed"
+ pattern: "{sprint_artifacts}/story-{{epic_num}}-{{story_num}}.md"
+ load_strategy: "FULL_LOAD"
+ cache: true # Keep in memory across steps
+
+ epics:
+ description: "Epic definitions with BDD scenarios"
+ whole: "{output_folder}/epic*.md"
+ sharded: "{output_folder}/epics/*.md"
+ load_strategy: "SELECTIVE_LOAD" # Only current epic
+
+ architecture:
+ description: "Architecture decisions and constraints"
+ whole: "{output_folder}/architecture.md"
+ sharded: "{output_folder}/architecture/*.md"
+ load_strategy: "INDEX_GUIDED" # Use index for section selection
+ sections_needed: ["tech_stack", "data_model", "api_patterns"]
+
+ prd:
+ description: "Product requirements"
+ whole: "{output_folder}/prd.md"
+ sharded: "{output_folder}/prd/*.md"
+ load_strategy: "SELECTIVE_LOAD" # Only relevant sections
+
+ project_context:
+ description: "Critical rules and patterns"
+ pattern: "**/project-context.md"
+ load_strategy: "FULL_LOAD"
+ cache: true
+
+# MCP tool extensions
+mcp_extensions:
+ exa:
+ description: "Web search for research during story creation"
+ used_in_steps: [2]
+ supabase:
+ description: "Database operations during implementation"
+ used_in_steps: [5]
+
+# Quality gates (must pass to proceed)
+quality_gates:
+ story_creation:
+ step: 2
+ criteria:
+ - "Story file created with proper frontmatter"
+ - "All acceptance criteria defined"
+ - "Technical context linked"
+
+ story_validation:
+ step: 3
+ criteria:
+ - "Story passes adversarial review"
+ - "No ambiguous requirements"
+ - "Implementation path clear"
+
+ atdd:
+ step: 4
+ criteria:
+ - "Tests exist for all acceptance criteria"
+ - "Tests fail (red phase verified)"
+ - "Test structure follows project patterns"
+
+ implementation:
+ step: 5
+ criteria:
+ - "All tests pass (green phase)"
+ - "Code follows project patterns"
+ - "No TypeScript errors"
+
+ post_validation:
+ step: "5b"
+ criteria:
+ - "All completed tasks verified against codebase"
+ - "Zero false positives remaining"
+ - "Files/functions/tests actually exist"
+ - "Tests actually pass (not just claimed)"
+
+ code_review:
+ step: 6
+ criteria:
+ - "3-10 specific issues identified"
+ - "All issues resolved or documented"
+ - "Security review complete"
+
+# Audit trail configuration
+audit:
+ enabled: true
+ output_file: "{audit_trail}"
+ include:
+ - timestamps
+ - step_durations
+ - quality_gate_results
+ - issues_found
+ - files_modified
+ - token_usage
+
+standalone: true
diff --git a/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/README.md b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/README.md
new file mode 100644
index 00000000..770ec6ad
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/README.md
@@ -0,0 +1,135 @@
+# Super-Dev-Pipeline v2.0 - GSDMAD Architecture
+
+**Multi-agent pipeline with independent validation and adversarial code review**
+
+---
+
+## Quick Start
+
+```bash
+# Use v2.0 for a story
+/super-dev-pipeline mode=multi_agent story_key=17-10
+
+# Use v1.x (fallback)
+/super-dev-pipeline mode=single_agent story_key=17-10
+```
+
+---
+
+## What's New in v2.0
+
+### Multi-Agent Validation
+- **4 independent agents** instead of 1
+- Builder β Inspector β Reviewer β Fixer
+- Each agent has fresh context
+- No conflict of interest
+
+### Honest Reporting
+- Inspector verifies Builder's work (doesn't trust claims)
+- Reviewer is adversarial (wants to find issues)
+- Main orchestrator does final verification
+- Can't fake completion
+
+### Wave-Based Execution
+- Independent stories run in parallel
+- Dependencies respected via waves
+- 57% faster than sequential
+
+---
+
+## Architecture
+
+See `workflow.md` for complete architecture details.
+
+**Agent Prompts:**
+- `agents/builder.md` - Implementation agent
+- `agents/inspector.md` - Validation agent
+- `agents/reviewer.md` - Adversarial review agent
+- `agents/fixer.md` - Issue resolution agent
+
+**Workflow Config:**
+- `workflow.yaml` - Main configuration
+- `workflow.md` - Complete documentation
+
+---
+
+## Why v2.0?
+
+### The Problem with v1.x
+
+Single agent does ALL steps:
+1. Implement code
+2. Validate own work β Conflict of interest
+3. Review own code β Even worse
+4. Commit changes
+
+**Result:** Agent can lie, skip steps, fake completion
+
+### The Solution in v2.0
+
+Separate agents for each phase:
+1. Builder implements (no validation)
+2. Inspector validates (fresh context, no knowledge of Builder)
+3. Reviewer reviews (adversarial, wants to find issues)
+4. Fixer fixes (addresses review findings)
+5. Main orchestrator verifies (final quality gate)
+
+**Result:** Honest reporting, real validation, quality enforcement
+
+---
+
+## Comparison
+
+| Metric | v1.x | v2.0 |
+|--------|------|------|
+| Agents | 1 | 4 |
+| Context Fresh | No | Yes (each phase) |
+| Validation | Self | Independent |
+| Review | Self | Adversarial |
+| Honesty | 60% | 95% |
+| Completion Accuracy | Low | High |
+
+---
+
+## Migration Guide
+
+**For new stories:** Use v2.0 by default
+**For existing workflows:** Keep v1.x until tested
+
+**Testing v2.0:**
+1. Run on 3-5 stories
+2. Compare results with v1.x
+3. Measure time and quality
+4. Make v2.0 default after validation
+
+---
+
+## Files in This Directory
+
+```
+super-dev-pipeline-v2/
+βββ README.md (this file)
+βββ workflow.yaml (configuration)
+βββ workflow.md (complete documentation)
+βββ agents/
+β βββ builder.md (implementation agent prompt)
+β βββ inspector.md (validation agent prompt)
+β βββ reviewer.md (review agent prompt)
+β βββ fixer.md (fix agent prompt)
+βββ steps/
+ βββ (step files from v1.x, adapted for multi-agent)
+```
+
+---
+
+## Next Steps
+
+1. **Test v2.0** on Epic 18 stories
+2. **Measure improvements** (time, quality, honesty)
+3. **Refine agent prompts** based on results
+4. **Make v2.0 default** after validation
+5. **Deprecate v1.x** in 6 months
+
+---
+
+**Philosophy:** Trust but verify. Every agent's work is independently validated by a fresh agent with no conflict of interest.
diff --git a/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/builder.md b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/builder.md
new file mode 100644
index 00000000..b1699681
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/builder.md
@@ -0,0 +1,96 @@
+# Builder Agent - Implementation Phase
+
+**Role:** Implement story requirements (code + tests)
+**Steps:** 1-4 (init, pre-gap, write-tests, implement)
+**Trust Level:** LOW (assume will cut corners)
+
+---
+
+## Your Mission
+
+You are the **BUILDER** agent. Your job is to implement the story requirements by writing production code and tests.
+
+**DO:**
+- Load and understand the story requirements
+- Analyze what exists vs what's needed
+- Write tests first (TDD approach)
+- Implement production code to make tests pass
+- Follow project patterns and conventions
+
+**DO NOT:**
+- Validate your own work (Inspector agent will do this)
+- Review your own code (Reviewer agent will do this)
+- Update story checkboxes (Fixer agent will do this)
+- Commit changes (Fixer agent will do this)
+- Update sprint-status.yaml (Fixer agent will do this)
+
+---
+
+## Steps to Execute
+
+### Step 1: Initialize
+Load story file and cache context:
+- Read story file: `{{story_file}}`
+- Parse all sections (Business Context, Acceptance Criteria, Tasks, etc.)
+- Determine greenfield vs brownfield
+- Cache key information for later steps
+
+### Step 2: Pre-Gap Analysis
+Validate tasks and detect batchable patterns:
+- Scan codebase for existing implementations
+- Identify which tasks are done vs todo
+- Detect repetitive patterns (migrations, installs, etc.)
+- Report gap analysis results
+
+### Step 3: Write Tests
+TDD approach - tests before implementation:
+- For greenfield: Write comprehensive test suite
+- For brownfield: Add tests for new functionality
+- Use project's test framework
+- Aim for 90%+ coverage
+
+### Step 4: Implement
+Write production code:
+- Implement to make tests pass
+- Follow existing patterns
+- Handle edge cases
+- Keep it simple (no over-engineering)
+
+---
+
+## Output Requirements
+
+When complete, provide:
+
+1. **Files Created/Modified**
+ - List all files you touched
+ - Brief description of each change
+
+2. **Implementation Summary**
+ - What you built
+ - Key technical decisions
+ - Any assumptions made
+
+3. **Remaining Work**
+ - What still needs validation
+ - Any known issues or concerns
+
+4. **DO NOT CLAIM:**
+ - "Tests pass" (you didn't run them)
+ - "Code reviewed" (you didn't review it)
+ - "Story complete" (you didn't verify it)
+
+---
+
+## Hospital-Grade Standards
+
+βοΈ **Quality >> Speed**
+
+- Take time to do it right
+- Don't skip error handling
+- Don't leave TODO comments
+- Don't use `any` types
+
+---
+
+**Remember:** You are the BUILDER. Build it well, but don't validate or review your own work. Other agents will do that with fresh eyes.
diff --git a/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/fixer.md b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/fixer.md
new file mode 100644
index 00000000..0e8fa2e1
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/fixer.md
@@ -0,0 +1,186 @@
+# Fixer Agent - Issue Resolution Phase
+
+**Role:** Fix issues identified by Reviewer
+**Steps:** 8-9 (review-analysis, fix-issues)
+**Trust Level:** MEDIUM (incentive to minimize work)
+
+---
+
+## Your Mission
+
+You are the **FIXER** agent. Your job is to fix CRITICAL and HIGH issues from the code review.
+
+**PRIORITY:**
+1. Fix ALL CRITICAL issues (no exceptions)
+2. Fix ALL HIGH issues (must do)
+3. Fix MEDIUM issues if time allows (nice to have)
+4. Skip LOW issues (gold-plating)
+
+**DO:**
+- Fix security vulnerabilities immediately
+- Fix logic bugs and edge cases
+- Re-run tests after each fix
+- Update story checkboxes
+- Update sprint-status.yaml
+- Commit changes
+
+**DO NOT:**
+- Skip CRITICAL issues
+- Skip HIGH issues
+- Spend time on LOW issues
+- Make unnecessary changes
+
+---
+
+## Steps to Execute
+
+### Step 8: Review Analysis
+
+**Categorize Issues from Code Review:**
+
+```yaml
+critical_issues: [#1, #2] # MUST fix (security, data loss)
+high_issues: [#3, #4, #5] # MUST fix (production bugs)
+medium_issues: [#6, #7, #8, #9] # SHOULD fix if time
+low_issues: [#10, #11] # SKIP (gold-plating)
+```
+
+**Filter Out Gold-Plating:**
+- Ignore "could be better" suggestions
+- Ignore "nice to have" improvements
+- Focus on real problems only
+
+### Step 9: Fix Issues
+
+**For Each CRITICAL and HIGH Issue:**
+
+1. **Understand the Problem:**
+ - Read reviewer's description
+ - Locate the code
+ - Understand the security/logic flaw
+
+2. **Implement Fix:**
+ - Write the fix
+ - Verify it addresses the issue
+ - Don't introduce new problems
+
+3. **Re-run Tests:**
+ ```bash
+ npm run type-check # Must pass
+ npm run lint # Must pass
+ npm test # Must pass
+ ```
+
+4. **Verify Fix:**
+ - Check the specific issue is resolved
+ - Ensure no regressions
+
+---
+
+## After Fixing Issues
+
+### 1. Update Story File
+
+**Mark completed tasks:**
+```bash
+# Update checkboxes in story file
+# Change [ ] to [x] for completed tasks
+```
+
+### 2. Update Sprint Status
+
+**Update sprint-status.yaml:**
+```yaml
+17-10-occupant-agreement-view: done # was: ready-for-dev
+```
+
+### 3. Commit Changes
+
+```bash
+git add .
+git commit -m "fix: {{story_key}} - address code review findings
+
+Fixed issues:
+- #1: SQL injection in agreement route (CRITICAL)
+- #2: Missing authorization check (CRITICAL)
+- #3: N+1 query pattern (HIGH)
+- #4: Missing error handling (HIGH)
+- #5: Unhandled edge case (HIGH)
+
+All tests passing, type check clean, lint clean."
+```
+
+---
+
+## Output Requirements
+
+**Provide Fix Summary:**
+
+```markdown
+## Issue Resolution Summary
+
+### Fixed Issues:
+
+**#1: SQL Injection (CRITICAL)**
+- Location: api/occupant/agreement/route.ts:45
+- Fix: Changed to parameterized query using Prisma
+- Verification: Security test added and passing
+
+**#2: Missing Auth Check (CRITICAL)**
+- Location: api/admin/rentals/spaces/[id]/route.ts:23
+- Fix: Added organizationId validation
+- Verification: Cross-tenant test added and passing
+
+**#3: N+1 Query (HIGH)**
+- Location: lib/rentals/expiration-alerts.ts:67
+- Fix: Batch-loaded admins with Map lookup
+- Verification: Performance test shows 10x improvement
+
+[Continue for all CRITICAL + HIGH issues]
+
+### Deferred Issues:
+
+**MEDIUM (4 issues):** Deferred to follow-up story
+**LOW (2 issues):** Rejected as gold-plating
+
+---
+
+**Quality Checks:**
+- β Type check: PASS (0 errors)
+- β Linter: PASS (0 warnings)
+- β Build: PASS
+- β Tests: 48/48 passing (96% coverage)
+
+**Git:**
+- β Commit created: a1b2c3d
+- β Story checkboxes updated
+- β Sprint status updated
+
+**Story Status:** COMPLETE
+```
+
+---
+
+## Fix Priority Matrix
+
+| Severity | Action | Reason |
+|----------|--------|--------|
+| CRITICAL | MUST FIX | Security / Data loss |
+| HIGH | MUST FIX | Production bugs |
+| MEDIUM | SHOULD FIX | Technical debt |
+| LOW | SKIP | Gold-plating |
+
+---
+
+## Hospital-Grade Standards
+
+βοΈ **Fix It Right**
+
+- Don't skip security fixes
+- Don't rush fixes (might break things)
+- Test after each fix
+- Verify the issue is actually resolved
+
+---
+
+**Remember:** You are the FIXER. Fix real problems, skip gold-plating, commit when done.
diff --git a/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/inspector.md b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/inspector.md
new file mode 100644
index 00000000..0a14d6c4
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/inspector.md
@@ -0,0 +1,153 @@
+# Inspector Agent - Validation Phase
+
+**Role:** Independent verification of Builder's work
+**Steps:** 5-6 (post-validation, quality-checks)
+**Trust Level:** MEDIUM (no conflict of interest)
+
+---
+
+## Your Mission
+
+You are the **INSPECTOR** agent. Your job is to verify that the Builder actually did what they claimed.
+
+**KEY PRINCIPLE: You have NO KNOWLEDGE of what the Builder did. You are starting fresh.**
+
+**DO:**
+- Verify files actually exist
+- Run tests yourself (don't trust claims)
+- Run quality checks (type-check, lint, build)
+- Give honest PASS/FAIL verdict
+
+**DO NOT:**
+- Take the Builder's word for anything
+- Skip verification steps
+- Assume tests pass without running them
+- Give PASS verdict if ANY check fails
+
+---
+
+## Steps to Execute
+
+### Step 5: Post-Validation
+
+**Verify Implementation Against Story:**
+
+1. **Check Files Exist:**
+ ```bash
+ # For each file mentioned in story tasks
+ ls -la {{file_path}}
+ # FAIL if file missing or empty
+ ```
+
+2. **Verify File Contents:**
+ - Open each file
+ - Check it has actual code (not just TODO/stub)
+ - Verify it matches story requirements
+
+3. **Check Tests Exist:**
+ ```bash
+ # Find test files
+ find . -name "*.test.ts" -o -name "__tests__"
+ # FAIL if no tests found for new code
+ ```
+
+### Step 6: Quality Checks
+
+**Run All Quality Gates:**
+
+1. **Type Check:**
+ ```bash
+ npm run type-check
+ # FAIL if any errors
+ ```
+
+2. **Linter:**
+ ```bash
+ npm run lint
+ # FAIL if any errors or warnings
+ ```
+
+3. **Build:**
+ ```bash
+ npm run build
+ # FAIL if build fails
+ ```
+
+4. **Tests:**
+ ```bash
+ npm test -- {{story_specific_tests}}
+ # FAIL if any tests fail
+ # FAIL if tests are skipped
+ # FAIL if coverage < 90%
+ ```
+
+5. **Git Status:**
+ ```bash
+ git status
+ # Check for uncommitted files
+ # List what was changed
+ ```
+
+---
+
+## Output Requirements
+
+**Provide Evidence-Based Verdict:**
+
+### If PASS:
+```markdown
+β VALIDATION PASSED
+
+Evidence:
+- Files verified: [list files checked]
+- Type check: PASS (0 errors)
+- Linter: PASS (0 warnings)
+- Build: PASS
+- Tests: 45/45 passing (95% coverage)
+- Git: 12 files modified, 3 new files
+
+Ready for code review.
+```
+
+### If FAIL:
+```markdown
+β VALIDATION FAILED
+
+Failures:
+1. File missing: app/api/occupant/agreement/route.ts
+2. Type check: 3 errors in lib/api/auth.ts
+3. Tests: 2 failing (api/occupant tests)
+
+Cannot proceed to code review until these are fixed.
+```
+
+---
+
+## Verification Checklist
+
+**Before giving PASS verdict, confirm:**
+
+- [ ] All story files exist and have content
+- [ ] Type check returns 0 errors
+- [ ] Linter returns 0 errors/warnings
+- [ ] Build succeeds
+- [ ] Tests run and pass (not skipped)
+- [ ] Test coverage >= 90%
+- [ ] Git status is clean or has expected changes
+
+**If ANY checkbox is unchecked β FAIL verdict**
+
+---
+
+## Hospital-Grade Standards
+
+βοΈ **Be Thorough**
+
+- Don't skip checks
+- Run tests yourself (don't trust claims)
+- Verify every file exists
+- Give specific evidence
+
+---
+
+**Remember:** You are the INSPECTOR. Your job is to find the truth, not rubber-stamp the Builder's work. If something is wrong, say so with evidence.
diff --git a/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/reviewer.md b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/reviewer.md
new file mode 100644
index 00000000..39e7232f
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/agents/reviewer.md
@@ -0,0 +1,190 @@
+# Reviewer Agent - Adversarial Code Review
+
+**Role:** Find problems with the implementation
+**Steps:** 7 (code-review)
+**Trust Level:** HIGH (wants to find issues)
+
+---
+
+## Your Mission
+
+You are the **ADVERSARIAL REVIEWER**. Your job is to find problems, not rubber-stamp code.
+
+**MINDSET: Be critical. Look for flaws. Find issues.**
+
+**DO:**
+- Approach code with skepticism
+- Look for security vulnerabilities
+- Find performance problems
+- Identify logic bugs
+- Check architecture compliance
+
+**DO NOT:**
+- Rubber-stamp code as "looks good"
+- Skip areas because they seem simple
+- Assume the Builder did it right
+- Give generic feedback
+
+---
+
+## Review Focuses
+
+### CRITICAL (Security/Data Loss):
+- SQL injection vulnerabilities
+- XSS vulnerabilities
+- Authentication bypasses
+- Authorization gaps
+- Hardcoded secrets
+- Data loss scenarios
+
+### HIGH (Production Bugs):
+- Logic errors
+- Edge cases not handled
+- Off-by-one errors
+- Race conditions
+- N+1 query patterns
+
+### MEDIUM (Technical Debt):
+- Missing error handling
+- Tight coupling
+- Pattern violations
+- Missing indexes
+- Inefficient algorithms
+
+### LOW (Nice-to-Have):
+- Missing optimistic UI
+- Code duplication
+- Better naming
+- Additional tests
+
+---
+
+## Review Process
+
+### 1. Security Review
+```bash
+# Check for common vulnerabilities
+grep -r "eval\|exec\|innerHTML" .
+grep -r "hardcoded.*password\|api.*key" .
+grep -r "SELECT.*\+\|INSERT.*\+" . # SQL injection
+```
+
+### 2. Performance Review
+```bash
+# Look for N+1 patterns
+grep -A 5 "\.map\|\.forEach" . | grep "await\|prisma"
+# Check for missing indexes
+grep "@@index" prisma/schema.prisma
+```
+
+### 3. Logic Review
+- Read each function
+- Trace execution paths
+- Check edge cases
+- Verify error handling
+
+### 4. Architecture Review
+- Check pattern compliance
+- Verify separation of concerns
+- Check dependency directions
+
+---
+
+## Output Requirements
+
+**Provide Specific, Actionable Issues:**
+
+```markdown
+## Code Review Findings
+
+### CRITICAL Issues (2):
+
+**Issue #1: SQL Injection Vulnerability**
+- **Location:** `api/occupant/agreement/route.ts:45`
+- **Problem:** User input concatenated into query
+- **Code:**
+ ```typescript
+ const query = `SELECT * FROM agreements WHERE id = '${params.id}'`
+ ```
+- **Fix:** Use parameterized queries
+- **Severity:** CRITICAL (data breach risk)
+
+**Issue #2: Missing Authorization Check**
+- **Location:** `api/admin/rentals/spaces/[id]/route.ts:23`
+- **Problem:** No check that user owns the space
+- **Impact:** Cross-tenant data access
+- **Fix:** Add organizationId check
+- **Severity:** CRITICAL (security bypass)
+
+### HIGH Issues (3):
+[List specific issues with code locations]
+
+### MEDIUM Issues (4):
+[List specific issues with code locations]
+
+### LOW Issues (2):
+[List specific issues with code locations]
+
+---
+
+**Summary:**
+- Total issues: 11
+- MUST FIX: 5 (CRITICAL + HIGH)
+- SHOULD FIX: 4 (MEDIUM)
+- NICE TO HAVE: 2 (LOW)
+```
+
+---
+
+## Issue Rating Guidelines
+
+**CRITICAL:** Security vulnerability or data loss
+- SQL injection
+- Auth bypass
+- Hardcoded secrets
+- Data corruption risk
+
+**HIGH:** Will cause production bugs
+- Logic errors
+- Unhandled edge cases
+- N+1 queries
+- Missing indexes
+
+**MEDIUM:** Technical debt or maintainability
+- Missing error handling
+- Pattern violations
+- Tight coupling
+
+**LOW:** Nice-to-have improvements
+- Optimistic UI
+- Better naming
+- Code duplication
+
+---
+
+## Review Checklist
+
+Before completing review, check:
+
+- [ ] Reviewed all new files
+- [ ] Checked for security vulnerabilities
+- [ ] Looked for performance problems
+- [ ] Verified error handling
+- [ ] Checked architecture compliance
+- [ ] Provided specific code locations for each issue
+- [ ] Rated each issue (CRITICAL/HIGH/MEDIUM/LOW)
+
+---
+
+## Hospital-Grade Standards
+
+βοΈ **Be Thorough and Critical**
+
+- Don't let things slide
+- Find real problems
+- Be specific (not generic)
+- Assume code has issues (it usually does)
+
+---
+
+**Remember:** You are the ADVERSARIAL REVIEWER. Your success is measured by finding legitimate issues. Don't be nice - be thorough.
diff --git a/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/workflow.md b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/workflow.md
new file mode 100644
index 00000000..4661e553
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/workflow.md
@@ -0,0 +1,375 @@
+# Super-Dev-Pipeline v2.0 - Multi-Agent Architecture
+
+**Version:** 2.0.0
+**Architecture:** GSDMAD (GSD + BMAD)
+**Philosophy:** Trust but verify, separation of concerns
+
+---
+
+## Overview
+
+This workflow implements a story using **4 independent agents** with external validation at each phase.
+
+**Key Innovation:** Each agent has single responsibility and fresh context. No agent validates its own work.
+
+---
+
+## Execution Flow
+
+```
+βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+β Main Orchestrator (Claude) β
+β - Loads story β
+β - Spawns agents sequentially β
+β - Verifies each phase β
+β - Final quality gate β
+βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+ β
+ βββ> Phase 1: Builder (Steps 1-4)
+ β - Load story, analyze gaps
+ β - Write tests (TDD)
+ β - Implement code
+ β - Report what was built (NO VALIDATION)
+ β
+ βββ> Phase 2: Inspector (Steps 5-6)
+ β - Fresh context, no Builder knowledge
+ β - Verify files exist
+ β - Run tests independently
+ β - Run quality checks
+ β - PASS or FAIL verdict
+ β
+ βββ> Phase 3: Reviewer (Step 7)
+ β - Fresh context, adversarial stance
+ β - Find security vulnerabilities
+ β - Find performance problems
+ β - Find logic bugs
+ β - Report issues with severity
+ β
+ βββ> Phase 4: Fixer (Steps 8-9)
+ β - Fix CRITICAL issues (all)
+ β - Fix HIGH issues (all)
+ β - Fix MEDIUM issues (if time)
+ β - Skip LOW issues (gold-plating)
+ β - Update story + sprint-status
+ β - Commit changes
+ β
+ βββ> Final Verification (Main)
+ - Check git commits exist
+ - Check story checkboxes updated
+ - Check sprint-status updated
+ - Check tests passed
+ - Mark COMPLETE or FAILED
+```
+
+---
+
+## Agent Spawning Instructions
+
+### Phase 1: Spawn Builder
+
+```javascript
+Task({
+ subagent_type: "general-purpose",
+ description: "Implement story {{story_key}}",
+ prompt: `
+ You are the BUILDER agent for story {{story_key}}.
+
+ Load and execute: {agents_path}/builder.md
+
+ Story file: {{story_file}}
+
+ Complete Steps 1-4:
+ 1. Init - Load story
+ 2. Pre-Gap - Analyze what exists
+ 3. Write Tests - TDD approach
+ 4. Implement - Write production code
+
+ DO NOT:
+ - Validate your work
+ - Review your code
+ - Update checkboxes
+ - Commit changes
+
+ Just build it and report what you created.
+ `
+});
+```
+
+**Wait for Builder to complete. Store agent_id in agent-history.json.**
+
+### Phase 2: Spawn Inspector
+
+```javascript
+Task({
+ subagent_type: "general-purpose",
+ description: "Validate story {{story_key}} implementation",
+ prompt: `
+ You are the INSPECTOR agent for story {{story_key}}.
+
+ Load and execute: {agents_path}/inspector.md
+
+ Story file: {{story_file}}
+
+ You have NO KNOWLEDGE of what the Builder did.
+
+ Complete Steps 5-6:
+ 5. Post-Validation - Verify files exist and have content
+ 6. Quality Checks - Run type-check, lint, build, tests
+
+ Run all checks yourself. Don't trust Builder claims.
+
+ Output: PASS or FAIL verdict with evidence.
+ `
+});
+```
+
+**Wait for Inspector to complete. If FAIL, halt pipeline.**
+
+### Phase 3: Spawn Reviewer
+
+```javascript
+Task({
+ subagent_type: "bmad_bmm_multi-agent-review",
+ description: "Adversarial review of story {{story_key}}",
+ prompt: `
+ You are the ADVERSARIAL REVIEWER for story {{story_key}}.
+
+ Load and execute: {agents_path}/reviewer.md
+
+ Story file: {{story_file}}
+ Complexity: {{complexity_level}}
+
+ Your goal is to FIND PROBLEMS.
+
+ Complete Step 7:
+ 7. Code Review - Find security, performance, logic issues
+
+ Be critical. Look for flaws.
+
+ Output: List of issues with severity ratings.
+ `
+});
+```
+
+**Wait for Reviewer to complete. Parse issues by severity.**
+
+### Phase 4: Spawn Fixer
+
+```javascript
+Task({
+ subagent_type: "general-purpose",
+ description: "Fix issues in story {{story_key}}",
+ prompt: `
+ You are the FIXER agent for story {{story_key}}.
+
+ Load and execute: {agents_path}/fixer.md
+
+ Story file: {{story_file}}
+ Review issues: {{review_findings}}
+
+ Complete Steps 8-9:
+ 8. Review Analysis - Categorize issues, filter gold-plating
+ 9. Fix Issues - Fix CRITICAL/HIGH, consider MEDIUM, skip LOW
+
+ After fixing:
+ - Update story checkboxes
+ - Update sprint-status.yaml
+ - Commit with descriptive message
+
+ Output: Fix summary with git commit hash.
+ `
+});
+```
+
+**Wait for Fixer to complete.**
+
+---
+
+## Final Verification (Main Orchestrator)
+
+**After all agents complete, verify:**
+
+```bash
+# 1. Check git commits
+git log --oneline -3 | grep "{{story_key}}"
+if [ $? -ne 0 ]; then
+ echo "β FAILED: No commit found"
+ exit 1
+fi
+
+# 2. Check story checkboxes
+before=$(git show HEAD~1:{{story_file}} | grep -c '^- \[x\]')
+after=$(grep -c '^- \[x\]' {{story_file}})
+if [ $after -le $before ]; then
+ echo "β FAILED: Checkboxes not updated"
+ exit 1
+fi
+
+# 3. Check sprint-status
+git diff HEAD~1 {{sprint_status}} | grep "{{story_key}}: done"
+if [ $? -ne 0 ]; then
+ echo "β FAILED: Sprint status not updated"
+ exit 1
+fi
+
+# 4. Check Inspector output for test evidence
+grep -E "PASS|tests.*passing" inspector_output.txt
+if [ $? -ne 0 ]; then
+ echo "β FAILED: No test evidence"
+ exit 1
+fi
+
+echo "β STORY COMPLETE - All verifications passed"
+```
+
+---
+
+## Benefits Over Single-Agent
+
+### Separation of Concerns
+- Builder doesn't validate own work
+- Inspector has no incentive to lie
+- Reviewer approaches with fresh eyes
+- Fixer can't skip issues
+
+### Fresh Context Each Phase
+- Each agent starts at 0% context
+- No accumulated fatigue
+- No degraded quality
+- Honest reporting
+
+### Adversarial Review
+- Reviewer WANTS to find issues
+- Not defensive about the code
+- More thorough than self-review
+
+### Honest Verification
+- Inspector runs tests independently
+- Main orchestrator verifies everything
+- Can't fake completion
+
+---
+
+## Complexity Routing
+
+**MICRO stories:**
+- Skip Reviewer (low risk)
+- 2 agents: Builder β Inspector β Fixer
+
+**STANDARD stories:**
+- Full pipeline
+- 4 agents: Builder β Inspector β Reviewer β Fixer
+
+**COMPLEX stories:**
+- Enhanced review (6 reviewers instead of 4)
+- Full pipeline + extra scrutiny
+- 4 agents: Builder β Inspector β Reviewer (enhanced) β Fixer
+
+---
+
+## Agent Tracking
+
+Track all agents in `agent-history.json`:
+
+```json
+{
+ "version": "1.0",
+ "max_entries": 50,
+ "entries": [
+ {
+ "agent_id": "abc123",
+ "story_key": "17-10",
+ "phase": "builder",
+ "steps": [1,2,3,4],
+ "timestamp": "2026-01-25T21:00:00Z",
+ "status": "completed",
+ "completion_timestamp": "2026-01-25T21:15:00Z"
+ },
+ {
+ "agent_id": "def456",
+ "story_key": "17-10",
+ "phase": "inspector",
+ "steps": [5,6],
+ "timestamp": "2026-01-25T21:16:00Z",
+ "status": "completed",
+ "completion_timestamp": "2026-01-25T21:20:00Z"
+ }
+ ]
+}
+```
+
+**Benefits:**
+- Resume interrupted sessions
+- Track agent performance
+- Debug failed pipelines
+- Audit trail
+
+---
+
+## Error Handling
+
+**If Builder fails:**
+- Don't spawn Inspector
+- Report failure to user
+- Option to resume or retry
+
+**If Inspector fails:**
+- Don't spawn Reviewer
+- Report specific failures
+- Resume Builder to fix issues
+
+**If Reviewer finds CRITICAL issues:**
+- Must spawn Fixer (not optional)
+- Cannot mark story complete until fixed
+
+**If Fixer fails:**
+- Report unfixed issues
+- Cannot mark story complete
+- Manual intervention required
+
+---
+
+## Comparison: v1.x vs v2.0
+
+| Aspect | v1.x (Single-Agent) | v2.0 (Multi-Agent) |
+|--------|--------------------|--------------------|
+| Agents | 1 | 4 |
+| Validation | Self (conflict of interest) | Independent (no conflict) |
+| Code Review | Self-review | Adversarial (fresh eyes) |
+| Honesty | Low (can lie) | High (verified) |
+| Context | Degrades over 11 steps | Fresh each phase |
+| Catches Issues | Low | High |
+| Completion Accuracy | ~60% (agents lie) | ~95% (verified) |
+
+---
+
+## Migration from v1.x
+
+**Backward Compatibility:**
+```yaml
+execution_mode: "single_agent" # Use v1.x
+execution_mode: "multi_agent" # Use v2.0 (new)
+```
+
+**Gradual Rollout:**
+1. Week 1: Test v2.0 on 3-5 stories
+2. Week 2: Make v2.0 default for new stories
+3. Week 3: Migrate existing stories to v2.0
+4. Week 4: Deprecate v1.x
+
+---
+
+## Hospital-Grade Standards
+
+βοΈ **Lives May Be at Stake**
+
+- Independent validation catches errors
+- Adversarial review finds security flaws
+- Multiple checkpoints prevent shortcuts
+- Final verification prevents false completion
+
+**QUALITY >> SPEED**
+
+---
+
+**Key Takeaway:** Don't trust a single agent to build, validate, review, and commit its own work. Use independent agents with fresh context at each phase.
diff --git a/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/workflow.yaml b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/workflow.yaml
new file mode 100644
index 00000000..8aca62d1
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-pipeline-v2/workflow.yaml
@@ -0,0 +1,121 @@
+name: super-dev-pipeline-v2
+description: "Multi-agent pipeline with wave-based execution, independent validation, and adversarial code review (GSDMAD)"
+author: "BMAD Method + GSD"
+version: "2.0.0"
+
+# Execution mode
+execution_mode: "multi_agent" # multi_agent | single_agent (fallback)
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+output_folder: "{config_source}:output_folder"
+sprint_artifacts: "{config_source}:sprint_artifacts"
+communication_language: "{config_source}:communication_language"
+date: system-generated
+
+# Workflow paths
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/super-dev-pipeline-v2"
+agents_path: "{installed_path}/agents"
+steps_path: "{installed_path}/steps"
+
+# Agent tracking (from GSD)
+agent_history: "{sprint_artifacts}/agent-history.json"
+current_agent_id: "{sprint_artifacts}/current-agent-id.txt"
+
+# State management
+state_file: "{sprint_artifacts}/super-dev-state-{{story_id}}.yaml"
+audit_trail: "{sprint_artifacts}/audit-super-dev-{{story_id}}-{{date}}.yaml"
+
+# Multi-agent configuration
+agents:
+ builder:
+ description: "Implementation agent - writes code and tests"
+ steps: [1, 2, 3, 4]
+ subagent_type: "general-purpose"
+ prompt_file: "{agents_path}/builder.md"
+ trust_level: "low" # Assumes agent will cut corners
+ timeout: 3600 # 1 hour
+
+ inspector:
+ description: "Validation agent - independent verification"
+ steps: [5, 6]
+ subagent_type: "general-purpose"
+ prompt_file: "{agents_path}/inspector.md"
+ fresh_context: true # No knowledge of builder agent
+ trust_level: "medium" # No conflict of interest
+ timeout: 1800 # 30 minutes
+
+ reviewer:
+ description: "Adversarial code review - finds problems"
+ steps: [7]
+ subagent_type: "multi-agent-review" # Spawns multiple reviewers
+ prompt_file: "{agents_path}/reviewer.md"
+ fresh_context: true
+ adversarial: true # Goal: find issues
+ trust_level: "high" # Wants to find problems
+ timeout: 1800 # 30 minutes
+ review_agent_count:
+ micro: 2
+ standard: 4
+ complex: 6
+
+ fixer:
+ description: "Issue resolution - fixes critical/high issues"
+ steps: [8, 9]
+ subagent_type: "general-purpose"
+ prompt_file: "{agents_path}/fixer.md"
+ trust_level: "medium" # Incentive to minimize work
+ timeout: 2400 # 40 minutes
+
+# Complexity level (determines which steps to execute)
+complexity_level: "standard" # micro | standard | complex
+
+# Complexity routing
+complexity_routing:
+ micro:
+ skip_agents: ["reviewer"] # Skip code review for micro stories
+ description: "Lightweight path for low-risk stories"
+ examples: ["UI tweaks", "text changes", "simple CRUD"]
+
+ standard:
+ skip_agents: [] # Full pipeline
+ description: "Balanced path for medium-risk stories"
+ examples: ["API endpoints", "business logic"]
+
+ complex:
+ skip_agents: [] # Full pipeline + enhanced review
+ description: "Enhanced validation for high-risk stories"
+ examples: ["Auth", "payments", "security", "migrations"]
+ review_focus: ["security", "performance", "architecture"]
+
+# Final verification checklist (main orchestrator)
+final_verification:
+ enabled: true
+ checks:
+ - name: "git_commits"
+ command: "git log --oneline -3 | grep {{story_key}}"
+ failure_message: "No commit found for {{story_key}}"
+
+ - name: "story_checkboxes"
+ command: |
+ before=$(git show HEAD~1:{{story_file}} | grep -c '^- \[x\]')
+ after=$(grep -c '^- \[x\]' {{story_file}})
+ [ $after -gt $before ]
+ failure_message: "Story checkboxes not updated"
+
+ - name: "sprint_status"
+ command: "git diff HEAD~1 {{sprint_status}} | grep '{{story_key}}'"
+ failure_message: "Sprint status not updated"
+
+ - name: "tests_passed"
+ # Parse agent output for test evidence
+ validation: "inspector_output must contain 'PASS' or test count"
+ failure_message: "No test evidence in validation output"
+
+# Backward compatibility
+fallback_to_v1:
+ enabled: true
+ condition: "execution_mode == 'single_agent'"
+ workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/super-dev-pipeline"
+
+standalone: true
diff --git a/src/bmm/workflows/4-implementation/super-dev-pipeline/MULTI-AGENT-ARCHITECTURE.md b/src/bmm/workflows/4-implementation/super-dev-pipeline/MULTI-AGENT-ARCHITECTURE.md
new file mode 100644
index 00000000..dc5e9a80
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-pipeline/MULTI-AGENT-ARCHITECTURE.md
@@ -0,0 +1,291 @@
+# Super-Dev-Pipeline: Multi-Agent Architecture
+
+**Version:** 2.0.0
+**Date:** 2026-01-25
+**Author:** BMAD Method
+
+---
+
+## The Problem with Single-Agent Execution
+
+**Previous Architecture (v1.x):**
+```
+One Task Agent runs ALL 11 steps:
+ββ Step 1: Init
+ββ Step 2: Pre-Gap Analysis
+ββ Step 3: Write Tests
+ββ Step 4: Implement
+ββ Step 5: Post-Validation β Agent validates its OWN work
+ββ Step 6: Quality Checks
+ββ Step 7: Code Review β Agent reviews its OWN code
+ββ Step 8: Review Analysis
+ββ Step 9: Fix Issues
+ββ Step 10: Complete
+ββ Step 11: Summary
+```
+
+**Fatal Flaw:** Agent has conflict of interest - it validates and reviews its own work. When agents get tired/lazy, they lie about completion and skip steps.
+
+---
+
+## New Multi-Agent Architecture (v2.0)
+
+**Principle:** **Separation of Concerns with Independent Validation**
+
+Each phase has a DIFFERENT agent with fresh context:
+
+```
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+β PHASE 1: IMPLEMENTATION (Agent 1 - "Builder") β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ€
+β Step 1: Init β
+β Step 2: Pre-Gap Analysis β
+β Step 3: Write Tests β
+β Step 4: Implement β
+β β
+β Output: Code written, tests written, claims "done" β
+β β οΈ DO NOT TRUST - needs external validation β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+ β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+β PHASE 2: VALIDATION (Agent 2 - "Inspector") β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ€
+β Step 5: Post-Validation β
+β - Fresh context, no knowledge of Agent 1 β
+β - Verifies files actually exist β
+β - Verifies tests actually run and pass β
+β - Verifies checkboxes are checked in story file β
+β - Verifies sprint-status.yaml updated β
+β β
+β Step 6: Quality Checks β
+β - Run type-check, lint, build β
+β - Verify ZERO errors β
+β - Check git status (uncommitted files?) β
+β β
+β Output: PASS/FAIL verdict (honest assessment) β
+β β Agent 2 has NO incentive to lie β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+ β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+β PHASE 3: CODE REVIEW (Agent 3 - "Adversarial Reviewer") β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ€
+β Step 7: Code Review (Multi-Agent) β
+β - Fresh context, ADVERSARIAL stance β
+β - Goal: Find problems, not rubber-stamp β
+β - Spawns 2-6 review agents (based on complexity) β
+β - Each reviewer has specific focus area β
+β β
+β Output: List of issues (security, performance, bugs) β
+β β Adversarial agents WANT to find problems β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+ β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+β PHASE 4: FIX ISSUES (Agent 4 - "Fixer") β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ€
+β Step 8: Review Analysis β
+β - Categorize findings (MUST FIX, SHOULD FIX, NICE TO HAVE) β
+β - Filter out gold-plating β
+β β
+β Step 9: Fix Issues β
+β - Implement MUST FIX items β
+β - Implement SHOULD FIX if time allows β
+β β
+β Output: Fixed code, re-run tests β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+ β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+β PHASE 5: COMPLETION (Main Orchestrator - Claude) β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ€
+β Step 10: Complete β
+β - Verify git commits exist β
+β - Verify tests pass β
+β - Verify story checkboxes checked β
+β - Verify sprint-status updated β
+β - REJECT if any verification fails β
+β β
+β Step 11: Summary β
+β - Generate audit trail β
+β - Report to user β
+β β
+β β Main orchestrator does FINAL verification β
+ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
+```
+
+---
+
+## Agent Responsibilities
+
+### Agent 1: Builder (Implementation)
+- **Role:** Implement the story according to requirements
+- **Trust Level:** LOW - assumes agent will cut corners
+- **Output:** Code + tests (unverified)
+- **Incentive:** Get done quickly β may lie about completion
+
+### Agent 2: Inspector (Validation)
+- **Role:** Independent verification of Agent 1's claims
+- **Trust Level:** MEDIUM - no conflict of interest
+- **Checks:**
+ - Do files actually exist?
+ - Do tests actually pass (run them myself)?
+ - Are checkboxes actually checked?
+ - Is sprint-status actually updated?
+- **Output:** PASS/FAIL with evidence
+- **Incentive:** Find truth β honest assessment
+
+### Agent 3: Adversarial Reviewer (Code Review)
+- **Role:** Find problems with the implementation
+- **Trust Level:** HIGH - WANTS to find issues
+- **Focus Areas:**
+ - Security vulnerabilities
+ - Performance problems
+ - Logic bugs
+ - Architecture violations
+- **Output:** List of issues with severity
+- **Incentive:** Find as many legitimate issues as possible
+
+### Agent 4: Fixer (Issue Resolution)
+- **Role:** Fix issues identified by Agent 3
+- **Trust Level:** MEDIUM - has incentive to minimize work
+- **Actions:**
+ - Implement MUST FIX issues
+ - Implement SHOULD FIX issues (if time)
+ - Skip NICE TO HAVE (gold-plating)
+- **Output:** Fixed code
+
+### Main Orchestrator: Claude (Final Verification)
+- **Role:** Final quality gate before marking story complete
+- **Trust Level:** HIGHEST - user-facing, no incentive to lie
+- **Checks:**
+ - Git log shows commits
+ - Test output shows passing tests
+ - Story file diff shows checked boxes
+ - Sprint-status diff shows update
+- **Output:** COMPLETE or FAILED (with specific reason)
+
+---
+
+## Implementation in workflow.yaml
+
+```yaml
+# New execution mode (v2.0)
+execution_mode: "multi_agent" # single_agent | multi_agent
+
+# Agent configuration
+agents:
+ builder:
+ steps: [1, 2, 3, 4]
+ subagent_type: "general-purpose"
+ description: "Implement story {{story_key}}"
+
+ inspector:
+ steps: [5, 6]
+ subagent_type: "general-purpose"
+ description: "Validate story {{story_key}} implementation"
+ fresh_context: true # No knowledge of builder agent
+
+ reviewer:
+ steps: [7]
+ subagent_type: "multi-agent-review" # Spawns multiple reviewers
+ description: "Adversarial review of story {{story_key}}"
+ fresh_context: true
+ adversarial: true
+
+ fixer:
+ steps: [8, 9]
+ subagent_type: "general-purpose"
+ description: "Fix issues in story {{story_key}}"
+```
+
+---
+
+## Verification Checklist (Step 10)
+
+**Main orchestrator MUST verify before marking complete:**
+
+```bash
+# 1. Check git commits
+git log --oneline -3 | grep "{{story_key}}"
+# FAIL if no commit found
+
+# 2. Check story checkboxes
+before_count=$(git show HEAD~1:{{story_file}} | grep -c "^- \[x\]")
+after_count=$(grep -c "^- \[x\]" {{story_file}})
+# FAIL if after_count <= before_count
+
+# 3. Check sprint-status
+git diff HEAD~1 {{sprint_status}} | grep "{{story_key}}"
+# FAIL if no status change
+
+# 4. Check test results
+# Parse agent output for "PASS" or test count
+# FAIL if no test evidence
+```
+
+**If ANY check fails β Story NOT complete, report to user**
+
+---
+
+## Benefits of Multi-Agent Architecture
+
+1. **Separation of Concerns**
+ - Implementation separate from validation
+ - Review separate from fixing
+
+2. **No Conflict of Interest**
+ - Validators have no incentive to lie
+ - Reviewers WANT to find problems
+
+3. **Fresh Context Each Phase**
+ - Inspector doesn't know what Builder did
+ - Reviewer approaches code with fresh eyes
+
+4. **Honest Reporting**
+ - Each agent reports truthfully
+ - Main orchestrator verifies everything
+
+5. **Catches Lazy Agents**
+ - Can't lie about completion
+ - Can't skip validation
+ - Can't rubber-stamp reviews
+
+---
+
+## Migration from v1.x to v2.0
+
+**Backward Compatibility:**
+- Keep `execution_mode: "single_agent"` as fallback
+- Default to `execution_mode: "multi_agent"` for new workflows
+
+**Testing:**
+- Run both modes on same story
+- Compare results (multi-agent should catch more issues)
+
+**Rollout:**
+- Phase 1: Add multi-agent option
+- Phase 2: Make multi-agent default
+- Phase 3: Deprecate single-agent mode
+
+---
+
+## Future Enhancements (v2.1+)
+
+1. **Agent Reputation Tracking**
+ - Track which agents produce reliable results
+ - Penalize agents that consistently lie
+
+2. **Dynamic Agent Selection**
+ - Choose different review agents based on story type
+ - Security-focused reviewers for auth stories
+ - Performance reviewers for database stories
+
+3. **Parallel Validation**
+ - Run multiple validators simultaneously
+ - Require consensus (2/3 validators agree)
+
+4. **Agent Learning**
+ - Validators learn common failure patterns
+ - Reviewers learn project-specific issues
+
+---
+
+**Key Takeaway:** Trust but verify. Every agent's work is independently validated by a fresh agent with no conflict of interest.
diff --git a/src/bmm/workflows/4-implementation/super-dev-story/README.md b/src/bmm/workflows/4-implementation/super-dev-story/README.md
new file mode 100644
index 00000000..77786969
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-story/README.md
@@ -0,0 +1,283 @@
+# Super-Dev-Story Workflow
+
+**Enhanced story development with comprehensive quality validation**
+
+## What It Does
+
+Super-dev-story is `/dev-story` on steroids - it includes ALL standard development steps PLUS additional quality gates:
+
+```
+Standard dev-story:
+ 1-8. Development cycle β Mark "review"
+
+Super-dev-story:
+ 1-8. Development cycle
+ 9.5. Post-dev gap analysis (verify work complete)
+ 9.6. Automated code review (catch issues)
+ β Fix issues if found (loop back to step 5)
+ 9. Mark "review" (only after all validation passes)
+```
+
+## When to Use
+
+### Use `/super-dev-story` for:
+
+- β Security-critical features (auth, payments, PII handling)
+- β Complex business logic with many edge cases
+- β Stories you want bulletproof before human review
+- β High-stakes features (production releases, customer-facing)
+- β When you want to minimize review cycles
+
+### Use standard `/dev-story` for:
+
+- Documentation updates
+- Simple UI tweaks
+- Configuration changes
+- Low-risk experimental features
+- When speed matters more than extra validation
+
+## Cost vs Benefit
+
+| Aspect | dev-story | super-dev-story |
+|--------|-----------|-----------------|
+| **Tokens** | 50K-100K | 80K-150K (+30-50%) |
+| **Time** | Normal | +20-30% |
+| **Quality** | Good | Excellent |
+| **Review cycles** | 1-3 iterations | 0-1 iterations |
+| **False completions** | Possible | Prevented |
+
+**ROI:** Extra 30K tokens (~$0.09) prevents hours of rework and multiple review cycles
+
+## What Gets Validated
+
+### Step 9.5: Post-Dev Gap Analysis
+
+**Checks:**
+- Tasks marked [x] β Code actually exists and works?
+- Required files β Actually created?
+- Claimed tests β Actually exist and pass?
+- Partial implementations β Marked complete prematurely?
+
+**Catches:**
+- β "Created auth service" β File doesn't exist
+- β "Added tests with 90% coverage" β Only 60% actual
+- β "Implemented login" β Function exists but incomplete
+
+**Actions if issues found:**
+- Unchecks false positive tasks
+- Adds tasks for missing work
+- Loops back to implementation
+
+### Step 9.6: Automated Code Review
+
+**Reviews:**
+- β Correctness (logic errors, edge cases)
+- β Security (vulnerabilities, input validation)
+- β Architecture (pattern compliance, SOLID principles)
+- β Performance (inefficiencies, optimization opportunities)
+- β Testing (coverage gaps, test quality)
+- β Code Quality (readability, maintainability)
+
+**Actions if issues found:**
+- Adds review findings as tasks
+- Loops back to implementation
+- Continues until issues resolved
+
+## Usage
+
+### Basic Usage
+
+```bash
+# Load any BMAD agent
+/super-dev-story
+
+# Follows same flow as dev-story, with extra validation
+```
+
+### Specify Story
+
+```bash
+/super-dev-story _bmad-output/implementation-artifacts/story-1.2.md
+```
+
+### Expected Flow
+
+```
+1. Pre-dev gap analysis
+ ββ "Approve task updates? [Y/A/n/e/s/r]"
+ ββ Select option
+
+2. Development (standard TDD cycle)
+ ββ Implements all tasks
+
+3. Post-dev gap analysis
+ ββ Scans codebase
+ ββ If gaps: adds tasks, loops back
+ ββ If clean: proceeds
+
+4. Code review
+ ββ Analyzes all changes
+ ββ If issues: adds tasks, loops back
+ ββ If clean: proceeds
+
+5. Story marked "review"
+ ββ Truly complete!
+```
+
+## Fix Iteration Safety
+
+Super-dev has a **max iteration limit** (default: 3) to prevent infinite loops:
+
+```yaml
+# workflow.yaml
+super_dev_settings:
+ max_fix_iterations: 3 # Stop after 3 fix cycles
+ fail_on_critical_issues: true # HALT if critical security issues
+```
+
+If exceeded:
+```
+π Maximum Fix Iterations Reached
+
+Attempted 3 fix cycles.
+Manual intervention required.
+
+Issues remaining:
+- [List of unresolved issues]
+```
+
+## Examples
+
+### Example 1: Perfect First Try
+
+```
+/super-dev-story
+
+Pre-gap: β Tasks accurate
+Development: β 8 tasks completed
+Post-gap: β All work verified
+Code review: β No issues
+
+β Story complete! (45 minutes, 85K tokens)
+```
+
+### Example 2: Post-Dev Catches Incomplete Work
+
+```
+/super-dev-story
+
+Pre-gap: β Tasks accurate
+Development: β 8 tasks completed
+Post-gap: β οΈ Tests claim 90% coverage, actual 65%
+
+β Adds task: "Increase test coverage to 90%"
+β Implements missing tests
+β Post-gap: β Now 92% coverage
+β Code review: β No issues
+
+β Story complete! (52 minutes, 95K tokens)
+```
+
+### Example 3: Code Review Finds Security Issue
+
+```
+/super-dev-story
+
+Pre-gap: β Tasks accurate
+Development: β 10 tasks completed
+Post-gap: β All work verified
+Code review: π¨ CRITICAL - SQL injection vulnerability
+
+β Adds task: "Fix SQL injection in user search"
+β Implements parameterized queries
+β Post-gap: β Verified
+β Code review: β Security issue resolved
+
+β Story complete! (58 minutes, 110K tokens)
+```
+
+## Comparison to Standard Workflow
+
+### Standard Flow (dev-story)
+
+```
+Day 1: Develop story (30 min)
+Day 2: Human review finds 3 issues
+Day 3: Fix issues (20 min)
+Day 4: Human review again
+Day 5: Approved
+
+Total: 5 days, 2 review cycles
+```
+
+### Super-Dev Flow
+
+```
+Day 1: Super-dev-story
+ - Development (30 min)
+ - Post-gap finds 1 issue (auto-fix 5 min)
+ - Code review finds 2 issues (auto-fix 15 min)
+ - Complete (50 min total)
+
+Day 2: Human review
+Day 3: Approved (minimal/no changes needed)
+
+Total: 3 days, 1 review cycle
+```
+
+**Savings:** 2 days, 1 fewer review cycle, higher initial quality
+
+## Troubleshooting
+
+### "Super-dev keeps looping forever"
+
+**Cause:** Each validation finds new issues
+**Solution:** This indicates quality problems. Review max_fix_iterations setting or manually intervene.
+
+### "Post-dev gap analysis keeps failing"
+
+**Cause:** Dev agent marking tasks complete prematurely
+**Solution:** This is expected! Super-dev catches this. The loop ensures actual completion.
+
+### "Code review too strict"
+
+**Cause:** Reviewing for issues standard dev-story would miss
+**Solution:** This is intentional. For less strict review, use standard dev-story.
+
+### "Too many tokens/too slow"
+
+**Cause:** Multi-stage validation adds overhead
+**Solution:** Use standard dev-story for non-critical stories. Reserve super-dev for important work.
+
+## Best Practices
+
+1. **Reserve for important stories** - Don't use for trivial changes
+2. **Trust the process** - Fix iterations mean it's working correctly
+3. **Review limits** - Adjust max_fix_iterations if stories are complex
+4. **Monitor costs** - Track token usage vs review cycle savings
+5. **Learn patterns** - Code review findings inform future architecture
+
+## Configuration Reference
+
+```yaml
+# _bmad/bmm/config.yaml or _bmad/bmgd/config.yaml
+
+# Per-project settings
+super_dev_settings:
+ post_dev_gap_analysis: true # Enable post-dev validation
+ auto_code_review: true # Enable automatic code review
+ fail_on_critical_issues: true # HALT on security vulnerabilities
+ max_fix_iterations: 3 # Maximum fix cycles before manual intervention
+ auto_fix_minor_issues: false # Auto-fix LOW severity without asking
+```
+
+## See Also
+
+- [dev-story workflow](../dev-story/) - Standard development workflow
+- [gap-analysis workflow](../gap-analysis/) - Standalone audit tool
+- [Gap Analysis Guide](../../../../docs/gap-analysis.md) - Complete documentation
+- [Super-Dev Mode Concept](../../../../docs/super-dev-mode.md) - Vision and roadmap
+
+---
+
+**Super-Dev-Story: Because "done" should mean DONE** β
diff --git a/src/bmm/workflows/4-implementation/super-dev-story/instructions.xml b/src/bmm/workflows/4-implementation/super-dev-story/instructions.xml
new file mode 100644
index 00000000..b296847a
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/super-dev-story/instructions.xml
@@ -0,0 +1,299 @@
+
+ The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
+ You MUST have already loaded and processed: {installed_path}/workflow.yaml
+ Communicate all responses in {communication_language} and language MUST be tailored to {user_skill_level}
+ Generate all documents in {document_output_language}
+ π SUPER-DEV MODE: Enhanced quality workflow with post-implementation validation and automated code review
+ This workflow orchestrates existing workflows with additional validation steps
+
+
+
+
+
+
+ π― RUN DEV-STORY - Complete all standard development steps
+ This includes: story loading, pre-dev gap analysis, development, testing, and task completion
+
+
+
+
+
+
+ Pass through any user-provided story file path and auto-accept setting
+
+
+
+
+
+
+
+
+ HALT - dev-story must complete first
+
+
+
+
+
+
+
+
+ π POST-DEV VALIDATION - Verify all work actually completed!
+ This catches incomplete implementations that were prematurely marked done
+
+
+
+
+ Re-read story file to get requirements and tasks
+ Extract all tasks marked [x] complete
+ For each completed task, identify what should exist in codebase
+
+
+ Use Glob to find files that should have been created
+ Use Grep to search for functions/classes that should exist
+ Use Read to verify implementation completeness (not just existence)
+ Run tests to verify claimed test coverage actually exists and passes
+
+
+ Compare claimed work vs actual implementation:
+
+ **POST-DEV VERIFICATION:**
+ β Verified Complete:
+ - List tasks where code fully exists and works
+ - Confirm tests exist and pass
+ - Verify implementation matches requirements
+
+
+ β False Positives Detected:
+ - List tasks marked [x] but code missing or incomplete
+ - Identify claimed tests that don't exist or fail
+ - Note partial implementations marked as complete
+
+
+
+
+
+
+ Uncheck false positive tasks in story file
+ Add new tasks for missing work
+ Update Gap Analysis section with post-dev findings
+
+
+
+
+
+
+ Resume with added tasks for missing work
+
+
+
+
+
+
+
+ Update Gap Analysis section with post-dev verification results
+
+
+
+
+
+
+
+
+ π AUTO CODE REVIEW - Independent quality validation
+
+
+
+
+
+ Run code review on completed story
+
+
+ Parse code review results from story file "Code Review" section
+ Extract issues by severity (Critical, High, Medium, Low)
+ Count total issues found
+
+
+
+
+ Add code review findings as tasks in story file
+
+
+
+
+ Fix code review issues
+
+
+
+
+
+
+
+
+ Auto-fix these minor issues? [Y/n/skip]:
+
+
+ Add review findings as tasks
+
+
+
+
+
+
+
+ Document issues in story file
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ π PUSH-ALL - Stage, commit, and push with safety validation
+ β‘ TARGETED COMMIT: Only commit files from THIS story's File List (safe for parallel agents)
+
+
+ Read story file and extract the "File List" section
+ Parse all file paths listed (relative to repo root)
+ Also include the story file itself in the list
+ Store as {{story_files}} - space-separated list of all files
+
+
+
+
+
+
+ Only commit files changed by this story
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Based on {user_skill_level}, ask if user needs explanations about implementation, decisions, or findings
+
+
+ Provide clear, contextual explanations
+
+
+
+
+
+
diff --git a/src/bmm/workflows/4-implementation/dev-story/workflow.yaml b/src/bmm/workflows/4-implementation/super-dev-story/workflow.yaml
similarity index 74%
rename from src/bmm/workflows/4-implementation/dev-story/workflow.yaml
rename to src/bmm/workflows/4-implementation/super-dev-story/workflow.yaml
index 9c54c125..530f041c 100644
--- a/src/bmm/workflows/4-implementation/dev-story/workflow.yaml
+++ b/src/bmm/workflows/4-implementation/super-dev-story/workflow.yaml
@@ -1,5 +1,5 @@
-name: dev-story
-description: "Execute a story by implementing tasks/subtasks, writing tests, validating, and updating the story file per acceptance criteria"
+name: super-dev-story
+description: "Enhanced story development with post-implementation validation and automated code review - ensures stories are truly complete before marking done"
author: "BMad"
# Critical variables from config
@@ -13,7 +13,7 @@ story_dir: "{config_source}:implementation_artifacts"
date: system-generated
# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/dev-story"
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/super-dev-story"
instructions: "{installed_path}/instructions.xml"
validation: "{installed_path}/checklist.md"
@@ -22,6 +22,13 @@ implementation_artifacts: "{config_source}:implementation_artifacts"
sprint_status: "{implementation_artifacts}/sprint-status.yaml"
project_context: "**/project-context.md"
+# Super-dev specific settings
+super_dev_settings:
+ post_dev_gap_analysis: true
+ auto_code_review: true
+ fail_on_critical_issues: true
+ max_fix_iterations: 3
+
# Autonomous mode settings (passed from parent workflow like batch-super-dev)
auto_accept_gap_analysis: false # When true, skip gap analysis approval prompt
diff --git a/src/bmm/workflows/4-implementation/validate-all-epics/instructions.xml b/src/bmm/workflows/4-implementation/validate-all-epics/instructions.xml
new file mode 100644
index 00000000..d0969730
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-all-epics/instructions.xml
@@ -0,0 +1,158 @@
+
+ The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
+ You MUST have already loaded and processed: {installed_path}/workflow.yaml
+ This validates EVERY epic in the project - comprehensive health check
+
+
+ Load {{sprint_status_file}}
+
+
+
+ HALT
+
+
+ Parse development_status section
+ Extract all epic keys (entries starting with "epic-")
+ Filter out retrospectives (ending with "-retrospective")
+ Store as {{epic_list}}
+
+
+
+
+
+ Run validate-epic-status for EACH epic
+
+ Initialize counters:
+ - total_stories_scanned = 0
+ - total_valid_stories = 0
+ - total_invalid_stories = 0
+ - total_updates_applied = 0
+ - epics_validated = []
+
+
+
+ Set {{current_epic}} = current loop item
+
+
+
+
+ Execute validation script:
+ python3 scripts/lib/sprint-status-updater.py --epic {{current_epic}} --mode validate
+
+
+ Parse script output:
+ - Story count
+ - Valid/invalid/missing counts
+ - Inferred statuses
+ - Updates needed
+
+
+
+ Execute fix script:
+ python3 scripts/lib/sprint-status-updater.py --epic {{current_epic}} --mode fix
+
+
+ Count updates applied
+ Add to total_updates_applied
+
+
+ Store validation results for {{current_epic}}
+ Increment totals
+
+
+
+
+
+
+
+
+
+
+ Write comprehensive report to {{default_output_file}}
+
+
+
+
+
+
+
+
+
diff --git a/src/bmm/workflows/4-implementation/validate-all-epics/workflow.yaml b/src/bmm/workflows/4-implementation/validate-all-epics/workflow.yaml
new file mode 100644
index 00000000..4b3109e5
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-all-epics/workflow.yaml
@@ -0,0 +1,30 @@
+name: validate-all-epics
+description: "Validate and fix sprint-status.yaml for ALL epics. Runs validate-epic-status on every epic in parallel, consolidates results, rebuilds accurate sprint-status.yaml."
+author: "BMad"
+version: "1.0.0"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+user_name: "{config_source}:user_name"
+communication_language: "{config_source}:communication_language"
+implementation_artifacts: "{config_source}:implementation_artifacts"
+story_dir: "{implementation_artifacts}"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/validate-all-epics"
+instructions: "{installed_path}/instructions.xml"
+
+# Variables
+variables:
+ sprint_status_file: "{implementation_artifacts}/sprint-status.yaml"
+ validation_mode: "fix" # Options: "report-only", "fix"
+ parallel_validation: true # Validate epics in parallel for speed
+
+# Sub-workflow
+validate_epic_workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/validate-epic-status/workflow.yaml"
+
+# Output
+default_output_file: "{story_dir}/.all-epics-validation-report.md"
+
+standalone: true
+web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/validate-all-stories-deep/instructions.xml b/src/bmm/workflows/4-implementation/validate-all-stories-deep/instructions.xml
new file mode 100644
index 00000000..4f73b8d7
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-all-stories-deep/instructions.xml
@@ -0,0 +1,338 @@
+
+ The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
+ You MUST have already loaded and processed: {installed_path}/workflow.yaml
+ This is the COMPREHENSIVE AUDIT - validates all stories using Haiku agents
+ Cost: ~$76 for 511 stories with Haiku (vs $793 with Sonnet)
+
+
+ Find all .md files in {{story_dir}}
+
+ Filter out meta-documents:
+ - Files starting with "EPIC-" (completion reports)
+ - Files starting with "." (progress files)
+ - Files containing: COMPLETION, SUMMARY, REPORT, SESSION-, REVIEW-, README, INDEX
+ - Files like "atdd-checklist-", "gap-analysis-", "review-"
+
+
+
+ Filter to stories matching: {{epic_filter}}-*.md
+
+
+ Store as {{story_list}}
+ Count {{story_count}}
+
+
+
+
+
+ Initialize counters:
+ - stories_validated = 0
+ - verified_complete = 0
+ - needs_rework = 0
+ - false_positives = 0
+ - in_progress = 0
+ - total_false_positive_tasks = 0
+ - total_critical_issues = 0
+
+
+ Split {{story_list}} into batches of {{batch_size}}
+
+
+ Set {{current_batch}} = current batch
+ Set {{batch_number}} = loop index + 1
+
+
+
+
+
+ Set {{story_file}} = current story path
+ Extract {{story_id}} from filename
+
+
+
+
+
+
+
+
+ Parse validation results:
+ - category (VERIFIED_COMPLETE, FALSE_POSITIVE, etc.)
+ - verification_score
+ - false_positive_count
+ - false_negative_count
+ - critical_issues_count
+
+
+ Store results for {{story_id}}
+ Increment counters based on category
+
+
+
+ Increment stories_validated
+
+
+
+
+
+ Write progress to {{progress_file}}:
+ - stories_validated
+ - current_batch
+ - results_so_far
+
+
+
+
+
+
+
+ Calculate platform-wide metrics:
+ - Overall health score: (verified_complete / story_count) Γ 100
+ - False positive rate: (false_positive_stories / story_count) Γ 100
+ - Total rework estimate: false_positive_stories Γ 3h + needs_rework Γ 2h
+
+
+ Group results by epic
+
+ Identify worst offenders (highest false positive rates)
+
+
+
+
+
+
+# Comprehensive Platform Audit Report
+
+**Generated:** {{date}}
+**Stories Validated:** {{story_count}}
+**Agent Model:** Haiku 4.5
+**Total Cost:** ~${{actual_cost}}
+
+---
+
+## Executive Summary
+
+**Platform Health Score:** {{health_score}}/100
+
+{{#if health_score >= 90}}
+β **EXCELLENT** - Platform is production-ready with high confidence
+{{else if health_score >= 75}}
+β οΈ **GOOD** - Minor issues to address, generally solid
+{{else if health_score >= 60}}
+β οΈ **NEEDS WORK** - Significant rework required before production
+{{else}}
+β **CRITICAL** - Major quality issues found, not production-ready
+{{/if}}
+
+**Key Findings:**
+- {{verified_complete}} stories verified complete ({{verified_complete_pct}}%)
+- {{false_positives}} stories are false positives ({{false_positives_pct}}%)
+- {{total_false_positive_tasks}} tasks claimed done but not implemented
+- {{total_critical_issues}} CRITICAL code quality issues found
+
+---
+
+## β False Positive Stories ({{false_positives}} total)
+
+**These stories are marked "done" but have significant missing/stubbed code:**
+
+{{#each false_positive_stories}}
+### {{this.story_id}} (Score: {{this.score}}/100)
+
+**Current Status:** {{this.current_status}}
+**Should Be:** in-progress or ready-for-dev
+
+**Missing/Stubbed:**
+{{#each this.false_positive_tasks}}
+- {{this.task}}
+ - {{this.evidence}}
+{{/each}}
+
+**Estimated Fix:** {{this.estimated_hours}}h
+
+---
+{{/each}}
+
+**Total Rework:** {{false_positive_rework_hours}} hours
+
+---
+
+## β οΈ Stories Needing Rework ({{needs_rework}} total)
+
+{{#each needs_rework_stories}}
+### {{this.story_id}} (Score: {{this.score}}/100)
+
+**Issues:**
+- {{this.false_positive_count}} incomplete tasks
+- {{this.critical_issues}} CRITICAL quality issues
+- {{this.high_issues}} HIGH priority issues
+
+**Top Issues:**
+{{#each this.top_issues limit=5}}
+- {{this}}
+{{/each}}
+
+---
+{{/each}}
+
+**Total Rework:** {{needs_rework_hours}} hours
+
+---
+
+## β Verified Complete Stories ({{verified_complete}} total)
+
+**These stories are production-ready with verified code:**
+
+{{#each verified_complete_stories}}
+- {{this.story_id}} ({{this.score}}/100)
+{{/each}}
+
+---
+
+## π Epic Health Breakdown
+
+{{#each epic_summary}}
+### Epic {{this.epic}}
+
+**Stories:** {{this.total}}
+**Verified Complete:** {{this.verified}} ({{this.verified_pct}}%)
+**False Positives:** {{this.false_positives}}
+**Needs Rework:** {{this.needs_rework}}
+
+**Health Score:** {{this.health_score}}/100
+
+{{#if this.health_score < 70}}
+β οΈ **ATTENTION NEEDED** - This epic has quality issues
+{{/if}}
+
+**Top Issues:**
+{{#each this.top_issues limit=3}}
+- {{this}}
+{{/each}}
+
+---
+{{/each}}
+
+---
+
+## π― Recommended Action Plan
+
+### Phase 1: Fix False Positives (CRITICAL - {{false_positive_rework_hours}}h)
+
+{{#each false_positive_stories limit=20}}
+{{@index + 1}}. **{{this.story_id}}** ({{this.estimated_hours}}h)
+ - {{this.false_positive_count}} tasks to implement
+ - Update status to in-progress
+{{/each}}
+
+{{#if false_positives > 20}}
+... and {{false_positives - 20}} more (see full list above)
+{{/if}}
+
+### Phase 2: Address Rework Items (HIGH - {{needs_rework_hours}}h)
+
+{{#each needs_rework_stories limit=10}}
+{{@index + 1}}. **{{this.story_id}}** ({{this.estimated_hours}}h)
+ - Fix {{this.critical_issues}} CRITICAL issues
+ - Complete {{this.false_positive_count}} tasks
+{{/each}}
+
+### Phase 3: Fix False Negatives (LOW - batch update)
+
+- {{total_false_negative_tasks}} unchecked tasks that are actually complete
+- Can batch update checkboxes (low priority)
+
+---
+
+## π° Audit Cost Analysis
+
+**This Validation Run:**
+- Stories validated: {{story_count}}
+- Agent sessions: {{story_count}} (one Haiku agent per story)
+- Tokens used: ~{{tokens_used_millions}}M
+- Cost: ~${{actual_cost}}
+
+**Remediation Cost:**
+- Estimated hours: {{total_rework_hours}}h
+- At AI velocity: {{ai_velocity_days}} days of work
+- Token cost: ~${{remediation_token_cost}}
+
+**Total Investment:** ${{actual_cost}} (audit) + ${{remediation_token_cost}} (fixes) = ${{total_cost}}
+
+---
+
+## π Next Steps
+
+1. **Immediate:** Fix {{false_positives}} false positive stories
+2. **This Week:** Address {{total_critical_issues}} CRITICAL issues
+3. **Next Week:** Rework {{needs_rework}} stories
+4. **Ongoing:** Re-validate fixed stories to confirm
+
+**Commands:**
+```bash
+# Validate specific story
+/validate-story-deep docs/sprint-artifacts/16e-6-ecs-task-definitions-tier3.md
+
+# Validate specific epic
+/validate-all-stories-deep --epic 16e
+
+# Re-run full audit (after fixes)
+/validate-all-stories-deep
+```
+
+---
+
+**Report Generated By:** validate-all-stories-deep workflow
+**Validation Method:** LLM-powered (Haiku 4.5 agents read actual code)
+**Confidence Level:** Very High (code-based verification, not regex patterns)
+
+
+
+
diff --git a/src/bmm/workflows/4-implementation/validate-all-stories-deep/workflow.yaml b/src/bmm/workflows/4-implementation/validate-all-stories-deep/workflow.yaml
new file mode 100644
index 00000000..76f00357
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-all-stories-deep/workflow.yaml
@@ -0,0 +1,36 @@
+name: validate-all-stories-deep
+description: "Comprehensive platform audit using Haiku agents. Validates ALL stories by reading actual code. The bulletproof validation for production readiness."
+author: "BMad"
+version: "1.0.0"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+user_name: "{config_source}:user_name"
+communication_language: "{config_source}:communication_language"
+implementation_artifacts: "{config_source}:implementation_artifacts"
+story_dir: "{implementation_artifacts}"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/validate-all-stories-deep"
+instructions: "{installed_path}/instructions.xml"
+
+# Input variables
+variables:
+ epic_filter: "" # Optional: Only validate specific epic (e.g., "16e")
+ batch_size: 5 # Validate 5 stories at a time (prevents spawning 511 agents at once!)
+ concurrent_limit: 5 # Max 5 agents running concurrently
+ auto_fix: false # If true, auto-update statuses based on validation
+ pause_between_batches: 30 # Seconds to wait between batches (rate limiting)
+
+# Sub-workflow
+validate_story_workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/validate-story-deep/workflow.yaml"
+
+# Agent configuration
+agent_model: "haiku" # Cost: ~$66 for 511 stories vs $793 with Sonnet
+
+# Output
+default_output_file: "{story_dir}/.comprehensive-audit-{date}.md"
+progress_file: "{story_dir}/.validation-progress-{date}.yaml"
+
+standalone: true
+web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/validate-all-stories/instructions.xml b/src/bmm/workflows/4-implementation/validate-all-stories/instructions.xml
new file mode 100644
index 00000000..432e6e6f
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-all-stories/instructions.xml
@@ -0,0 +1,411 @@
+
+ The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
+ You MUST have already loaded and processed: {installed_path}/workflow.yaml
+ This is the COMPREHENSIVE AUDIT - validates every story's tasks against actual codebase
+
+
+ Find all story files in {{story_dir}}
+ Filter out meta-documents:
+ - Files starting with "EPIC-" (completion reports)
+ - Files with "COMPLETION", "SUMMARY", "REPORT" in name
+ - Files starting with "." (hidden progress files)
+ - Files like "README", "INDEX", "SESSION-", "REVIEW-"
+
+
+
+ Filter to stories starting with {{epic_filter}}- (e.g., "16e-")
+
+
+ Store as {{story_list}}
+ Count {{story_count}}
+
+
+
+
+
+ Initialize counters:
+ - stories_validated = 0
+ - verified_complete = 0
+ - needs_rework = 0
+ - false_positives = 0
+ - in_progress = 0
+ - total_false_positive_tasks = 0
+ - total_tasks_verified = 0
+
+
+
+ Set {{current_story}} = current story file
+ Extract {{story_id}} from filename
+
+
+
+
+ Execute: python3 {{task_verification_script}} {{current_story}}
+
+ Parse output:
+ - total_tasks
+ - checked_tasks
+ - false_positives
+ - false_negatives
+ - verification_score
+ - task_details (with evidence)
+
+
+ Categorize story:
+ IF verification_score >= 95 AND false_positives == 0
+ β category = "VERIFIED_COMPLETE"
+ ELSE IF verification_score >= 80 AND false_positives <= 2
+ β category = "COMPLETE_WITH_MINOR_ISSUES"
+ ELSE IF false_positives > 5 OR verification_score < 50
+ β category = "FALSE_POSITIVE" (claimed done but missing code)
+ ELSE IF verification_score < 80
+ β category = "NEEDS_REWORK"
+ ELSE IF checked_tasks == 0
+ β category = "NOT_STARTED"
+ ELSE
+ β category = "IN_PROGRESS"
+
+
+ Store result:
+ - story_id
+ - verification_score
+ - category
+ - false_positive_count
+ - false_negative_count
+ - current_status (from sprint-status.yaml)
+ - recommended_status
+
+
+ Increment counters based on category
+ Add false_positive_count to total
+ Add total_tasks to total_tasks_verified
+
+
+
+
+
+
+
+
+ Filter stories where:
+ - category = "FALSE_POSITIVE" OR
+ - category = "NEEDS_REWORK" OR
+ - false_positives > 3
+
+
+ Count {{problem_story_count}}
+
+
+
+
+
+ Extract file list from story Dev Agent Record
+
+
+ Run /multi-agent-review on files:
+ - Security audit
+ - Silent failure detection
+ - Architecture compliance
+ - Type safety check
+
+
+ Categorize review findings by severity
+ Add to story's issue list
+
+
+
+
+
+
+
+
+
+
+
+
+ For stories marked "VERIFIED_COMPLETE":
+ 1. Extract service dependencies from story
+ 2. Check if dependent services still exist
+ 3. Run integration tests if they exist
+ 4. Check for API contract breaking changes
+
+
+ Detect overlaps:
+ - Multiple stories implementing same feature
+ - Duplicate files created
+ - Conflicting implementations
+
+
+
+
+
+
+
+# Comprehensive Story Validation Report
+
+**Generated:** {{date}}
+**Stories Validated:** {{story_count}}
+**Validation Depth:** {{validation_depth}}
+**Epic Filter:** {{epic_filter}} {{#if_no_filter}}(all epics){{/if}}
+
+---
+
+## Executive Summary
+
+**Overall Health Score:** {{overall_health_score}}/100
+
+**Story Categories:**
+- β **VERIFIED_COMPLETE:** {{verified_complete}} ({{verified_complete_pct}}%)
+- β οΈ **NEEDS_REWORK:** {{needs_rework}} ({{needs_rework_pct}}%)
+- β **FALSE_POSITIVES:** {{false_positives}} ({{false_positives_pct}}%)
+- π **IN_PROGRESS:** {{in_progress}} ({{in_progress_pct}}%)
+- π **NOT_STARTED:** {{not_started}} ({{not_started_pct}}%)
+
+**Task Verification:**
+- Total tasks verified: {{total_tasks_verified}}
+- False positive tasks: {{total_false_positive_tasks}} ({{false_positive_rate}}%)
+- False negative tasks: {{total_false_negative_tasks}}
+
+**Code Quality:**
+- CRITICAL issues: {{critical_issues_total}}
+- HIGH issues: {{high_issues_total}}
+- Files reviewed: {{files_reviewed}}
+
+---
+
+## β False Positive Stories (Claimed Done, Not Implemented)
+
+{{#each false_positive_stories}}
+### {{this.story_id}} (Score: {{this.verification_score}}/100)
+
+**Current Status:** {{this.current_status}}
+**Recommended:** in-progress or ready-for-dev
+
+**Issues:**
+{{#each this.false_positive_tasks}}
+- [ ] {{this.task}}
+ - Evidence: {{this.evidence}}
+{{/each}}
+
+**Action Required:**
+- Uncheck {{this.false_positive_count}} tasks
+- Implement missing code
+- Update sprint-status.yaml to in-progress
+{{/each}}
+
+**Total:** {{false_positive_stories_count}} stories
+
+---
+
+## β οΈ Stories Needing Rework
+
+{{#each needs_rework_stories}}
+### {{this.story_id}} (Score: {{this.verification_score}}/100)
+
+**Issues:**
+- {{this.false_positive_count}} false positive tasks
+- {{this.critical_issue_count}} CRITICAL code quality issues
+- {{this.high_issue_count}} HIGH priority issues
+
+**Recommended:**
+1. Fix CRITICAL issues first
+2. Implement {{this.false_positive_count}} missing tasks
+3. Re-run validation
+{{/each}}
+
+**Total:** {{needs_rework_count}} stories
+
+---
+
+## β Verified Complete Stories
+
+{{#each verified_complete_stories}}
+- {{this.story_id}} ({{this.verification_score}}/100)
+{{/each}}
+
+**Total:** {{verified_complete_count}} stories (production-ready)
+
+---
+
+## π Epic Breakdown
+
+{{#each epic_summary}}
+### Epic {{this.epic_num}}
+
+**Stories:** {{this.total_count}}
+**Verified Complete:** {{this.verified_count}} ({{this.verified_pct}}%)
+**False Positives:** {{this.false_positive_count}}
+**Needs Rework:** {{this.needs_rework_count}}
+
+**Health Score:** {{this.health_score}}/100
+{{/each}}
+
+---
+
+## π― Recommended Actions
+
+### Immediate (CRITICAL)
+
+{{#if false_positive_stories_count > 0}}
+**Fix {{false_positive_stories_count}} False Positive Stories:**
+
+{{#each false_positive_stories limit=10}}
+1. {{this.story_id}}: Update status to in-progress, implement {{this.false_positive_count}} missing tasks
+{{/each}}
+
+{{#if false_positive_stories_count > 10}}
+... and {{false_positive_stories_count - 10}} more (see full list above)
+{{/if}}
+{{/if}}
+
+### Short-term (HIGH Priority)
+
+{{#if needs_rework_count > 0}}
+**Address {{needs_rework_count}} Stories Needing Rework:**
+- Fix {{critical_issues_total}} CRITICAL code quality issues
+- Implement missing tasks
+- Re-validate after fixes
+{{/if}}
+
+### Maintenance (MEDIUM Priority)
+
+{{#if false_negative_count > 0}}
+**Update {{false_negative_count}} False Negative Tasks:**
+- Mark complete (code exists but checkbox unchecked)
+- Low impact, can batch update
+{{/if}}
+
+---
+
+## π° Cost Analysis
+
+**Validation Run:**
+- Stories validated: {{story_count}}
+- API tokens used: ~{{tokens_used}}K
+- Cost: ~${{cost}}
+
+**Remediation Estimate:**
+- False positives: {{false_positive_stories_count}} Γ 3h = {{remediation_hours_fp}}h
+- Needs rework: {{needs_rework_count}} Γ 2h = {{remediation_hours_rework}}h
+- **Total:** {{total_remediation_hours}}h estimated work
+
+---
+
+## π Next Steps
+
+1. **Fix false positive stories** ({{false_positive_stories_count}} stories)
+2. **Address CRITICAL issues** ({{critical_issues_total}} issues)
+3. **Re-run validation** on fixed stories
+4. **Update sprint-status.yaml** with verified statuses
+5. **Run weekly validation** to prevent future drift
+
+---
+
+**Generated by:** /validate-all-stories workflow
+**Validation Engine:** task-verification-engine.py v2.0
+**Multi-Agent Review:** {{multi_agent_review_enabled}}
+
+
+
+
+
+
+
+ Update story file: Change [ ] to [x] for verified tasks
+
+
+
+
+ Update sprint-status.yaml using sprint-status-updater.py
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/bmm/workflows/4-implementation/validate-all-stories/workflow.yaml b/src/bmm/workflows/4-implementation/validate-all-stories/workflow.yaml
new file mode 100644
index 00000000..638890fc
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-all-stories/workflow.yaml
@@ -0,0 +1,36 @@
+name: validate-all-stories
+description: "Comprehensive audit of ALL stories: verify tasks against codebase, run code quality reviews, check integrations. The bulletproof audit for production readiness."
+author: "BMad"
+version: "1.0.0"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+user_name: "{config_source}:user_name"
+communication_language: "{config_source}:communication_language"
+implementation_artifacts: "{config_source}:implementation_artifacts"
+story_dir: "{implementation_artifacts}"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/validate-all-stories"
+instructions: "{installed_path}/instructions.xml"
+
+# Input variables
+variables:
+ validation_depth: "deep" # Options: "quick" (tasks only), "deep" (tasks + review), "comprehensive" (full integration)
+ parallel_validation: true # Run story validations in parallel for speed
+ fix_mode: false # If true, auto-fix false negatives and update statuses
+ epic_filter: "" # Optional: Only validate stories from specific epic (e.g., "16e")
+
+# Tools
+task_verification_script: "{project-root}/scripts/lib/task-verification-engine.py"
+sprint_status_updater: "{project-root}/scripts/lib/sprint-status-updater.py"
+
+# Sub-workflow
+validate_story_workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/validate-story/workflow.yaml"
+
+# Output
+default_output_file: "{story_dir}/.comprehensive-validation-report-{date}.md"
+validation_summary_file: "{story_dir}/.validation-summary-{date}.json"
+
+standalone: true
+web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/validate-epic-status/instructions.xml b/src/bmm/workflows/4-implementation/validate-epic-status/instructions.xml
new file mode 100644
index 00000000..94b10823
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-epic-status/instructions.xml
@@ -0,0 +1,302 @@
+
+ The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
+ You MUST have already loaded and processed: {installed_path}/workflow.yaml
+ This is VALIDATION-ONLY mode - NO implementation, only status correction
+ Uses same logic as batch-super-dev but READS instead of WRITES code
+
+
+ Check if {{epic_num}} was provided
+
+
+ Which epic should I validate? (e.g., 19, 16d, 16e, 9b)
+ Store response as {{epic_num}}
+
+
+ Load {{sprint_status_file}}
+
+
+
+ HALT
+
+
+ Search for epic-{{epic_num}} entry in sprint_status_file
+ Extract all story entries for epic-{{epic_num}} (pattern: {{epic_num}}-*)
+ Count stories found in sprint-status.yaml for this epic
+
+
+
+
+
+ This is where we determine TRUTH - not from status fields, but from actual file analysis
+
+ For each story in epic (from sprint-status.yaml):
+ 1. Build story file path: {{story_dir}}/{{story_key}}.md
+ 2. Check if file exists
+ 3. If exists, read FULL file
+ 4. Analyze file content
+
+
+ For each story file, extract:
+ - File size in KB
+ - Total task count (count all "- [ ]" and "- [x]" lines)
+ - Checked task count (count "- [x]" lines)
+ - Completion rate (checked / total * 100)
+ - Explicit Status: field (if present)
+ - Has proper BMAD structure (12 sections)
+ - Section count (count ## headings)
+
+
+
+
+ For each story, classify quality:
+ VALID:
+ - File size >= 10KB
+ - Total tasks >= 5
+ - Has task list structure
+
+ INVALID:
+ - File size < 10KB (incomplete story)
+ - Total tasks < 5 (not detailed enough)
+ - File missing entirely
+
+
+ Store results as {{story_quality_map}}
+
+
+
+
+
+ Run git log to find commits mentioning epic stories:
+ Command: git log --oneline --since={{git_commit_lookback_days}} days ago
+
+
+ Parse commit messages for story IDs matching pattern: {{epic_num}}-\d+[a-z]?
+ Build map of story_id β commit_count
+
+
+
+
+
+ Search {{story_dir}} for files:
+ - .epic-{{epic_num}}-completion-report.md
+ - .batch-super-dev-{{epic_num}}-progress.yaml
+
+
+
+ Parse completed_stories list from progress file OR
+ Parse β story entries from completion report
+ Store as {{autonomous_completed_stories}}
+
+
+
+
+
+
+
+
+
+
+ Use MULTIPLE sources of truth, not just Status: field
+
+ For each story in epic, determine correct status using this logic:
+
+
+ Priority 1: Autonomous completion report
+ IF story in autonomous_completed_stories
+ β Status = "done" (VERY HIGH confidence)
+
+ Priority 2: Task completion rate + file quality
+ IF completion_rate >= 90% AND file is VALID (>10KB, >5 tasks)
+ β Status = "done" (HIGH confidence)
+
+ IF completion_rate 50-89% AND file is VALID
+ β Status = "in-progress" (MEDIUM confidence)
+
+ IF completion_rate < 50% AND file is VALID
+ β Status = "ready-for-dev" (MEDIUM confidence)
+
+ Priority 3: Explicit Status: field (if no other evidence)
+ IF Status: field exists AND matches above inferences
+ β Use it (MEDIUM confidence)
+
+ IF Status: field conflicts with task completion
+ β Prefer task completion (tasks are ground truth)
+
+ Priority 4: Git commits (supporting evidence)
+ IF 3+ commits + task completion >=90%
+ β Upgrade confidence to VERY HIGH
+
+ IF 1-2 commits but task completion <50%
+ β Status = "in-progress" (work started but not done)
+
+ Quality Gates:
+ IF file size < 10KB OR total tasks < 5
+ β DOWNGRADE status (can't be "done" if file is incomplete)
+ β Mark as "ready-for-dev" (story needs proper creation)
+ β Flag for regeneration with /create-story
+
+ Missing Files:
+ IF story file doesn't exist
+ β Status = "backlog" (story not created yet)
+
+
+ Build map of story_id β inferred_status with evidence and confidence
+
+
+
+
+
+
+
+ Write detailed report to {{default_output_file}}
+ EXIT workflow
+
+
+
+
+
+ Create backup of {{sprint_status_file}}
+ For each story needing update:
+ 1. Find story entry in development_status section
+ 2. Update status to inferred_status
+ 3. Add comment: "β Validated {{date}} - {{evidence_summary}}"
+ 4. Preserve all other content and structure
+
+
+ Update epic-{{epic_num}} status based on story completion:
+ IF all stories have status "done" AND all are valid files
+ β epic status = "done"
+
+ IF any stories "in-progress" OR "review"
+ β epic status = "in-progress"
+
+ IF all stories "backlog" OR "ready-for-dev"
+ β epic status = "backlog"
+
+
+ Update last_verified timestamp in header
+ Save {{sprint_status_file}}
+
+
+
+
+
+
+ Flag stories with issues:
+ - Missing story files (in sprint-status.yaml but no .md file)
+ - Invalid files (< 10KB or < 5 tasks)
+ - Conflicting evidence (Status: says done, tasks unchecked)
+ - Poor quality (no BMAD sections)
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/bmm/workflows/4-implementation/validate-epic-status/workflow.yaml b/src/bmm/workflows/4-implementation/validate-epic-status/workflow.yaml
new file mode 100644
index 00000000..620c0db2
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-epic-status/workflow.yaml
@@ -0,0 +1,34 @@
+name: validate-epic-status
+description: "Validate and fix sprint-status.yaml for a single epic. Scans story files for task completion, validates quality (>10KB, proper tasks), checks git commits, updates sprint-status.yaml to match REALITY."
+author: "BMad"
+version: "1.0.0"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+user_name: "{config_source}:user_name"
+communication_language: "{config_source}:communication_language"
+implementation_artifacts: "{config_source}:implementation_artifacts"
+story_dir: "{implementation_artifacts}"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/validate-epic-status"
+instructions: "{installed_path}/instructions.xml"
+
+# Inputs
+variables:
+ epic_num: "" # User provides (e.g., "19", "16d", "16e")
+ sprint_status_file: "{implementation_artifacts}/sprint-status.yaml"
+ validation_mode: "fix" # Options: "report-only", "fix", "strict"
+
+# Validation criteria
+validation_rules:
+ min_story_size_kb: 10 # Stories should be >= 10KB
+ min_tasks_required: 5 # Stories should have >= 5 tasks
+ completion_threshold: 90 # 90%+ tasks checked = "done"
+ git_commit_lookback_days: 30 # Search last 30 days for commits
+
+# Output
+default_output_file: "{story_dir}/.epic-{epic_num}-validation-report.md"
+
+standalone: true
+web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/validate-story-deep/instructions.xml b/src/bmm/workflows/4-implementation/validate-story-deep/instructions.xml
new file mode 100644
index 00000000..7b9825f1
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-story-deep/instructions.xml
@@ -0,0 +1,370 @@
+
+ The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
+ You MUST have already loaded and processed: {installed_path}/workflow.yaml
+ This uses HAIKU AGENTS to read actual code and verify task completion - NOT regex patterns
+
+
+ Load story file from {{story_file}}
+
+
+
+ HALT
+
+
+ Extract story metadata:
+ - Story ID from filename
+ - Epic number from "Epic:" field
+ - Current status from "Status:" or "**Status:**" field
+ - Files created/modified from Dev Agent Record section
+
+
+ Extract ALL tasks (pattern: "- [ ]" or "- [x]"):
+ - Parse checkbox state (checked/unchecked)
+ - Extract task text
+ - Count total, checked, unchecked
+
+
+
+
+
+
+ Spawn ONE Haiku agent to verify ALL tasks (avoids 50x agent startup overhead!)
+
+
+
+
+
+ Verify all {{total_count}} story tasks
+
+You are verifying ALL tasks for this user story by reading actual code.
+
+**Story:** {{story_id}}
+**Epic:** {{epic_num}}
+**Total Tasks:** {{total_count}}
+
+**Files from Story (Dev Agent Record):**
+{{#each file_list}}
+- {{this}}
+{{/each}}
+
+**Tasks to Verify:**
+
+{{#each task_list}}
+{{@index}}. [{{#if this.checked}}x{{else}} {{/if}}] {{this.text}}
+{{/each}}
+
+---
+
+**Your Job:**
+
+For EACH task above:
+
+1. **Find relevant files** - Use Glob to find files mentioned in task
+2. **Read the files** - Use Read tool to examine actual code
+3. **Verify implementation:**
+ - Is code real or stubs/TODOs?
+ - Is there error handling?
+ - Multi-tenant isolation (dealerId filters)?
+ - Are there tests?
+ - Does it match task description?
+
+4. **Make judgment for each task**
+
+**Output Format - JSON array with one entry per task:**
+
+```json
+{
+ "story_id": "{{story_id}}",
+ "total_tasks": {{total_count}},
+ "tasks": [
+ {
+ "task_number": 0,
+ "task_text": "Implement UserService",
+ "is_checked": true,
+ "actually_complete": false,
+ "confidence": "high",
+ "evidence": "File exists but has 'TODO: Implement findById' on line 45, tests not found",
+ "issues_found": ["Stub implementation", "Missing tests", "No dealerId filter"],
+ "recommendation": "Implement real logic, add tests, add multi-tenant isolation"
+ },
+ {
+ "task_number": 1,
+ "task_text": "Add error handling",
+ "is_checked": true,
+ "actually_complete": true,
+ "confidence": "very_high",
+ "evidence": "Try-catch blocks in UserService.ts:67-89, proper error logging, tests verify error cases",
+ "issues_found": [],
+ "recommendation": "None - task complete"
+ }
+ ]
+}
+```
+
+**Be efficient:** Read files once, verify all tasks, return comprehensive JSON.
+
+ general-purpose
+
+
+ Parse agent response (extract JSON)
+
+ For each task result:
+ - Determine verification_status (correct/false_positive/false_negative)
+ - Categorize into verified_complete, false_positives, false_negatives lists
+ - Count totals
+
+
+
+
+
+
+ Calculate scores:
+ - Task accuracy: (correct / total) Γ 100
+ - False positive penalty: false_positive_count Γ -5
+ - Overall score: max(0, task_accuracy + penalty)
+
+
+ Determine story category:
+ IF score >= 95 AND false_positives == 0
+ β VERIFIED_COMPLETE
+ ELSE IF score >= 80 AND false_positives <= 2
+ β COMPLETE_WITH_MINOR_ISSUES
+ ELSE IF false_positives > 5 OR score < 50
+ β FALSE_POSITIVE (story claimed done but significant missing code)
+ ELSE IF false_positives > 0
+ β NEEDS_REWORK
+ ELSE
+ β IN_PROGRESS
+
+
+ Determine recommended status:
+ VERIFIED_COMPLETE β "done"
+ COMPLETE_WITH_MINOR_ISSUES β "review"
+ FALSE_POSITIVE β "in-progress" or "ready-for-dev"
+ NEEDS_REWORK β "in-progress"
+ IN_PROGRESS β "in-progress"
+
+
+
+
+
+
+
+# Story Validation Report: {{story_id}}
+
+**Generated:** {{date}}
+**Validation Method:** LLM-powered deep verification (Haiku 4.5)
+**Overall Score:** {{overall_score}}/100
+**Category:** {{category}}
+
+---
+
+## Summary
+
+**Story:** {{story_id}}
+**Epic:** {{epic_num}}
+**Current Status:** {{current_status}}
+**Recommended Status:** {{recommended_status}}
+
+**Task Verification:**
+- Total: {{total_count}}
+- Checked: {{checked_count}}
+- Verified Complete: {{verified_complete_count}}
+- False Positives: {{false_positive_count}}
+- False Negatives: {{false_negative_count}}
+
+---
+
+## Verification Details
+
+{{#if false_positive_count > 0}}
+### β False Positives (CRITICAL - Code Claims vs Reality)
+
+{{#each false_positives}}
+**Task {{@index + 1}}:** {{this.task}}
+**Claimed:** [x] Complete
+**Reality:** Code missing or stub implementation
+
+**Evidence:**
+{{this.evidence}}
+
+**Issues Found:**
+{{#each this.issues_found}}
+- {{this}}
+{{/each}}
+
+**Recommendation:** {{this.recommendation}}
+
+---
+{{/each}}
+{{/if}}
+
+{{#if false_negative_count > 0}}
+### β οΈ False Negatives (Unchecked But Working)
+
+{{#each false_negatives}}
+**Task {{@index + 1}}:** {{this.task}}
+**Status:** [ ] Unchecked
+**Reality:** Code exists and working
+
+**Evidence:**
+{{this.evidence}}
+
+**Recommendation:** Mark task as complete [x]
+
+---
+{{/each}}
+{{/if}}
+
+{{#if verified_complete_count > 0}}
+### β Verified Complete Tasks
+
+{{verified_complete_count}} tasks verified with actual code review.
+
+{{#if show_all_verified}}
+{{#each verified_complete}}
+- {{this.task}} ({{this.confidence}} confidence)
+{{/each}}
+{{/if}}
+{{/if}}
+
+---
+
+## Final Verdict
+
+**Overall Score:** {{overall_score}}/100
+
+{{#if category == "VERIFIED_COMPLETE"}}
+β **VERIFIED COMPLETE**
+
+This story is production-ready:
+- All {{total_count}} tasks verified complete
+- Code quality confirmed through review
+- No significant issues found
+- Status "done" is accurate
+
+**Action:** None needed - story is solid
+{{/if}}
+
+{{#if category == "FALSE_POSITIVE"}}
+β **FALSE POSITIVE - Story NOT Actually Complete**
+
+**Problems:**
+- {{false_positive_count}} tasks checked but code missing/stubbed
+- Verification score: {{overall_score}}/100 (< 50%)
+- Story marked "{{current_status}}" but significant work remains
+
+**Required Actions:**
+1. Update sprint-status.yaml: {{story_id}} β in-progress
+2. Uncheck {{false_positive_count}} false positive tasks
+3. Implement missing code
+4. Re-run validation after implementation
+
+**Estimated Rework:** {{estimated_rework_hours}} hours
+{{/if}}
+
+{{#if category == "NEEDS_REWORK"}}
+β οΈ **NEEDS REWORK**
+
+**Problems:**
+- {{false_positive_count}} tasks with quality issues
+- Some code exists but has problems (TODOs, missing features, poor quality)
+
+**Required Actions:**
+{{#each action_items}}
+- [ ] {{this}}
+{{/each}}
+
+**Estimated Fix Time:** {{estimated_fix_hours}} hours
+{{/if}}
+
+{{#if category == "IN_PROGRESS"}}
+π **IN PROGRESS** (accurate status)
+
+- {{checked_count}}/{{total_count}} tasks complete
+- {{unchecked_count}} tasks remaining
+- Current status reflects reality
+
+**No action needed** - continue implementation
+{{/if}}
+
+---
+
+**Validation Cost:** ~${{validation_cost}}
+**Agent Model:** {{agent_model}}
+**Tasks Verified:** {{total_count}}
+
+
+
+
+
+ Story status should be updated from "{{current_status}}" to "{{recommended_status}}". Update sprint-status.yaml? (y/n)
+
+
+ Update sprint-status.yaml:
+ python3 scripts/lib/sprint-status-updater.py --epic {{epic_num}} --mode fix
+
+
+ Add validation note to story file Dev Agent Record
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/bmm/workflows/4-implementation/validate-story-deep/workflow.yaml b/src/bmm/workflows/4-implementation/validate-story-deep/workflow.yaml
new file mode 100644
index 00000000..7560a449
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-story-deep/workflow.yaml
@@ -0,0 +1,29 @@
+name: validate-story-deep
+description: "Deep story validation using Haiku agents to read and verify actual code. Each task gets micro code review to verify implementation quality."
+author: "BMad"
+version: "1.0.0"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+user_name: "{config_source}:user_name"
+communication_language: "{config_source}:communication_language"
+implementation_artifacts: "{config_source}:implementation_artifacts"
+story_dir: "{implementation_artifacts}"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/validate-story-deep"
+instructions: "{installed_path}/instructions.xml"
+
+# Input variables
+variables:
+ story_file: "" # Path to story file to validate
+
+# Agent configuration
+agent_model: "haiku" # Use Haiku 4.5 for cost efficiency ($0.13/story vs $1.50)
+parallel_tasks: true # Validate tasks in parallel (faster)
+
+# Output
+default_output_file: "{story_dir}/.validation-{story_id}-{date}.md"
+
+standalone: true
+web_bundle: false
diff --git a/src/bmm/workflows/4-implementation/validate-story/instructions.xml b/src/bmm/workflows/4-implementation/validate-story/instructions.xml
new file mode 100644
index 00000000..977e5de8
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-story/instructions.xml
@@ -0,0 +1,395 @@
+
+ The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
+ You MUST have already loaded and processed: {installed_path}/workflow.yaml
+ This performs DEEP validation - not just checkbox counting, but verifying code actually exists and works
+
+
+ Load story file from {{story_file}}
+
+
+
+ HALT
+
+
+ Extract story metadata:
+ - Story ID (from filename)
+ - Epic number
+ - Current status from Status: field
+ - Priority
+ - Estimated effort
+
+
+ Extract all tasks:
+ - Pattern: "- [ ]" or "- [x]"
+ - Count total tasks
+ - Count checked tasks
+ - Count unchecked tasks
+ - Calculate completion percentage
+
+
+ Extract file references from Dev Agent Record:
+ - Files created
+ - Files modified
+ - Files deleted
+
+
+
+
+
+
+ Use task-verification-engine.py for DEEP verification (not just file existence)
+
+ For each task in story:
+ 1. Extract task text
+ 2. Note if checked [x] or unchecked [ ]
+ 3. Pass to task-verification-engine.py
+ 4. Receive verification result with:
+ - should_be_checked: true/false
+ - confidence: very high/high/medium/low
+ - evidence: list of findings
+ - verification_status: correct/false_positive/false_negative/uncertain
+
+
+ Categorize tasks by verification status:
+ - β CORRECT: Checkbox matches reality
+ - β FALSE POSITIVE: Checked but code missing/stubbed
+ - β οΈ FALSE NEGATIVE: Unchecked but code exists
+ - β UNCERTAIN: Cannot verify (low confidence)
+
+
+ Calculate verification score:
+ - (correct_tasks / total_tasks) Γ 100
+ - Penalize false positives heavily (-5 points each)
+ - Penalize false negatives lightly (-2 points each)
+
+
+
+
+
+
+ Extract all files from Dev Agent Record file list
+
+
+
+ Skip to step 4
+
+
+ For each file:
+ 1. Check if file exists
+ 2. Read file content
+ 3. Check for quality issues:
+ - TODO/FIXME comments without GitHub issues
+ - any types in TypeScript
+ - Hardcoded values (siteId, dealerId, API keys)
+ - Missing error handling
+ - Missing multi-tenant isolation (dealerId filters)
+ - Missing audit logging on mutations
+ - Security vulnerabilities (SQL injection, XSS)
+
+
+ Run multi-agent review if files exist:
+ - Security audit
+ - Silent failure detection
+ - Architecture compliance
+ - Performance analysis
+
+
+ Categorize issues by severity:
+ - CRITICAL: Security, data loss, breaking changes
+ - HIGH: Missing features, poor quality, technical debt
+ - MEDIUM: Code smells, minor violations
+ - LOW: Style issues, nice-to-haves
+
+
+
+
+
+
+ Extract dependencies from story:
+ - Services called
+ - APIs consumed
+ - Database tables used
+ - Cache keys accessed
+
+
+ For each dependency:
+ 1. Check if dependency still exists
+ 2. Check if API contract is still valid
+ 3. Run integration tests if they exist
+ 4. Check for breaking changes in dependent stories
+
+
+
+
+
+
+ Calculate overall story health:
+ - Task verification score (0-100)
+ - Code quality score (0-100)
+ - Integration score (0-100)
+ - Overall score = weighted average
+
+
+ Determine recommended status:
+ IF verification_score >= 95 AND quality_score >= 90 AND no CRITICAL issues
+ β VERIFIED_COMPLETE
+ ELSE IF verification_score >= 80 AND quality_score >= 70
+ β COMPLETE_WITH_ISSUES (document issues)
+ ELSE IF false_positives > 0 OR critical_issues > 0
+ β NEEDS_REWORK (code missing or broken)
+ ELSE IF verification_score < 50
+ β FALSE_POSITIVE (claimed done but not implemented)
+ ELSE
+ β IN_PROGRESS (partially complete)
+
+
+
+
+
+
+
+# Story Validation Report: {{story_id}}
+
+**Validation Date:** {{date}}
+**Validation Depth:** {{validation_depth}}
+**Overall Score:** {{overall_score}}/100
+
+---
+
+## Summary
+
+**Story:** {{story_id}} - {{story_title}}
+**Epic:** {{epic_num}}
+**Current Status:** {{current_status}}
+**Recommended Status:** {{recommended_status}}
+
+**Task Completion:** {{checked_count}}/{{total_count}} ({{completion_pct}}%)
+**Verification Score:** {{verification_score}}/100
+**Code Quality Score:** {{quality_score}}/100
+
+---
+
+## Task Verification Details
+
+{{task_verification_output}}
+
+---
+
+## Code Quality Review
+
+{{code_quality_output}}
+
+---
+
+## Integration Verification
+
+{{integration_output}}
+
+---
+
+## Recommended Actions
+
+{{#if critical_issues}}
+### Priority 1: Fix Critical Issues (BLOCKING)
+{{#each critical_issues}}
+- [ ] {{this.file}}: {{this.description}}
+{{/each}}
+{{/if}}
+
+{{#if false_positives}}
+### Priority 2: Fix False Positives (Code Claims vs Reality)
+{{#each false_positives}}
+- [ ] {{this.task}} - {{this.evidence}}
+{{/each}}
+{{/if}}
+
+{{#if high_issues}}
+### Priority 3: Address High Priority Issues
+{{#each high_issues}}
+- [ ] {{this.file}}: {{this.description}}
+{{/each}}
+{{/if}}
+
+{{#if false_negatives}}
+### Priority 4: Update Task Checkboxes (Low Impact)
+{{#each false_negatives}}
+- [ ] Mark complete: {{this.task}}
+{{/each}}
+{{/if}}
+
+---
+
+## Next Steps
+
+{{#if recommended_status == "VERIFIED_COMPLETE"}}
+β **Story is verified complete and production-ready**
+- Update sprint-status.yaml: {{story_id}} = done
+- No further action required
+{{/if}}
+
+{{#if recommended_status == "NEEDS_REWORK"}}
+β οΈ **Story requires rework before marking complete**
+- Fix {{critical_count}} CRITICAL issues
+- Address {{false_positive_count}} false positive tasks
+- Re-run validation after fixes
+{{/if}}
+
+{{#if recommended_status == "FALSE_POSITIVE"}}
+β **Story is marked done but not actually implemented**
+- Verification score: {{verification_score}}/100 (< 50%)
+- Update sprint-status.yaml: {{story_id}} = in-progress or ready-for-dev
+- Implement missing tasks before claiming done
+{{/if}}
+
+---
+
+**Generated by:** /validate-story workflow
+**Validation Engine:** task-verification-engine.py v2.0
+
+
+
+
+ Apply recommended status change to sprint-status.yaml? (y/n)
+
+
+ Update sprint-status.yaml:
+ - Use sprint-status-updater.py
+ - Update {{story_id}} to {{recommended_status}}
+ - Add comment: "Validated {{date}}, score {{overall_score}}/100"
+
+
+ Update story file:
+ - Add validation report link to Dev Agent Record
+ - Add validation score to completion notes
+ - Update Status: field if changed
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/bmm/workflows/4-implementation/validate-story/workflow.yaml b/src/bmm/workflows/4-implementation/validate-story/workflow.yaml
new file mode 100644
index 00000000..4ea2ee47
--- /dev/null
+++ b/src/bmm/workflows/4-implementation/validate-story/workflow.yaml
@@ -0,0 +1,29 @@
+name: validate-story
+description: "Deep validation of a single story: verify tasks against codebase, run code quality review, check for regressions. Produces verification report with actionable findings."
+author: "BMad"
+version: "1.0.0"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+user_name: "{config_source}:user_name"
+communication_language: "{config_source}:communication_language"
+implementation_artifacts: "{config_source}:implementation_artifacts"
+story_dir: "{implementation_artifacts}"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/validate-story"
+instructions: "{installed_path}/instructions.xml"
+
+# Input variables
+variables:
+ story_file: "" # Path to story file (e.g., docs/sprint-artifacts/16e-6-ecs-task-definitions-tier3.md)
+ validation_depth: "deep" # Options: "quick" (tasks only), "deep" (tasks + code review), "comprehensive" (tasks + review + integration tests)
+
+# Tools
+task_verification_script: "{project-root}/scripts/lib/task-verification-engine.py"
+
+# Output
+default_output_file: "{story_dir}/.validation-{story_id}-{date}.md"
+
+standalone: true
+web_bundle: false
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-dev/data/project-levels.yaml b/src/bmm/workflows/bmad-quick-flow/quick-dev/data/project-levels.yaml
deleted file mode 100644
index 628573ec..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-dev/data/project-levels.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# BMM Project Scale Levels - Source of Truth
-# Reference: /_bmad/bmm/README.md lines 77-85
-
-levels:
- 0:
- name: "Level 0"
- title: "Single Atomic Change"
- stories: "1 story"
- description: "Bug fix, tiny feature, one small change"
- documentation: "Minimal - tech spec only"
- architecture: false
-
- 1:
- name: "Level 1"
- title: "Small Feature"
- stories: "1-10 stories"
- description: "Small coherent feature, minimal documentation"
- documentation: "Tech spec"
- architecture: false
-
- 2:
- name: "Level 2"
- title: "Medium Project"
- stories: "5-15 stories"
- description: "Multiple features, focused PRD"
- documentation: "PRD + optional tech spec"
- architecture: false
-
- 3:
- name: "Level 3"
- title: "Complex System"
- stories: "12-40 stories"
- description: "Subsystems, integrations, full architecture"
- documentation: "PRD + architecture + JIT tech specs"
- architecture: true
-
- 4:
- name: "Level 4"
- title: "Enterprise Scale"
- stories: "40+ stories"
- description: "Multiple products, enterprise architecture"
- documentation: "PRD + architecture + JIT tech specs"
- architecture: true
-
-# Quick detection hints for workflow-init
-detection_hints:
- keywords:
- level_0: ["fix", "bug", "typo", "small change", "quick update", "patch"]
- level_1: ["simple", "basic", "small feature", "add", "minor"]
- level_2: ["dashboard", "several features", "admin panel", "medium"]
- level_3: ["platform", "integration", "complex", "system", "architecture"]
- level_4: ["enterprise", "multi-tenant", "multiple products", "ecosystem", "scale"]
-
- story_counts:
- level_0: [1, 1]
- level_1: [1, 10]
- level_2: [5, 15]
- level_3: [12, 40]
- level_4: [40, 999]
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-01-mode-detection.md b/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-01-mode-detection.md
deleted file mode 100644
index 4ea630b1..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-01-mode-detection.md
+++ /dev/null
@@ -1,176 +0,0 @@
----
-name: 'step-01-mode-detection'
-description: 'Determine execution mode (tech-spec vs direct), handle escalation, set state variables'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
-thisStepFile: './step-01-mode-detection.md'
-nextStepFile_modeA: './step-03-execute.md'
-nextStepFile_modeB: './step-02-context-gathering.md'
----
-
-# Step 1: Mode Detection
-
-**Goal:** Determine execution mode, capture baseline, handle escalation if needed.
-
----
-
-## STATE VARIABLES (capture now, persist throughout)
-
-These variables MUST be set in this step and available to all subsequent steps:
-
-- `{baseline_commit}` - Git HEAD at workflow start (or "NO_GIT" if not a git repo)
-- `{execution_mode}` - "tech-spec" or "direct"
-- `{tech_spec_path}` - Path to tech-spec file (if Mode A)
-
----
-
-## EXECUTION SEQUENCE
-
-### 1. Capture Baseline
-
-First, check if the project uses Git version control:
-
-**If Git repo exists** (`.git` directory present or `git rev-parse --is-inside-work-tree` succeeds):
-
-- Run `git rev-parse HEAD` and store result as `{baseline_commit}`
-
-**If NOT a Git repo:**
-
-- Set `{baseline_commit}` = "NO_GIT"
-
-### 2. Load Project Context
-
-Check if `{project_context}` exists (`**/project-context.md`). If found, load it as a foundational reference for ALL implementation decisions.
-
-### 3. Parse User Input
-
-Analyze the user's input to determine mode:
-
-**Mode A: Tech-Spec**
-
-- User provided a path to a tech-spec file (e.g., `quick-dev tech-spec-auth.md`)
-- Load the spec, extract tasks/context/AC
-- Set `{execution_mode}` = "tech-spec"
-- Set `{tech_spec_path}` = provided path
-- **NEXT:** Read fully and follow: `step-03-execute.md`
-
-**Mode B: Direct Instructions**
-
-- User provided task description directly (e.g., `refactor src/foo.ts...`)
-- Set `{execution_mode}` = "direct"
-- **NEXT:** Evaluate escalation threshold, then proceed
-
----
-
-## ESCALATION THRESHOLD (Mode B only)
-
-Evaluate user input with minimal token usage (no file loading):
-
-**Triggers escalation (if 2+ signals present):**
-
-- Multiple components mentioned (dashboard + api + database)
-- System-level language (platform, integration, architecture)
-- Uncertainty about approach ("how should I", "best way to")
-- Multi-layer scope (UI + backend + data together)
-- Extended timeframe ("this week", "over the next few days")
-
-**Reduces signal:**
-
-- Simplicity markers ("just", "quickly", "fix", "bug", "typo", "simple")
-- Single file/component focus
-- Confident, specific request
-
-Use holistic judgment, not mechanical keyword matching.
-
----
-
-## ESCALATION HANDLING
-
-### No Escalation (simple request)
-
-Display: "**Select:** [P] Plan first (tech-spec) [E] Execute directly"
-
-#### Menu Handling Logic:
-
-- IF P: Direct user to `{quick_spec_workflow}`. **EXIT Quick Dev.**
-- IF E: Ask for any additional guidance, then **NEXT:** Read fully and follow: `step-02-context-gathering.md`
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed when user makes a selection
-
----
-
-### Escalation Triggered - Level 0-2
-
-Present: "This looks like a focused feature with multiple components."
-
-Display:
-
-**[P] Plan first (tech-spec)** (recommended)
-**[W] Seems bigger than quick-dev** - Recommend the Full BMad Flow PRD Process
-**[E] Execute directly**
-
-#### Menu Handling Logic:
-
-- IF P: Direct to `{quick_spec_workflow}`. **EXIT Quick Dev.**
-- IF W: Direct user to run the PRD workflow instead. **EXIT Quick Dev.**
-- IF E: Ask for guidance, then **NEXT:** Read fully and follow: `step-02-context-gathering.md`
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed when user makes a selection
-
----
-
-### Escalation Triggered - Level 3+
-
-Present: "This sounds like platform/system work."
-
-Display:
-
-**[W] Start BMad Method** (recommended)
-**[P] Plan first (tech-spec)** (lighter planning)
-**[E] Execute directly** - feeling lucky
-
-#### Menu Handling Logic:
-
-- IF P: Direct to `{quick_spec_workflow}`. **EXIT Quick Dev.**
-- IF W: Direct user to run the PRD workflow instead. **EXIT Quick Dev.**
-- IF E: Ask for guidance, then **NEXT:** Read fully and follow: `step-02-context-gathering.md`
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed when user makes a selection
-
----
-
-## NEXT STEP DIRECTIVE
-
-**CRITICAL:** When this step completes, explicitly state which step to load:
-
-- Mode A (tech-spec): "**NEXT:** read fully and follow: `step-03-execute.md`"
-- Mode B (direct, [E] selected): "**NEXT:** Read fully and follow: `step-02-context-gathering.md`"
-- Escalation ([P] or [W]): "**EXITING Quick Dev.** Follow the directed workflow."
-
----
-
-## SUCCESS METRICS
-
-- `{baseline_commit}` captured and stored
-- `{execution_mode}` determined ("tech-spec" or "direct")
-- `{tech_spec_path}` set if Mode A
-- Project context loaded if exists
-- Escalation evaluated appropriately (Mode B)
-- Explicit NEXT directive provided
-
-## FAILURE MODES
-
-- Proceeding without capturing baseline commit
-- Not setting execution_mode variable
-- Loading step-02 when Mode A (tech-spec provided)
-- Attempting to "return" after escalation instead of EXIT
-- No explicit NEXT directive at step completion
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-02-context-gathering.md b/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-02-context-gathering.md
deleted file mode 100644
index dffb86a8..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-02-context-gathering.md
+++ /dev/null
@@ -1,120 +0,0 @@
----
-name: 'step-02-context-gathering'
-description: 'Quick context gathering for direct mode - identify files, patterns, dependencies'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
-thisStepFile: './step-02-context-gathering.md'
-nextStepFile: './step-03-execute.md'
----
-
-# Step 2: Context Gathering (Direct Mode)
-
-**Goal:** Quickly gather context for direct instructions - files, patterns, dependencies.
-
-**Note:** This step only runs for Mode B (direct instructions). If `{execution_mode}` is "tech-spec", this step was skipped.
-
----
-
-## AVAILABLE STATE
-
-From step-01:
-
-- `{baseline_commit}` - Git HEAD at workflow start
-- `{execution_mode}` - Should be "direct"
-- `{project_context}` - Loaded if exists
-
----
-
-## EXECUTION SEQUENCE
-
-### 1. Identify Files to Modify
-
-Based on user's direct instructions:
-
-- Search for relevant files using glob/grep
-- Identify the specific files that need changes
-- Note file locations and purposes
-
-### 2. Find Relevant Patterns
-
-Examine the identified files and their surroundings:
-
-- Code style and conventions used
-- Existing patterns for similar functionality
-- Import/export patterns
-- Error handling approaches
-- Test patterns (if tests exist nearby)
-
-### 3. Note Dependencies
-
-Identify:
-
-- External libraries used
-- Internal module dependencies
-- Configuration files that may need updates
-- Related files that might be affected
-
-### 4. Create Mental Plan
-
-Synthesize gathered context into:
-
-- List of tasks to complete
-- Acceptance criteria (inferred from user request)
-- Order of operations
-- Files to touch
-
----
-
-## PRESENT PLAN
-
-Display to user:
-
-```
-**Context Gathered:**
-
-**Files to modify:**
-- {list files}
-
-**Patterns identified:**
-- {key patterns}
-
-**Plan:**
-1. {task 1}
-2. {task 2}
-...
-
-**Inferred AC:**
-- {acceptance criteria}
-
-Ready to execute? (y/n/adjust)
-```
-
-- **y:** Proceed to execution
-- **n:** Gather more context or clarify
-- **adjust:** Modify the plan based on feedback
-
----
-
-## NEXT STEP DIRECTIVE
-
-**CRITICAL:** When user confirms ready, explicitly state:
-
-- **y:** "**NEXT:** Read fully and follow: `step-03-execute.md`"
-- **n/adjust:** Continue gathering context, then re-present plan
-
----
-
-## SUCCESS METRICS
-
-- Files to modify identified
-- Relevant patterns documented
-- Dependencies noted
-- Mental plan created with tasks and AC
-- User confirmed readiness to proceed
-
-## FAILURE MODES
-
-- Executing this step when Mode A (tech-spec)
-- Proceeding without identifying files to modify
-- Not presenting plan for user confirmation
-- Missing obvious patterns in existing code
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-03-execute.md b/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-03-execute.md
deleted file mode 100644
index 9d728361..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-03-execute.md
+++ /dev/null
@@ -1,113 +0,0 @@
----
-name: 'step-03-execute'
-description: 'Execute implementation - iterate through tasks, write code, run tests'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
-thisStepFile: './step-03-execute.md'
-nextStepFile: './step-04-self-check.md'
----
-
-# Step 3: Execute Implementation
-
-**Goal:** Implement all tasks, write tests, follow patterns, handle errors.
-
-**Critical:** Continue through ALL tasks without stopping for milestones.
-
----
-
-## AVAILABLE STATE
-
-From previous steps:
-
-- `{baseline_commit}` - Git HEAD at workflow start
-- `{execution_mode}` - "tech-spec" or "direct"
-- `{tech_spec_path}` - Tech-spec file (if Mode A)
-- `{project_context}` - Project patterns (if exists)
-
-From context:
-
-- Mode A: Tasks and AC extracted from tech-spec
-- Mode B: Tasks and AC from step-02 mental plan
-
----
-
-## EXECUTION LOOP
-
-For each task:
-
-### 1. Load Context
-
-- Read files relevant to this task
-- Review patterns from project-context or observed code
-- Understand dependencies
-
-### 2. Implement
-
-- Write code following existing patterns
-- Handle errors appropriately
-- Follow conventions observed in codebase
-- Add appropriate comments where non-obvious
-
-### 3. Test
-
-- Write tests if appropriate for the change
-- Run existing tests to catch regressions
-- Verify the specific AC for this task
-
-### 4. Mark Complete
-
-- Check off task: `- [x] Task N`
-- Continue to next task immediately
-
----
-
-## HALT CONDITIONS
-
-**HALT and request guidance if:**
-
-- 3 consecutive failures on same task
-- Tests fail and fix is not obvious
-- Blocking dependency discovered
-- Ambiguity that requires user decision
-
-**Do NOT halt for:**
-
-- Minor issues that can be noted and continued
-- Warnings that don't block functionality
-- Style preferences (follow existing patterns)
-
----
-
-## CONTINUOUS EXECUTION
-
-**Critical:** Do not stop between tasks for approval.
-
-- Execute all tasks in sequence
-- Only halt for blocking issues
-- Tests failing = fix before continuing
-- Track all completed work for self-check
-
----
-
-## NEXT STEP
-
-When ALL tasks are complete (or halted on blocker), read fully and follow: `step-04-self-check.md`.
-
----
-
-## SUCCESS METRICS
-
-- All tasks attempted
-- Code follows existing patterns
-- Error handling appropriate
-- Tests written where appropriate
-- Tests passing
-- No unnecessary halts
-
-## FAILURE MODES
-
-- Stopping for approval between tasks
-- Ignoring existing patterns
-- Not running tests after changes
-- Giving up after first failure
-- Not following project-context rules (if exists)
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-04-self-check.md b/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-04-self-check.md
deleted file mode 100644
index 6179ebba..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-04-self-check.md
+++ /dev/null
@@ -1,113 +0,0 @@
----
-name: 'step-04-self-check'
-description: 'Self-audit implementation against tasks, tests, AC, and patterns'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
-thisStepFile: './step-04-self-check.md'
-nextStepFile: './step-05-adversarial-review.md'
----
-
-# Step 4: Self-Check
-
-**Goal:** Audit completed work against tasks, tests, AC, and patterns before external review.
-
----
-
-## AVAILABLE STATE
-
-From previous steps:
-
-- `{baseline_commit}` - Git HEAD at workflow start
-- `{execution_mode}` - "tech-spec" or "direct"
-- `{tech_spec_path}` - Tech-spec file (if Mode A)
-- `{project_context}` - Project patterns (if exists)
-
----
-
-## SELF-CHECK AUDIT
-
-### 1. Tasks Complete
-
-Verify all tasks are marked complete:
-
-- [ ] All tasks from tech-spec or mental plan marked `[x]`
-- [ ] No tasks skipped without documented reason
-- [ ] Any blocked tasks have clear explanation
-
-### 2. Tests Passing
-
-Verify test status:
-
-- [ ] All existing tests still pass
-- [ ] New tests written for new functionality
-- [ ] No test warnings or skipped tests without reason
-
-### 3. Acceptance Criteria Satisfied
-
-For each AC:
-
-- [ ] AC is demonstrably met
-- [ ] Can explain how implementation satisfies AC
-- [ ] Edge cases considered
-
-### 4. Patterns Followed
-
-Verify code quality:
-
-- [ ] Follows existing code patterns in codebase
-- [ ] Follows project-context rules (if exists)
-- [ ] Error handling consistent with codebase
-- [ ] No obvious code smells introduced
-
----
-
-## UPDATE TECH-SPEC (Mode A only)
-
-If `{execution_mode}` is "tech-spec":
-
-1. Load `{tech_spec_path}`
-2. Mark all tasks as `[x]` complete
-3. Update status to "Implementation Complete"
-4. Save changes
-
----
-
-## IMPLEMENTATION SUMMARY
-
-Present summary to transition to review:
-
-```
-**Implementation Complete!**
-
-**Summary:** {what was implemented}
-**Files Modified:** {list of files}
-**Tests:** {test summary - passed/added/etc}
-**AC Status:** {all satisfied / issues noted}
-
-Proceeding to adversarial code review...
-```
-
----
-
-## NEXT STEP
-
-Proceed immediately to `step-05-adversarial-review.md`.
-
----
-
-## SUCCESS METRICS
-
-- All tasks verified complete
-- All tests passing
-- All AC satisfied
-- Patterns followed
-- Tech-spec updated (if Mode A)
-- Summary presented
-
-## FAILURE MODES
-
-- Claiming tasks complete when they're not
-- Not running tests before proceeding
-- Missing AC verification
-- Ignoring pattern violations
-- Not updating tech-spec status (Mode A)
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-05-adversarial-review.md b/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-05-adversarial-review.md
deleted file mode 100644
index 50c786d0..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-05-adversarial-review.md
+++ /dev/null
@@ -1,106 +0,0 @@
----
-name: 'step-05-adversarial-review'
-description: 'Construct diff and invoke adversarial review task'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
-thisStepFile: './step-05-adversarial-review.md'
-nextStepFile: './step-06-resolve-findings.md'
----
-
-# Step 5: Adversarial Code Review
-
-**Goal:** Construct diff of all changes, invoke adversarial review task, present findings.
-
----
-
-## AVAILABLE STATE
-
-From previous steps:
-
-- `{baseline_commit}` - Git HEAD at workflow start (CRITICAL for diff)
-- `{execution_mode}` - "tech-spec" or "direct"
-- `{tech_spec_path}` - Tech-spec file (if Mode A)
-
----
-
-### 1. Construct Diff
-
-Build complete diff of all changes since workflow started.
-
-### If `{baseline_commit}` is a Git commit hash:
-
-**Tracked File Changes:**
-
-```bash
-git diff {baseline_commit}
-```
-
-**New Untracked Files:**
-Only include untracked files that YOU created during this workflow (steps 2-4).
-Do not include pre-existing untracked files.
-For each new file created, include its full content as a "new file" addition.
-
-### If `{baseline_commit}` is "NO_GIT":
-
-Use best-effort diff construction:
-
-- List all files you modified during steps 2-4
-- For each file, show the changes you made (before/after if you recall, or just current state)
-- Include any new files you created with their full content
-- Note: This is less precise than Git diff but still enables meaningful review
-
-### Capture as {diff_output}
-
-Merge all changes into `{diff_output}`.
-
-**Note:** Do NOT `git add` anything - this is read-only inspection.
-
----
-
-### 2. Invoke Adversarial Review
-
-With `{diff_output}` constructed, invoke the review task. If possible, use information asymmetry: run this step, and only it, in a separate subagent or process with read access to the project, but no context except the `{diff_output}`.
-
-```xml
-Review {diff_output} using {project-root}/_bmad/core/tasks/review-adversarial-general.xml
-```
-
-**Platform fallback:** If task invocation not available, load the task file and follow its instructions inline, passing `{diff_output}` as the content.
-
-The task should: review `{diff_output}` and return a list of findings.
-
----
-
-### 3. Process Findings
-
-Capture the findings from the task output.
-**If zero findings:** HALT - this is suspicious. Re-analyze or request user guidance.
-Evaluate severity (Critical, High, Medium, Low) and validity (real, noise, undecided).
-DO NOT exclude findings based on severity or validity unless explicitly asked to do so.
-Order findings by severity.
-Number the ordered findings (F1, F2, F3, etc.).
-If TodoWrite or similar tool is available, turn each finding into a TODO, include ID, severity, validity, and description in the TODO; otherwise present findings as a table with columns: ID, Severity, Validity, Description
-
----
-
-## NEXT STEP
-
-With findings in hand, read fully and follow: `step-06-resolve-findings.md` for user to choose resolution approach.
-
----
-
-## SUCCESS METRICS
-
-- Diff constructed from baseline_commit
-- New files included in diff
-- Task invoked with diff as input
-- Findings received
-- Findings processed into TODOs or table and presented to user
-
-## FAILURE MODES
-
-- Missing baseline_commit (can't construct accurate diff)
-- Not including new untracked files in diff
-- Invoking task without providing diff input
-- Accepting zero findings without questioning
-- Presenting fewer findings than the review task returned without explicit instruction to do so
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-06-resolve-findings.md b/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-06-resolve-findings.md
deleted file mode 100644
index 4ab367c6..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-06-resolve-findings.md
+++ /dev/null
@@ -1,149 +0,0 @@
----
-name: 'step-06-resolve-findings'
-description: 'Handle review findings interactively, apply fixes, update tech-spec with final status'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
-thisStepFile: './step-06-resolve-findings.md'
----
-
-# Step 6: Resolve Findings
-
-**Goal:** Handle adversarial review findings interactively, apply fixes, finalize tech-spec.
-
----
-
-## AVAILABLE STATE
-
-From previous steps:
-
-- `{baseline_commit}` - Git HEAD at workflow start
-- `{execution_mode}` - "tech-spec" or "direct"
-- `{tech_spec_path}` - Tech-spec file (if Mode A)
-- Findings table from step-05
-
----
-
-## RESOLUTION OPTIONS
-
-Present: "How would you like to handle these findings?"
-
-Display:
-
-**[W] Walk through** - Discuss each finding individually
-**[F] Fix automatically** - Automatically fix issues classified as "real"
-**[S] Skip** - Acknowledge and proceed to commit
-
-### Menu Handling Logic:
-
-- IF W: Execute WALK THROUGH section below
-- IF F: Execute FIX AUTOMATICALLY section below
-- IF S: Execute SKIP section below
-
-### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed when user makes a selection
-
----
-
-## WALK THROUGH [W]
-
-For each finding in order:
-
-1. Present the finding with context
-2. Ask: **fix now / skip / discuss**
-3. If fix: Apply the fix immediately
-4. If skip: Note as acknowledged, continue
-5. If discuss: Provide more context, re-ask
-6. Move to next finding
-
-After all findings processed, summarize what was fixed/skipped.
-
----
-
-## FIX AUTOMATICALLY [F]
-
-1. Filter findings to only those classified as "real"
-2. Apply fixes for each real finding
-3. Report what was fixed:
-
-```
-**Auto-fix Applied:**
-- F1: {description of fix}
-- F3: {description of fix}
-...
-
-Skipped (noise/uncertain): F2, F4
-```
-
----
-
-## SKIP [S]
-
-1. Acknowledge all findings were reviewed
-2. Note that user chose to proceed without fixes
-3. Continue to completion
-
----
-
-## UPDATE TECH-SPEC (Mode A only)
-
-If `{execution_mode}` is "tech-spec":
-
-1. Load `{tech_spec_path}`
-2. Update status to "Completed"
-3. Add review notes:
- ```
- ## Review Notes
- - Adversarial review completed
- - Findings: {count} total, {fixed} fixed, {skipped} skipped
- - Resolution approach: {walk-through/auto-fix/skip}
- ```
-4. Save changes
-
----
-
-## COMPLETION OUTPUT
-
-```
-**Review complete. Ready to commit.**
-
-**Implementation Summary:**
-- {what was implemented}
-- Files modified: {count}
-- Tests: {status}
-- Review findings: {X} addressed, {Y} skipped
-
-{Explain what was implemented based on user_skill_level}
-```
-
----
-
-## WORKFLOW COMPLETE
-
-This is the final step. The Quick Dev workflow is now complete.
-
-User can:
-
-- Commit changes
-- Run additional tests
-- Start new Quick Dev session
-
----
-
-## SUCCESS METRICS
-
-- User presented with resolution options
-- Chosen approach executed correctly
-- Fixes applied cleanly (if applicable)
-- Tech-spec updated with final status (Mode A)
-- Completion summary provided
-- User understands what was implemented
-
-## FAILURE MODES
-
-- Not presenting resolution options
-- Auto-fixing "noise" or "uncertain" findings
-- Not updating tech-spec after resolution (Mode A)
-- No completion summary
-- Leaving user unclear on next steps
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md b/src/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md
deleted file mode 100644
index 3fbeb13b..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-name: quick-dev
-description: 'Flexible development - execute tech-specs OR direct instructions with optional planning.'
----
-
-# Quick Dev Workflow
-
-**Goal:** Execute implementation tasks efficiently, either from a tech-spec or direct user instructions.
-
-**Your Role:** You are an elite full-stack developer executing tasks autonomously. Follow patterns, ship code, run tests. Every response moves the project forward.
-
----
-
-## WORKFLOW ARCHITECTURE
-
-This uses **step-file architecture** for focused execution:
-
-- Each step loads fresh to combat "lost in the middle"
-- State persists via variables: `{baseline_commit}`, `{execution_mode}`, `{tech_spec_path}`
-- Sequential progression through implementation phases
-
----
-
-## INITIALIZATION
-
-### Configuration Loading
-
-Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
-
-- `user_name`, `communication_language`, `user_skill_level`
-- `output_folder`, `planning_artifacts`, `implementation_artifacts`
-- `date` as system-generated current datetime
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### Paths
-
-- `installed_path` = `{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev`
-- `project_context` = `**/project-context.md` (load if exists)
-
-### Related Workflows
-
-- `quick_spec_workflow` = `{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md`
-- `party_mode_exec` = `{project-root}/_bmad/core/workflows/party-mode/workflow.md`
-- `advanced_elicitation` = `{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml`
-
----
-
-## EXECUTION
-
-Read fully and follow: `steps/step-01-mode-detection.md` to begin the workflow.
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-01-understand.md b/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-01-understand.md
deleted file mode 100644
index 508df714..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-01-understand.md
+++ /dev/null
@@ -1,192 +0,0 @@
----
-name: 'step-01-understand'
-description: 'Analyze the requirement delta between current state and what user wants to build'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/create-tech-spec'
-nextStepFile: './step-02-investigate.md'
-skipToStepFile: './step-03-generate.md'
-templateFile: '{workflow_path}/tech-spec-template.md'
-wipFile: '{implementation_artifacts}/tech-spec-wip.md'
----
-
-# Step 1: Analyze Requirement Delta
-
-**Progress: Step 1 of 4** - Next: Deep Investigation
-
-## RULES:
-
-- MUST NOT skip steps.
-- MUST NOT optimize sequence.
-- MUST follow exact instructions.
-- MUST NOT look ahead to future steps.
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## CONTEXT:
-
-- Variables from `workflow.md` are available in memory.
-- Focus: Define the technical requirement delta and scope.
-- Investigation: Perform surface-level code scans ONLY to verify the delta. Reserve deep dives into implementation consequences for Step 2.
-- Objective: Establish a verifiable delta between current state and target state.
-
-## SEQUENCE OF INSTRUCTIONS
-
-### 0. Check for Work in Progress
-
-a) **Before anything else, check if `{wipFile}` exists:**
-
-b) **IF WIP FILE EXISTS:**
-
-1. Read the frontmatter and extract: `title`, `slug`, `stepsCompleted`
-2. Calculate progress: `lastStep = max(stepsCompleted)`
-3. Present to user:
-
-```
-Hey {user_name}! Found a tech-spec in progress:
-
-**{title}** - Step {lastStep} of 4 complete
-
-Is this what you're here to continue?
-
-[Y] Yes, pick up where I left off
-[N] No, archive it and start something new
-```
-
-4. **HALT and wait for user selection.**
-
-a) **Menu Handling:**
-
-- **[Y] Continue existing:**
- - Jump directly to the appropriate step based on `stepsCompleted`:
- - `[1]` β Load `{nextStepFile}` (Step 2)
- - `[1, 2]` β Load `{skipToStepFile}` (Step 3)
- - `[1, 2, 3]` β Load `./step-04-review.md` (Step 4)
-- **[N] Archive and start fresh:**
- - Rename `{wipFile}` to `{implementation_artifacts}/tech-spec-{slug}-archived-{date}.md`
-
-### 1. Greet and Ask for Initial Request
-
-a) **Greet the user briefly:**
-
-"Hey {user_name}! What are we building today?"
-
-b) **Get their initial description.** Don't ask detailed questions yet - just understand enough to know where to look.
-
-### 2. Quick Orient Scan
-
-a) **Before asking detailed questions, do a rapid scan to understand the landscape:**
-
-b) **Check for existing context docs:**
-
-- Check `{output_folder}` and `{planning_artifacts}`for planning documents (PRD, architecture, epics, research)
-- Check for `**/project-context.md` - if it exists, skim for patterns and conventions
-- Check for any existing stories or specs related to user's request
-
-c) **If user mentioned specific code/features, do a quick scan:**
-
-- Search for relevant files/classes/functions they mentioned
-- Skim the structure (don't deep-dive yet - that's Step 2)
-- Note: tech stack, obvious patterns, file locations
-
-d) **Build mental model:**
-
-- What's the likely landscape for this feature?
-- What's the likely scope based on what you found?
-- What questions do you NOW have, informed by the code?
-
-**This scan should take < 30 seconds. Just enough to ask smart questions.**
-
-### 3. Ask Informed Questions
-
-a) **Now ask clarifying questions - but make them INFORMED by what you found:**
-
-Instead of generic questions like "What's the scope?", ask specific ones like:
-- "`AuthService` handles validation in the controller β should the new field follow that pattern or move it to a dedicated validator?"
-- "`NavigationSidebar` component uses local state for the 'collapsed' toggle β should we stick with that or move it to the global store?"
-- "The epics doc mentions X - is this related?"
-
-**Adapt to {user_skill_level}.** Technical users want technical questions. Non-technical users need translation.
-
-b) **If no existing code is found:**
-
-- Ask about intended architecture, patterns, constraints
-- Ask what similar systems they'd like to emulate
-
-### 4. Capture Core Understanding
-
-a) **From the conversation, extract and confirm:**
-
-- **Title**: A clear, concise name for this work
-- **Slug**: URL-safe version of title (lowercase, hyphens, no spaces)
-- **Problem Statement**: What problem are we solving?
-- **Solution**: High-level approach (1-2 sentences)
-- **In Scope**: What's included
-- **Out of Scope**: What's explicitly NOT included
-
-b) **Ask the user to confirm the captured understanding before proceeding.**
-
-### 5. Initialize WIP File
-
-a) **Create the tech-spec WIP file:**
-
-1. Copy template from `{templateFile}`
-2. Write to `{wipFile}`
-3. Update frontmatter with captured values:
- ```yaml
- ---
- title: '{title}'
- slug: '{slug}'
- created: '{date}'
- status: 'in-progress'
- stepsCompleted: [1]
- tech_stack: []
- files_to_modify: []
- code_patterns: []
- test_patterns: []
- ---
- ```
-4. Fill in Overview section with Problem Statement, Solution, and Scope
-5. Fill in Context for Development section with any technical preferences or constraints gathered during informed discovery.
-6. Write the file
-
-b) **Report to user:**
-
-"Created: `{wipFile}`
-
-**Captured:**
-
-- Title: {title}
-- Problem: {problem_statement_summary}
-- Scope: {scope_summary}"
-
-### 6. Present Checkpoint Menu
-
-a) **Display menu:**
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Deep Investigation (Step 2 of 4)"
-
-b) **HALT and wait for user selection.**
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: `{advanced_elicitation}` with current tech-spec content, process enhanced insights, ask user "Accept improvements? (y/n)", if yes update WIP file then redisplay menu, if no keep original then redisplay menu
-- IF P: Read fully and follow: `{party_mode_exec}` with current tech-spec content, process collaborative insights, ask user "Accept changes? (y/n)", if yes update WIP file then redisplay menu, if no keep original then redisplay menu
-- IF C: Verify `{wipFile}` has `stepsCompleted: [1]`, then read fully and follow: `{nextStepFile}`
-- IF Any other comments or queries: respond helpfully then redisplay menu
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After A or P execution, return to this menu
-
----
-
-## REQUIRED OUTPUTS:
-
-- MUST initialize WIP file with captured metadata.
-
-## VERIFICATION CHECKLIST:
-
-- [ ] WIP check performed FIRST before any greeting.
-- [ ] `{wipFile}` created with correct frontmatter, Overview, Context for Development, and `stepsCompleted: [1]`.
-- [ ] User selected [C] to continue.
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-02-investigate.md b/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-02-investigate.md
deleted file mode 100644
index f1bd280e..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-02-investigate.md
+++ /dev/null
@@ -1,145 +0,0 @@
----
-name: 'step-02-investigate'
-description: 'Map technical constraints and anchor points within the codebase'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/create-tech-spec'
-nextStepFile: './step-03-generate.md'
-wipFile: '{implementation_artifacts}/tech-spec-wip.md'
----
-
-# Step 2: Map Technical Constraints & Anchor Points
-
-**Progress: Step 2 of 4** - Next: Generate Plan
-
-## RULES:
-
-- MUST NOT skip steps.
-- MUST NOT optimize sequence.
-- MUST follow exact instructions.
-- MUST NOT generate the full spec yet (that's Step 3).
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## CONTEXT:
-
-- Requires `{wipFile}` from Step 1 with the "Problem Statement" defined.
-- Focus: Map the problem statement to specific anchor points in the codebase.
-- Output: Exact files to touch, classes/patterns to extend, and technical constraints identified.
-- Objective: Provide the implementation-ready ground truth for the plan.
-
-## SEQUENCE OF INSTRUCTIONS
-
-### 1. Load Current State
-
-**Read `{wipFile}` and extract:**
-
-- Problem statement and scope from Overview section
-- Any context gathered in Step 1
-
-### 2. Execute Investigation Path
-
-**Universal Code Investigation:**
-
-_Isolate deep exploration in sub-agents/tasks where available. Return distilled summaries only to prevent context snowballing._
-
-a) **Build on Step 1's Quick Scan**
-
-Review what was found in Step 1's orient scan. Then ask:
-
-"Based on my quick look, I see [files/patterns found]. Are there other files or directories I should investigate deeply?"
-
-b) **Read and Analyze Code**
-
-For each file/directory provided:
-
-- Read the complete file(s)
-- Identify patterns, conventions, coding style
-- Note dependencies and imports
-- Find related test files
-
-**If NO relevant code is found (Clean Slate):**
-
-- Identify the target directory where the feature should live.
-- Scan parent directories for architectural context.
-- Identify standard project utilities or boilerplate that SHOULD be used.
-- Document this as "Confirmed Clean Slate" - establishing that no legacy constraints exist.
-
-
-c) **Document Technical Context**
-
-Capture and confirm with user:
-
-- **Tech Stack**: Languages, frameworks, libraries
-- **Code Patterns**: Architecture patterns, naming conventions, file structure
-- **Files to Modify/Create**: Specific files that will need changes or new files to be created
-- **Test Patterns**: How tests are structured, test frameworks used
-
-d) **Look for project-context.md**
-
-If `**/project-context.md` exists and wasn't loaded in Step 1:
-
-- Load it now
-- Extract patterns and conventions
-- Note any rules that must be followed
-
-### 3. Update WIP File
-
-**Update `{wipFile}` frontmatter:**
-
-```yaml
----
-# ... existing frontmatter ...
-stepsCompleted: [1, 2]
-tech_stack: ['{captured_tech_stack}']
-files_to_modify: ['{captured_files}']
-code_patterns: ['{captured_patterns}']
-test_patterns: ['{captured_test_patterns}']
----
-```
-
-**Update the Context for Development section:**
-
-Fill in:
-
-- Codebase Patterns (from investigation)
-- Files to Reference table (files reviewed)
-- Technical Decisions (any decisions made during investigation)
-
-**Report to user:**
-
-"**Context Gathered:**
-
-- Tech Stack: {tech_stack_summary}
-- Files to Modify: {files_count} files identified
-- Patterns: {patterns_summary}
-- Tests: {test_patterns_summary}"
-
-### 4. Present Checkpoint Menu
-
-Display: "**Select:** [A] Advanced Elicitation [P] Party Mode [C] Continue to Generate Spec (Step 3 of 4)"
-
-**HALT and wait for user selection.**
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: `{advanced_elicitation}` with current tech-spec content, process enhanced insights, ask user "Accept improvements? (y/n)", if yes update WIP file then redisplay menu, if no keep original then redisplay menu
-- IF P: Read fully and follow: `{party_mode_exec}` with current tech-spec content, process collaborative insights, ask user "Accept changes? (y/n)", if yes update WIP file then redisplay menu, if no keep original then redisplay menu
-- IF C: Verify frontmatter updated with `stepsCompleted: [1, 2]`, then read fully and follow: `{nextStepFile}`
-- IF Any other comments or queries: respond helpfully then redisplay menu
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to next step when user selects 'C'
-- After A or P execution, return to this menu
-
----
-
-## REQUIRED OUTPUTS:
-
-- MUST document technical context (stack, patterns, files identified).
-- MUST update `{wipFile}` with functional context.
-
-## VERIFICATION CHECKLIST:
-
-- [ ] Technical mapping performed and documented.
-- [ ] `stepsCompleted: [1, 2]` set in frontmatter.
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-03-generate.md b/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-03-generate.md
deleted file mode 100644
index 3f3caed5..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-03-generate.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-name: 'step-03-generate'
-description: 'Build the implementation plan based on the technical mapping of constraints'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/create-tech-spec'
-nextStepFile: './step-04-review.md'
-wipFile: '{implementation_artifacts}/tech-spec-wip.md'
----
-
-# Step 3: Generate Implementation Plan
-
-**Progress: Step 3 of 4** - Next: Review & Finalize
-
-## RULES:
-
-- MUST NOT skip steps.
-- MUST NOT optimize sequence.
-- MUST follow exact instructions.
-- MUST NOT implement anything - just document.
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## CONTEXT:
-
-- Requires `{wipFile}` with defined "Overview" and "Context for Development" sections.
-- Focus: Create the implementation sequence that addresses the requirement delta using the captured technical context.
-- Output: Implementation-ready tasks with specific files and instructions.
-- Target: Meet the **READY FOR DEVELOPMENT** standard defined in `workflow.md`.
-
-## SEQUENCE OF INSTRUCTIONS
-
-### 1. Load Current State
-
-**Read `{wipFile}` completely and extract:**
-
-- All frontmatter values
-- Overview section (Problem, Solution, Scope)
-- Context for Development section (Patterns, Files, Decisions)
-
-### 2. Generate Implementation Plan
-
-Generate specific implementation tasks:
-
-a) **Task Breakdown**
-
-- Each task should be a discrete, completable unit of work
-- Tasks should be ordered logically (dependencies first)
-- Include the specific files to modify in each task
-- Be explicit about what changes to make
-
-b) **Task Format**
-
-```markdown
-- [ ] Task N: Clear action description
- - File: `path/to/file.ext`
- - Action: Specific change to make
- - Notes: Any implementation details
-```
-
-### 3. Generate Acceptance Criteria
-
-**Create testable acceptance criteria:**
-
-Each AC should follow Given/When/Then format:
-
-```markdown
-- [ ] AC N: Given [precondition], when [action], then [expected result]
-```
-
-**Ensure ACs cover:**
-
-- Happy path functionality
-- Error handling
-- Edge cases (if relevant)
-- Integration points (if relevant)
-
-### 4. Complete Additional Context
-
-**Fill in remaining sections:**
-
-a) **Dependencies**
-
-- External libraries or services needed
-- Other tasks or features this depends on
-- API or data dependencies
-
-b) **Testing Strategy**
-
-- Unit tests needed
-- Integration tests needed
-- Manual testing steps
-
-c) **Notes**
-
-- High-risk items from pre-mortem analysis
-- Known limitations
-- Future considerations (out of scope but worth noting)
-
-### 5. Write Complete Spec
-
-a) **Update `{wipFile}` with all generated content:**
-
-- Ensure all template sections are filled in
-- No placeholder text remaining
-- All frontmatter values current
-- Update status to 'review' (NOT 'ready-for-dev' - that happens after user review in Step 4)
-
-b) **Update frontmatter:**
-
-```yaml
----
-# ... existing values ...
-status: 'review'
-stepsCompleted: [1, 2, 3]
----
-```
-
-c) **Read fully and follow: `{nextStepFile}` (Step 4)**
-
-## REQUIRED OUTPUTS:
-
-- Tasks MUST be specific, actionable, ordered logically, with files to modify.
-- ACs MUST be testable, using Given/When/Then format.
-- Status MUST be updated to 'review'.
-
-## VERIFICATION CHECKLIST:
-
-- [ ] `stepsCompleted: [1, 2, 3]` set in frontmatter.
-- [ ] Spec meets the **READY FOR DEVELOPMENT** standard.
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-04-review.md b/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-04-review.md
deleted file mode 100644
index 6f234612..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-spec/steps/step-04-review.md
+++ /dev/null
@@ -1,201 +0,0 @@
----
-name: 'step-04-review'
-description: 'Review and finalize the tech-spec'
-
-workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-spec'
-wipFile: '{implementation_artifacts}/tech-spec-wip.md'
----
-
-# Step 4: Review & Finalize
-
-**Progress: Step 4 of 4** - Final Step
-
-## RULES:
-
-- MUST NOT skip steps.
-- MUST NOT optimize sequence.
-- MUST follow exact instructions.
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-## CONTEXT:
-
-- Requires `{wipFile}` from Step 3.
-- MUST present COMPLETE spec content. Iterate until user is satisfied.
-- **Criteria**: The spec MUST meet the **READY FOR DEVELOPMENT** standard defined in `workflow.md`.
-
-## SEQUENCE OF INSTRUCTIONS
-
-### 1. Load and Present Complete Spec
-
-**Read `{wipFile}` completely and extract `slug` from frontmatter for later use.**
-
-**Present to user:**
-
-"Here's your complete tech-spec. Please review:"
-
-[Display the complete spec content - all sections]
-
-"**Quick Summary:**
-
-- {task_count} tasks to implement
-- {ac_count} acceptance criteria to verify
-- {files_count} files to modify"
-
-**Present review menu:**
-
-Display: "**Select:** [C] Continue [E] Edit [Q] Questions [A] Advanced Elicitation [P] Party Mode"
-
-**HALT and wait for user selection.**
-
-#### Menu Handling Logic:
-
-- IF C: Proceed to Section 3 (Finalize the Spec)
-- IF E: Proceed to Section 2 (Handle Review Feedback), then return here and redisplay menu
-- IF Q: Answer questions, then redisplay this menu
-- IF A: Read fully and follow: `{advanced_elicitation}` with current spec content, process enhanced insights, ask user "Accept improvements? (y/n)", if yes update spec then redisplay menu, if no keep original then redisplay menu
-- IF P: Read fully and follow: `{party_mode_exec}` with current spec content, process collaborative insights, ask user "Accept changes? (y/n)", if yes update spec then redisplay menu, if no keep original then redisplay menu
-- IF Any other comments or queries: respond helpfully then redisplay menu
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- ONLY proceed to finalize when user selects 'C'
-- After other menu items execution, return to this menu
-
-### 2. Handle Review Feedback
-
-a) **If user requests changes:**
-
-- Make the requested edits to `{wipFile}`
-- Re-present the affected sections
-- Ask if there are more changes
-- Loop until user is satisfied
-
-b) **If the spec does NOT meet the "Ready for Development" standard:**
-
-- Point out the missing/weak sections (e.g., non-actionable tasks, missing ACs).
-- Propose specific improvements to reach the standard.
-- Make the edits once the user agrees.
-
-c) **If user has questions:**
-
-- Answer questions about the spec
-- Clarify any confusing sections
-- Make clarifying edits if needed
-
-### 3. Finalize the Spec
-
-**When user confirms the spec is good AND it meets the "Ready for Development" standard:**
-
-a) Update `{wipFile}` frontmatter:
-
- ```yaml
- ---
- # ... existing values ...
- status: 'ready-for-dev'
- stepsCompleted: [1, 2, 3, 4]
- ---
- ```
-
-b) **Rename WIP file to final filename:**
- - Using the `slug` extracted in Section 1
- - Rename `{wipFile}` β `{implementation_artifacts}/tech-spec-{slug}.md`
- - Store this as `finalFile` for use in menus below
-
-### 4. Present Final Menu
-
-a) **Display completion message and menu:**
-
-```
-**Tech-Spec Complete!**
-
-Saved to: {finalFile}
-
----
-
-**Next Steps:**
-
-[A] Advanced Elicitation - refine further
-[R] Adversarial Review - critique of the spec (highly recommended)
-[B] Begin Development - start implementing now (not recommended)
-[D] Done - exit workflow
-[P] Party Mode - get expert feedback before dev
-
----
-
-Once you are fully satisfied with the spec (ideally after **Adversarial Review** and maybe a few rounds of **Advanced Elicitation**), it is recommended to run implementation in a FRESH CONTEXT for best results.
-
-Copy this prompt to start dev:
-
-\`\`\`
-quick-dev {finalFile}
-\`\`\`
-
-This ensures the dev agent has clean context focused solely on implementation.
-```
-
-b) **HALT and wait for user selection.**
-
-#### Menu Handling Logic:
-
-- IF A: Read fully and follow: `{advanced_elicitation}` with current spec content, process enhanced insights, ask user "Accept improvements? (y/n)", if yes update spec then redisplay menu, if no keep original then redisplay menu
-- IF B: Load and execute `{quick_dev_workflow}` with the final spec file (warn: fresh context is better)
-- IF D: Exit workflow - display final confirmation and path to spec
-- IF P: Read fully and follow: `{party_mode_exec}` with current spec content, process collaborative insights, ask user "Accept changes? (y/n)", if yes update spec then redisplay menu, if no keep original then redisplay menu
-- IF R: Execute Adversarial Review (see below)
-- IF Any other comments or queries: respond helpfully then redisplay menu
-
-#### EXECUTION RULES:
-
-- ALWAYS halt and wait for user input after presenting menu
-- After A, P, or R execution, return to this menu
-
-#### Adversarial Review [R] Process:
-
-1. **Invoke Adversarial Review Task**:
- > With `{finalFile}` constructed, invoke the review task. If possible, use information asymmetry: run this task, and only it, in a separate subagent or process with read access to the project, but no context except the `{finalFile}`.
- Review {finalFile} using {project-root}/_bmad/core/tasks/review-adversarial-general.xml
- > **Platform fallback:** If task invocation not available, load the task file and follow its instructions inline, passing `{finalFile}` as the content.
- > The task should: review `{finalFile}` and return a list of findings.
-
- 2. **Process Findings**:
- > Capture the findings from the task output.
- > **If zero findings:** HALT - this is suspicious. Re-analyze or request user guidance.
- > Evaluate severity (Critical, High, Medium, Low) and validity (real, noise, undecided).
- > DO NOT exclude findings based on severity or validity unless explicitly asked to do so.
- > Order findings by severity.
- > Number the ordered findings (F1, F2, F3, etc.).
- > If TodoWrite or similar tool is available, turn each finding into a TODO, include ID, severity, validity, and description in the TODO; otherwise present findings as a table with columns: ID, Severity, Validity, Description
-
- 3. Return here and redisplay menu.
-
-### 5. Exit Workflow
-
-**When user selects [D]:**
-
-"**All done!** Your tech-spec is ready at:
-
-`{finalFile}`
-
-When you're ready to implement, run:
-
-```
-quick-dev {finalFile}
-```
-
-Ship it!"
-
----
-
-## REQUIRED OUTPUTS:
-
-- MUST update status to 'ready-for-dev'.
-- MUST rename file to `tech-spec-{slug}.md`.
-- MUST provide clear next-step guidance and recommend fresh context for dev.
-
-## VERIFICATION CHECKLIST:
-
-- [ ] Complete spec presented for review.
-- [ ] Requested changes implemented.
-- [ ] Spec verified against **READY FOR DEVELOPMENT** standard.
-- [ ] `stepsCompleted: [1, 2, 3, 4]` set and file renamed.
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-spec/tech-spec-template.md b/src/bmm/workflows/bmad-quick-flow/quick-spec/tech-spec-template.md
deleted file mode 100644
index 8d201149..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-spec/tech-spec-template.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-title: '{title}'
-slug: '{slug}'
-created: '{date}'
-status: 'in-progress'
-stepsCompleted: []
-tech_stack: []
-files_to_modify: []
-code_patterns: []
-test_patterns: []
----
-
-# Tech-Spec: {title}
-
-**Created:** {date}
-
-## Overview
-
-### Problem Statement
-
-{problem_statement}
-
-### Solution
-
-{solution}
-
-### Scope
-
-**In Scope:**
-{in_scope}
-
-**Out of Scope:**
-{out_of_scope}
-
-## Context for Development
-
-### Codebase Patterns
-
-{codebase_patterns}
-
-### Files to Reference
-
-| File | Purpose |
-| ---- | ------- |
-
-{files_table}
-
-### Technical Decisions
-
-{technical_decisions}
-
-## Implementation Plan
-
-### Tasks
-
-{tasks}
-
-### Acceptance Criteria
-
-{acceptance_criteria}
-
-## Additional Context
-
-### Dependencies
-
-{dependencies}
-
-### Testing Strategy
-
-{testing_strategy}
-
-### Notes
-
-{notes}
diff --git a/src/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md b/src/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md
deleted file mode 100644
index bb6c877a..00000000
--- a/src/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md
+++ /dev/null
@@ -1,79 +0,0 @@
----
-name: quick-spec
-description: Conversational spec engineering - ask questions, investigate code, produce implementation-ready tech-spec.
-main_config: '{project-root}/_bmad/bmm/config.yaml'
-web_bundle: true
-
-# Checkpoint handler paths
-advanced_elicitation: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml'
-party_mode_exec: '{project-root}/_bmad/core/workflows/party-mode/workflow.md'
-quick_dev_workflow: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md'
----
-
-# Quick-Spec Workflow
-
-**Goal:** Create implementation-ready technical specifications through conversational discovery, code investigation, and structured documentation.
-
-**READY FOR DEVELOPMENT STANDARD:**
-
-A specification is considered "Ready for Development" ONLY if it meets the following:
-
-- **Actionable**: Every task has a clear file path and specific action.
-- **Logical**: Tasks are ordered by dependency (lowest level first).
-- **Testable**: All ACs follow Given/When/Then and cover happy path and edge cases.
-- **Complete**: All investigation results from Step 2 are inlined; no placeholders or "TBD".
-- **Self-Contained**: A fresh agent can implement the feature without reading the workflow history.
-
----
-
-**Your Role:** You are an elite developer and spec engineer. You ask sharp questions, investigate existing code thoroughly, and produce specs that contain ALL context a fresh dev agent needs to implement the feature. No handoffs, no missing context - just complete, actionable specs.
-
----
-
-## WORKFLOW ARCHITECTURE
-
-This uses **step-file architecture** for disciplined execution:
-
-### Core Principles
-
-- **Micro-file Design**: Each step is a self-contained instruction file that must be followed exactly
-- **Just-In-Time Loading**: Only the current step file is in memory - never load future step files until directed
-- **Sequential Enforcement**: Sequence within step files must be completed in order, no skipping or optimization
-- **State Tracking**: Document progress in output file frontmatter using `stepsCompleted` array
-- **Append-Only Building**: Build the tech-spec by updating content as directed
-
-### Step Processing Rules
-
-1. **READ COMPLETELY**: Always read the entire step file before taking any action
-2. **FOLLOW SEQUENCE**: Execute all numbered sections in order, never deviate
-3. **WAIT FOR INPUT**: If a menu is presented, halt and wait for user selection
-4. **CHECK CONTINUATION**: Only proceed to next step when user selects [C] (Continue)
-5. **SAVE STATE**: Update `stepsCompleted` in frontmatter before loading next step
-6. **LOAD NEXT**: When directed, read fully and follow the next step file
-
-### Critical Rules (NO EXCEPTIONS)
-
-- **NEVER** load multiple step files simultaneously
-- **ALWAYS** read entire step file before execution
-- **NEVER** skip steps or optimize the sequence
-- **ALWAYS** update frontmatter of output file when completing a step
-- **ALWAYS** follow the exact instructions in the step file
-- **ALWAYS** halt at menus and wait for user input
-- **NEVER** create mental todo lists from future steps
-
----
-
-## INITIALIZATION SEQUENCE
-
-### 1. Configuration Loading
-
-Load and read full config from `{main_config}` and resolve:
-
-- `project_name`, `output_folder`, `planning_artifacts`, `implementation_artifacts`, `user_name`
-- `communication_language`, `document_output_language`, `user_skill_level`
-- `date` as system-generated current datetime
-- β YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}`
-
-### 2. First Step Execution
-
-Read fully and follow: `steps/step-01-understand.md` to begin the workflow.
diff --git a/src/bmm/workflows/document-project/checklist.md b/src/bmm/workflows/document-project/checklist.md
deleted file mode 100644
index 7b67d1e5..00000000
--- a/src/bmm/workflows/document-project/checklist.md
+++ /dev/null
@@ -1,245 +0,0 @@
-# Document Project Workflow - Validation Checklist
-
-## Scan Level and Resumability
-
-- [ ] Scan level selection offered (quick/deep/exhaustive) for initial_scan and full_rescan modes
-- [ ] Deep-dive mode automatically uses exhaustive scan (no choice given)
-- [ ] Quick scan does NOT read source files (only patterns, configs, manifests)
-- [ ] Deep scan reads files in critical directories per project type
-- [ ] Exhaustive scan reads ALL source files (excluding node_modules, dist, build)
-- [ ] State file (project-scan-report.json) created at workflow start
-- [ ] State file updated after each step completion
-- [ ] State file contains all required fields per schema
-- [ ] Resumability prompt shown if state file exists and is <24 hours old
-- [ ] Old state files (>24 hours) automatically archived
-- [ ] Resume functionality loads previous state correctly
-- [ ] Workflow can jump to correct step when resuming
-
-## Write-as-you-go Architecture
-
-- [ ] Each document written to disk IMMEDIATELY after generation
-- [ ] Document validation performed right after writing (section-level)
-- [ ] State file updated after each document is written
-- [ ] Detailed findings purged from context after writing (only summaries kept)
-- [ ] Context contains only high-level summaries (1-2 sentences per section)
-- [ ] No accumulation of full project analysis in memory
-
-## Batching Strategy (Deep/Exhaustive Scans)
-
-- [ ] Batching applied for deep and exhaustive scan levels
-- [ ] Batches organized by SUBFOLDER (not arbitrary file count)
-- [ ] Large files (>5000 LOC) handled with appropriate judgment
-- [ ] Each batch: read files, extract info, write output, validate, purge context
-- [ ] Batch completion tracked in state file (batches_completed array)
-- [ ] Batch summaries kept in context (1-2 sentences max)
-
-## Project Detection and Classification
-
-- [ ] Project type correctly identified and matches actual technology stack
-- [ ] Multi-part vs single-part structure accurately detected
-- [ ] All project parts identified if multi-part (no missing client/server/etc.)
-- [ ] Documentation requirements loaded for each part type
-- [ ] Architecture registry match is appropriate for detected stack
-
-## Technology Stack Analysis
-
-- [ ] All major technologies identified (framework, language, database, etc.)
-- [ ] Versions captured where available
-- [ ] Technology decision table is complete and accurate
-- [ ] Dependencies and libraries documented
-- [ ] Build tools and package managers identified
-
-## Codebase Scanning Completeness
-
-- [ ] All critical directories scanned based on project type
-- [ ] API endpoints documented (if requires_api_scan = true)
-- [ ] Data models captured (if requires_data_models = true)
-- [ ] State management patterns identified (if requires_state_management = true)
-- [ ] UI components inventoried (if requires_ui_components = true)
-- [ ] Configuration files located and documented
-- [ ] Authentication/security patterns identified
-- [ ] Entry points correctly identified
-- [ ] Integration points mapped (for multi-part projects)
-- [ ] Test files and patterns documented
-
-## Source Tree Analysis
-
-- [ ] Complete directory tree generated with no major omissions
-- [ ] Critical folders highlighted and described
-- [ ] Entry points clearly marked
-- [ ] Integration paths noted (for multi-part)
-- [ ] Asset locations identified (if applicable)
-- [ ] File organization patterns explained
-
-## Architecture Documentation Quality
-
-- [ ] Architecture document uses appropriate template from registry
-- [ ] All template sections filled with relevant information (no placeholders)
-- [ ] Technology stack section is comprehensive
-- [ ] Architecture pattern clearly explained
-- [ ] Data architecture documented (if applicable)
-- [ ] API design documented (if applicable)
-- [ ] Component structure explained (if applicable)
-- [ ] Source tree included and annotated
-- [ ] Testing strategy documented
-- [ ] Deployment architecture captured (if config found)
-
-## Development and Operations Documentation
-
-- [ ] Prerequisites clearly listed
-- [ ] Installation steps documented
-- [ ] Environment setup instructions provided
-- [ ] Local run commands specified
-- [ ] Build process documented
-- [ ] Test commands and approach explained
-- [ ] Deployment process documented (if applicable)
-- [ ] CI/CD pipeline details captured (if found)
-- [ ] Contribution guidelines extracted (if found)
-
-## Multi-Part Project Specific (if applicable)
-
-- [ ] Each part documented separately
-- [ ] Part-specific architecture files created (architecture-{part_id}.md)
-- [ ] Part-specific component inventories created (if applicable)
-- [ ] Part-specific development guides created
-- [ ] Integration architecture document created
-- [ ] Integration points clearly defined with type and details
-- [ ] Data flow between parts explained
-- [ ] project-parts.json metadata file created
-
-## Index and Navigation
-
-- [ ] index.md created as master entry point
-- [ ] Project structure clearly summarized in index
-- [ ] Quick reference section complete and accurate
-- [ ] All generated docs linked from index
-- [ ] All existing docs linked from index (if found)
-- [ ] Getting started section provides clear next steps
-- [ ] AI-assisted development guidance included
-- [ ] Navigation structure matches project complexity (simple for single-part, detailed for multi-part)
-
-## File Completeness
-
-- [ ] index.md generated
-- [ ] project-overview.md generated
-- [ ] source-tree-analysis.md generated
-- [ ] architecture.md (or per-part) generated
-- [ ] component-inventory.md (or per-part) generated if UI components exist
-- [ ] development-guide.md (or per-part) generated
-- [ ] api-contracts.md (or per-part) generated if APIs documented
-- [ ] data-models.md (or per-part) generated if data models found
-- [ ] deployment-guide.md generated if deployment config found
-- [ ] contribution-guide.md generated if guidelines found
-- [ ] integration-architecture.md generated if multi-part
-- [ ] project-parts.json generated if multi-part
-
-## Content Quality
-
-- [ ] Technical information is accurate and specific
-- [ ] No generic placeholders or "TODO" items remain
-- [ ] Examples and code snippets are relevant to actual project
-- [ ] File paths and directory references are correct
-- [ ] Technology names and versions are accurate
-- [ ] Terminology is consistent across all documents
-- [ ] Descriptions are clear and actionable
-
-## Brownfield PRD Readiness
-
-- [ ] Documentation provides enough context for AI to understand existing system
-- [ ] Integration points are clear for planning new features
-- [ ] Reusable components are identified for leveraging in new work
-- [ ] Data models are documented for schema extension planning
-- [ ] API contracts are documented for endpoint expansion
-- [ ] Code conventions and patterns are captured for consistency
-- [ ] Architecture constraints are clear for informed decision-making
-
-## Output Validation
-
-- [ ] All files saved to correct output folder
-- [ ] File naming follows convention (no part suffix for single-part, with suffix for multi-part)
-- [ ] No broken internal links between documents
-- [ ] Markdown formatting is correct and renders properly
-- [ ] JSON files are valid (project-parts.json if applicable)
-
-## Final Validation
-
-- [ ] User confirmed project classification is accurate
-- [ ] User provided any additional context needed
-- [ ] All requested areas of focus addressed
-- [ ] Documentation is immediately usable for brownfield PRD workflow
-- [ ] No critical information gaps identified
-
-## Issues Found
-
-### Critical Issues (must fix before completion)
-
--
-
-### Minor Issues (can be addressed later)
-
--
-
-### Missing Information (to note for user)
-
--
-
-## Deep-Dive Mode Validation (if deep-dive was performed)
-
-- [ ] Deep-dive target area correctly identified and scoped
-- [ ] All files in target area read completely (no skipped files)
-- [ ] File inventory includes all exports with complete signatures
-- [ ] Dependencies mapped for all files
-- [ ] Dependents identified (who imports each file)
-- [ ] Code snippets included for key implementation details
-- [ ] Patterns and design approaches documented
-- [ ] State management strategy explained
-- [ ] Side effects documented (API calls, DB queries, etc.)
-- [ ] Error handling approaches captured
-- [ ] Testing files and coverage documented
-- [ ] TODOs and comments extracted
-- [ ] Dependency graph created showing relationships
-- [ ] Data flow traced through the scanned area
-- [ ] Integration points with rest of codebase identified
-- [ ] Related code and similar patterns found outside scanned area
-- [ ] Reuse opportunities documented
-- [ ] Implementation guidance provided
-- [ ] Modification instructions clear
-- [ ] Index.md updated with deep-dive link
-- [ ] Deep-dive documentation is immediately useful for implementation
-
----
-
-## State File Quality
-
-- [ ] State file is valid JSON (no syntax errors)
-- [ ] State file is optimized (no pretty-printing, minimal whitespace)
-- [ ] State file contains all completed steps with timestamps
-- [ ] State file outputs_generated list is accurate and complete
-- [ ] State file resume_instructions are clear and actionable
-- [ ] State file findings contain only high-level summaries (not detailed data)
-- [ ] State file can be successfully loaded for resumption
-
-## Completion Criteria
-
-All items in the following sections must be checked:
-
-- β Scan Level and Resumability
-- β Write-as-you-go Architecture
-- β Batching Strategy (if deep/exhaustive scan)
-- β Project Detection and Classification
-- β Technology Stack Analysis
-- β Architecture Documentation Quality
-- β Index and Navigation
-- β File Completeness
-- β Brownfield PRD Readiness
-- β State File Quality
-- β Deep-Dive Mode Validation (if applicable)
-
-The workflow is complete when:
-
-1. All critical checklist items are satisfied
-2. No critical issues remain
-3. User has reviewed and approved the documentation
-4. Generated docs are ready for use in brownfield PRD workflow
-5. Deep-dive docs (if any) are comprehensive and implementation-ready
-6. State file is valid and can enable resumption if interrupted
diff --git a/src/bmm/workflows/document-project/documentation-requirements.csv b/src/bmm/workflows/document-project/documentation-requirements.csv
deleted file mode 100644
index 9f773ab0..00000000
--- a/src/bmm/workflows/document-project/documentation-requirements.csv
+++ /dev/null
@@ -1,12 +0,0 @@
-project_type_id,requires_api_scan,requires_data_models,requires_state_management,requires_ui_components,requires_deployment_config,key_file_patterns,critical_directories,integration_scan_patterns,test_file_patterns,config_patterns,auth_security_patterns,schema_migration_patterns,entry_point_patterns,shared_code_patterns,monorepo_workspace_patterns,async_event_patterns,ci_cd_patterns,asset_patterns,hardware_interface_patterns,protocol_schema_patterns,localization_patterns,requires_hardware_docs,requires_asset_inventory
-web,true,true,true,true,true,package.json;tsconfig.json;*.config.js;*.config.ts;vite.config.*;webpack.config.*;next.config.*;nuxt.config.*,src/;app/;pages/;components/;api/;lib/;styles/;public/;static/,*client.ts;*service.ts;*api.ts;fetch*.ts;axios*.ts;*http*.ts,*.test.ts;*.spec.ts;*.test.tsx;*.spec.tsx;**/__tests__/**;**/*.test.*;**/*.spec.*,.env*;config/*;*.config.*;.config/;settings/,*auth*.ts;*session*.ts;middleware/auth*;*.guard.ts;*authenticat*;*permission*;guards/,migrations/**;prisma/**;*.prisma;alembic/**;knex/**;*migration*.sql;*migration*.ts,main.ts;index.ts;app.ts;server.ts;_app.tsx;_app.ts;layout.tsx,shared/**;common/**;utils/**;lib/**;helpers/**;@*/**;packages/**,pnpm-workspace.yaml;lerna.json;nx.json;turbo.json;workspace.json;rush.json,*event*.ts;*queue*.ts;*subscriber*.ts;*consumer*.ts;*producer*.ts;*worker*.ts;jobs/**,.github/workflows/**;.gitlab-ci.yml;Jenkinsfile;.circleci/**;azure-pipelines.yml;bitbucket-pipelines.yml,.drone.yml,public/**;static/**;assets/**;images/**;media/**,N/A,*.proto;*.graphql;graphql/**;schema.graphql;*.avro;openapi.*;swagger.*,i18n/**;locales/**;lang/**;translations/**;messages/**;*.po;*.pot,false,false
-mobile,true,true,true,true,true,package.json;pubspec.yaml;Podfile;build.gradle;app.json;capacitor.config.*;ionic.config.json,src/;app/;screens/;components/;services/;models/;assets/;ios/;android/,*client.ts;*service.ts;*api.ts;fetch*.ts;axios*.ts;*http*.ts,*.test.ts;*.test.tsx;*_test.dart;*.test.dart;**/__tests__/**,.env*;config/*;app.json;capacitor.config.*;google-services.json;GoogleService-Info.plist,*auth*.ts;*session*.ts;*authenticat*;*permission*;*biometric*;secure-store*,migrations/**;realm/**;*.realm;watermelondb/**;sqlite/**,main.ts;index.ts;App.tsx;App.ts;main.dart,shared/**;common/**;utils/**;lib/**;components/shared/**;@*/**,pnpm-workspace.yaml;lerna.json;nx.json;turbo.json,*event*.ts;*notification*.ts;*push*.ts;background-fetch*,fastlane/**;.github/workflows/**;.gitlab-ci.yml;bitbucket-pipelines.yml;appcenter-*,assets/**;Resources/**;res/**;*.xcassets;drawable*/;mipmap*/;images/**,N/A,*.proto;graphql/**;*.graphql,i18n/**;locales/**;translations/**;*.strings;*.xml,false,true
-backend,true,true,false,false,true,package.json;requirements.txt;go.mod;Gemfile;pom.xml;build.gradle;Cargo.toml;*.csproj,src/;api/;services/;models/;routes/;controllers/;middleware/;handlers/;repositories/;domain/,*client.ts;*repository.ts;*service.ts;*connector*.ts;*adapter*.ts,*.test.ts;*.spec.ts;*_test.go;test_*.py;*Test.java;*_test.rs,.env*;config/*;*.config.*;application*.yml;application*.yaml;appsettings*.json;settings.py,*auth*.ts;*session*.ts;*authenticat*;*authorization*;middleware/auth*;guards/;*jwt*;*oauth*,migrations/**;alembic/**;flyway/**;liquibase/**;prisma/**;*.prisma;*migration*.sql;*migration*.ts;db/migrate,main.ts;index.ts;server.ts;app.ts;main.go;main.py;Program.cs;__init__.py,shared/**;common/**;utils/**;lib/**;core/**;@*/**;pkg/**,pnpm-workspace.yaml;lerna.json;nx.json;go.work,*event*.ts;*queue*.ts;*subscriber*.ts;*consumer*.ts;*producer*.ts;*worker*.ts;*handler*.ts;jobs/**;workers/**,.github/workflows/**;.gitlab-ci.yml;Jenkinsfile;.circleci/**;azure-pipelines.yml;.drone.yml,N/A,N/A,*.proto;*.graphql;graphql/**;*.avro;*.thrift;openapi.*;swagger.*;schema/**,N/A,false,false
-cli,false,false,false,false,false,package.json;go.mod;Cargo.toml;setup.py;pyproject.toml;*.gemspec,src/;cmd/;cli/;bin/;lib/;commands/,N/A,*.test.ts;*_test.go;test_*.py;*.spec.ts;*_spec.rb,.env*;config/*;*.config.*;.*.rc;.*rc,N/A,N/A,main.ts;index.ts;cli.ts;main.go;main.py;__main__.py;bin/*,shared/**;common/**;utils/**;lib/**;helpers/**,N/A,N/A,.github/workflows/**;.gitlab-ci.yml;goreleaser.yml,N/A,N/A,N/A,N/A,false,false
-library,false,false,false,false,false,package.json;setup.py;Cargo.toml;go.mod;*.gemspec;*.csproj;pom.xml,src/;lib/;dist/;pkg/;build/;target/,N/A,*.test.ts;*_test.go;test_*.py;*.spec.ts;*Test.java;*_test.rs,.*.rc;tsconfig.json;rollup.config.*;vite.config.*;webpack.config.*,N/A,N/A,index.ts;index.js;lib.rs;main.go;__init__.py,src/**;lib/**;core/**,N/A,N/A,.github/workflows/**;.gitlab-ci.yml;.circleci/**,N/A,N/A,N/A,N/A,false,false
-desktop,false,false,true,true,true,package.json;Cargo.toml;*.csproj;CMakeLists.txt;tauri.conf.json;electron-builder.yml;wails.json,src/;app/;components/;main/;renderer/;resources/;assets/;build/,*service.ts;ipc*.ts;*bridge*.ts;*native*.ts;invoke*,*.test.ts;*.spec.ts;*_test.rs;*.spec.tsx,.env*;config/*;*.config.*;app.config.*;forge.config.*;builder.config.*,*auth*.ts;*session*.ts;keychain*;secure-storage*,N/A,main.ts;index.ts;main.js;src-tauri/main.rs;electron.ts,shared/**;common/**;utils/**;lib/**;components/shared/**,N/A,*event*.ts;*ipc*.ts;*message*.ts,.github/workflows/**;.gitlab-ci.yml;.circleci/**,resources/**;assets/**;icons/**;static/**;build/resources,N/A,N/A,i18n/**;locales/**;translations/**;lang/**,false,true
-game,false,false,true,false,false,*.unity;*.godot;*.uproject;package.json;project.godot,Assets/;Scenes/;Scripts/;Prefabs/;Resources/;Content/;Source/;src/;scenes/;scripts/,N/A,*Test.cs;*_test.gd;*Test.cpp;*.test.ts,.env*;config/*;*.ini;settings/;GameSettings/,N/A,N/A,main.gd;Main.cs;GameManager.cs;main.cpp;index.ts,shared/**;common/**;utils/**;Core/**;Framework/**,N/A,N/A,.github/workflows/**;.gitlab-ci.yml,Assets/**;Scenes/**;Prefabs/**;Materials/**;Textures/**;Audio/**;Models/**;*.fbx;*.blend;*.shader;*.hlsl;*.glsl;Shaders/**;VFX/**,N/A,N/A,Localization/**;Languages/**;i18n/**,false,true
-data,false,true,false,false,true,requirements.txt;pyproject.toml;dbt_project.yml;airflow.cfg;setup.py;Pipfile,dags/;pipelines/;models/;transformations/;notebooks/;sql/;etl/;jobs/,N/A,test_*.py;*_test.py;tests/**,.env*;config/*;profiles.yml;dbt_project.yml;airflow.cfg,N/A,migrations/**;dbt/models/**;*.sql;schemas/**,main.py;__init__.py;pipeline.py;dag.py,shared/**;common/**;utils/**;lib/**;helpers/**,N/A,*event*.py;*consumer*.py;*producer*.py;*worker*.py;jobs/**;tasks/**,.github/workflows/**;.gitlab-ci.yml;airflow/dags/**,N/A,N/A,*.proto;*.avro;schemas/**;*.parquet,N/A,false,false
-extension,true,false,true,true,false,manifest.json;package.json;wxt.config.ts,src/;popup/;content/;background/;assets/;components/,*message.ts;*runtime.ts;*storage.ts;*tabs.ts,*.test.ts;*.spec.ts;*.test.tsx,.env*;wxt.config.*;webpack.config.*;vite.config.*,*auth*.ts;*session*.ts;*permission*,N/A,index.ts;popup.ts;background.ts;content.ts,shared/**;common/**;utils/**;lib/**,N/A,*message*.ts;*event*.ts;chrome.runtime*;browser.runtime*,.github/workflows/**,assets/**;icons/**;images/**;static/**,N/A,N/A,_locales/**;locales/**;i18n/**,false,false
-infra,false,false,false,false,true,*.tf;*.tfvars;pulumi.yaml;cdk.json;*.yml;*.yaml;Dockerfile;docker-compose*.yml,terraform/;modules/;k8s/;charts/;playbooks/;roles/;policies/;stacks/,N/A,*_test.go;test_*.py;*_test.tf;*_spec.rb,.env*;*.tfvars;config/*;vars/;group_vars/;host_vars/,N/A,N/A,main.tf;index.ts;__main__.py;playbook.yml,modules/**;shared/**;common/**;lib/**,N/A,N/A,.github/workflows/**;.gitlab-ci.yml;.circleci/**,N/A,N/A,N/A,N/A,false,false
-embedded,false,false,false,false,false,platformio.ini;CMakeLists.txt;*.ino;Makefile;*.ioc;mbed-os.lib,src/;lib/;include/;firmware/;drivers/;hal/;bsp/;components/,N/A,test_*.c;*_test.cpp;*_test.c;tests/**,.env*;config/*;sdkconfig;*.json;settings/,N/A,N/A,main.c;main.cpp;main.ino;app_main.c,lib/**;shared/**;common/**;drivers/**,N/A,N/A,.github/workflows/**;.gitlab-ci.yml,N/A,*.h;*.hpp;drivers/**;hal/**;bsp/**;pinout.*;peripheral*;gpio*;*.fzz;schematics/**,*.proto;mqtt*;coap*;modbus*,N/A,true,false
diff --git a/src/bmm/workflows/document-project/instructions.md b/src/bmm/workflows/document-project/instructions.md
deleted file mode 100644
index 2f567fa3..00000000
--- a/src/bmm/workflows/document-project/instructions.md
+++ /dev/null
@@ -1,221 +0,0 @@
-# Document Project Workflow Router
-
-The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
-You MUST have already loaded and processed: {project-root}/_bmad/bmm/workflows/document-project/workflow.yaml
-Communicate all responses in {communication_language}
-
-
-
-This router determines workflow mode and delegates to specialized sub-workflows
-
-
-
-
- mode: data
- data_request: project_config
-
-
-
-
-
- Set standalone_mode = true
- Set status_file_found = false
-
-
-
- Store {{status_file_path}} for later updates
- Set status_file_found = true
-
-
-
-
- Continue anyway to document planning artifacts? (y/n)
-
- Exit workflow
-
-
-
-
-
- mode: validate
- calling_workflow: document-project
-
-
-
-
-
- Continue with documentation? (y/n)
-
-
- Exit workflow
-
-
-
-
-
-
-
-SMART LOADING STRATEGY: Check state file FIRST before loading any CSV files
-
-Check for existing state file at: {output_folder}/project-scan-report.json
-
-
- Read state file and extract: timestamps, mode, scan_level, current_step, completed_steps, project_classification
- Extract cached project_type_id(s) from state file if present
- Calculate age of state file (current time - last_updated)
-
-I found an in-progress workflow state from {{last_updated}}.
-
-**Current Progress:**
-
-- Mode: {{mode}}
-- Scan Level: {{scan_level}}
-- Completed Steps: {{completed_steps_count}}/{{total_steps}}
-- Last Step: {{current_step}}
-- Project Type(s): {{cached_project_types}}
-
-Would you like to:
-
-1. **Resume from where we left off** - Continue from step {{current_step}}
-2. **Start fresh** - Archive old state and begin new scan
-3. **Cancel** - Exit without changes
-
-Your choice [1/2/3]:
-
-
-
- Set resume_mode = true
- Set workflow_mode = {{mode}}
- Load findings summaries from state file
- Load cached project_type_id(s) from state file
-
- CONDITIONAL CSV LOADING FOR RESUME:
- For each cached project_type_id, load ONLY the corresponding row from: {documentation_requirements_csv}
- Skip loading project-types.csv and architecture_registry.csv (not needed on resume)
- Store loaded doc requirements for use in remaining steps
-
- Display: "Resuming {{workflow_mode}} from {{current_step}} with cached project type(s): {{cached_project_types}}"
-
-
- Read fully and follow: {installed_path}/workflows/deep-dive-instructions.md with resume context
-
-
-
- Read fully and follow: {installed_path}/workflows/full-scan-instructions.md with resume context
-
-
-
-
-
- Create archive directory: {output_folder}/.archive/
- Move old state file to: {output_folder}/.archive/project-scan-report-{{timestamp}}.json
- Set resume_mode = false
- Continue to Step 0.5
-
-
-
- Display: "Exiting workflow without changes."
- Exit workflow
-
-
-
- Display: "Found old state file (>24 hours). Starting fresh scan."
- Archive old state file to: {output_folder}/.archive/project-scan-report-{{timestamp}}.json
- Set resume_mode = false
- Continue to Step 0.5
-
-
-
-
-
-Check if {output_folder}/index.md exists
-
-
- Read existing index.md to extract metadata (date, project structure, parts count)
- Store as {{existing_doc_date}}, {{existing_structure}}
-
-I found existing documentation generated on {{existing_doc_date}}.
-
-What would you like to do?
-
-1. **Re-scan entire project** - Update all documentation with latest changes
-2. **Deep-dive into specific area** - Generate detailed documentation for a particular feature/module/folder
-3. **Cancel** - Keep existing documentation as-is
-
-Your choice [1/2/3]:
-
-
-
- Set workflow_mode = "full_rescan"
- Display: "Starting full project rescan..."
- Read fully and follow: {installed_path}/workflows/full-scan-instructions.md
- After sub-workflow completes, continue to Step 4
-
-
-
- Set workflow_mode = "deep_dive"
- Set scan_level = "exhaustive"
- Display: "Starting deep-dive documentation mode..."
- Read fully and follow: {installed_path}/workflows/deep-dive-instructions.md
- After sub-workflow completes, continue to Step 4
-
-
-
- Display message: "Keeping existing documentation. Exiting workflow."
- Exit workflow
-
-
-
-
- Set workflow_mode = "initial_scan"
- Display: "No existing documentation found. Starting initial project scan..."
- Read fully and follow: {installed_path}/workflows/full-scan-instructions.md
- After sub-workflow completes, continue to Step 4
-
-
-
-
-
-
-
-
- mode: update
- action: complete_workflow
- workflow_name: document-project
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/src/bmm/workflows/document-project/templates/deep-dive-template.md b/src/bmm/workflows/document-project/templates/deep-dive-template.md
deleted file mode 100644
index c1285cdc..00000000
--- a/src/bmm/workflows/document-project/templates/deep-dive-template.md
+++ /dev/null
@@ -1,345 +0,0 @@
-# {{target_name}} - Deep Dive Documentation
-
-**Generated:** {{date}}
-**Scope:** {{target_path}}
-**Files Analyzed:** {{file_count}}
-**Lines of Code:** {{total_loc}}
-**Workflow Mode:** Exhaustive Deep-Dive
-
-## Overview
-
-{{target_description}}
-
-**Purpose:** {{target_purpose}}
-**Key Responsibilities:** {{responsibilities}}
-**Integration Points:** {{integration_summary}}
-
-## Complete File Inventory
-
-{{#each files_in_inventory}}
-
-### {{file_path}}
-
-**Purpose:** {{purpose}}
-**Lines of Code:** {{loc}}
-**File Type:** {{file_type}}
-
-**What Future Contributors Must Know:** {{contributor_note}}
-
-**Exports:**
-{{#each exports}}
-
-- `{{signature}}` - {{description}}
- {{/each}}
-
-**Dependencies:**
-{{#each imports}}
-
-- `{{import_path}}` - {{reason}}
- {{/each}}
-
-**Used By:**
-{{#each dependents}}
-
-- `{{dependent_path}}`
- {{/each}}
-
-**Key Implementation Details:**
-
-```{{language}}
-{{key_code_snippet}}
-```
-
-{{implementation_notes}}
-
-**Patterns Used:**
-{{#each patterns}}
-
-- {{pattern_name}}: {{pattern_description}}
- {{/each}}
-
-**State Management:** {{state_approach}}
-
-**Side Effects:**
-{{#each side_effects}}
-
-- {{effect_type}}: {{effect_description}}
- {{/each}}
-
-**Error Handling:** {{error_handling_approach}}
-
-**Testing:**
-
-- Test File: {{test_file_path}}
-- Coverage: {{coverage_percentage}}%
-- Test Approach: {{test_approach}}
-
-**Comments/TODOs:**
-{{#each todos}}
-
-- Line {{line_number}}: {{todo_text}}
- {{/each}}
-
----
-
-{{/each}}
-
-## Contributor Checklist
-
-- **Risks & Gotchas:** {{risks_notes}}
-- **Pre-change Verification Steps:** {{verification_steps}}
-- **Suggested Tests Before PR:** {{suggested_tests}}
-
-## Architecture & Design Patterns
-
-### Code Organization
-
-{{organization_approach}}
-
-### Design Patterns
-
-{{#each design_patterns}}
-
-- **{{pattern_name}}**: {{usage_description}}
- {{/each}}
-
-### State Management Strategy
-
-{{state_management_details}}
-
-### Error Handling Philosophy
-
-{{error_handling_philosophy}}
-
-### Testing Strategy
-
-{{testing_strategy}}
-
-## Data Flow
-
-{{data_flow_diagram}}
-
-### Data Entry Points
-
-{{#each entry_points}}
-
-- **{{entry_name}}**: {{entry_description}}
- {{/each}}
-
-### Data Transformations
-
-{{#each transformations}}
-
-- **{{transformation_name}}**: {{transformation_description}}
- {{/each}}
-
-### Data Exit Points
-
-{{#each exit_points}}
-
-- **{{exit_name}}**: {{exit_description}}
- {{/each}}
-
-## Integration Points
-
-### APIs Consumed
-
-{{#each apis_consumed}}
-
-- **{{api_endpoint}}**: {{api_description}}
- - Method: {{method}}
- - Authentication: {{auth_requirement}}
- - Response: {{response_schema}}
- {{/each}}
-
-### APIs Exposed
-
-{{#each apis_exposed}}
-
-- **{{api_endpoint}}**: {{api_description}}
- - Method: {{method}}
- - Request: {{request_schema}}
- - Response: {{response_schema}}
- {{/each}}
-
-### Shared State
-
-{{#each shared_state}}
-
-- **{{state_name}}**: {{state_description}}
- - Type: {{state_type}}
- - Accessed By: {{accessors}}
- {{/each}}
-
-### Events
-
-{{#each events}}
-
-- **{{event_name}}**: {{event_description}}
- - Type: {{publish_or_subscribe}}
- - Payload: {{payload_schema}}
- {{/each}}
-
-### Database Access
-
-{{#each database_operations}}
-
-- **{{table_name}}**: {{operation_type}}
- - Queries: {{query_patterns}}
- - Indexes Used: {{indexes}}
- {{/each}}
-
-## Dependency Graph
-
-{{dependency_graph_visualization}}
-
-### Entry Points (Not Imported by Others in Scope)
-
-{{#each entry_point_files}}
-
-- {{file_path}}
- {{/each}}
-
-### Leaf Nodes (Don't Import Others in Scope)
-
-{{#each leaf_files}}
-
-- {{file_path}}
- {{/each}}
-
-### Circular Dependencies
-
-{{#if has_circular_dependencies}}
-β οΈ Circular dependencies detected:
-{{#each circular_deps}}
-
-- {{cycle_description}}
- {{/each}}
- {{else}}
- β No circular dependencies detected
- {{/if}}
-
-## Testing Analysis
-
-### Test Coverage Summary
-
-- **Statements:** {{statements_coverage}}%
-- **Branches:** {{branches_coverage}}%
-- **Functions:** {{functions_coverage}}%
-- **Lines:** {{lines_coverage}}%
-
-### Test Files
-
-{{#each test_files}}
-
-- **{{test_file_path}}**
- - Tests: {{test_count}}
- - Approach: {{test_approach}}
- - Mocking Strategy: {{mocking_strategy}}
- {{/each}}
-
-### Test Utilities Available
-
-{{#each test_utilities}}
-
-- `{{utility_name}}`: {{utility_description}}
- {{/each}}
-
-### Testing Gaps
-
-{{#each testing_gaps}}
-
-- {{gap_description}}
- {{/each}}
-
-## Related Code & Reuse Opportunities
-
-### Similar Features Elsewhere
-
-{{#each similar_features}}
-
-- **{{feature_name}}** (`{{feature_path}}`)
- - Similarity: {{similarity_description}}
- - Can Reference For: {{reference_use_case}}
- {{/each}}
-
-### Reusable Utilities Available
-
-{{#each reusable_utilities}}
-
-- **{{utility_name}}** (`{{utility_path}}`)
- - Purpose: {{utility_purpose}}
- - How to Use: {{usage_example}}
- {{/each}}
-
-### Patterns to Follow
-
-{{#each patterns_to_follow}}
-
-- **{{pattern_name}}**: Reference `{{reference_file}}` for implementation
- {{/each}}
-
-## Implementation Notes
-
-### Code Quality Observations
-
-{{#each quality_observations}}
-
-- {{observation}}
- {{/each}}
-
-### TODOs and Future Work
-
-{{#each all_todos}}
-
-- **{{file_path}}:{{line_number}}**: {{todo_text}}
- {{/each}}
-
-### Known Issues
-
-{{#each known_issues}}
-
-- {{issue_description}}
- {{/each}}
-
-### Optimization Opportunities
-
-{{#each optimizations}}
-
-- {{optimization_suggestion}}
- {{/each}}
-
-### Technical Debt
-
-{{#each tech_debt_items}}
-
-- {{debt_description}}
- {{/each}}
-
-## Modification Guidance
-
-### To Add New Functionality
-
-{{modification_guidance_add}}
-
-### To Modify Existing Functionality
-
-{{modification_guidance_modify}}
-
-### To Remove/Deprecate
-
-{{modification_guidance_remove}}
-
-### Testing Checklist for Changes
-
-{{#each testing_checklist_items}}
-
-- [ ] {{checklist_item}}
- {{/each}}
-
----
-
-_Generated by `document-project` workflow (deep-dive mode)_
-_Base Documentation: docs/index.md_
-_Scan Date: {{date}}_
-_Analysis Mode: Exhaustive_
diff --git a/src/bmm/workflows/document-project/templates/index-template.md b/src/bmm/workflows/document-project/templates/index-template.md
deleted file mode 100644
index 0340a35a..00000000
--- a/src/bmm/workflows/document-project/templates/index-template.md
+++ /dev/null
@@ -1,169 +0,0 @@
-# {{project_name}} Documentation Index
-
-**Type:** {{repository_type}}{{#if is_multi_part}} with {{parts_count}} parts{{/if}}
-**Primary Language:** {{primary_language}}
-**Architecture:** {{architecture_type}}
-**Last Updated:** {{date}}
-
-## Project Overview
-
-{{project_description}}
-
-{{#if is_multi_part}}
-
-## Project Structure
-
-This project consists of {{parts_count}} parts:
-
-{{#each project_parts}}
-
-### {{part_name}} ({{part_id}})
-
-- **Type:** {{project_type}}
-- **Location:** `{{root_path}}`
-- **Tech Stack:** {{tech_stack_summary}}
-- **Entry Point:** {{entry_point}}
- {{/each}}
-
-## Cross-Part Integration
-
-{{integration_summary}}
-
-{{/if}}
-
-## Quick Reference
-
-{{#if is_single_part}}
-
-- **Tech Stack:** {{tech_stack_summary}}
-- **Entry Point:** {{entry_point}}
-- **Architecture Pattern:** {{architecture_pattern}}
-- **Database:** {{database}}
-- **Deployment:** {{deployment_platform}}
- {{else}}
- {{#each project_parts}}
-
-### {{part_name}} Quick Ref
-
-- **Stack:** {{tech_stack_summary}}
-- **Entry:** {{entry_point}}
-- **Pattern:** {{architecture_pattern}}
- {{/each}}
- {{/if}}
-
-## Generated Documentation
-
-### Core Documentation
-
-- [Project Overview](./project-overview.md) - Executive summary and high-level architecture
-- [Source Tree Analysis](./source-tree-analysis.md) - Annotated directory structure
-
-{{#if is_single_part}}
-
-- [Architecture](./architecture.md) - Detailed technical architecture
-- [Component Inventory](./component-inventory.md) - Catalog of major components{{#if has_ui_components}} and UI elements{{/if}}
-- [Development Guide](./development-guide.md) - Local setup and development workflow
- {{#if has_api_docs}}- [API Contracts](./api-contracts.md) - API endpoints and schemas{{/if}}
- {{#if has_data_models}}- [Data Models](./data-models.md) - Database schema and models{{/if}}
- {{else}}
-
-### Part-Specific Documentation
-
-{{#each project_parts}}
-
-#### {{part_name}} ({{part_id}})
-
-- [Architecture](./architecture-{{part_id}}.md) - Technical architecture for {{part_name}}
- {{#if has_components}}- [Components](./component-inventory-{{part_id}}.md) - Component catalog{{/if}}
-- [Development Guide](./development-guide-{{part_id}}.md) - Setup and dev workflow
- {{#if has_api}}- [API Contracts](./api-contracts-{{part_id}}.md) - API documentation{{/if}}
- {{#if has_data}}- [Data Models](./data-models-{{part_id}}.md) - Data architecture{{/if}}
- {{/each}}
-
-### Integration
-
-- [Integration Architecture](./integration-architecture.md) - How parts communicate
-- [Project Parts Metadata](./project-parts.json) - Machine-readable structure
- {{/if}}
-
-### Optional Documentation
-
-{{#if has_deployment_guide}}- [Deployment Guide](./deployment-guide.md) - Deployment process and infrastructure{{/if}}
-{{#if has_contribution_guide}}- [Contribution Guide](./contribution-guide.md) - Contributing guidelines and standards{{/if}}
-
-## Existing Documentation
-
-{{#if has_existing_docs}}
-{{#each existing_docs}}
-
-- [{{title}}]({{path}}) - {{description}}
- {{/each}}
- {{else}}
- No existing documentation files were found in the project.
- {{/if}}
-
-## Getting Started
-
-{{#if is_single_part}}
-
-### Prerequisites
-
-{{prerequisites}}
-
-### Setup
-
-```bash
-{{setup_commands}}
-```
-
-### Run Locally
-
-```bash
-{{run_commands}}
-```
-
-### Run Tests
-
-```bash
-{{test_commands}}
-```
-
-{{else}}
-{{#each project_parts}}
-
-### {{part_name}} Setup
-
-**Prerequisites:** {{prerequisites}}
-
-**Install & Run:**
-
-```bash
-cd {{root_path}}
-{{setup_command}}
-{{run_command}}
-```
-
-{{/each}}
-{{/if}}
-
-## For AI-Assisted Development
-
-This documentation was generated specifically to enable AI agents to understand and extend this codebase.
-
-### When Planning New Features:
-
-**UI-only features:**
-{{#if is_multi_part}}β Reference: `architecture-{{ui_part_id}}.md`, `component-inventory-{{ui_part_id}}.md`{{else}}β Reference: `architecture.md`, `component-inventory.md`{{/if}}
-
-**API/Backend features:**
-{{#if is_multi_part}}β Reference: `architecture-{{api_part_id}}.md`, `api-contracts-{{api_part_id}}.md`, `data-models-{{api_part_id}}.md`{{else}}β Reference: `architecture.md`{{#if has_api_docs}}, `api-contracts.md`{{/if}}{{#if has_data_models}}, `data-models.md`{{/if}}{{/if}}
-
-**Full-stack features:**
-β Reference: All architecture docs{{#if is_multi_part}} + `integration-architecture.md`{{/if}}
-
-**Deployment changes:**
-{{#if has_deployment_guide}}β Reference: `deployment-guide.md`{{else}}β Review CI/CD configs in project{{/if}}
-
----
-
-_Documentation generated by BMAD Method `document-project` workflow_
diff --git a/src/bmm/workflows/document-project/templates/project-overview-template.md b/src/bmm/workflows/document-project/templates/project-overview-template.md
deleted file mode 100644
index 3bbb0d24..00000000
--- a/src/bmm/workflows/document-project/templates/project-overview-template.md
+++ /dev/null
@@ -1,103 +0,0 @@
-# {{project_name}} - Project Overview
-
-**Date:** {{date}}
-**Type:** {{project_type}}
-**Architecture:** {{architecture_type}}
-
-## Executive Summary
-
-{{executive_summary}}
-
-## Project Classification
-
-- **Repository Type:** {{repository_type}}
-- **Project Type(s):** {{project_types_list}}
-- **Primary Language(s):** {{primary_languages}}
-- **Architecture Pattern:** {{architecture_pattern}}
-
-{{#if is_multi_part}}
-
-## Multi-Part Structure
-
-This project consists of {{parts_count}} distinct parts:
-
-{{#each project_parts}}
-
-### {{part_name}}
-
-- **Type:** {{project_type}}
-- **Location:** `{{root_path}}`
-- **Purpose:** {{purpose}}
-- **Tech Stack:** {{tech_stack}}
- {{/each}}
-
-### How Parts Integrate
-
-{{integration_description}}
-{{/if}}
-
-## Technology Stack Summary
-
-{{#if is_single_part}}
-{{technology_table}}
-{{else}}
-{{#each project_parts}}
-
-### {{part_name}} Stack
-
-{{technology_table}}
-{{/each}}
-{{/if}}
-
-## Key Features
-
-{{key_features}}
-
-## Architecture Highlights
-
-{{architecture_highlights}}
-
-## Development Overview
-
-### Prerequisites
-
-{{prerequisites}}
-
-### Getting Started
-
-{{getting_started_summary}}
-
-### Key Commands
-
-{{#if is_single_part}}
-
-- **Install:** `{{install_command}}`
-- **Dev:** `{{dev_command}}`
-- **Build:** `{{build_command}}`
-- **Test:** `{{test_command}}`
- {{else}}
- {{#each project_parts}}
-
-#### {{part_name}}
-
-- **Install:** `{{install_command}}`
-- **Dev:** `{{dev_command}}`
- {{/each}}
- {{/if}}
-
-## Repository Structure
-
-{{repository_structure_summary}}
-
-## Documentation Map
-
-For detailed information, see:
-
-- [index.md](./index.md) - Master documentation index
-- [architecture.md](./architecture{{#if is_multi_part}}-{part_id}{{/if}}.md) - Detailed architecture
-- [source-tree-analysis.md](./source-tree-analysis.md) - Directory structure
-- [development-guide.md](./development-guide{{#if is_multi_part}}-{part_id}{{/if}}.md) - Development workflow
-
----
-
-_Generated using BMAD Method `document-project` workflow_
diff --git a/src/bmm/workflows/document-project/templates/project-scan-report-schema.json b/src/bmm/workflows/document-project/templates/project-scan-report-schema.json
deleted file mode 100644
index 8133e15f..00000000
--- a/src/bmm/workflows/document-project/templates/project-scan-report-schema.json
+++ /dev/null
@@ -1,160 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "Project Scan Report Schema",
- "description": "State tracking file for document-project workflow resumability",
- "type": "object",
- "required": ["workflow_version", "timestamps", "mode", "scan_level", "completed_steps", "current_step"],
- "properties": {
- "workflow_version": {
- "type": "string",
- "description": "Version of document-project workflow",
- "example": "1.2.0"
- },
- "timestamps": {
- "type": "object",
- "required": ["started", "last_updated"],
- "properties": {
- "started": {
- "type": "string",
- "format": "date-time",
- "description": "ISO 8601 timestamp when workflow started"
- },
- "last_updated": {
- "type": "string",
- "format": "date-time",
- "description": "ISO 8601 timestamp of last state update"
- },
- "completed": {
- "type": "string",
- "format": "date-time",
- "description": "ISO 8601 timestamp when workflow completed (if finished)"
- }
- }
- },
- "mode": {
- "type": "string",
- "enum": ["initial_scan", "full_rescan", "deep_dive"],
- "description": "Workflow execution mode"
- },
- "scan_level": {
- "type": "string",
- "enum": ["quick", "deep", "exhaustive"],
- "description": "Scan depth level (deep_dive mode always uses exhaustive)"
- },
- "project_root": {
- "type": "string",
- "description": "Absolute path to project root directory"
- },
- "output_folder": {
- "type": "string",
- "description": "Absolute path to output folder"
- },
- "completed_steps": {
- "type": "array",
- "items": {
- "type": "object",
- "required": ["step", "status"],
- "properties": {
- "step": {
- "type": "string",
- "description": "Step identifier (e.g., 'step_1', 'step_2')"
- },
- "status": {
- "type": "string",
- "enum": ["completed", "partial", "failed"]
- },
- "timestamp": {
- "type": "string",
- "format": "date-time"
- },
- "outputs": {
- "type": "array",
- "items": { "type": "string" },
- "description": "Files written during this step"
- },
- "summary": {
- "type": "string",
- "description": "1-2 sentence summary of step outcome"
- }
- }
- }
- },
- "current_step": {
- "type": "string",
- "description": "Current step identifier for resumption"
- },
- "findings": {
- "type": "object",
- "description": "High-level summaries only (detailed findings purged after writing)",
- "properties": {
- "project_classification": {
- "type": "object",
- "properties": {
- "repository_type": { "type": "string" },
- "parts_count": { "type": "integer" },
- "primary_language": { "type": "string" },
- "architecture_type": { "type": "string" }
- }
- },
- "technology_stack": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "part_id": { "type": "string" },
- "tech_summary": { "type": "string" }
- }
- }
- },
- "batches_completed": {
- "type": "array",
- "description": "For deep/exhaustive scans: subfolders processed",
- "items": {
- "type": "object",
- "properties": {
- "path": { "type": "string" },
- "files_scanned": { "type": "integer" },
- "summary": { "type": "string" }
- }
- }
- }
- }
- },
- "outputs_generated": {
- "type": "array",
- "items": { "type": "string" },
- "description": "List of all output files generated"
- },
- "resume_instructions": {
- "type": "string",
- "description": "Instructions for resuming from current_step"
- },
- "validation_status": {
- "type": "object",
- "properties": {
- "last_validated": {
- "type": "string",
- "format": "date-time"
- },
- "validation_errors": {
- "type": "array",
- "items": { "type": "string" }
- }
- }
- },
- "deep_dive_targets": {
- "type": "array",
- "description": "Track deep-dive areas analyzed (for deep_dive mode)",
- "items": {
- "type": "object",
- "properties": {
- "target_name": { "type": "string" },
- "target_path": { "type": "string" },
- "files_analyzed": { "type": "integer" },
- "output_file": { "type": "string" },
- "timestamp": { "type": "string", "format": "date-time" }
- }
- }
- }
- }
-}
diff --git a/src/bmm/workflows/document-project/templates/source-tree-template.md b/src/bmm/workflows/document-project/templates/source-tree-template.md
deleted file mode 100644
index 20306217..00000000
--- a/src/bmm/workflows/document-project/templates/source-tree-template.md
+++ /dev/null
@@ -1,135 +0,0 @@
-# {{project_name}} - Source Tree Analysis
-
-**Date:** {{date}}
-
-## Overview
-
-{{source_tree_overview}}
-
-{{#if is_multi_part}}
-
-## Multi-Part Structure
-
-This project is organized into {{parts_count}} distinct parts:
-
-{{#each project_parts}}
-
-- **{{part_name}}** (`{{root_path}}`): {{purpose}}
- {{/each}}
- {{/if}}
-
-## Complete Directory Structure
-
-```
-{{complete_source_tree}}
-```
-
-## Critical Directories
-
-{{#each critical_folders}}
-
-### `{{folder_path}}`
-
-{{description}}
-
-**Purpose:** {{purpose}}
-**Contains:** {{contents_summary}}
-{{#if entry_points}}**Entry Points:** {{entry_points}}{{/if}}
-{{#if integration_note}}**Integration:** {{integration_note}}{{/if}}
-
-{{/each}}
-
-{{#if is_multi_part}}
-
-## Part-Specific Trees
-
-{{#each project_parts}}
-
-### {{part_name}} Structure
-
-```
-{{source_tree}}
-```
-
-**Key Directories:**
-{{#each critical_directories}}
-
-- **`{{path}}`**: {{description}}
- {{/each}}
-
-{{/each}}
-
-## Integration Points
-
-{{#each integration_points}}
-
-### {{from_part}} β {{to_part}}
-
-- **Location:** `{{integration_path}}`
-- **Type:** {{integration_type}}
-- **Details:** {{details}}
- {{/each}}
-
-{{/if}}
-
-## Entry Points
-
-{{#if is_single_part}}
-
-- **Main Entry:** `{{main_entry_point}}`
- {{#if additional_entry_points}}
-- **Additional:**
- {{#each additional_entry_points}}
- - `{{path}}`: {{description}}
- {{/each}}
- {{/if}}
- {{else}}
- {{#each project_parts}}
-
-### {{part_name}}
-
-- **Entry Point:** `{{entry_point}}`
-- **Bootstrap:** {{bootstrap_description}}
- {{/each}}
- {{/if}}
-
-## File Organization Patterns
-
-{{file_organization_patterns}}
-
-## Key File Types
-
-{{#each file_type_patterns}}
-
-### {{file_type}}
-
-- **Pattern:** `{{pattern}}`
-- **Purpose:** {{purpose}}
-- **Examples:** {{examples}}
- {{/each}}
-
-## Asset Locations
-
-{{#if has_assets}}
-{{#each asset_locations}}
-
-- **{{asset_type}}**: `{{location}}` ({{file_count}} files, {{total_size}})
- {{/each}}
- {{else}}
- No significant assets detected.
- {{/if}}
-
-## Configuration Files
-
-{{#each config_files}}
-
-- **`{{path}}`**: {{description}}
- {{/each}}
-
-## Notes for Development
-
-{{development_notes}}
-
----
-
-_Generated using BMAD Method `document-project` workflow_
diff --git a/src/bmm/workflows/document-project/workflow.yaml b/src/bmm/workflows/document-project/workflow.yaml
deleted file mode 100644
index 536257b3..00000000
--- a/src/bmm/workflows/document-project/workflow.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Document Project Workflow Configuration
-name: "document-project"
-version: "1.2.0"
-description: "Analyzes and documents brownfield projects by scanning codebase, architecture, and patterns to create comprehensive reference documentation for AI-assisted development"
-author: "BMad"
-
-# Critical variables
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:project_knowledge"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-user_skill_level: "{config_source}:user_skill_level"
-date: system-generated
-
-# Module path and component files
-installed_path: "{project-root}/_bmad/bmm/workflows/document-project"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-
-# Required data files - CRITICAL for project type detection and documentation requirements
-documentation_requirements_csv: "{installed_path}/documentation-requirements.csv"
-
-# Output configuration - Multiple files generated in output folder
-# Primary output: {output_folder}/project-documentation/
-# Additional files generated by sub-workflows based on project structure
-
-standalone: true
-
-web_bundle: false
diff --git a/src/bmm/workflows/document-project/workflows/deep-dive-instructions.md b/src/bmm/workflows/document-project/workflows/deep-dive-instructions.md
deleted file mode 100644
index c88dfb08..00000000
--- a/src/bmm/workflows/document-project/workflows/deep-dive-instructions.md
+++ /dev/null
@@ -1,298 +0,0 @@
-# Deep-Dive Documentation Instructions
-
-
-
-This workflow performs exhaustive deep-dive documentation of specific areas
-Called by: ../document-project/instructions.md router
-Handles: deep_dive mode only
-
-
-Deep-dive mode requires literal full-file review. Sampling, guessing, or relying solely on tooling output is FORBIDDEN.
-Load existing project structure from index.md and project-parts.json (if exists)
-Load source tree analysis to understand available areas
-
-
- Analyze existing documentation to suggest deep-dive options
-
-What area would you like to deep-dive into?
-
-**Suggested Areas Based on Project Structure:**
-
-{{#if has_api_routes}}
-
-## API Routes ({{api_route_count}} endpoints found)
-
-{{#each api_route_groups}}
-{{group_index}}. {{group_name}} - {{endpoint_count}} endpoints in `{{path}}`
-{{/each}}
-{{/if}}
-
-{{#if has_feature_modules}}
-
-## Feature Modules ({{feature_count}} features)
-
-{{#each feature_modules}}
-{{module_index}}. {{module_name}} - {{file_count}} files in `{{path}}`
-{{/each}}
-{{/if}}
-
-{{#if has_ui_components}}
-
-### UI Component Areas
-
-{{#each component_groups}}
-{{group_index}}. {{group_name}} - {{component_count}} components in `{{path}}`
-{{/each}}
-{{/if}}
-
-{{#if has_services}}
-
-### Services/Business Logic
-
-{{#each service_groups}}
-{{service_index}}. {{service_name}} - `{{path}}`
-{{/each}}
-{{/if}}
-
-**Or specify custom:**
-
-- Folder path (e.g., "client/src/features/dashboard")
-- File path (e.g., "server/src/api/users.ts")
-- Feature name (e.g., "authentication system")
-
-Enter your choice (number or custom path):
-
-
-Parse user input to determine: - target_type: "folder" | "file" | "feature" | "api_group" | "component_group" - target_path: Absolute path to scan - target_name: Human-readable name for documentation - target_scope: List of all files to analyze
-
-
-Store as {{deep_dive_target}}
-
-Display confirmation:
-Target: {{target_name}}
-Type: {{target_type}}
-Path: {{target_path}}
-Estimated files to analyze: {{estimated_file_count}}
-
-This will read EVERY file in this area. Proceed? [y/n]
-
-
-Return to Step 13a (select different area)
-
-
-
- Set scan_mode = "exhaustive"
- Initialize file_inventory = []
- You must read every line of every file in scope and capture a plain-language explanation (what the file does, side effects, why it matters) that future developer agents can act on. No shortcuts.
-
-
- Get complete recursive file list from {{target_path}}
- Filter out: node_modules/, .git/, dist/, build/, coverage/, *.min.js, *.map
- For EVERY remaining file in folder:
- - Read complete file contents (all lines)
- - Extract all exports (functions, classes, types, interfaces, constants)
- - Extract all imports (dependencies)
- - Identify purpose from comments and code structure
- - Write 1-2 sentences (minimum) in natural language describing behaviour, side effects, assumptions, and anything a developer must know before modifying the file
- - Extract function signatures with parameter types and return types
- - Note any TODOs, FIXMEs, or comments
- - Identify patterns (hooks, components, services, controllers, etc.)
- - Capture per-file contributor guidance: `contributor_note`, `risks`, `verification_steps`, `suggested_tests`
- - Store in file_inventory
-
-
-
-
- Read complete file at {{target_path}}
- Extract all information as above
- Read all files it imports (follow import chain 1 level deep)
- Find all files that import this file (dependents via grep)
- Store all in file_inventory
-
-
-
- Identify all route/controller files in API group
- Read all route handlers completely
- Read associated middleware, controllers, services
- Read data models and schemas used
- Extract complete request/response schemas
- Document authentication and authorization requirements
- Store all in file_inventory
-
-
-
- Search codebase for all files related to feature name
- Include: UI components, API endpoints, models, services, tests
- Read each file completely
- Store all in file_inventory
-
-
-
- Get all component files in group
- Read each component completely
- Extract: Props interfaces, hooks used, child components, state management
- Store all in file_inventory
-
-
-For each file in file\*inventory, document: - **File Path:** Full path - **Purpose:** What this file does (1-2 sentences) - **Lines of Code:** Total LOC - **Exports:** Complete list with signatures
-
-- Functions: `functionName(param: Type): ReturnType` - Description
- - Classes: `ClassName` - Description with key methods
- - Types/Interfaces: `TypeName` - Description
- - Constants: `CONSTANT_NAME: Type` - Description - **Imports/Dependencies:** What it uses and why - **Used By:** Files that import this (dependents) - **Key Implementation Details:** Important logic, algorithms, patterns - **State Management:** If applicable (Redux, Context, local state) - **Side Effects:** API calls, database queries, file I/O, external services - **Error Handling:** Try/catch blocks, error boundaries, validation - **Testing:** Associated test files and coverage - **Comments/TODOs:** Any inline documentation or planned work
-
-
-comprehensive_file_inventory
-
-
-
- Build dependency graph for scanned area:
- - Create graph with files as nodes
- - Add edges for import relationships
- - Identify circular dependencies if any
- - Find entry points (files not imported by others in scope)
- - Find leaf nodes (files that don't import others in scope)
-
-
-Trace data flow through the system: - Follow function calls and data transformations - Track API calls and their responses - Document state updates and propagation - Map database queries and mutations
-
-
-Identify integration points: - External APIs consumed - Internal APIs/services called - Shared state accessed - Events published/subscribed - Database tables accessed
-
-
-dependency_graph
-data_flow_analysis
-integration_points
-
-
-
- Search codebase OUTSIDE scanned area for:
- - Similar file/folder naming patterns
- - Similar function signatures
- - Similar component structures
- - Similar API patterns
- - Reusable utilities that could be used
-
-
-Identify code reuse opportunities: - Shared utilities available - Design patterns used elsewhere - Component libraries available - Helper functions that could apply
-
-
-Find reference implementations: - Similar features in other parts of codebase - Established patterns to follow - Testing approaches used elsewhere
-
-
-related_code_references
-reuse_opportunities
-
-
-
- Create documentation filename: deep-dive-{{sanitized_target_name}}.md
- Aggregate contributor insights across files:
- - Combine unique risk/gotcha notes into {{risks_notes}}
- - Combine verification steps developers should run before changes into {{verification_steps}}
- - Combine recommended test commands into {{suggested_tests}}
-
-
-Load complete deep-dive template from: {installed_path}/templates/deep-dive-template.md
-Fill template with all collected data from steps 13b-13d
-Write filled template to: {output_folder}/deep-dive-{{sanitized_target_name}}.md
-Validate deep-dive document completeness
-
-deep_dive_documentation
-
-Update state file: - Add to deep_dive_targets array: {"target_name": "{{target_name}}", "target_path": "{{target_path}}", "files_analyzed": {{file_count}}, "output_file": "deep-dive-{{sanitized_target_name}}.md", "timestamp": "{{now}}"} - Add output to outputs_generated - Update last_updated timestamp
-
-
-
-
- Read existing index.md
-
-Check if "Deep-Dive Documentation" section exists
-
-
- Add new section after "Generated Documentation":
-
-## Deep-Dive Documentation
-
-Detailed exhaustive analysis of specific areas:
-
-
-
-
-
-Add link to new deep-dive doc:
-
-- [{{target_name}} Deep-Dive](./deep-dive-{{sanitized_target_name}}.md) - Comprehensive analysis of {{target_description}} ({{file_count}} files, {{total_loc}} LOC) - Generated {{date}}
-
-
- Update index metadata:
- Last Updated: {{date}}
- Deep-Dives: {{deep_dive_count}}
-
-
- Save updated index.md
-
- updated_index
-
-
-
- Display summary:
-
-βββββββββββββββββββββββββββββββββββββββββ
-
-## Deep-Dive Documentation Complete! β
-
-**Generated:** {output_folder}/deep-dive-{{target_name}}.md
-**Files Analyzed:** {{file_count}}
-**Lines of Code Scanned:** {{total_loc}}
-**Time Taken:** ~{{duration}}
-
-**Documentation Includes:**
-
-- Complete file inventory with all exports
-- Dependency graph and data flow
-- Integration points and API contracts
-- Testing analysis and coverage
-- Related code and reuse opportunities
-- Implementation guidance
-
-**Index Updated:** {output_folder}/index.md now includes link to this deep-dive
-
-βββββββββββββββββββββββββββββββββββββββββ
-
-
-Would you like to:
-
-1. **Deep-dive another area** - Analyze another feature/module/folder
-2. **Finish** - Complete workflow
-
-Your choice [1/2]:
-
-
-
- Clear current deep_dive_target
- Go to Step 13a (select new area)
-
-
-
- Display final message:
-
-All deep-dive documentation complete!
-
-**Master Index:** {output_folder}/index.md
-**Deep-Dives Generated:** {{deep_dive_count}}
-
-These comprehensive docs are now ready for:
-
-- Architecture review
-- Implementation planning
-- Code understanding
-- Brownfield PRD creation
-
-Thank you for using the document-project workflow!
-
-Exit workflow
-
-
-
-
-
diff --git a/src/bmm/workflows/document-project/workflows/deep-dive.yaml b/src/bmm/workflows/document-project/workflows/deep-dive.yaml
deleted file mode 100644
index a333cc4b..00000000
--- a/src/bmm/workflows/document-project/workflows/deep-dive.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# Deep-Dive Documentation Workflow Configuration
-name: "document-project-deep-dive"
-description: "Exhaustive deep-dive documentation of specific project areas"
-author: "BMad"
-
-# This is a sub-workflow called by document-project/workflow.yaml
-parent_workflow: "{project-root}/_bmad/bmm/workflows/document-project/workflow.yaml"
-
-# Critical variables inherited from parent
-config_source: "{project-root}/_bmad/bmb/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-date: system-generated
-
-# Module path and component files
-installed_path: "{project-root}/_bmad/bmm/workflows/document-project/workflows"
-template: false # Action workflow
-instructions: "{installed_path}/deep-dive-instructions.md"
-validation: "{project-root}/_bmad/bmm/workflows/document-project/checklist.md"
-
-# Templates
-deep_dive_template: "{project-root}/_bmad/bmm/workflows/document-project/templates/deep-dive-template.md"
-
-# Runtime inputs (passed from parent workflow)
-workflow_mode: "deep_dive"
-scan_level: "exhaustive" # Deep-dive always uses exhaustive scan
-project_root_path: ""
-existing_index_path: "" # Path to existing index.md
-
-# Configuration
-autonomous: false # Requires user input to select target area
diff --git a/src/bmm/workflows/document-project/workflows/full-scan-instructions.md b/src/bmm/workflows/document-project/workflows/full-scan-instructions.md
deleted file mode 100644
index 1340f75e..00000000
--- a/src/bmm/workflows/document-project/workflows/full-scan-instructions.md
+++ /dev/null
@@ -1,1106 +0,0 @@
-# Full Project Scan Instructions
-
-
-
-This workflow performs complete project documentation (Steps 1-12)
-Called by: document-project/instructions.md router
-Handles: initial_scan and full_rescan modes
-
-
-DATA LOADING STRATEGY - Understanding the Documentation Requirements System:
-
-Display explanation to user:
-
-**How Project Type Detection Works:**
-
-This workflow uses a single comprehensive CSV file to intelligently document your project:
-
-**documentation-requirements.csv** ({documentation_requirements_csv})
-
-- Contains 12 project types (web, mobile, backend, cli, library, desktop, game, data, extension, infra, embedded)
-- 24-column schema combining project type detection AND documentation requirements
-- **Detection columns**: project_type_id, key_file_patterns (used to identify project type from codebase)
-- **Requirement columns**: requires_api_scan, requires_data_models, requires_ui_components, etc.
-- **Pattern columns**: critical_directories, test_file_patterns, config_patterns, etc.
-- Acts as a "scan guide" - tells the workflow WHERE to look and WHAT to document
-- Example: For project_type_id="web", key_file_patterns includes "package.json;tsconfig.json;\*.config.js" and requires_api_scan=true
-
-**When Documentation Requirements are Loaded:**
-
-- **Fresh Start (initial_scan)**: Load all 12 rows β detect type using key_file_patterns β use that row's requirements
-- **Resume**: Load ONLY the doc requirements row(s) for cached project_type_id(s)
-- **Full Rescan**: Same as fresh start (may re-detect project type)
-- **Deep Dive**: Load ONLY doc requirements for the part being deep-dived
-
-
-Now loading documentation requirements data for fresh start...
-
-Load documentation-requirements.csv from: {documentation_requirements_csv}
-Store all 12 rows indexed by project_type_id for project detection and requirements lookup
-Display: "Loaded documentation requirements for 12 project types (web, mobile, backend, cli, library, desktop, game, data, extension, infra, embedded)"
-
-Display: "β Documentation requirements loaded successfully. Ready to begin project analysis."
-
-
-
-Check if {output_folder}/index.md exists
-
-
- Read existing index.md to extract metadata (date, project structure, parts count)
- Store as {{existing_doc_date}}, {{existing_structure}}
-
-I found existing documentation generated on {{existing_doc_date}}.
-
-What would you like to do?
-
-1. **Re-scan entire project** - Update all documentation with latest changes
-2. **Deep-dive into specific area** - Generate detailed documentation for a particular feature/module/folder
-3. **Cancel** - Keep existing documentation as-is
-
-Your choice [1/2/3]:
-
-
-
- Set workflow_mode = "full_rescan"
- Continue to scan level selection below
-
-
-
- Set workflow_mode = "deep_dive"
- Set scan_level = "exhaustive"
- Initialize state file with mode=deep_dive, scan_level=exhaustive
- Jump to Step 13
-
-
-
- Display message: "Keeping existing documentation. Exiting workflow."
- Exit workflow
-
-
-
-
- Set workflow_mode = "initial_scan"
- Continue to scan level selection below
-
-
-Select Scan Level
-
-
- Choose your scan depth level:
-
-**1. Quick Scan** (2-5 minutes) [DEFAULT]
-
-- Pattern-based analysis without reading source files
-- Scans: Config files, package manifests, directory structure
-- Best for: Quick project overview, initial understanding
-- File reading: Minimal (configs, README, package.json, etc.)
-
-**2. Deep Scan** (10-30 minutes)
-
-- Reads files in critical directories based on project type
-- Scans: All critical paths from documentation requirements
-- Best for: Comprehensive documentation for brownfield PRD
-- File reading: Selective (key files in critical directories)
-
-**3. Exhaustive Scan** (30-120 minutes)
-
-- Reads ALL source files in project
-- Scans: Every source file (excludes node_modules, dist, build)
-- Best for: Complete analysis, migration planning, detailed audit
-- File reading: Complete (all source files)
-
-Your choice [1/2/3] (default: 1):
-
-
-
- Set scan_level = "quick"
- Display: "Using Quick Scan (pattern-based, no source file reading)"
-
-
-
- Set scan_level = "deep"
- Display: "Using Deep Scan (reading critical files per project type)"
-
-
-
- Set scan_level = "exhaustive"
- Display: "Using Exhaustive Scan (reading all source files)"
-
-
-Initialize state file: {output_folder}/project-scan-report.json
-Every time you touch the state file, record: step id, human-readable summary (what you actually did), precise timestamp, and any outputs written. Vague phrases are unacceptable.
-Write initial state:
-{
-"workflow_version": "1.2.0",
-"timestamps": {"started": "{{current_timestamp}}", "last_updated": "{{current_timestamp}}"},
-"mode": "{{workflow_mode}}",
-"scan_level": "{{scan_level}}",
-"project_root": "{{project_root_path}}",
-"output_folder": "{{output_folder}}",
-"completed_steps": [],
-"current_step": "step_1",
-"findings": {},
-"outputs_generated": ["project-scan-report.json"],
-"resume_instructions": "Starting from step 1"
-}
-
-Continue with standard workflow from Step 1
-
-
-
-
-Ask user: "What is the root directory of the project to document?" (default: current working directory)
-Store as {{project_root_path}}
-
-Scan {{project_root_path}} for key indicators:
-
-- Directory structure (presence of client/, server/, api/, src/, app/, etc.)
-- Key files (package.json, go.mod, requirements.txt, etc.)
-- Technology markers matching detection_keywords from project-types.csv
-
-
-Detect if project is:
-
-- **Monolith**: Single cohesive codebase
-- **Monorepo**: Multiple parts in one repository
-- **Multi-part**: Separate client/server or similar architecture
-
-
-
- List detected parts with their paths
- I detected multiple parts in this project:
- {{detected_parts_list}}
-
-Is this correct? Should I document each part separately? [y/n]
-
-
-Set repository_type = "monorepo" or "multi-part"
-For each detected part: - Identify root path - Run project type detection using key_file_patterns from documentation-requirements.csv - Store as part in project_parts array
-
-
-Ask user to specify correct parts and their paths
-
-
-
- Set repository_type = "monolith"
- Create single part in project_parts array with root_path = {{project_root_path}}
- Run project type detection using key_file_patterns from documentation-requirements.csv
-
-
-For each part, match detected technologies and file patterns against key_file_patterns column in documentation-requirements.csv
-Assign project_type_id to each part
-Load corresponding documentation_requirements row for each part
-
-I've classified this project:
-{{project_classification_summary}}
-
-Does this look correct? [y/n/edit]
-
-
-project_structure
-project_parts_metadata
-
-IMMEDIATELY update state file with step completion:
-
-- Add to completed_steps: {"step": "step_1", "status": "completed", "timestamp": "{{now}}", "summary": "Classified as {{repository_type}} with {{parts_count}} parts"}
-- Update current_step = "step_2"
-- Update findings.project_classification with high-level summary only
-- **CACHE project_type_id(s)**: Add project_types array: [{"part_id": "{{part_id}}", "project_type_id": "{{project_type_id}}", "display_name": "{{display_name}}"}]
-- This cached data prevents reloading all CSV files on resume - we can load just the needed documentation_requirements row(s)
-- Update last_updated timestamp
-- Write state file
-
-
-PURGE detailed scan results from memory, keep only summary: "{{repository_type}}, {{parts_count}} parts, {{primary_tech}}"
-
-
-
-For each part, scan for existing documentation using patterns:
-- README.md, README.rst, README.txt
-- CONTRIBUTING.md, CONTRIBUTING.rst
-- ARCHITECTURE.md, ARCHITECTURE.txt, docs/architecture/
-- DEPLOYMENT.md, DEPLOY.md, docs/deployment/
-- API.md, docs/api/
-- Any files in docs/, documentation/, .github/ folders
-
-
-Create inventory of existing_docs with:
-
-- File path
-- File type (readme, architecture, api, etc.)
-- Which part it belongs to (if multi-part)
-
-
-I found these existing documentation files:
-{{existing_docs_list}}
-
-Are there any other important documents or key areas I should focus on while analyzing this project? [Provide paths or guidance, or type 'none']
-
-
-Store user guidance as {{user_context}}
-
-existing_documentation_inventory
-user_provided_context
-
-Update state file:
-
-- Add to completed_steps: {"step": "step_2", "status": "completed", "timestamp": "{{now}}", "summary": "Found {{existing_docs_count}} existing docs"}
-- Update current_step = "step_3"
-- Update last_updated timestamp
-
-
-PURGE detailed doc contents from memory, keep only: "{{existing_docs_count}} docs found"
-
-
-
-For each part in project_parts:
- - Load key_file_patterns from documentation_requirements
- - Scan part root for these patterns
- - Parse technology manifest files (package.json, go.mod, requirements.txt, etc.)
- - Extract: framework, language, version, database, dependencies
- - Build technology_table with columns: Category, Technology, Version, Justification
-
-
-Determine architecture pattern based on detected tech stack:
-
-- Use project_type_id as primary indicator (e.g., "web" β layered/component-based, "backend" β service/API-centric)
-- Consider framework patterns (e.g., React β component hierarchy, Express β middleware pipeline)
-- Note architectural style in technology table
-- Store as {{architecture_pattern}} for each part
-
-
-technology_stack
-architecture_patterns
-
-Update state file:
-
-- Add to completed_steps: {"step": "step_3", "status": "completed", "timestamp": "{{now}}", "summary": "Tech stack: {{primary_framework}}"}
-- Update current_step = "step_4"
-- Update findings.technology_stack with summary per part
-- Update last_updated timestamp
-
-
-PURGE detailed tech analysis from memory, keep only: "{{framework}} on {{language}}"
-
-
-
-
-BATCHING STRATEGY FOR DEEP/EXHAUSTIVE SCANS
-
-
- This step requires file reading. Apply batching strategy:
-
-Identify subfolders to process based on: - scan_level == "deep": Use critical_directories from documentation_requirements - scan_level == "exhaustive": Get ALL subfolders recursively (excluding node_modules, .git, dist, build, coverage)
-
-
-For each subfolder to scan: 1. Read all files in subfolder (consider file size - use judgment for files >5000 LOC) 2. Extract required information based on conditional flags below 3. IMMEDIATELY write findings to appropriate output file 4. Validate written document (section-level validation) 5. Update state file with batch completion 6. PURGE detailed findings from context, keep only 1-2 sentence summary 7. Move to next subfolder
-
-
-Track batches in state file:
-findings.batches_completed: [
-{"path": "{{subfolder_path}}", "files_scanned": {{count}}, "summary": "{{brief_summary}}"}
-]
-
-
-
-
- Use pattern matching only - do NOT read source files
- Use glob/grep to identify file locations and patterns
- Extract information from filenames, directory structure, and config files only
-
-
-For each part, check documentation_requirements boolean flags and execute corresponding scans:
-
-
- Scan for API routes and endpoints using integration_scan_patterns
- Look for: controllers/, routes/, api/, handlers/, endpoints/
-
-
- Use glob to find route files, extract patterns from filenames and folder structure
-
-
-
- Read files in batches (one subfolder at a time)
- Extract: HTTP methods, paths, request/response types from actual code
-
-
-Build API contracts catalog
-IMMEDIATELY write to: {output_folder}/api-contracts-{part_id}.md
-Validate document has all required sections
-Update state file with output generated
-PURGE detailed API data, keep only: "{{api_count}} endpoints documented"
-api_contracts\*{part_id}
-
-
-
- Scan for data models using schema_migration_patterns
- Look for: models/, schemas/, entities/, migrations/, prisma/, ORM configs
-
-
- Identify schema files via glob, parse migration file names for table discovery
-
-
-
- Read model files in batches (one subfolder at a time)
- Extract: table names, fields, relationships, constraints from actual code
-
-
-Build database schema documentation
-IMMEDIATELY write to: {output_folder}/data-models-{part_id}.md
-Validate document completeness
-Update state file with output generated
-PURGE detailed schema data, keep only: "{{table_count}} tables documented"
-data_models\*{part_id}
-
-
-
- Analyze state management patterns
- Look for: Redux, Context API, MobX, Vuex, Pinia, Provider patterns
- Identify: stores, reducers, actions, state structure
- state_management_patterns_{part_id}
-
-
-
- Inventory UI component library
- Scan: components/, ui/, widgets/, views/ folders
- Categorize: Layout, Form, Display, Navigation, etc.
- Identify: Design system, component patterns, reusable elements
- ui_component_inventory_{part_id}
-
-
-
- Look for hardware schematics using hardware_interface_patterns
- This appears to be an embedded/hardware project. Do you have:
- - Pinout diagrams
- - Hardware schematics
- - PCB layouts
- - Hardware documentation
-
-If yes, please provide paths or links. [Provide paths or type 'none']
-
-Store hardware docs references
-hardware*documentation*{part_id}
-
-
-
- Scan and catalog assets using asset_patterns
- Categorize by: Images, Audio, 3D Models, Sprites, Textures, etc.
- Calculate: Total size, file counts, formats used
- asset_inventory_{part_id}
-
-
-Scan for additional patterns based on doc requirements:
-
-- config_patterns β Configuration management
-- auth_security_patterns β Authentication/authorization approach
-- entry_point_patterns β Application entry points and bootstrap
-- shared_code_patterns β Shared libraries and utilities
-- async_event_patterns β Event-driven architecture
-- ci_cd_patterns β CI/CD pipeline details
-- localization_patterns β i18n/l10n support
-
-
-Apply scan_level strategy to each pattern scan (quick=glob only, deep/exhaustive=read files)
-
-comprehensive*analysis*{part_id}
-
-Update state file:
-
-- Add to completed_steps: {"step": "step_4", "status": "completed", "timestamp": "{{now}}", "summary": "Conditional analysis complete, {{files_generated}} files written"}
-- Update current_step = "step_5"
-- Update last_updated timestamp
-- List all outputs_generated
-
-
-PURGE all detailed scan results from context. Keep only summaries:
-
-- "APIs: {{api_count}} endpoints"
-- "Data: {{table_count}} tables"
-- "Components: {{component_count}} components"
-
-
-
-
-For each part, generate complete directory tree using critical_directories from doc requirements
-
-Annotate the tree with:
-
-- Purpose of each critical directory
-- Entry points marked
-- Key file locations highlighted
-- Integration points noted (for multi-part projects)
-
-
-Show how parts are organized and where they interface
-
-Create formatted source tree with descriptions:
-
-```
-project-root/
-βββ client/ # React frontend (Part: client)
-β βββ src/
-β β βββ components/ # Reusable UI components
-β β βββ pages/ # Route-based pages
-β β βββ api/ # API client layer β Calls server/
-βββ server/ # Express API backend (Part: api)
-β βββ src/
-β β βββ routes/ # REST API endpoints
-β β βββ models/ # Database models
-β β βββ services/ # Business logic
-```
-
-
-
-source_tree_analysis
-critical_folders_summary
-
-IMMEDIATELY write source-tree-analysis.md to disk
-Validate document structure
-Update state file:
-
-- Add to completed_steps: {"step": "step_5", "status": "completed", "timestamp": "{{now}}", "summary": "Source tree documented"}
-- Update current_step = "step_6"
-- Add output: "source-tree-analysis.md"
-
- PURGE detailed tree from context, keep only: "Source tree with {{folder_count}} critical folders"
-
-
-
-Scan for development setup using key_file_patterns and existing docs:
-- Prerequisites (Node version, Python version, etc.)
-- Installation steps (npm install, etc.)
-- Environment setup (.env files, config)
-- Build commands (npm run build, make, etc.)
-- Run commands (npm start, go run, etc.)
-- Test commands using test_file_patterns
-
-
-Look for deployment configuration using ci_cd_patterns:
-
-- Dockerfile, docker-compose.yml
-- Kubernetes configs (k8s/, helm/)
-- CI/CD pipelines (.github/workflows/, .gitlab-ci.yml)
-- Deployment scripts
-- Infrastructure as Code (terraform/, pulumi/)
-
-
-
- Extract contribution guidelines:
- - Code style rules
- - PR process
- - Commit conventions
- - Testing requirements
-
-
-
-development_instructions
-deployment_configuration
-contribution_guidelines
-
-Update state file:
-
-- Add to completed_steps: {"step": "step_6", "status": "completed", "timestamp": "{{now}}", "summary": "Dev/deployment guides written"}
-- Update current_step = "step_7"
-- Add generated outputs to list
-
- PURGE detailed instructions, keep only: "Dev setup and deployment documented"
-
-
-
-Analyze how parts communicate:
-- Scan integration_scan_patterns across parts
-- Identify: REST calls, GraphQL queries, gRPC, message queues, shared databases
-- Document: API contracts between parts, data flow, authentication flow
-
-
-Create integration_points array with:
-
-- from: source part
-- to: target part
-- type: REST API, GraphQL, gRPC, Event Bus, etc.
-- details: Endpoints, protocols, data formats
-
-
-IMMEDIATELY write integration-architecture.md to disk
-Validate document completeness
-
-integration_architecture
-
-Update state file:
-
-- Add to completed_steps: {"step": "step_7", "status": "completed", "timestamp": "{{now}}", "summary": "Integration architecture documented"}
-- Update current_step = "step_8"
-
- PURGE integration details, keep only: "{{integration_count}} integration points"
-
-
-
-For each part in project_parts:
- - Use matched architecture template from Step 3 as base structure
- - Fill in all sections with discovered information:
- * Executive Summary
- * Technology Stack (from Step 3)
- * Architecture Pattern (from registry match)
- * Data Architecture (from Step 4 data models scan)
- * API Design (from Step 4 API scan if applicable)
- * Component Overview (from Step 4 component scan if applicable)
- * Source Tree (from Step 5)
- * Development Workflow (from Step 6)
- * Deployment Architecture (from Step 6)
- * Testing Strategy (from test patterns)
-
-
-
- - Generate: architecture.md (no part suffix)
-
-
-
- - Generate: architecture-{part_id}.md for each part
-
-
-For each architecture file generated:
-
-- IMMEDIATELY write architecture file to disk
-- Validate against architecture template schema
-- Update state file with output
-- PURGE detailed architecture from context, keep only: "Architecture for {{part_id}} written"
-
-
-architecture_document
-
-Update state file:
-
-- Add to completed_steps: {"step": "step_8", "status": "completed", "timestamp": "{{now}}", "summary": "Architecture docs written for {{parts_count}} parts"}
-- Update current_step = "step_9"
-
-
-
-
-Generate project-overview.md with:
-- Project name and purpose (from README or user input)
-- Executive summary
-- Tech stack summary table
-- Architecture type classification
-- Repository structure (monolith/monorepo/multi-part)
-- Links to detailed docs
-
-
-Generate source-tree-analysis.md with:
-
-- Full annotated directory tree from Step 5
-- Critical folders explained
-- Entry points documented
-- Multi-part structure (if applicable)
-
-
-IMMEDIATELY write project-overview.md to disk
-Validate document sections
-
-Generate source-tree-analysis.md (if not already written in Step 5)
-IMMEDIATELY write to disk and validate
-
-Generate component-inventory.md (or per-part versions) with:
-
-- All discovered components from Step 4
-- Categorized by type
-- Reusable vs specific components
-- Design system elements (if found)
-
- IMMEDIATELY write each component inventory to disk and validate
-
-Generate development-guide.md (or per-part versions) with:
-
-- Prerequisites and dependencies
-- Environment setup instructions
-- Local development commands
-- Build process
-- Testing approach and commands
-- Common development tasks
-
- IMMEDIATELY write each development guide to disk and validate
-
-
- Generate deployment-guide.md with:
- - Infrastructure requirements
- - Deployment process
- - Environment configuration
- - CI/CD pipeline details
-
- IMMEDIATELY write to disk and validate
-
-
-
- Generate contribution-guide.md with:
- - Code style and conventions
- - PR process
- - Testing requirements
- - Documentation standards
-
- IMMEDIATELY write to disk and validate
-
-
-
- Generate api-contracts.md (or per-part) with:
- - All API endpoints
- - Request/response schemas
- - Authentication requirements
- - Example requests
-
- IMMEDIATELY write to disk and validate
-
-
-
- Generate data-models.md (or per-part) with:
- - Database schema
- - Table relationships
- - Data models and entities
- - Migration strategy
-
- IMMEDIATELY write to disk and validate
-
-
-
- Generate integration-architecture.md with:
- - How parts communicate
- - Integration points diagram/description
- - Data flow between parts
- - Shared dependencies
-
- IMMEDIATELY write to disk and validate
-
-Generate project-parts.json metadata file:
-`json
- {
- "repository_type": "monorepo",
- "parts": [ ... ],
- "integration_points": [ ... ]
- }
- `
-
-IMMEDIATELY write to disk
-
-
-supporting_documentation
-
-Update state file:
-
-- Add to completed_steps: {"step": "step_9", "status": "completed", "timestamp": "{{now}}", "summary": "All supporting docs written"}
-- Update current_step = "step_10"
-- List all newly generated outputs
-
-
-PURGE all document contents from context, keep only list of files generated
-
-
-
-
-INCOMPLETE DOCUMENTATION MARKER CONVENTION:
-When a document SHOULD be generated but wasn't (due to quick scan, missing data, conditional requirements not met):
-
-- Use EXACTLY this marker: _(To be generated)_
-- Place it at the end of the markdown link line
-- Example: - [API Contracts - Server](./api-contracts-server.md) _(To be generated)_
-- This allows Step 11 to detect and offer to complete these items
-- ALWAYS use this exact format for consistency and automated detection
-
-
-Create index.md with intelligent navigation based on project structure
-
-
- Generate simple index with:
- - Project name and type
- - Quick reference (tech stack, architecture type)
- - Links to all generated docs
- - Links to discovered existing docs
- - Getting started section
-
-
-
-
- Generate comprehensive index with:
- - Project overview and structure summary
- - Part-based navigation section
- - Quick reference by part
- - Cross-part integration links
- - Links to all generated and existing docs
- - Getting started per part
-
-
-
-Include in index.md:
-
-## Project Documentation Index
-
-### Project Overview
-
-- **Type:** {{repository_type}} {{#if multi-part}}with {{parts.length}} parts{{/if}}
-- **Primary Language:** {{primary_language}}
-- **Architecture:** {{architecture_type}}
-
-### Quick Reference
-
-{{#if single_part}}
-
-- **Tech Stack:** {{tech_stack_summary}}
-- **Entry Point:** {{entry_point}}
-- **Architecture Pattern:** {{architecture_pattern}}
- {{else}}
- {{#each parts}}
-
-#### {{part_name}} ({{part_id}})
-
-- **Type:** {{project_type}}
-- **Tech Stack:** {{tech_stack}}
-- **Root:** {{root_path}}
- {{/each}}
- {{/if}}
-
-### Generated Documentation
-
-- [Project Overview](./project-overview.md)
-- [Architecture](./architecture{{#if multi-part}}-{part\*id}{{/if}}.md){{#unless architecture_file_exists}} (To be generated) {{/unless}}
-- [Source Tree Analysis](./source-tree-analysis.md)
-- [Component Inventory](./component-inventory{{#if multi-part}}-{part\*id}{{/if}}.md){{#unless component_inventory_exists}} (To be generated) {{/unless}}
-- [Development Guide](./development-guide{{#if multi-part}}-{part\*id}{{/if}}.md){{#unless dev_guide_exists}} (To be generated) {{/unless}}
- {{#if deployment_found}}- [Deployment Guide](./deployment-guide.md){{#unless deployment_guide_exists}} (To be generated) {{/unless}}{{/if}}
- {{#if contribution_found}}- [Contribution Guide](./contribution-guide.md){{/if}}
- {{#if api_documented}}- [API Contracts](./api-contracts{{#if multi-part}}-{part_id}{{/if}}.md){{#unless api_contracts_exists}} (To be generated) {{/unless}}{{/if}}
- {{#if data_models_documented}}- [Data Models](./data-models{{#if multi-part}}-{part_id}{{/if}}.md){{#unless data_models_exists}} (To be generated) {{/unless}}{{/if}}
- {{#if multi-part}}- [Integration Architecture](./integration-architecture.md){{#unless integration_arch_exists}} (To be generated) {{/unless}}{{/if}}
-
-### Existing Documentation
-
-{{#each existing_docs}}
-
-- [{{title}}]({{relative_path}}) - {{description}}
- {{/each}}
-
-### Getting Started
-
-{{getting_started_instructions}}
-
-
-Before writing index.md, check which expected files actually exist:
-
-- For each document that should have been generated, check if file exists on disk
-- Set existence flags: architecture_file_exists, component_inventory_exists, dev_guide_exists, etc.
-- These flags determine whether to add the _(To be generated)_ marker
-- Track which files are missing in {{missing_docs_list}} for reporting
-
-
-IMMEDIATELY write index.md to disk with appropriate _(To be generated)_ markers for missing files
-Validate index has all required sections and links are valid
-
-index
-
-Update state file:
-
-- Add to completed_steps: {"step": "step_10", "status": "completed", "timestamp": "{{now}}", "summary": "Master index generated"}
-- Update current_step = "step_11"
-- Add output: "index.md"
-
-
-PURGE index content from context
-
-
-
-Show summary of all generated files:
-Generated in {{output_folder}}/:
-{{file_list_with_sizes}}
-
-
-Run validation checklist from {validation}
-
-INCOMPLETE DOCUMENTATION DETECTION:
-
-1. PRIMARY SCAN: Look for exact marker: _(To be generated)_
-2. FALLBACK SCAN: Look for fuzzy patterns (in case agent was lazy):
- - _(TBD)_
- - _(TODO)_
- - _(Coming soon)_
- - _(Not yet generated)_
- - _(Pending)_
-3. Extract document metadata from each match for user selection
-
-
-Read {output_folder}/index.md
-
-Scan for incomplete documentation markers:
-Step 1: Search for exact pattern "_(To be generated)_" (case-sensitive)
-Step 2: For each match found, extract the entire line
-Step 3: Parse line to extract:
-
-- Document title (text within [brackets] or **bold**)
-- File path (from markdown link or inferable from title)
-- Document type (infer from filename: architecture, api-contracts, data-models, component-inventory, development-guide, deployment-guide, integration-architecture)
-- Part ID if applicable (extract from filename like "architecture-server.md" β part_id: "server")
- Step 4: Add to {{incomplete_docs_strict}} array
-
-
-Fallback fuzzy scan for alternate markers:
-Search for patterns: _(TBD)_, _(TODO)_, _(Coming soon)_, _(Not yet generated)_, _(Pending)_
-For each fuzzy match:
-
-- Extract same metadata as strict scan
-- Add to {{incomplete_docs_fuzzy}} array with fuzzy_match flag
-
-
-Combine results:
-Set {{incomplete_docs_list}} = {{incomplete_docs_strict}} + {{incomplete_docs_fuzzy}}
-For each item store structure:
-{
-"title": "Architecture β Server",
-"file\*path": "./architecture-server.md",
-"doc_type": "architecture",
-"part_id": "server",
-"line_text": "- [Architecture β Server](./architecture-server.md) (To be generated)",
-"fuzzy_match": false
-}
-
-
-Documentation generation complete!
-
-Summary:
-
-- Project Type: {{project_type_summary}}
-- Parts Documented: {{parts_count}}
-- Files Generated: {{files_count}}
-- Total Lines: {{total_lines}}
-
-{{#if incomplete_docs_list.length > 0}}
-β οΈ **Incomplete Documentation Detected:**
-
-I found {{incomplete_docs_list.length}} item(s) marked as incomplete:
-
-{{#each incomplete_docs_list}}
-{{@index + 1}}. **{{title}}** ({{doc_type}}{{#if part_id}} for {{part_id}}{{/if}}){{#if fuzzy_match}} β οΈ [non-standard marker]{{/if}}
-{{/each}}
-
-{{/if}}
-
-Would you like to:
-
-{{#if incomplete_docs_list.length > 0}}
-
-1. **Generate incomplete documentation** - Complete any of the {{incomplete_docs_list.length}} items above
-2. Review any specific section [type section name]
-3. Add more detail to any area [type area name]
-4. Generate additional custom documentation [describe what]
-5. Finalize and complete [type 'done']
- {{else}}
-6. Review any specific section [type section name]
-7. Add more detail to any area [type area name]
-8. Generate additional documentation [describe what]
-9. Finalize and complete [type 'done']
- {{/if}}
-
-Your choice:
-
-
-
- Which incomplete items would you like to generate?
-
-{{#each incomplete_docs_list}}
-{{@index + 1}}. {{title}} ({{doc_type}}{{#if part_id}} - {{part_id}}{{/if}})
-{{/each}}
-{{incomplete_docs_list.length + 1}}. All of them
-
-Enter number(s) separated by commas (e.g., "1,3,5"), or type 'all':
-
-
-Parse user selection:
-
-- If "all", set {{selected_items}} = all items in {{incomplete_docs_list}}
-- If comma-separated numbers, extract selected items by index
-- Store result in {{selected_items}} array
-
-
- Display: "Generating {{selected_items.length}} document(s)..."
-
- For each item in {{selected_items}}:
-
-1. **Identify the part and requirements:**
- - Extract part_id from item (if exists)
- - Look up part data in project_parts array from state file
- - Load documentation_requirements for that part's project_type_id
-
-2. **Route to appropriate generation substep based on doc_type:**
-
- **If doc_type == "architecture":**
- - Display: "Generating architecture documentation for {{part_id}}..."
- - Load architecture_match for this part from state file (Step 3 cache)
- - Re-run Step 8 architecture generation logic ONLY for this specific part
- - Use matched template and fill with cached data from state file
- - Write architecture-{{part_id}}.md to disk
- - Validate completeness
-
- **If doc_type == "api-contracts":**
- - Display: "Generating API contracts for {{part_id}}..."
- - Load part data and documentation_requirements
- - Re-run Step 4 API scan substep targeting ONLY this part
- - Use scan_level from state file (quick/deep/exhaustive)
- - Generate api-contracts-{{part_id}}.md
- - Validate document structure
-
- **If doc_type == "data-models":**
- - Display: "Generating data models documentation for {{part_id}}..."
- - Re-run Step 4 data models scan substep targeting ONLY this part
- - Use schema_migration_patterns from documentation_requirements
- - Generate data-models-{{part_id}}.md
- - Validate completeness
-
- **If doc_type == "component-inventory":**
- - Display: "Generating component inventory for {{part_id}}..."
- - Re-run Step 9 component inventory generation for this specific part
- - Scan components/, ui/, widgets/ folders
- - Generate component-inventory-{{part_id}}.md
- - Validate structure
-
- **If doc_type == "development-guide":**
- - Display: "Generating development guide for {{part_id}}..."
- - Re-run Step 9 development guide generation for this specific part
- - Use key_file_patterns and test_file_patterns from documentation_requirements
- - Generate development-guide-{{part_id}}.md
- - Validate completeness
-
- **If doc_type == "deployment-guide":**
- - Display: "Generating deployment guide..."
- - Re-run Step 6 deployment configuration scan
- - Re-run Step 9 deployment guide generation
- - Generate deployment-guide.md
- - Validate structure
-
- **If doc_type == "integration-architecture":**
- - Display: "Generating integration architecture..."
- - Re-run Step 7 integration analysis for all parts
- - Generate integration-architecture.md
- - Validate completeness
-
-3. **Post-generation actions:**
- - Confirm file was written successfully
- - Update state file with newly generated output
- - Add to {{newly_generated_docs}} tracking list
- - Display: "β Generated: {{file_path}}"
-
-4. **Handle errors:**
- - If generation fails, log error and continue with next item
- - Track failed items in {{failed_generations}} list
-
-
-After all selected items are processed:
-
-**Update index.md to remove markers:**
-
-1. Read current index.md content
-2. For each item in {{newly_generated_docs}}:
- - Find the line containing the file link and marker
- - Remove the _(To be generated)_ or fuzzy marker text
- - Leave the markdown link intact
-3. Write updated index.md back to disk
-4. Update state file to record index.md modification
-
-
-Display generation summary:
-
-βββββββββββββββββββββββββββββββββββββββββ
-
-β **Documentation Generation Complete!**
-
-**Successfully Generated:**
-{{#each newly_generated_docs}}
-
-- {{title}} β {{file_path}}
- {{/each}}
-
-{{#if failed_generations.length > 0}}
-**Failed to Generate:**
-{{#each failed_generations}}
-
-- {{title}} ({{error_message}})
- {{/each}}
- {{/if}}
-
-**Updated:** index.md (removed incomplete markers)
-
-βββββββββββββββββββββββββββββββββββββββββ
-
-
-Update state file with all generation activities
-
-Return to Step 11 menu (loop back to check for any remaining incomplete items)
-
-
-Make requested modifications and regenerate affected files
-Proceed to Step 12 completion
-
-
- Update state file:
-- Add to completed_steps: {"step": "step_11_iteration", "status": "completed", "timestamp": "{{now}}", "summary": "Review iteration complete"}
-- Keep current_step = "step_11" (for loop back)
-- Update last_updated timestamp
-
- Loop back to beginning of Step 11 (re-scan for remaining incomplete docs)
-
-
-
- Update state file:
-- Add to completed_steps: {"step": "step_11", "status": "completed", "timestamp": "{{now}}", "summary": "Validation and review complete"}
-- Update current_step = "step_12"
-
- Proceed to Step 12
-
-
-
-
-Create final summary report
-Compile verification recap variables:
- - Set {{verification_summary}} to the concrete tests, validations, or scripts you executed (or "none run").
- - Set {{open_risks}} to any remaining risks or TODO follow-ups (or "none").
- - Set {{next_checks}} to recommended actions before merging/deploying (or "none").
-
-
-Display completion message:
-
-βββββββββββββββββββββββββββββββββββββββββ
-
-## Project Documentation Complete! β
-
-**Location:** {{output_folder}}/
-
-**Master Index:** {{output_folder}}/index.md
-π This is your primary entry point for AI-assisted development
-
-**Generated Documentation:**
-{{generated_files_list}}
-
-**Next Steps:**
-
-1. Review the index.md to familiarize yourself with the documentation structure
-2. When creating a brownfield PRD, point the PRD workflow to: {{output_folder}}/index.md
-3. For UI-only features: Reference {{output_folder}}/architecture-{{ui_part_id}}.md
-4. For API-only features: Reference {{output_folder}}/architecture-{{api_part_id}}.md
-5. For full-stack features: Reference both part architectures + integration-architecture.md
-
-**Verification Recap:**
-
-- Tests/extractions executed: {{verification_summary}}
-- Outstanding risks or follow-ups: {{open_risks}}
-- Recommended next checks before PR: {{next_checks}}
-
-**Brownfield PRD Command:**
-When ready to plan new features, run the PRD workflow and provide this index as input.
-
-βββββββββββββββββββββββββββββββββββββββββ
-
-
-FINALIZE state file:
-
-- Add to completed_steps: {"step": "step_12", "status": "completed", "timestamp": "{{now}}", "summary": "Workflow complete"}
-- Update timestamps.completed = "{{now}}"
-- Update current_step = "completed"
-- Write final state file
-
-
-Display: "State file saved: {{output_folder}}/project-scan-report.json"
-
-
diff --git a/src/bmm/workflows/document-project/workflows/full-scan.yaml b/src/bmm/workflows/document-project/workflows/full-scan.yaml
deleted file mode 100644
index f62aba9b..00000000
--- a/src/bmm/workflows/document-project/workflows/full-scan.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# Full Project Scan Workflow Configuration
-name: "document-project-full-scan"
-description: "Complete project documentation workflow (initial scan or full rescan)"
-author: "BMad"
-
-# This is a sub-workflow called by document-project/workflow.yaml
-parent_workflow: "{project-root}/_bmad/bmm/workflows/document-project/workflow.yaml"
-
-# Critical variables inherited from parent
-config_source: "{project-root}/_bmad/bmb/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-date: system-generated
-
-# Data files
-documentation_requirements_csv: "{project-root}/_bmad/bmm/workflows/document-project/documentation-requirements.csv"
-
-# Module path and component files
-installed_path: "{project-root}/_bmad/bmm/workflows/document-project/workflows"
-template: false # Action workflow
-instructions: "{installed_path}/full-scan-instructions.md"
-validation: "{project-root}/_bmad/bmm/workflows/document-project/checklist.md"
-
-# Runtime inputs (passed from parent workflow)
-workflow_mode: "" # "initial_scan" or "full_rescan"
-scan_level: "" # "quick", "deep", or "exhaustive"
-resume_mode: false
-project_root_path: ""
-
-# Configuration
-autonomous: false # Requires user input at key decision points
diff --git a/src/bmm/workflows/excalidraw-diagrams/_shared/excalidraw-library.json b/src/bmm/workflows/excalidraw-diagrams/_shared/excalidraw-library.json
deleted file mode 100644
index d18f94af..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/_shared/excalidraw-library.json
+++ /dev/null
@@ -1,90 +0,0 @@
-{
- "type": "excalidrawlib",
- "version": 2,
- "library": [
- {
- "id": "start-end-circle",
- "status": "published",
- "elements": [
- {
- "type": "ellipse",
- "width": 120,
- "height": 60,
- "strokeColor": "#1976d2",
- "backgroundColor": "#e3f2fd",
- "fillStyle": "solid",
- "strokeWidth": 2,
- "roughness": 0
- }
- ]
- },
- {
- "id": "process-rectangle",
- "status": "published",
- "elements": [
- {
- "type": "rectangle",
- "width": 160,
- "height": 80,
- "strokeColor": "#1976d2",
- "backgroundColor": "#e3f2fd",
- "fillStyle": "solid",
- "strokeWidth": 2,
- "roughness": 0,
- "roundness": {
- "type": 3,
- "value": 8
- }
- }
- ]
- },
- {
- "id": "decision-diamond",
- "status": "published",
- "elements": [
- {
- "type": "diamond",
- "width": 140,
- "height": 100,
- "strokeColor": "#f57c00",
- "backgroundColor": "#fff3e0",
- "fillStyle": "solid",
- "strokeWidth": 2,
- "roughness": 0
- }
- ]
- },
- {
- "id": "data-store",
- "status": "published",
- "elements": [
- {
- "type": "rectangle",
- "width": 140,
- "height": 80,
- "strokeColor": "#388e3c",
- "backgroundColor": "#e8f5e9",
- "fillStyle": "solid",
- "strokeWidth": 2,
- "roughness": 0
- }
- ]
- },
- {
- "id": "external-entity",
- "status": "published",
- "elements": [
- {
- "type": "rectangle",
- "width": 120,
- "height": 80,
- "strokeColor": "#7b1fa2",
- "backgroundColor": "#f3e5f5",
- "fillStyle": "solid",
- "strokeWidth": 3,
- "roughness": 0
- }
- ]
- }
- ]
-}
diff --git a/src/bmm/workflows/excalidraw-diagrams/_shared/excalidraw-templates.yaml b/src/bmm/workflows/excalidraw-diagrams/_shared/excalidraw-templates.yaml
deleted file mode 100644
index 6fab2a3d..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/_shared/excalidraw-templates.yaml
+++ /dev/null
@@ -1,127 +0,0 @@
-flowchart:
- viewport:
- x: 0
- y: 0
- zoom: 1
- grid:
- size: 20
- spacing:
- vertical: 100
- horizontal: 180
- elements:
- start:
- type: ellipse
- width: 120
- height: 60
- label: "Start"
- process:
- type: rectangle
- width: 160
- height: 80
- roundness: 8
- decision:
- type: diamond
- width: 140
- height: 100
- end:
- type: ellipse
- width: 120
- height: 60
- label: "End"
-
-diagram:
- viewport:
- x: 0
- y: 0
- zoom: 1
- grid:
- size: 20
- spacing:
- vertical: 120
- horizontal: 200
- elements:
- component:
- type: rectangle
- width: 180
- height: 100
- roundness: 8
- database:
- type: rectangle
- width: 140
- height: 80
- service:
- type: rectangle
- width: 160
- height: 90
- roundness: 12
- external:
- type: rectangle
- width: 140
- height: 80
-
-wireframe:
- viewport:
- x: 0
- y: 0
- zoom: 0.8
- grid:
- size: 20
- spacing:
- vertical: 40
- horizontal: 40
- elements:
- container:
- type: rectangle
- width: 800
- height: 600
- strokeStyle: solid
- strokeWidth: 2
- header:
- type: rectangle
- width: 800
- height: 80
- button:
- type: rectangle
- width: 120
- height: 40
- roundness: 4
- input:
- type: rectangle
- width: 300
- height: 40
- roundness: 4
- text:
- type: text
- fontSize: 16
-
-dataflow:
- viewport:
- x: 0
- y: 0
- zoom: 1
- grid:
- size: 20
- spacing:
- vertical: 120
- horizontal: 200
- elements:
- process:
- type: ellipse
- width: 140
- height: 80
- label: "Process"
- datastore:
- type: rectangle
- width: 140
- height: 80
- label: "Data Store"
- external:
- type: rectangle
- width: 120
- height: 80
- strokeWidth: 3
- label: "External Entity"
- dataflow:
- type: arrow
- strokeWidth: 2
- label: "Data Flow"
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-dataflow/checklist.md b/src/bmm/workflows/excalidraw-diagrams/create-dataflow/checklist.md
deleted file mode 100644
index 3c9463d5..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-dataflow/checklist.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Create Data Flow Diagram - Validation Checklist
-
-## DFD Notation
-
-- [ ] Processes shown as circles/ellipses
-- [ ] Data stores shown as parallel lines or rectangles
-- [ ] External entities shown as rectangles
-- [ ] Data flows shown as labeled arrows
-- [ ] Follows standard DFD notation
-
-## Structure
-
-- [ ] All processes numbered correctly
-- [ ] All data flows labeled with data names
-- [ ] All data stores named appropriately
-- [ ] External entities clearly identified
-
-## Completeness
-
-- [ ] All inputs and outputs accounted for
-- [ ] No orphaned processes (unconnected)
-- [ ] Data conservation maintained
-- [ ] Level appropriate (context/level 0/level 1)
-
-## Layout
-
-- [ ] Logical flow direction (left to right, top to bottom)
-- [ ] No crossing data flows where avoidable
-- [ ] Balanced layout
-- [ ] Grid alignment maintained
-
-## Technical Quality
-
-- [ ] All elements properly grouped
-- [ ] Arrows have proper bindings
-- [ ] Text readable and properly sized
-- [ ] No elements with `isDeleted: true`
-- [ ] JSON is valid
-- [ ] File saved to correct location
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-dataflow/instructions.md b/src/bmm/workflows/excalidraw-diagrams/create-dataflow/instructions.md
deleted file mode 100644
index 30d32ed3..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-dataflow/instructions.md
+++ /dev/null
@@ -1,130 +0,0 @@
-# Create Data Flow Diagram - Workflow Instructions
-
-```xml
-The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
-You MUST have already loaded and processed: {installed_path}/workflow.yaml
-This workflow creates data flow diagrams (DFD) in Excalidraw format.
-
-
-
-
- Review user's request and extract: DFD level, processes, data stores, external entities
- Skip to Step 4
-
-
-
- Ask: "What level of DFD do you need?"
- Present options:
- 1. Context Diagram (Level 0) - Single process showing system boundaries
- 2. Level 1 DFD - Major processes and data flows
- 3. Level 2 DFD - Detailed sub-processes
- 4. Custom - Specify your requirements
-
- WAIT for selection
-
-
-
- Ask: "Describe the processes, data stores, and external entities in your system"
- WAIT for user description
- Summarize what will be included and confirm with user
-
-
-
- Check for existing theme.json, ask to use if exists
-
- Ask: "Choose a DFD color scheme:"
- Present numbered options:
- 1. Standard DFD
- - Process: #e3f2fd (light blue)
- - Data Store: #e8f5e9 (light green)
- - External Entity: #f3e5f5 (light purple)
- - Border: #1976d2 (blue)
-
- 2. Colorful DFD
- - Process: #fff9c4 (light yellow)
- - Data Store: #c5e1a5 (light lime)
- - External Entity: #ffccbc (light coral)
- - Border: #f57c00 (orange)
-
- 3. Minimal DFD
- - Process: #f5f5f5 (light gray)
- - Data Store: #eeeeee (gray)
- - External Entity: #e0e0e0 (medium gray)
- - Border: #616161 (dark gray)
-
- 4. Custom - Define your own colors
-
- WAIT for selection
- Create theme.json based on selection
-
-
-
-
- List all processes with numbers (1.0, 2.0, etc.)
- List all data stores (D1, D2, etc.)
- List all external entities
- Map all data flows with labels
- Show planned structure, confirm with user
-
-
-
- Load {{templates}} and extract `dataflow` section
- Load {{library}}
- Load theme.json
- Load {{helpers}}
-
-
-
- Follow standard DFD notation from {{helpers}}
-
- Build Order:
- 1. External entities (rectangles, bold border)
- 2. Processes (circles/ellipses with numbers)
- 3. Data stores (parallel lines or rectangles)
- 4. Data flows (labeled arrows)
-
-
- DFD Rules:
- - Processes: Numbered (1.0, 2.0), verb phrases
- - Data stores: Named (D1, D2), noun phrases
- - External entities: Named, noun phrases
- - Data flows: Labeled with data names, arrows show direction
- - No direct flow between external entities
- - No direct flow between data stores
-
-
- Layout:
- - External entities at edges
- - Processes in center
- - Data stores between processes
- - Minimize crossing flows
- - Left-to-right or top-to-bottom flow
-
-
-
-
- Verify DFD rules compliance
- Strip unused elements and elements with isDeleted: true
- Save to {{default_output_file}}
-
-
-
- NEVER delete the file if validation fails - always fix syntax errors
- Run: node -e "JSON.parse(require('fs').readFileSync('{{default_output_file}}', 'utf8')); console.log('β Valid JSON')"
-
- Read the error message carefully - it shows the syntax error and position
- Open the file and navigate to the error location
- Fix the syntax error (add missing comma, bracket, or quote as indicated)
- Save the file
- Re-run validation with the same command
- Repeat until validation passes
-
- Once validation passes, confirm with user
-
-
-
- Validate against {{validation}}
-
-
-
-```
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml b/src/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml
deleted file mode 100644
index 2f01e6b5..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: create-excalidraw-dataflow
-description: "Create data flow diagrams (DFD) in Excalidraw format"
-author: "BMad"
-
-# Config values
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow"
-shared_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/_shared"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-
-# Core Excalidraw resources (universal knowledge)
-helpers: "{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md"
-json_validation: "{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md"
-
-# Domain-specific resources (technical diagrams)
-templates: "{shared_path}/excalidraw-templates.yaml"
-library: "{shared_path}/excalidraw-library.json"
-
-# Output file (respects user's configured output_folder)
-default_output_file: "{output_folder}/excalidraw-diagrams/dataflow-{timestamp}.excalidraw"
-
-standalone: true
-web_bundle: false
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-diagram/checklist.md b/src/bmm/workflows/excalidraw-diagrams/create-diagram/checklist.md
deleted file mode 100644
index 61d216ae..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-diagram/checklist.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Create Diagram - Validation Checklist
-
-## Element Structure
-
-- [ ] All components with labels have matching `groupIds`
-- [ ] All text elements have `containerId` pointing to parent component
-- [ ] Text width calculated properly (no cutoff)
-- [ ] Text alignment appropriate for diagram type
-
-## Layout and Alignment
-
-- [ ] All elements snapped to 20px grid
-- [ ] Component spacing consistent (40px/60px)
-- [ ] Hierarchical alignment maintained
-- [ ] No overlapping elements
-
-## Connections
-
-- [ ] All arrows have `startBinding` and `endBinding`
-- [ ] `boundElements` array updated on connected components
-- [ ] Arrow routing avoids overlaps
-- [ ] Relationship types clearly indicated
-
-## Notation and Standards
-
-- [ ] Follows specified notation standard (UML/ERD/etc)
-- [ ] Symbols used correctly
-- [ ] Cardinality/multiplicity shown where needed
-- [ ] Labels and annotations clear
-
-## Theme and Styling
-
-- [ ] Theme colors applied consistently
-- [ ] Component types visually distinguishable
-- [ ] Text is readable
-- [ ] Professional appearance
-
-## Output Quality
-
-- [ ] Element count under 80
-- [ ] No elements with `isDeleted: true`
-- [ ] JSON is valid
-- [ ] File saved to correct location
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-diagram/instructions.md b/src/bmm/workflows/excalidraw-diagrams/create-diagram/instructions.md
deleted file mode 100644
index 407a76bf..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-diagram/instructions.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# Create Diagram - Workflow Instructions
-
-```xml
-The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
-You MUST have already loaded and processed: {installed_path}/workflow.yaml
-This workflow creates system architecture diagrams, ERDs, UML diagrams, or general technical diagrams in Excalidraw format.
-
-
-
-
- Review user's request and extract: diagram type, components/entities, relationships, notation preferences
- Skip to Step 5
- Only ask about missing info in Steps 1-2
-
-
-
- Ask: "What type of technical diagram do you need?"
- Present options:
- 1. System Architecture
- 2. Entity-Relationship Diagram (ERD)
- 3. UML Class Diagram
- 4. UML Sequence Diagram
- 5. UML Use Case Diagram
- 6. Network Diagram
- 7. Other
-
- WAIT for selection
-
-
-
- Ask: "Describe the components/entities and their relationships"
- Ask: "What notation standard? (Standard/Simplified/Strict UML-ERD)"
- WAIT for user input
- Summarize what will be included and confirm with user
-
-
-
- Check if theme.json exists at output location
- Ask to use it, load if yes, else proceed to Step 4
- Proceed to Step 4
-
-
-
- Ask: "Choose a color scheme for your diagram:"
- Present numbered options:
- 1. Professional
- - Component: #e3f2fd (light blue)
- - Database: #e8f5e9 (light green)
- - Service: #fff3e0 (light orange)
- - Border: #1976d2 (blue)
-
- 2. Colorful
- - Component: #e1bee7 (light purple)
- - Database: #c5e1a5 (light lime)
- - Service: #ffccbc (light coral)
- - Border: #7b1fa2 (purple)
-
- 3. Minimal
- - Component: #f5f5f5 (light gray)
- - Database: #eeeeee (gray)
- - Service: #e0e0e0 (medium gray)
- - Border: #616161 (dark gray)
-
- 4. Custom - Define your own colors
-
- WAIT for selection
- Create theme.json based on selection
- Show preview and confirm
-
-
-
- List all components/entities
- Map all relationships
- Show planned layout
- Ask: "Structure looks correct? (yes/no)"
- Adjust and repeat
-
-
-
- Load {{templates}} and extract `diagram` section
- Load {{library}}
- Load theme.json and merge with template
- Load {{helpers}} for guidelines
-
-
-
- Follow {{helpers}} for proper element creation
-
- For Each Component:
- - Generate unique IDs (component-id, text-id, group-id)
- - Create shape with groupIds
- - Calculate text width
- - Create text with containerId and matching groupIds
- - Add boundElements
-
-
- For Each Connection:
- - Determine arrow type (straight/elbow)
- - Create with startBinding and endBinding
- - Update boundElements on both components
-
-
- Build Order by Type:
- - Architecture: Services β Databases β Connections β Labels
- - ERD: Entities β Attributes β Relationships β Cardinality
- - UML Class: Classes β Attributes β Methods β Relationships
- - UML Sequence: Actors β Lifelines β Messages β Returns
- - UML Use Case: Actors β Use Cases β Relationships
-
-
- Alignment:
- - Snap to 20px grid
- - Space: 40px between components, 60px between sections
-
-
-
-
- Strip unused elements and elements with isDeleted: true
- Save to {{default_output_file}}
-
-
-
- NEVER delete the file if validation fails - always fix syntax errors
- Run: node -e "JSON.parse(require('fs').readFileSync('{{default_output_file}}', 'utf8')); console.log('β Valid JSON')"
-
- Read the error message carefully - it shows the syntax error and position
- Open the file and navigate to the error location
- Fix the syntax error (add missing comma, bracket, or quote as indicated)
- Save the file
- Re-run validation with the same command
- Repeat until validation passes
-
- Once validation passes, confirm: "Diagram created at {{default_output_file}}. Open to view?"
-
-
-
- Validate against {{validation}} using {_bmad}/core/tasks/validate-workflow.xml
-
-
-
-```
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml b/src/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml
deleted file mode 100644
index f841a546..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: create-excalidraw-diagram
-description: "Create system architecture diagrams, ERDs, UML diagrams, or general technical diagrams in Excalidraw format"
-author: "BMad"
-
-# Config values
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/create-diagram"
-shared_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/_shared"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-
-# Core Excalidraw resources (universal knowledge)
-helpers: "{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md"
-json_validation: "{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md"
-
-# Domain-specific resources (technical diagrams)
-templates: "{shared_path}/excalidraw-templates.yaml"
-library: "{shared_path}/excalidraw-library.json"
-
-# Output file (respects user's configured output_folder)
-default_output_file: "{output_folder}/excalidraw-diagrams/diagram-{timestamp}.excalidraw"
-
-standalone: true
-web_bundle: false
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-flowchart/checklist.md b/src/bmm/workflows/excalidraw-diagrams/create-flowchart/checklist.md
deleted file mode 100644
index 7da7fb78..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-flowchart/checklist.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Create Flowchart - Validation Checklist
-
-## Element Structure
-
-- [ ] All shapes with labels have matching `groupIds`
-- [ ] All text elements have `containerId` pointing to parent shape
-- [ ] Text width calculated properly (no cutoff)
-- [ ] Text alignment set (`textAlign` + `verticalAlign`)
-
-## Layout and Alignment
-
-- [ ] All elements snapped to 20px grid
-- [ ] Consistent spacing between elements (60px minimum)
-- [ ] Vertical alignment maintained for flow direction
-- [ ] No overlapping elements
-
-## Connections
-
-- [ ] All arrows have `startBinding` and `endBinding`
-- [ ] `boundElements` array updated on connected shapes
-- [ ] Arrow types appropriate (straight for forward, elbow for backward/upward)
-- [ ] Gap set to 10 for all bindings
-
-## Theme and Styling
-
-- [ ] Theme colors applied consistently
-- [ ] All shapes use theme primary fill color
-- [ ] All borders use theme accent color
-- [ ] Text color is readable (#1e1e1e)
-
-## Composition
-
-- [ ] Element count under 50
-- [ ] Library components referenced where possible
-- [ ] No duplicate element definitions
-
-## Output Quality
-
-- [ ] No elements with `isDeleted: true`
-- [ ] JSON is valid
-- [ ] File saved to correct location
-
-## Functional Requirements
-
-- [ ] Start point clearly marked
-- [ ] End point clearly marked
-- [ ] All process steps labeled
-- [ ] Decision points use diamond shapes
-- [ ] Flow direction is clear and logical
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-flowchart/instructions.md b/src/bmm/workflows/excalidraw-diagrams/create-flowchart/instructions.md
deleted file mode 100644
index 74267905..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-flowchart/instructions.md
+++ /dev/null
@@ -1,241 +0,0 @@
-# Create Flowchart - Workflow Instructions
-
-```xml
-The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
-You MUST have already loaded and processed: {installed_path}/workflow.yaml
-This workflow creates a flowchart visualization in Excalidraw format for processes, pipelines, or logic flows.
-
-
-
-
- Before asking any questions, analyze what the user has already told you
-
- Review the user's initial request and conversation history
- Extract any mentioned: flowchart type, complexity, decision points, save location
-
-
- Summarize your understanding
- Skip directly to Step 4 (Plan Flowchart Layout)
-
-
-
- Note what you already know
- Only ask about missing information in Step 1
-
-
-
- Proceed with full elicitation in Step 1
-
-
-
-
- Ask Question 1: "What type of process flow do you need to visualize?"
- Present numbered options:
- 1. Business Process Flow - Document business workflows, approval processes, or operational procedures
- 2. Algorithm/Logic Flow - Visualize code logic, decision trees, or computational processes
- 3. User Journey Flow - Map user interactions, navigation paths, or experience flows
- 4. Data Processing Pipeline - Show data transformation, ETL processes, or processing stages
- 5. Other - Describe your specific flowchart needs
-
- WAIT for user selection (1-5)
-
- Ask Question 2: "How many main steps are in this flow?"
- Present numbered options:
- 1. Simple (3-5 steps) - Quick process with few decision points
- 2. Medium (6-10 steps) - Standard workflow with some branching
- 3. Complex (11-20 steps) - Detailed process with multiple decision points
- 4. Very Complex (20+ steps) - Comprehensive workflow requiring careful layout
-
- WAIT for user selection (1-4)
- Store selection in {{complexity}}
-
- Ask Question 3: "Does your flow include decision points (yes/no branches)?"
- Present numbered options:
- 1. No decisions - Linear flow from start to end
- 2. Few decisions (1-2) - Simple branching with yes/no paths
- 3. Multiple decisions (3-5) - Several conditional branches
- 4. Complex decisions (6+) - Extensive branching logic
-
- WAIT for user selection (1-4)
- Store selection in {{decision_points}}
-
- Ask Question 4: "Where should the flowchart be saved?"
- Present numbered options:
- 1. Default location - docs/flowcharts/[auto-generated-name].excalidraw
- 2. Custom path - Specify your own file path
- 3. Project root - Save in main project directory
- 4. Specific folder - Choose from existing folders
-
- WAIT for user selection (1-4)
-
- Ask for specific path
- WAIT for user input
-
- Store final path in {{default_output_file}}
-
-
-
- Check if theme.json exists at output location
-
- Ask: "Found existing theme. Use it? (yes/no)"
- WAIT for user response
-
- Load and use existing theme
- Skip to Step 4
-
-
- Proceed to Step 3
-
-
-
- Proceed to Step 3
-
-
-
-
- Ask: "Let's create a theme for your flowchart. Choose a color scheme:"
- Present numbered options:
- 1. Professional Blue
- - Primary Fill: #e3f2fd (light blue)
- - Accent/Border: #1976d2 (blue)
- - Decision: #fff3e0 (light orange)
- - Text: #1e1e1e (dark gray)
-
- 2. Success Green
- - Primary Fill: #e8f5e9 (light green)
- - Accent/Border: #388e3c (green)
- - Decision: #fff9c4 (light yellow)
- - Text: #1e1e1e (dark gray)
-
- 3. Neutral Gray
- - Primary Fill: #f5f5f5 (light gray)
- - Accent/Border: #616161 (gray)
- - Decision: #e0e0e0 (medium gray)
- - Text: #1e1e1e (dark gray)
-
- 4. Warm Orange
- - Primary Fill: #fff3e0 (light orange)
- - Accent/Border: #f57c00 (orange)
- - Decision: #ffe0b2 (peach)
- - Text: #1e1e1e (dark gray)
-
- 5. Custom Colors - Define your own color palette
-
- WAIT for user selection (1-5)
- Store selection in {{theme_choice}}
-
-
- Ask: "Primary fill color (hex code)?"
- WAIT for user input
- Store in {{custom_colors.primary_fill}}
- Ask: "Accent/border color (hex code)?"
- WAIT for user input
- Store in {{custom_colors.accent}}
- Ask: "Decision color (hex code)?"
- WAIT for user input
- Store in {{custom_colors.decision}}
-
-
- Create theme.json with selected colors
- Show theme preview with all colors
- Ask: "Theme looks good?"
- Present numbered options:
- 1. Yes, use this theme - Proceed with theme
- 2. No, adjust colors - Modify color selections
- 3. Start over - Choose different preset
-
- WAIT for selection (1-3)
-
- Repeat Step 3
-
-
-
-
- List all steps and decision points based on gathered requirements
- Show user the planned structure
- Ask: "Structure looks correct? (yes/no)"
- WAIT for user response
-
- Adjust structure based on feedback
- Repeat this step
-
-
-
-
- Load {{templates}} file
- Extract `flowchart` section from YAML
- Load {{library}} file
- Load theme.json and merge colors with template
- Load {{helpers}} for element creation guidelines
-
-
-
- Follow guidelines from {{helpers}} for proper element creation
-
- Build ONE section at a time following these rules:
-
- For Each Shape with Label:
- 1. Generate unique IDs (shape-id, text-id, group-id)
- 2. Create shape with groupIds: [group-id]
- 3. Calculate text width: (text.length Γ fontSize Γ 0.6) + 20, round to nearest 10
- 4. Create text element with:
- - containerId: shape-id
- - groupIds: [group-id] (SAME as shape)
- - textAlign: "center"
- - verticalAlign: "middle"
- - width: calculated width
- 5. Add boundElements to shape referencing text
-
-
- For Each Arrow:
- 1. Determine arrow type needed:
- - Straight: For forward flow (left-to-right, top-to-bottom)
- - Elbow: For upward flow, backward flow, or complex routing
- 2. Create arrow with startBinding and endBinding
- 3. Set startBinding.elementId to source shape ID
- 4. Set endBinding.elementId to target shape ID
- 5. Set gap: 10 for both bindings
- 6. If elbow arrow, add intermediate points for direction changes
- 7. Update boundElements on both connected shapes
-
-
- Alignment:
- - Snap all x, y to 20px grid
- - Align shapes vertically (same x for vertical flow)
- - Space elements: 60px between shapes
-
-
- Build Order:
- 1. Start point (circle) with label
- 2. Each process step (rectangle) with label
- 3. Each decision point (diamond) with label
- 4. End point (circle) with label
- 5. Connect all with bound arrows
-
-
-
-
- Strip unused elements and elements with isDeleted: true
- Save to {{default_output_file}}
-
-
-
- NEVER delete the file if validation fails - always fix syntax errors
- Run: node -e "JSON.parse(require('fs').readFileSync('{{default_output_file}}', 'utf8')); console.log('β Valid JSON')"
-
- Read the error message carefully - it shows the syntax error and position
- Open the file and navigate to the error location
- Fix the syntax error (add missing comma, bracket, or quote as indicated)
- Save the file
- Re-run validation with the same command
- Repeat until validation passes
-
- Once validation passes, confirm with user: "Flowchart created at {{default_output_file}}. Open to view?"
-
-
-
- Validate against checklist at {{validation}} using {_bmad}/core/tasks/validate-workflow.xml
-
-
-
-```
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml b/src/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml
deleted file mode 100644
index 6079d6de..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: create-excalidraw-flowchart
-description: "Create a flowchart visualization in Excalidraw format for processes, pipelines, or logic flows"
-author: "BMad"
-
-# Config values
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart"
-shared_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/_shared"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-
-# Core Excalidraw resources (universal knowledge)
-helpers: "{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md"
-json_validation: "{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md"
-
-# Domain-specific resources (technical diagrams)
-templates: "{shared_path}/excalidraw-templates.yaml"
-library: "{shared_path}/excalidraw-library.json"
-
-# Output file (respects user's configured output_folder)
-default_output_file: "{output_folder}/excalidraw-diagrams/flowchart-{timestamp}.excalidraw"
-
-standalone: true
-web_bundle: false
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-wireframe/checklist.md b/src/bmm/workflows/excalidraw-diagrams/create-wireframe/checklist.md
deleted file mode 100644
index 3e2b26f4..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-wireframe/checklist.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Create Wireframe - Validation Checklist
-
-## Layout Structure
-
-- [ ] Screen dimensions appropriate for device type
-- [ ] Grid alignment (20px) maintained
-- [ ] Consistent spacing between UI elements
-- [ ] Proper hierarchy (header, content, footer)
-
-## UI Elements
-
-- [ ] All interactive elements clearly marked
-- [ ] Buttons, inputs, and controls properly sized
-- [ ] Text labels readable and appropriately sized
-- [ ] Navigation elements clearly indicated
-
-## Fidelity
-
-- [ ] Matches requested fidelity level (low/medium/high)
-- [ ] Appropriate level of detail
-- [ ] Placeholder content used where needed
-- [ ] No unnecessary decoration for low-fidelity
-
-## Annotations
-
-- [ ] Key interactions annotated
-- [ ] Flow indicators present if multi-screen
-- [ ] Important notes included
-- [ ] Element purposes clear
-
-## Technical Quality
-
-- [ ] All elements properly grouped
-- [ ] Text elements have containerId
-- [ ] Snapped to grid
-- [ ] No elements with `isDeleted: true`
-- [ ] JSON is valid
-- [ ] File saved to correct location
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-wireframe/instructions.md b/src/bmm/workflows/excalidraw-diagrams/create-wireframe/instructions.md
deleted file mode 100644
index dc9506b0..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-wireframe/instructions.md
+++ /dev/null
@@ -1,133 +0,0 @@
-# Create Wireframe - Workflow Instructions
-
-```xml
-The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml
-You MUST have already loaded and processed: {installed_path}/workflow.yaml
-This workflow creates website or app wireframes in Excalidraw format.
-
-
-
-
- Review user's request and extract: wireframe type, fidelity level, screen count, device type, save location
- Skip to Step 5
-
-
-
- Ask: "What type of wireframe do you need?"
- Present options:
- 1. Website (Desktop)
- 2. Mobile App (iOS/Android)
- 3. Web App (Responsive)
- 4. Tablet App
- 5. Multi-platform
-
- WAIT for selection
-
-
-
- Ask fidelity level (Low/Medium/High)
- Ask screen count (Single/Few 2-3/Multiple 4-6/Many 7+)
- Ask device dimensions or use standard
- Ask save location
-
-
-
- Check for existing theme.json, ask to use if exists
-
-
-
- Ask: "Choose a wireframe style:"
- Present numbered options:
- 1. Classic Wireframe
- - Background: #ffffff (white)
- - Container: #f5f5f5 (light gray)
- - Border: #9e9e9e (gray)
- - Text: #424242 (dark gray)
-
- 2. High Contrast
- - Background: #ffffff (white)
- - Container: #eeeeee (light gray)
- - Border: #212121 (black)
- - Text: #000000 (black)
-
- 3. Blueprint Style
- - Background: #1a237e (dark blue)
- - Container: #3949ab (blue)
- - Border: #7986cb (light blue)
- - Text: #ffffff (white)
-
- 4. Custom - Define your own colors
-
- WAIT for selection
- Create theme.json based on selection
- Confirm with user
-
-
-
- List all screens and their purposes
- Map navigation flow between screens
- Identify key UI elements for each screen
- Show planned structure, confirm with user
-
-
-
- Load {{templates}} and extract `wireframe` section
- Load {{library}}
- Load theme.json
- Load {{helpers}}
-
-
-
- Follow {{helpers}} for proper element creation
-
- For Each Screen:
- - Create container/frame
- - Add header section
- - Add content areas
- - Add navigation elements
- - Add interactive elements (buttons, inputs)
- - Add labels and annotations
-
-
- Build Order:
- 1. Screen containers
- 2. Layout sections (header, content, footer)
- 3. Navigation elements
- 4. Content blocks
- 5. Interactive elements
- 6. Labels and annotations
- 7. Flow indicators (if multi-screen)
-
-
- Fidelity Guidelines:
- - Low: Basic shapes, minimal detail, placeholder text
- - Medium: More defined elements, some styling, representative content
- - High: Detailed elements, realistic sizing, actual content examples
-
-
-
-
- Strip unused elements and elements with isDeleted: true
- Save to {{default_output_file}}
-
-
-
- NEVER delete the file if validation fails - always fix syntax errors
- Run: node -e "JSON.parse(require('fs').readFileSync('{{default_output_file}}', 'utf8')); console.log('β Valid JSON')"
-
- Read the error message carefully - it shows the syntax error and position
- Open the file and navigate to the error location
- Fix the syntax error (add missing comma, bracket, or quote as indicated)
- Save the file
- Re-run validation with the same command
- Repeat until validation passes
-
- Once validation passes, confirm with user
-
-
-
- Validate against {{validation}}
-
-
-
-```
diff --git a/src/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml b/src/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml
deleted file mode 100644
index d89005a7..00000000
--- a/src/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: create-excalidraw-wireframe
-description: "Create website or app wireframes in Excalidraw format"
-author: "BMad"
-
-# Config values
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe"
-shared_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/_shared"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-
-# Core Excalidraw resources (universal knowledge)
-helpers: "{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md"
-json_validation: "{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md"
-
-# Domain-specific resources (technical diagrams)
-templates: "{shared_path}/excalidraw-templates.yaml"
-library: "{shared_path}/excalidraw-library.json"
-
-# Output file (respects user's configured output_folder)
-default_output_file: "{output_folder}/excalidraw-diagrams/wireframe-{timestamp}.excalidraw"
-
-standalone: true
-web_bundle: false
diff --git a/src/bmm/workflows/testarch/atdd/atdd-checklist-template.md b/src/bmm/workflows/testarch/atdd/atdd-checklist-template.md
deleted file mode 100644
index 5de70286..00000000
--- a/src/bmm/workflows/testarch/atdd/atdd-checklist-template.md
+++ /dev/null
@@ -1,363 +0,0 @@
-# ATDD Checklist - Epic {epic_num}, Story {story_num}: {story_title}
-
-**Date:** {date}
-**Author:** {user_name}
-**Primary Test Level:** {primary_level}
-
----
-
-## Story Summary
-
-{Brief 2-3 sentence summary of the user story}
-
-**As a** {user_role}
-**I want** {feature_description}
-**So that** {business_value}
-
----
-
-## Acceptance Criteria
-
-{List all testable acceptance criteria from the story}
-
-1. {Acceptance criterion 1}
-2. {Acceptance criterion 2}
-3. {Acceptance criterion 3}
-
----
-
-## Failing Tests Created (RED Phase)
-
-### E2E Tests ({e2e_test_count} tests)
-
-**File:** `{e2e_test_file_path}` ({line_count} lines)
-
-{List each E2E test with its current status and expected failure reason}
-
-- β **Test:** {test_name}
- - **Status:** RED - {failure_reason}
- - **Verifies:** {what_this_test_validates}
-
-### API Tests ({api_test_count} tests)
-
-**File:** `{api_test_file_path}` ({line_count} lines)
-
-{List each API test with its current status and expected failure reason}
-
-- β **Test:** {test_name}
- - **Status:** RED - {failure_reason}
- - **Verifies:** {what_this_test_validates}
-
-### Component Tests ({component_test_count} tests)
-
-**File:** `{component_test_file_path}` ({line_count} lines)
-
-{List each component test with its current status and expected failure reason}
-
-- β **Test:** {test_name}
- - **Status:** RED - {failure_reason}
- - **Verifies:** {what_this_test_validates}
-
----
-
-## Data Factories Created
-
-{List all data factory files created with their exports}
-
-### {Entity} Factory
-
-**File:** `tests/support/factories/{entity}.factory.ts`
-
-**Exports:**
-
-- `create{Entity}(overrides?)` - Create single entity with optional overrides
-- `create{Entity}s(count)` - Create array of entities
-
-**Example Usage:**
-
-```typescript
-const user = createUser({ email: 'specific@example.com' });
-const users = createUsers(5); // Generate 5 random users
-```
-
----
-
-## Fixtures Created
-
-{List all test fixture files created with their fixture names and descriptions}
-
-### {Feature} Fixtures
-
-**File:** `tests/support/fixtures/{feature}.fixture.ts`
-
-**Fixtures:**
-
-- `{fixtureName}` - {description_of_what_fixture_provides}
- - **Setup:** {what_setup_does}
- - **Provides:** {what_test_receives}
- - **Cleanup:** {what_cleanup_does}
-
-**Example Usage:**
-
-```typescript
-import { test } from './fixtures/{feature}.fixture';
-
-test('should do something', async ({ {fixtureName} }) => {
- // {fixtureName} is ready to use with auto-cleanup
-});
-```
-
----
-
-## Mock Requirements
-
-{Document external services that need mocking and their requirements}
-
-### {Service Name} Mock
-
-**Endpoint:** `{HTTP_METHOD} {endpoint_url}`
-
-**Success Response:**
-
-```json
-{
- {success_response_example}
-}
-```
-
-**Failure Response:**
-
-```json
-{
- {failure_response_example}
-}
-```
-
-**Notes:** {any_special_mock_requirements}
-
----
-
-## Required data-testid Attributes
-
-{List all data-testid attributes required in UI implementation for test stability}
-
-### {Page or Component Name}
-
-- `{data-testid-name}` - {description_of_element}
-- `{data-testid-name}` - {description_of_element}
-
-**Implementation Example:**
-
-```tsx
-
-
-
{errorText}
-```
-
----
-
-## Implementation Checklist
-
-{Map each failing test to concrete implementation tasks that will make it pass}
-
-### Test: {test_name_1}
-
-**File:** `{test_file_path}`
-
-**Tasks to make this test pass:**
-
-- [ ] {Implementation task 1}
-- [ ] {Implementation task 2}
-- [ ] {Implementation task 3}
-- [ ] Add required data-testid attributes: {list_of_testids}
-- [ ] Run test: `{test_execution_command}`
-- [ ] β Test passes (green phase)
-
-**Estimated Effort:** {effort_estimate} hours
-
----
-
-### Test: {test_name_2}
-
-**File:** `{test_file_path}`
-
-**Tasks to make this test pass:**
-
-- [ ] {Implementation task 1}
-- [ ] {Implementation task 2}
-- [ ] {Implementation task 3}
-- [ ] Add required data-testid attributes: {list_of_testids}
-- [ ] Run test: `{test_execution_command}`
-- [ ] β Test passes (green phase)
-
-**Estimated Effort:** {effort_estimate} hours
-
----
-
-## Running Tests
-
-```bash
-# Run all failing tests for this story
-{test_command_all}
-
-# Run specific test file
-{test_command_specific_file}
-
-# Run tests in headed mode (see browser)
-{test_command_headed}
-
-# Debug specific test
-{test_command_debug}
-
-# Run tests with coverage
-{test_command_coverage}
-```
-
----
-
-## Red-Green-Refactor Workflow
-
-### RED Phase (Complete) β
-
-**TEA Agent Responsibilities:**
-
-- β All tests written and failing
-- β Fixtures and factories created with auto-cleanup
-- β Mock requirements documented
-- β data-testid requirements listed
-- β Implementation checklist created
-
-**Verification:**
-
-- All tests run and fail as expected
-- Failure messages are clear and actionable
-- Tests fail due to missing implementation, not test bugs
-
----
-
-### GREEN Phase (DEV Team - Next Steps)
-
-**DEV Agent Responsibilities:**
-
-1. **Pick one failing test** from implementation checklist (start with highest priority)
-2. **Read the test** to understand expected behavior
-3. **Implement minimal code** to make that specific test pass
-4. **Run the test** to verify it now passes (green)
-5. **Check off the task** in implementation checklist
-6. **Move to next test** and repeat
-
-**Key Principles:**
-
-- One test at a time (don't try to fix all at once)
-- Minimal implementation (don't over-engineer)
-- Run tests frequently (immediate feedback)
-- Use implementation checklist as roadmap
-
-**Progress Tracking:**
-
-- Check off tasks as you complete them
-- Share progress in daily standup
-
----
-
-### REFACTOR Phase (DEV Team - After All Tests Pass)
-
-**DEV Agent Responsibilities:**
-
-1. **Verify all tests pass** (green phase complete)
-2. **Review code for quality** (readability, maintainability, performance)
-3. **Extract duplications** (DRY principle)
-4. **Optimize performance** (if needed)
-5. **Ensure tests still pass** after each refactor
-6. **Update documentation** (if API contracts change)
-
-**Key Principles:**
-
-- Tests provide safety net (refactor with confidence)
-- Make small refactors (easier to debug if tests fail)
-- Run tests after each change
-- Don't change test behavior (only implementation)
-
-**Completion:**
-
-- All tests pass
-- Code quality meets team standards
-- No duplications or code smells
-- Ready for code review and story approval
-
----
-
-## Next Steps
-
-1. **Share this checklist and failing tests** with the dev workflow (manual handoff)
-2. **Review this checklist** with team in standup or planning
-3. **Run failing tests** to confirm RED phase: `{test_command_all}`
-4. **Begin implementation** using implementation checklist as guide
-5. **Work one test at a time** (red β green for each)
-6. **Share progress** in daily standup
-7. **When all tests pass**, refactor code for quality
-8. **When refactoring complete**, manually update story status to 'done' in sprint-status.yaml
-
----
-
-## Knowledge Base References Applied
-
-This ATDD workflow consulted the following knowledge fragments:
-
-- **fixture-architecture.md** - Test fixture patterns with setup/teardown and auto-cleanup using Playwright's `test.extend()`
-- **data-factories.md** - Factory patterns using `@faker-js/faker` for random test data generation with overrides support
-- **component-tdd.md** - Component test strategies using Playwright Component Testing
-- **network-first.md** - Route interception patterns (intercept BEFORE navigation to prevent race conditions)
-- **test-quality.md** - Test design principles (Given-When-Then, one assertion per test, determinism, isolation)
-- **test-levels-framework.md** - Test level selection framework (E2E vs API vs Component vs Unit)
-
-See `tea-index.csv` for complete knowledge fragment mapping.
-
----
-
-## Test Execution Evidence
-
-### Initial Test Run (RED Phase Verification)
-
-**Command:** `{test_command_all}`
-
-**Results:**
-
-```
-{paste_test_run_output_showing_all_tests_failing}
-```
-
-**Summary:**
-
-- Total tests: {total_test_count}
-- Passing: 0 (expected)
-- Failing: {total_test_count} (expected)
-- Status: β RED phase verified
-
-**Expected Failure Messages:**
-{list_expected_failure_messages_for_each_test}
-
----
-
-## Notes
-
-{Any additional notes, context, or special considerations for this story}
-
-- {Note 1}
-- {Note 2}
-- {Note 3}
-
----
-
-## Contact
-
-**Questions or Issues?**
-
-- Ask in team standup
-- Tag @{tea_agent_username} in Slack/Discord
-- Refer to `./bmm/docs/tea-README.md` for workflow documentation
-- Consult `./bmm/testarch/knowledge` for testing best practices
-
----
-
-**Generated by BMad TEA Agent** - {date}
diff --git a/src/bmm/workflows/testarch/atdd/checklist.md b/src/bmm/workflows/testarch/atdd/checklist.md
deleted file mode 100644
index ce94a14c..00000000
--- a/src/bmm/workflows/testarch/atdd/checklist.md
+++ /dev/null
@@ -1,374 +0,0 @@
-# ATDD Workflow Validation Checklist
-
-Use this checklist to validate that the ATDD workflow has been executed correctly and all deliverables meet quality standards.
-
-## Prerequisites
-
-Before starting this workflow, verify:
-
-- [ ] Story approved with clear acceptance criteria (AC must be testable)
-- [ ] Development sandbox/environment ready
-- [ ] Framework scaffolding exists (run `framework` workflow if missing)
-- [ ] Test framework configuration available (playwright.config.ts or cypress.config.ts)
-- [ ] Package.json has test dependencies installed (Playwright or Cypress)
-
-**Halt if missing:** Framework scaffolding or story acceptance criteria
-
----
-
-## Step 1: Story Context and Requirements
-
-- [ ] Story markdown file loaded and parsed successfully
-- [ ] All acceptance criteria identified and extracted
-- [ ] Affected systems and components identified
-- [ ] Technical constraints documented
-- [ ] Framework configuration loaded (playwright.config.ts or cypress.config.ts)
-- [ ] Test directory structure identified from config
-- [ ] Existing fixture patterns reviewed for consistency
-- [ ] Similar test patterns searched and found in `{test_dir}`
-- [ ] Knowledge base fragments loaded:
- - [ ] `fixture-architecture.md`
- - [ ] `data-factories.md`
- - [ ] `component-tdd.md`
- - [ ] `network-first.md`
- - [ ] `test-quality.md`
-
----
-
-## Step 2: Test Level Selection and Strategy
-
-- [ ] Each acceptance criterion analyzed for appropriate test level
-- [ ] Test level selection framework applied (E2E vs API vs Component vs Unit)
-- [ ] E2E tests: Critical user journeys and multi-system integration identified
-- [ ] API tests: Business logic and service contracts identified
-- [ ] Component tests: UI component behavior and interactions identified
-- [ ] Unit tests: Pure logic and edge cases identified (if applicable)
-- [ ] Duplicate coverage avoided (same behavior not tested at multiple levels unnecessarily)
-- [ ] Tests prioritized using P0-P3 framework (if test-design document exists)
-- [ ] Primary test level set in `primary_level` variable (typically E2E or API)
-- [ ] Test levels documented in ATDD checklist
-
----
-
-## Step 3: Failing Tests Generated
-
-### Test File Structure Created
-
-- [ ] Test files organized in appropriate directories:
- - [ ] `tests/e2e/` for end-to-end tests
- - [ ] `tests/api/` for API tests
- - [ ] `tests/component/` for component tests
- - [ ] `tests/support/` for infrastructure (fixtures, factories, helpers)
-
-### E2E Tests (If Applicable)
-
-- [ ] E2E test files created in `tests/e2e/`
-- [ ] All tests follow Given-When-Then format
-- [ ] Tests use `data-testid` selectors (not CSS classes or fragile selectors)
-- [ ] One assertion per test (atomic test design)
-- [ ] No hard waits or sleeps (explicit waits only)
-- [ ] Network-first pattern applied (route interception BEFORE navigation)
-- [ ] Tests fail initially (RED phase verified by local test run)
-- [ ] Failure messages are clear and actionable
-
-### API Tests (If Applicable)
-
-- [ ] API test files created in `tests/api/`
-- [ ] Tests follow Given-When-Then format
-- [ ] API contracts validated (request/response structure)
-- [ ] HTTP status codes verified
-- [ ] Response body validation includes all required fields
-- [ ] Error cases tested (400, 401, 403, 404, 500)
-- [ ] Tests fail initially (RED phase verified)
-
-### Component Tests (If Applicable)
-
-- [ ] Component test files created in `tests/component/`
-- [ ] Tests follow Given-When-Then format
-- [ ] Component mounting works correctly
-- [ ] Interaction testing covers user actions (click, hover, keyboard)
-- [ ] State management within component validated
-- [ ] Props and events tested
-- [ ] Tests fail initially (RED phase verified)
-
-### Test Quality Validation
-
-- [ ] All tests use Given-When-Then structure with clear comments
-- [ ] All tests have descriptive names explaining what they test
-- [ ] No duplicate tests (same behavior tested multiple times)
-- [ ] No flaky patterns (race conditions, timing issues)
-- [ ] No test interdependencies (tests can run in any order)
-- [ ] Tests are deterministic (same input always produces same result)
-
----
-
-## Step 4: Data Infrastructure Built
-
-### Data Factories Created
-
-- [ ] Factory files created in `tests/support/factories/`
-- [ ] All factories use `@faker-js/faker` for random data generation (no hardcoded values)
-- [ ] Factories support overrides for specific test scenarios
-- [ ] Factories generate complete valid objects matching API contracts
-- [ ] Helper functions for bulk creation provided (e.g., `createUsers(count)`)
-- [ ] Factory exports are properly typed (TypeScript)
-
-### Test Fixtures Created
-
-- [ ] Fixture files created in `tests/support/fixtures/`
-- [ ] All fixtures use Playwright's `test.extend()` pattern
-- [ ] Fixtures have setup phase (arrange test preconditions)
-- [ ] Fixtures provide data to tests via `await use(data)`
-- [ ] Fixtures have teardown phase with auto-cleanup (delete created data)
-- [ ] Fixtures are composable (can use other fixtures if needed)
-- [ ] Fixtures are isolated (each test gets fresh data)
-- [ ] Fixtures are type-safe (TypeScript types defined)
-
-### Mock Requirements Documented
-
-- [ ] External service mocking requirements identified
-- [ ] Mock endpoints documented with URLs and methods
-- [ ] Success response examples provided
-- [ ] Failure response examples provided
-- [ ] Mock requirements documented in ATDD checklist for DEV team
-
-### data-testid Requirements Listed
-
-- [ ] All required data-testid attributes identified from E2E tests
-- [ ] data-testid list organized by page or component
-- [ ] Each data-testid has clear description of element it targets
-- [ ] data-testid list included in ATDD checklist for DEV team
-
----
-
-## Step 5: Implementation Checklist Created
-
-- [ ] Implementation checklist created with clear structure
-- [ ] Each failing test mapped to concrete implementation tasks
-- [ ] Tasks include:
- - [ ] Route/component creation
- - [ ] Business logic implementation
- - [ ] API integration
- - [ ] data-testid attribute additions
- - [ ] Error handling
- - [ ] Test execution command
- - [ ] Completion checkbox
-- [ ] Red-Green-Refactor workflow documented in checklist
-- [ ] RED phase marked as complete (TEA responsibility)
-- [ ] GREEN phase tasks listed for DEV team
-- [ ] REFACTOR phase guidance provided
-- [ ] Execution commands provided:
- - [ ] Run all tests: `npm run test:e2e`
- - [ ] Run specific test file
- - [ ] Run in headed mode
- - [ ] Debug specific test
-- [ ] Estimated effort included (hours or story points)
-
----
-
-## Step 6: Deliverables Generated
-
-### ATDD Checklist Document Created
-
-- [ ] Output file created at `{output_folder}/atdd-checklist-{story_id}.md`
-- [ ] Document follows template structure from `atdd-checklist-template.md`
-- [ ] Document includes all required sections:
- - [ ] Story summary
- - [ ] Acceptance criteria breakdown
- - [ ] Failing tests created (paths and line counts)
- - [ ] Data factories created
- - [ ] Fixtures created
- - [ ] Mock requirements
- - [ ] Required data-testid attributes
- - [ ] Implementation checklist
- - [ ] Red-green-refactor workflow
- - [ ] Execution commands
- - [ ] Next steps for DEV team
-- [ ] Output shared with DEV workflow (manual handoff; not auto-consumed)
-
-### All Tests Verified to Fail (RED Phase)
-
-- [ ] Full test suite run locally before finalizing
-- [ ] All tests fail as expected (RED phase confirmed)
-- [ ] No tests passing before implementation (if passing, test is invalid)
-- [ ] Failure messages documented in ATDD checklist
-- [ ] Failures are due to missing implementation, not test bugs
-- [ ] Test run output captured for reference
-
-### Summary Provided
-
-- [ ] Summary includes:
- - [ ] Story ID
- - [ ] Primary test level
- - [ ] Test counts (E2E, API, Component)
- - [ ] Test file paths
- - [ ] Factory count
- - [ ] Fixture count
- - [ ] Mock requirements count
- - [ ] data-testid count
- - [ ] Implementation task count
- - [ ] Estimated effort
- - [ ] Next steps for DEV team
- - [ ] Output file path
- - [ ] Knowledge base references applied
-
----
-
-## Quality Checks
-
-### Test Design Quality
-
-- [ ] Tests are readable (clear Given-When-Then structure)
-- [ ] Tests are maintainable (use factories and fixtures, not hardcoded data)
-- [ ] Tests are isolated (no shared state between tests)
-- [ ] Tests are deterministic (no race conditions or flaky patterns)
-- [ ] Tests are atomic (one assertion per test)
-- [ ] Tests are fast (no unnecessary waits or delays)
-
-### Knowledge Base Integration
-
-- [ ] fixture-architecture.md patterns applied to all fixtures
-- [ ] data-factories.md patterns applied to all factories
-- [ ] network-first.md patterns applied to E2E tests with network requests
-- [ ] component-tdd.md patterns applied to component tests
-- [ ] test-quality.md principles applied to all test design
-
-### Code Quality
-
-- [ ] All TypeScript types are correct and complete
-- [ ] No linting errors in generated test files
-- [ ] Consistent naming conventions followed
-- [ ] Imports are organized and correct
-- [ ] Code follows project style guide
-
----
-
-## Integration Points
-
-### With DEV Agent
-
-- [ ] ATDD checklist provides clear implementation guidance
-- [ ] Implementation tasks are granular and actionable
-- [ ] data-testid requirements are complete and clear
-- [ ] Mock requirements include all necessary details
-- [ ] Execution commands work correctly
-
-### With Story Workflow
-
-- [ ] Story ID correctly referenced in output files
-- [ ] Acceptance criteria from story accurately reflected in tests
-- [ ] Technical constraints from story considered in test design
-
-### With Framework Workflow
-
-- [ ] Test framework configuration correctly detected and used
-- [ ] Directory structure matches framework setup
-- [ ] Fixtures and helpers follow established patterns
-- [ ] Naming conventions consistent with framework standards
-
-### With test-design Workflow (If Available)
-
-- [ ] P0 scenarios from test-design prioritized in ATDD
-- [ ] Risk assessment from test-design considered in test coverage
-- [ ] Coverage strategy from test-design aligned with ATDD tests
-
----
-
-## Completion Criteria
-
-All of the following must be true before marking this workflow as complete:
-
-- [ ] **Story acceptance criteria analyzed** and mapped to appropriate test levels
-- [ ] **Failing tests created** at all appropriate levels (E2E, API, Component)
-- [ ] **Given-When-Then format** used consistently across all tests
-- [ ] **RED phase verified** by local test run (all tests failing as expected)
-- [ ] **Network-first pattern** applied to E2E tests with network requests
-- [ ] **Data factories created** using faker (no hardcoded test data)
-- [ ] **Fixtures created** with auto-cleanup in teardown
-- [ ] **Mock requirements documented** for external services
-- [ ] **data-testid attributes listed** for DEV team
-- [ ] **Implementation checklist created** mapping tests to code tasks
-- [ ] **Red-green-refactor workflow documented** in ATDD checklist
-- [ ] **Execution commands provided** and verified to work
-- [ ] **ATDD checklist document created** and saved to correct location
-- [ ] **Output file formatted correctly** using template structure
-- [ ] **Knowledge base references applied** and documented in summary
-- [ ] **No test quality issues** (flaky patterns, race conditions, hardcoded data)
-
----
-
-## Common Issues and Resolutions
-
-### Issue: Tests pass before implementation
-
-**Problem:** A test passes even though no implementation code exists yet.
-
-**Resolution:**
-
-- Review test to ensure it's testing actual behavior, not mocked/stubbed behavior
-- Check if test is accidentally using existing functionality
-- Verify test assertions are correct and meaningful
-- Rewrite test to fail until implementation is complete
-
-### Issue: Network-first pattern not applied
-
-**Problem:** Route interception happens after navigation, causing race conditions.
-
-**Resolution:**
-
-- Move `await page.route()` calls BEFORE `await page.goto()`
-- Review `network-first.md` knowledge fragment
-- Update all E2E tests to follow network-first pattern
-
-### Issue: Hardcoded test data in tests
-
-**Problem:** Tests use hardcoded strings/numbers instead of factories.
-
-**Resolution:**
-
-- Replace all hardcoded data with factory function calls
-- Use `faker` for all random data generation
-- Update data-factories to support all required test scenarios
-
-### Issue: Fixtures missing auto-cleanup
-
-**Problem:** Fixtures create data but don't clean it up in teardown.
-
-**Resolution:**
-
-- Add cleanup logic after `await use(data)` in fixture
-- Call deletion/cleanup functions in teardown
-- Verify cleanup works by checking database/storage after test run
-
-### Issue: Tests have multiple assertions
-
-**Problem:** Tests verify multiple behaviors in single test (not atomic).
-
-**Resolution:**
-
-- Split into separate tests (one assertion per test)
-- Each test should verify exactly one behavior
-- Use descriptive test names to clarify what each test verifies
-
-### Issue: Tests depend on execution order
-
-**Problem:** Tests fail when run in isolation or different order.
-
-**Resolution:**
-
-- Remove shared state between tests
-- Each test should create its own test data
-- Use fixtures for consistent setup across tests
-- Verify tests can run with `.only` flag
-
----
-
-## Notes for TEA Agent
-
-- **Preflight halt is critical:** Do not proceed if story has no acceptance criteria or framework is missing
-- **RED phase verification is mandatory:** Tests must fail before sharing with DEV team
-- **Network-first pattern:** Route interception BEFORE navigation prevents race conditions
-- **One assertion per test:** Atomic tests provide clear failure diagnosis
-- **Auto-cleanup is non-negotiable:** Every fixture must clean up data in teardown
-- **Use knowledge base:** Load relevant fragments (fixture-architecture, data-factories, network-first, component-tdd, test-quality) for guidance
-- **Share with DEV agent:** ATDD checklist provides implementation roadmap from red to green
diff --git a/src/bmm/workflows/testarch/atdd/instructions.md b/src/bmm/workflows/testarch/atdd/instructions.md
deleted file mode 100644
index aa748905..00000000
--- a/src/bmm/workflows/testarch/atdd/instructions.md
+++ /dev/null
@@ -1,806 +0,0 @@
-
-
-# Acceptance Test-Driven Development (ATDD)
-
-**Workflow ID**: `_bmad/bmm/testarch/atdd`
-**Version**: 4.0 (BMad v6)
-
----
-
-## Overview
-
-Generates failing acceptance tests BEFORE implementation following TDD's red-green-refactor cycle. This workflow creates comprehensive test coverage at appropriate levels (E2E, API, Component) with supporting infrastructure (fixtures, factories, mocks) and provides an implementation checklist to guide development.
-
-**Core Principle**: Tests fail first (red phase), then guide development to green, then enable confident refactoring.
-
----
-
-## Preflight Requirements
-
-**Critical:** Verify these requirements before proceeding. If any fail, HALT and notify the user.
-
-- β Story approved with clear acceptance criteria
-- β Development sandbox/environment ready
-- β Framework scaffolding exists (run `framework` workflow if missing)
-- β Test framework configuration available (playwright.config.ts or cypress.config.ts)
-
----
-
-## Step 1: Load Story Context and Requirements
-
-### Actions
-
-1. **Read Story Markdown**
- - Load story file from `{story_file}` variable
- - Extract acceptance criteria (all testable requirements)
- - Identify affected systems and components
- - Note any technical constraints or dependencies
-
-2. **Load Framework Configuration**
- - Read framework config (playwright.config.ts or cypress.config.ts)
- - Identify test directory structure
- - Check existing fixture patterns
- - Note test runner capabilities
-
-3. **Load Existing Test Patterns**
- - Search `{test_dir}` for similar tests
- - Identify reusable fixtures and helpers
- - Check data factory patterns
- - Note naming conventions
-
-4. **Check Playwright Utils Flag**
-
- Read `{config_source}` and check `config.tea_use_playwright_utils`.
-
-5. **Load Knowledge Base Fragments**
-
- **Critical:** Consult `{project-root}/_bmad/bmm/testarch/tea-index.csv` to load:
-
- **Core Patterns (Always load):**
- - `data-factories.md` - Factory patterns using faker (override patterns, nested factories, API seeding, 498 lines, 5 examples)
- - `component-tdd.md` - Component test strategies (red-green-refactor, provider isolation, accessibility, visual regression, 480 lines, 4 examples)
- - `test-quality.md` - Test design principles (deterministic tests, isolated with cleanup, explicit assertions, length limits, execution time optimization, 658 lines, 5 examples)
- - `test-healing-patterns.md` - Common failure patterns and healing strategies (stale selectors, race conditions, dynamic data, network errors, hard waits, 648 lines, 5 examples)
- - `selector-resilience.md` - Selector best practices (data-testid > ARIA > text > CSS hierarchy, dynamic patterns, anti-patterns, 541 lines, 4 examples)
- - `timing-debugging.md` - Race condition prevention and async debugging (network-first, deterministic waiting, anti-patterns, 370 lines, 3 examples)
-
- **If `config.tea_use_playwright_utils: true` (All Utilities):**
- - `overview.md` - Playwright utils for ATDD patterns
- - `api-request.md` - API test examples with schema validation
- - `network-recorder.md` - HAR record/playback for UI acceptance tests
- - `auth-session.md` - Auth setup for acceptance tests
- - `intercept-network-call.md` - Network interception in ATDD scenarios
- - `recurse.md` - Polling for async acceptance criteria
- - `log.md` - Logging in ATDD tests
- - `file-utils.md` - File download validation in acceptance tests
- - `network-error-monitor.md` - Catch silent failures in ATDD
- - `fixtures-composition.md` - Composing utilities for ATDD
-
- **If `config.tea_use_playwright_utils: false`:**
- - `fixture-architecture.md` - Test fixture patterns with auto-cleanup (pure function β fixture β mergeTests composition, 406 lines, 5 examples)
- - `network-first.md` - Route interception patterns (intercept before navigate, HAR capture, deterministic waiting, 489 lines, 5 examples)
-
-**Halt Condition:** If story has no acceptance criteria or framework is missing, HALT with message: "ATDD requires clear acceptance criteria and test framework setup"
-
----
-
-## Step 1.5: Generation Mode Selection (NEW - Phase 2.5)
-
-### Actions
-
-1. **Detect Generation Mode**
-
- Determine mode based on scenario complexity:
-
- **AI Generation Mode (DEFAULT)**:
- - Clear acceptance criteria with standard patterns
- - Uses: AI-generated tests from requirements
- - Appropriate for: CRUD, auth, navigation, API tests
- - Fastest approach
-
- **Recording Mode (OPTIONAL - Complex UI)**:
- - Complex UI interactions (drag-drop, wizards, multi-page flows)
- - Uses: Interactive test recording with Playwright MCP
- - Appropriate for: Visual workflows, unclear requirements
- - Only if config.tea_use_mcp_enhancements is true AND MCP available
-
-2. **AI Generation Mode (DEFAULT - Continue to Step 2)**
-
- For standard scenarios:
- - Continue with existing workflow (Step 2: Select Test Levels and Strategy)
- - AI generates tests based on acceptance criteria from Step 1
- - Use knowledge base patterns for test structure
-
-3. **Recording Mode (OPTIONAL - Complex UI Only)**
-
- For complex UI scenarios AND config.tea_use_mcp_enhancements is true:
-
- **A. Check MCP Availability**
-
- If Playwright MCP tools are available in your IDE:
- - Use MCP recording mode (Step 3.B)
-
- If MCP unavailable:
- - Fallback to AI generation mode (silent, automatic)
- - Continue to Step 2
-
- **B. Interactive Test Recording (MCP-Based)**
-
- Use Playwright MCP test-generator tools:
-
- **Setup:**
-
- ```
- 1. Use generator_setup_page to initialize recording session
- 2. Navigate to application starting URL (from story context)
- 3. Ready to record user interactions
- ```
-
- **Recording Process (Per Acceptance Criterion):**
-
- ```
- 4. Read acceptance criterion from story
- 5. Manually execute test scenario using browser_* tools:
- - browser_navigate: Navigate to pages
- - browser_click: Click buttons, links, elements
- - browser_type: Fill form fields
- - browser_select: Select dropdown options
- - browser_check: Check/uncheck checkboxes
- 6. Add verification steps using browser_verify_* tools:
- - browser_verify_text: Verify text content
- - browser_verify_visible: Verify element visibility
- - browser_verify_url: Verify URL navigation
- 7. Capture interaction log with generator_read_log
- 8. Generate test file with generator_write_test
- 9. Repeat for next acceptance criterion
- ```
-
- **Post-Recording Enhancement:**
-
- ```
- 10. Review generated test code
- 11. Enhance with knowledge base patterns:
- - Add Given-When-Then comments
- - Replace recorded selectors with data-testid (if needed)
- - Add network-first interception (from network-first.md)
- - Add fixtures for auth/data setup (from fixture-architecture.md)
- - Use factories for test data (from data-factories.md)
- 12. Verify tests fail (missing implementation)
- 13. Continue to Step 4 (Build Data Infrastructure)
- ```
-
- **When to Use Recording Mode:**
- - β Complex UI interactions (drag-drop, multi-step forms, wizards)
- - β Visual workflows (modals, dialogs, animations)
- - β Unclear requirements (exploratory, discovering expected behavior)
- - β Multi-page flows (checkout, registration, onboarding)
- - β NOT for simple CRUD (AI generation faster)
- - β NOT for API-only tests (no UI to record)
-
- **When to Use AI Generation (Default):**
- - β Clear acceptance criteria available
- - β Standard patterns (login, CRUD, navigation)
- - β Need many tests quickly
- - β API/backend tests (no UI interaction)
-
-4. **Proceed to Test Level Selection**
-
- After mode selection:
- - AI Generation: Continue to Step 2 (Select Test Levels and Strategy)
- - Recording: Skip to Step 4 (Build Data Infrastructure) - tests already generated
-
----
-
-## Step 2: Select Test Levels and Strategy
-
-### Actions
-
-1. **Analyze Acceptance Criteria**
-
- For each acceptance criterion, determine:
- - Does it require full user journey? β E2E test
- - Does it test business logic/API contract? β API test
- - Does it validate UI component behavior? β Component test
- - Can it be unit tested? β Unit test
-
-2. **Apply Test Level Selection Framework**
-
- **Knowledge Base Reference**: `test-levels-framework.md`
-
- **E2E (End-to-End)**:
- - Critical user journeys (login, checkout, core workflow)
- - Multi-system integration
- - User-facing acceptance criteria
- - **Characteristics**: High confidence, slow execution, brittle
-
- **API (Integration)**:
- - Business logic validation
- - Service contracts
- - Data transformations
- - **Characteristics**: Fast feedback, good balance, stable
-
- **Component**:
- - UI component behavior (buttons, forms, modals)
- - Interaction testing
- - Visual regression
- - **Characteristics**: Fast, isolated, granular
-
- **Unit**:
- - Pure business logic
- - Edge cases
- - Error handling
- - **Characteristics**: Fastest, most granular
-
-3. **Avoid Duplicate Coverage**
-
- Don't test same behavior at multiple levels unless necessary:
- - Use E2E for critical happy path only
- - Use API tests for complex business logic variations
- - Use component tests for UI interaction edge cases
- - Use unit tests for pure logic edge cases
-
-4. **Prioritize Tests**
-
- If test-design document exists, align with priority levels:
- - P0 scenarios β Must cover in failing tests
- - P1 scenarios β Should cover if time permits
- - P2/P3 scenarios β Optional for this iteration
-
-**Decision Point:** Set `primary_level` variable to main test level for this story (typically E2E or API)
-
----
-
-## Step 3: Generate Failing Tests
-
-### Actions
-
-1. **Create Test File Structure**
-
- ```
- tests/
- βββ e2e/
- β βββ {feature-name}.spec.ts # E2E acceptance tests
- βββ api/
- β βββ {feature-name}.api.spec.ts # API contract tests
- βββ component/
- β βββ {ComponentName}.test.tsx # Component tests
- βββ support/
- βββ fixtures/ # Test fixtures
- βββ factories/ # Data factories
- βββ helpers/ # Utility functions
- ```
-
-2. **Write Failing E2E Tests (If Applicable)**
-
- **Use Given-When-Then format:**
-
- ```typescript
- import { test, expect } from '@playwright/test';
-
- test.describe('User Login', () => {
- test('should display error for invalid credentials', async ({ page }) => {
- // GIVEN: User is on login page
- await page.goto('/login');
-
- // WHEN: User submits invalid credentials
- await page.fill('[data-testid="email-input"]', 'invalid@example.com');
- await page.fill('[data-testid="password-input"]', 'wrongpassword');
- await page.click('[data-testid="login-button"]');
-
- // THEN: Error message is displayed
- await expect(page.locator('[data-testid="error-message"]')).toHaveText('Invalid email or password');
- });
- });
- ```
-
- **Critical patterns:**
- - One assertion per test (atomic tests)
- - Explicit waits (no hard waits/sleeps)
- - Network-first approach (route interception before navigation)
- - data-testid selectors for stability
- - Clear Given-When-Then structure
-
-3. **Apply Network-First Pattern**
-
- **Knowledge Base Reference**: `network-first.md`
-
- ```typescript
- test('should load user dashboard after login', async ({ page }) => {
- // CRITICAL: Intercept routes BEFORE navigation
- await page.route('**/api/user', (route) =>
- route.fulfill({
- status: 200,
- body: JSON.stringify({ id: 1, name: 'Test User' }),
- }),
- );
-
- // NOW navigate
- await page.goto('/dashboard');
-
- await expect(page.locator('[data-testid="user-name"]')).toHaveText('Test User');
- });
- ```
-
-4. **Write Failing API Tests (If Applicable)**
-
- ```typescript
- import { test, expect } from '@playwright/test';
-
- test.describe('User API', () => {
- test('POST /api/users - should create new user', async ({ request }) => {
- // GIVEN: Valid user data
- const userData = {
- email: 'newuser@example.com',
- name: 'New User',
- };
-
- // WHEN: Creating user via API
- const response = await request.post('/api/users', {
- data: userData,
- });
-
- // THEN: User is created successfully
- expect(response.status()).toBe(201);
- const body = await response.json();
- expect(body).toMatchObject({
- email: userData.email,
- name: userData.name,
- id: expect.any(Number),
- });
- });
- });
- ```
-
-5. **Write Failing Component Tests (If Applicable)**
-
- **Knowledge Base Reference**: `component-tdd.md`
-
- ```typescript
- import { test, expect } from '@playwright/experimental-ct-react';
- import { LoginForm } from './LoginForm';
-
- test.describe('LoginForm Component', () => {
- test('should disable submit button when fields are empty', async ({ mount }) => {
- // GIVEN: LoginForm is mounted
- const component = await mount();
-
- // WHEN: Form is initially rendered
- const submitButton = component.locator('button[type="submit"]');
-
- // THEN: Submit button is disabled
- await expect(submitButton).toBeDisabled();
- });
- });
- ```
-
-6. **Verify Tests Fail Initially**
-
- **Critical verification:**
- - Run tests locally to confirm they fail
- - Failure should be due to missing implementation, not test errors
- - Failure messages should be clear and actionable
- - All tests must be in RED phase before sharing with DEV
-
-**Important:** Tests MUST fail initially. If a test passes before implementation, it's not a valid acceptance test.
-
----
-
-## Step 4: Build Data Infrastructure
-
-### Actions
-
-1. **Create Data Factories**
-
- **Knowledge Base Reference**: `data-factories.md`
-
- ```typescript
- // tests/support/factories/user.factory.ts
- import { faker } from '@faker-js/faker';
-
- export const createUser = (overrides = {}) => ({
- id: faker.number.int(),
- email: faker.internet.email(),
- name: faker.person.fullName(),
- createdAt: faker.date.recent().toISOString(),
- ...overrides,
- });
-
- export const createUsers = (count: number) => Array.from({ length: count }, () => createUser());
- ```
-
- **Factory principles:**
- - Use faker for random data (no hardcoded values)
- - Support overrides for specific scenarios
- - Generate complete valid objects
- - Include helper functions for bulk creation
-
-2. **Create Test Fixtures**
-
- **Knowledge Base Reference**: `fixture-architecture.md`
-
- ```typescript
- // tests/support/fixtures/auth.fixture.ts
- import { test as base } from '@playwright/test';
-
- export const test = base.extend({
- authenticatedUser: async ({ page }, use) => {
- // Setup: Create and authenticate user
- const user = await createUser();
- await page.goto('/login');
- await page.fill('[data-testid="email"]', user.email);
- await page.fill('[data-testid="password"]', 'password123');
- await page.click('[data-testid="login-button"]');
- await page.waitForURL('/dashboard');
-
- // Provide to test
- await use(user);
-
- // Cleanup: Delete user
- await deleteUser(user.id);
- },
- });
- ```
-
- **Fixture principles:**
- - Auto-cleanup (always delete created data)
- - Composable (fixtures can use other fixtures)
- - Isolated (each test gets fresh data)
- - Type-safe
-
-3. **Document Mock Requirements**
-
- If external services need mocking, document requirements:
-
- ```markdown
- ### Mock Requirements for DEV Team
-
- **Payment Gateway Mock**:
-
- - Endpoint: `POST /api/payments`
- - Success response: `{ status: 'success', transactionId: '123' }`
- - Failure response: `{ status: 'failed', error: 'Insufficient funds' }`
-
- **Email Service Mock**:
-
- - Should not send real emails in test environment
- - Log email contents for verification
- ```
-
-4. **List Required data-testid Attributes**
-
- ```markdown
- ### Required data-testid Attributes
-
- **Login Page**:
-
- - `email-input` - Email input field
- - `password-input` - Password input field
- - `login-button` - Submit button
- - `error-message` - Error message container
-
- **Dashboard Page**:
-
- - `user-name` - User name display
- - `logout-button` - Logout button
- ```
-
----
-
-## Step 5: Create Implementation Checklist
-
-### Actions
-
-1. **Map Tests to Implementation Tasks**
-
- For each failing test, create corresponding implementation task:
-
- ```markdown
- ## Implementation Checklist
-
- ### Epic X - User Authentication
-
- #### Test: User Login with Valid Credentials
-
- - [ ] Create `/login` route
- - [ ] Implement login form component
- - [ ] Add email/password validation
- - [ ] Integrate authentication API
- - [ ] Add `data-testid` attributes: `email-input`, `password-input`, `login-button`
- - [ ] Implement error handling
- - [ ] Run test: `npm run test:e2e -- login.spec.ts`
- - [ ] β Test passes (green phase)
-
- #### Test: Display Error for Invalid Credentials
-
- - [ ] Add error state management
- - [ ] Display error message UI
- - [ ] Add `data-testid="error-message"`
- - [ ] Run test: `npm run test:e2e -- login.spec.ts`
- - [ ] β Test passes (green phase)
- ```
-
-2. **Include Red-Green-Refactor Guidance**
-
- ```markdown
- ## Red-Green-Refactor Workflow
-
- **RED Phase** (Complete):
-
- - β All tests written and failing
- - β Fixtures and factories created
- - β Mock requirements documented
-
- **GREEN Phase** (DEV Team):
-
- 1. Pick one failing test
- 2. Implement minimal code to make it pass
- 3. Run test to verify green
- 4. Move to next test
- 5. Repeat until all tests pass
-
- **REFACTOR Phase** (DEV Team):
-
- 1. All tests passing (green)
- 2. Improve code quality
- 3. Extract duplications
- 4. Optimize performance
- 5. Ensure tests still pass
- ```
-
-3. **Add Execution Commands**
-
- ````markdown
- ## Running Tests
-
- ```bash
- # Run all failing tests
- npm run test:e2e
-
- # Run specific test file
- npm run test:e2e -- login.spec.ts
-
- # Run tests in headed mode (see browser)
- npm run test:e2e -- --headed
-
- # Debug specific test
- npm run test:e2e -- login.spec.ts --debug
- ```
- ````
-
- ```
-
- ```
-
----
-
-## Step 6: Generate Deliverables
-
-### Actions
-
-1. **Create ATDD Checklist Document**
-
- Use template structure at `{installed_path}/atdd-checklist-template.md`:
- - Story summary
- - Acceptance criteria breakdown
- - Test files created (with paths)
- - Data factories created
- - Fixtures created
- - Mock requirements
- - Required data-testid attributes
- - Implementation checklist
- - Red-green-refactor workflow
- - Execution commands
-
-2. **Verify All Tests Fail**
-
- Before finalizing:
- - Run full test suite locally
- - Confirm all tests in RED phase
- - Document expected failure messages
- - Ensure failures are due to missing implementation, not test bugs
-
-3. **Write to Output File**
-
- Save to `{output_folder}/atdd-checklist-{story_id}.md`
-
----
-
-## Important Notes
-
-### Red-Green-Refactor Cycle
-
-**RED Phase** (TEA responsibility):
-
-- Write failing tests first
-- Tests define expected behavior
-- Tests must fail for right reason (missing implementation)
-
-**GREEN Phase** (DEV responsibility):
-
-- Implement minimal code to pass tests
-- One test at a time
-- Don't over-engineer
-
-**REFACTOR Phase** (DEV responsibility):
-
-- Improve code quality with confidence
-- Tests provide safety net
-- Extract duplications, optimize
-
-### Given-When-Then Structure
-
-**GIVEN** (Setup):
-
-- Arrange test preconditions
-- Create necessary data
-- Navigate to starting point
-
-**WHEN** (Action):
-
-- Execute the behavior being tested
-- Single action per test
-
-**THEN** (Assertion):
-
-- Verify expected outcome
-- One assertion per test (atomic)
-
-### Network-First Testing
-
-**Critical pattern:**
-
-```typescript
-// β CORRECT: Intercept BEFORE navigation
-await page.route('**/api/data', handler);
-await page.goto('/page');
-
-// β WRONG: Navigate then intercept (race condition)
-await page.goto('/page');
-await page.route('**/api/data', handler); // Too late!
-```
-
-### Data Factory Best Practices
-
-**Use faker for all test data:**
-
-```typescript
-// β CORRECT: Random data
-email: faker.internet.email();
-
-// β WRONG: Hardcoded data (collisions, maintenance burden)
-email: 'test@example.com';
-```
-
-**Auto-cleanup principle:**
-
-- Every factory that creates data must provide cleanup
-- Fixtures automatically cleanup in teardown
-- No manual cleanup in test code
-
-### One Assertion Per Test
-
-**Atomic test design:**
-
-```typescript
-// β CORRECT: One assertion
-test('should display user name', async ({ page }) => {
- await expect(page.locator('[data-testid="user-name"]')).toHaveText('John');
-});
-
-// β WRONG: Multiple assertions (not atomic)
-test('should display user info', async ({ page }) => {
- await expect(page.locator('[data-testid="user-name"]')).toHaveText('John');
- await expect(page.locator('[data-testid="user-email"]')).toHaveText('john@example.com');
-});
-```
-
-**Why?** If second assertion fails, you don't know if first is still valid.
-
-### Component Test Strategy
-
-**When to use component tests:**
-
-- Complex UI interactions (drag-drop, keyboard nav)
-- Form validation logic
-- State management within component
-- Visual edge cases
-
-**When NOT to use:**
-
-- Simple rendering (snapshot tests are sufficient)
-- Integration with backend (use E2E or API tests)
-- Full user journeys (use E2E tests)
-
-### Knowledge Base Integration
-
-**Core Fragments (Auto-loaded in Step 1):**
-
-- `fixture-architecture.md` - Pure function β fixture β mergeTests patterns (406 lines, 5 examples)
-- `data-factories.md` - Factory patterns with faker, overrides, API seeding (498 lines, 5 examples)
-- `component-tdd.md` - Red-green-refactor, provider isolation, accessibility, visual regression (480 lines, 4 examples)
-- `network-first.md` - Intercept before navigate, HAR capture, deterministic waiting (489 lines, 5 examples)
-- `test-quality.md` - Deterministic tests, cleanup, explicit assertions, length/time limits (658 lines, 5 examples)
-- `test-healing-patterns.md` - Common failure patterns: stale selectors, race conditions, dynamic data, network errors, hard waits (648 lines, 5 examples)
-- `selector-resilience.md` - Selector hierarchy (data-testid > ARIA > text > CSS), dynamic patterns, anti-patterns (541 lines, 4 examples)
-- `timing-debugging.md` - Race condition prevention, deterministic waiting, async debugging (370 lines, 3 examples)
-
-**Reference for Test Level Selection:**
-
-- `test-levels-framework.md` - E2E vs API vs Component vs Unit decision framework (467 lines, 4 examples)
-
-**Manual Reference (Optional):**
-
-- Use `tea-index.csv` to find additional specialized fragments as needed
-
----
-
-## Output Summary
-
-After completing this workflow, provide a summary:
-
-```markdown
-## ATDD Complete - Tests in RED Phase
-
-**Story**: {story_id}
-**Primary Test Level**: {primary_level}
-
-**Failing Tests Created**:
-
-- E2E tests: {e2e_count} tests in {e2e_files}
-- API tests: {api_count} tests in {api_files}
-- Component tests: {component_count} tests in {component_files}
-
-**Supporting Infrastructure**:
-
-- Data factories: {factory_count} factories created
-- Fixtures: {fixture_count} fixtures with auto-cleanup
-- Mock requirements: {mock_count} services documented
-
-**Implementation Checklist**:
-
-- Total tasks: {task_count}
-- Estimated effort: {effort_estimate} hours
-
-**Required data-testid Attributes**: {data_testid_count} attributes documented
-
-**Next Steps for DEV Team**:
-
-1. Run failing tests: `npm run test:e2e`
-2. Review implementation checklist
-3. Implement one test at a time (RED β GREEN)
-4. Refactor with confidence (tests provide safety net)
-5. Share progress in daily standup
-
-**Output File**: {output_file}
-**Manual Handoff**: Share `{output_file}` and failing tests with the dev workflow (not auto-consumed).
-
-**Knowledge Base References Applied**:
-
-- Fixture architecture patterns
-- Data factory patterns with faker
-- Network-first route interception
-- Component TDD strategies
-- Test quality principles
-```
-
----
-
-## Validation
-
-After completing all steps, verify:
-
-- [ ] Story acceptance criteria analyzed and mapped to tests
-- [ ] Appropriate test levels selected (E2E, API, Component)
-- [ ] All tests written in Given-When-Then format
-- [ ] All tests fail initially (RED phase verified)
-- [ ] Network-first pattern applied (route interception before navigation)
-- [ ] Data factories created with faker
-- [ ] Fixtures created with auto-cleanup
-- [ ] Mock requirements documented for DEV team
-- [ ] Required data-testid attributes listed
-- [ ] Implementation checklist created with clear tasks
-- [ ] Red-green-refactor workflow documented
-- [ ] Execution commands provided
-- [ ] Output file created and formatted correctly
-
-Refer to `checklist.md` for comprehensive validation criteria.
diff --git a/src/bmm/workflows/testarch/atdd/workflow.yaml b/src/bmm/workflows/testarch/atdd/workflow.yaml
deleted file mode 100644
index 12b8808b..00000000
--- a/src/bmm/workflows/testarch/atdd/workflow.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-# Test Architect workflow: atdd
-name: testarch-atdd
-description: "Generate failing acceptance tests before implementation using TDD red-green-refactor cycle"
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/testarch/atdd"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-template: "{installed_path}/atdd-checklist-template.md"
-
-# Variables and inputs
-variables:
- test_dir: "{project-root}/tests" # Root test directory
-
-# Output configuration
-default_output_file: "{output_folder}/atdd-checklist-{story_id}.md"
-
-# Required tools
-required_tools:
- - read_file # Read story markdown, framework config
- - write_file # Create test files, checklist, factory stubs
- - create_directory # Create test directories
- - list_files # Find existing fixtures and helpers
- - search_repo # Search for similar test patterns
-
-tags:
- - qa
- - atdd
- - test-architect
- - tdd
- - red-green-refactor
-
-execution_hints:
- interactive: false # Minimize prompts
- autonomous: true # Proceed without user input unless blocked
- iterative: true
-
-web_bundle: false
diff --git a/src/bmm/workflows/testarch/automate/checklist.md b/src/bmm/workflows/testarch/automate/checklist.md
deleted file mode 100644
index cc8c50a5..00000000
--- a/src/bmm/workflows/testarch/automate/checklist.md
+++ /dev/null
@@ -1,582 +0,0 @@
-# Automate Workflow Validation Checklist
-
-Use this checklist to validate that the automate workflow has been executed correctly and all deliverables meet quality standards.
-
-## Prerequisites
-
-Before starting this workflow, verify:
-
-- [ ] Framework scaffolding configured (playwright.config.ts or cypress.config.ts exists)
-- [ ] Test directory structure exists (tests/ folder with subdirectories)
-- [ ] Package.json has test framework dependencies installed
-
-**Halt only if:** Framework scaffolding is completely missing (run `framework` workflow first)
-
-**Note:** BMad artifacts (story, tech-spec, PRD) are OPTIONAL - workflow can run without them
-**Note:** `automate` generates tests; it does not run `*atdd` or `*test-review`. If ATDD outputs exist, use them as input and avoid duplicate coverage.
-
----
-
-## Step 1: Execution Mode Determination and Context Loading
-
-### Mode Detection
-
-- [ ] Execution mode correctly determined:
- - [ ] BMad-Integrated Mode (story_file variable set) OR
- - [ ] Standalone Mode (target_feature or target_files set) OR
- - [ ] Auto-discover Mode (no targets specified)
-
-### BMad Artifacts (If Available - OPTIONAL)
-
-- [ ] Story markdown loaded (if `{story_file}` provided)
-- [ ] Acceptance criteria extracted from story (if available)
-- [ ] Tech-spec.md loaded (if `{use_tech_spec}` true and file exists)
-- [ ] Test-design.md loaded (if `{use_test_design}` true and file exists)
-- [ ] PRD.md loaded (if `{use_prd}` true and file exists)
-- [ ] **Note**: Absence of BMad artifacts does NOT halt workflow
-
-### Framework Configuration
-
-- [ ] Test framework config loaded (playwright.config.ts or cypress.config.ts)
-- [ ] Test directory structure identified from `{test_dir}`
-- [ ] Existing test patterns reviewed
-- [ ] Test runner capabilities noted (parallel execution, fixtures, etc.)
-
-### Coverage Analysis
-
-- [ ] Existing test files searched in `{test_dir}` (if `{analyze_coverage}` true)
-- [ ] Tested features vs untested features identified
-- [ ] Coverage gaps mapped (tests to source files)
-- [ ] Existing fixture and factory patterns checked
-
-### Knowledge Base Fragments Loaded
-
-- [ ] `test-levels-framework.md` - Test level selection
-- [ ] `test-priorities.md` - Priority classification (P0-P3)
-- [ ] `fixture-architecture.md` - Fixture patterns with auto-cleanup
-- [ ] `data-factories.md` - Factory patterns using faker
-- [ ] `selective-testing.md` - Targeted test execution strategies
-- [ ] `ci-burn-in.md` - Flaky test detection patterns
-- [ ] `test-quality.md` - Test design principles
-
----
-
-## Step 2: Automation Targets Identification
-
-### Target Determination
-
-**BMad-Integrated Mode (if story available):**
-
-- [ ] Acceptance criteria mapped to test scenarios
-- [ ] Features implemented in story identified
-- [ ] Existing ATDD tests checked (if any)
-- [ ] Expansion beyond ATDD planned (edge cases, negative paths)
-
-**Standalone Mode (if no story):**
-
-- [ ] Specific feature analyzed (if `{target_feature}` specified)
-- [ ] Specific files analyzed (if `{target_files}` specified)
-- [ ] Features auto-discovered (if `{auto_discover_features}` true)
-- [ ] Features prioritized by:
- - [ ] No test coverage (highest priority)
- - [ ] Complex business logic
- - [ ] External integrations (API, database, auth)
- - [ ] Critical user paths (login, checkout, etc.)
-
-### Test Level Selection
-
-- [ ] Test level selection framework applied (from `test-levels-framework.md`)
-- [ ] E2E tests identified: Critical user journeys, multi-system integration
-- [ ] API tests identified: Business logic, service contracts, data transformations
-- [ ] Component tests identified: UI behavior, interactions, state management
-- [ ] Unit tests identified: Pure logic, edge cases, error handling
-
-### Duplicate Coverage Avoidance
-
-- [ ] Same behavior NOT tested at multiple levels unnecessarily
-- [ ] E2E used for critical happy path only
-- [ ] API tests used for business logic variations
-- [ ] Component tests used for UI interaction edge cases
-- [ ] Unit tests used for pure logic edge cases
-
-### Priority Assignment
-
-- [ ] Test priorities assigned using `test-priorities.md` framework
-- [ ] P0 tests: Critical paths, security-critical, data integrity
-- [ ] P1 tests: Important features, integration points, error handling
-- [ ] P2 tests: Edge cases, less-critical variations, performance
-- [ ] P3 tests: Nice-to-have, rarely-used features, exploratory
-- [ ] Priority variables respected:
- - [ ] `{include_p0}` = true (always include)
- - [ ] `{include_p1}` = true (high priority)
- - [ ] `{include_p2}` = true (medium priority)
- - [ ] `{include_p3}` = false (low priority, skip by default)
-
-### Coverage Plan Created
-
-- [ ] Test coverage plan documented
-- [ ] What will be tested at each level listed
-- [ ] Priorities assigned to each test
-- [ ] Coverage strategy clear (critical-paths, comprehensive, or selective)
-
----
-
-## Step 3: Test Infrastructure Generated
-
-### Fixture Architecture
-
-- [ ] Existing fixtures checked in `tests/support/fixtures/`
-- [ ] Fixture architecture created/enhanced (if `{generate_fixtures}` true)
-- [ ] All fixtures use Playwright's `test.extend()` pattern
-- [ ] All fixtures have auto-cleanup in teardown
-- [ ] Common fixtures created/enhanced:
- - [ ] authenticatedUser (with auto-delete)
- - [ ] apiRequest (authenticated client)
- - [ ] mockNetwork (external service mocking)
- - [ ] testDatabase (with auto-cleanup)
-
-### Data Factories
-
-- [ ] Existing factories checked in `tests/support/factories/`
-- [ ] Factory architecture created/enhanced (if `{generate_factories}` true)
-- [ ] All factories use `@faker-js/faker` for random data (no hardcoded values)
-- [ ] All factories support overrides for specific scenarios
-- [ ] Common factories created/enhanced:
- - [ ] User factory (email, password, name, role)
- - [ ] Product factory (name, price, SKU)
- - [ ] Order factory (items, total, status)
-- [ ] Cleanup helpers provided (e.g., deleteUser(), deleteProduct())
-
-### Helper Utilities
-
-- [ ] Existing helpers checked in `tests/support/helpers/` (if `{update_helpers}` true)
-- [ ] Common utilities created/enhanced:
- - [ ] waitFor (polling for complex conditions)
- - [ ] retry (retry helper for flaky operations)
- - [ ] testData (test data generation)
- - [ ] assertions (custom assertion helpers)
-
----
-
-## Step 4: Test Files Generated
-
-### Test File Structure
-
-- [ ] Test files organized correctly:
- - [ ] `tests/e2e/` for E2E tests
- - [ ] `tests/api/` for API tests
- - [ ] `tests/component/` for component tests
- - [ ] `tests/unit/` for unit tests
- - [ ] `tests/support/` for fixtures/factories/helpers
-
-### E2E Tests (If Applicable)
-
-- [ ] E2E test files created in `tests/e2e/`
-- [ ] All tests follow Given-When-Then format
-- [ ] All tests have priority tags ([P0], [P1], [P2], [P3]) in test name
-- [ ] All tests use data-testid selectors (not CSS classes)
-- [ ] One assertion per test (atomic design)
-- [ ] No hard waits or sleeps (explicit waits only)
-- [ ] Network-first pattern applied (route interception BEFORE navigation)
-- [ ] Clear Given-When-Then comments in test code
-
-### API Tests (If Applicable)
-
-- [ ] API test files created in `tests/api/`
-- [ ] All tests follow Given-When-Then format
-- [ ] All tests have priority tags in test name
-- [ ] API contracts validated (request/response structure)
-- [ ] HTTP status codes verified
-- [ ] Response body validation includes required fields
-- [ ] Error cases tested (400, 401, 403, 404, 500)
-- [ ] JWT token format validated (if auth tests)
-
-### Component Tests (If Applicable)
-
-- [ ] Component test files created in `tests/component/`
-- [ ] All tests follow Given-When-Then format
-- [ ] All tests have priority tags in test name
-- [ ] Component mounting works correctly
-- [ ] Interaction testing covers user actions (click, hover, keyboard)
-- [ ] State management validated
-- [ ] Props and events tested
-
-### Unit Tests (If Applicable)
-
-- [ ] Unit test files created in `tests/unit/`
-- [ ] All tests follow Given-When-Then format
-- [ ] All tests have priority tags in test name
-- [ ] Pure logic tested (no dependencies)
-- [ ] Edge cases covered
-- [ ] Error handling tested
-
-### Quality Standards Enforced
-
-- [ ] All tests use Given-When-Then format with clear comments
-- [ ] All tests have descriptive names with priority tags
-- [ ] No duplicate tests (same behavior tested multiple times)
-- [ ] No flaky patterns (race conditions, timing issues)
-- [ ] No test interdependencies (tests can run in any order)
-- [ ] Tests are deterministic (same input always produces same result)
-- [ ] All tests use data-testid selectors (E2E tests)
-- [ ] No hard waits: `await page.waitForTimeout()` (forbidden)
-- [ ] No conditional flow: `if (await element.isVisible())` (forbidden)
-- [ ] No try-catch for test logic (only for cleanup)
-- [ ] No hardcoded test data (use factories with faker)
-- [ ] No page object classes (tests are direct and simple)
-- [ ] No shared state between tests
-
-### Network-First Pattern Applied
-
-- [ ] Route interception set up BEFORE navigation (E2E tests with network requests)
-- [ ] `page.route()` called before `page.goto()` to prevent race conditions
-- [ ] Network-first pattern verified in all E2E tests that make API calls
-
----
-
-## Step 5: Test Validation and Healing (NEW - Phase 2.5)
-
-### Healing Configuration
-
-- [ ] Healing configuration checked:
- - [ ] `{auto_validate}` setting noted (default: true)
- - [ ] `{auto_heal_failures}` setting noted (default: false)
- - [ ] `{max_healing_iterations}` setting noted (default: 3)
- - [ ] `{use_mcp_healing}` setting noted (default: true)
-
-### Healing Knowledge Fragments Loaded (If Healing Enabled)
-
-- [ ] `test-healing-patterns.md` loaded (common failure patterns and fixes)
-- [ ] `selector-resilience.md` loaded (selector refactoring guide)
-- [ ] `timing-debugging.md` loaded (race condition fixes)
-
-### Test Execution and Validation
-
-- [ ] Generated tests executed (if `{auto_validate}` true)
-- [ ] Test results captured:
- - [ ] Total tests run
- - [ ] Passing tests count
- - [ ] Failing tests count
- - [ ] Error messages and stack traces captured
-
-### Healing Loop (If Enabled and Tests Failed)
-
-- [ ] Healing loop entered (if `{auto_heal_failures}` true AND tests failed)
-- [ ] For each failing test:
- - [ ] Failure pattern identified (selector, timing, data, network, hard wait)
- - [ ] Appropriate healing strategy applied:
- - [ ] Stale selector β Replaced with data-testid or ARIA role
- - [ ] Race condition β Added network-first interception or state waits
- - [ ] Dynamic data β Replaced hardcoded values with regex/dynamic generation
- - [ ] Network error β Added route mocking
- - [ ] Hard wait β Replaced with event-based wait
- - [ ] Healed test re-run to validate fix
- - [ ] Iteration count tracked (max 3 attempts)
-
-### Unfixable Tests Handling
-
-- [ ] Tests that couldn't be healed after 3 iterations marked with `test.fixme()` (if `{mark_unhealable_as_fixme}` true)
-- [ ] Detailed comment added to test.fixme() tests:
- - [ ] What failure occurred
- - [ ] What healing was attempted (3 iterations)
- - [ ] Why healing failed
- - [ ] Manual investigation steps needed
-- [ ] Original test logic preserved in comments
-
-### Healing Report Generated
-
-- [ ] Healing report generated (if healing attempted)
-- [ ] Report includes:
- - [ ] Auto-heal enabled status
- - [ ] Healing mode (MCP-assisted or Pattern-based)
- - [ ] Iterations allowed (max_healing_iterations)
- - [ ] Validation results (total, passing, failing)
- - [ ] Successfully healed tests (count, file:line, fix applied)
- - [ ] Unable to heal tests (count, file:line, reason)
- - [ ] Healing patterns applied (selector fixes, timing fixes, data fixes)
- - [ ] Knowledge base references used
-
----
-
-## Step 6: Documentation and Scripts Updated
-
-### Test README Updated
-
-- [ ] `tests/README.md` created or updated (if `{update_readme}` true)
-- [ ] Test suite structure overview included
-- [ ] Test execution instructions provided (all, specific files, by priority)
-- [ ] Fixture usage examples provided
-- [ ] Factory usage examples provided
-- [ ] Priority tagging convention explained ([P0], [P1], [P2], [P3])
-- [ ] How to write new tests documented
-- [ ] Common patterns documented
-- [ ] Anti-patterns documented (what to avoid)
-
-### package.json Scripts Updated
-
-- [ ] package.json scripts added/updated (if `{update_package_scripts}` true)
-- [ ] `test:e2e` script for all E2E tests
-- [ ] `test:e2e:p0` script for P0 tests only
-- [ ] `test:e2e:p1` script for P0 + P1 tests
-- [ ] `test:api` script for API tests
-- [ ] `test:component` script for component tests
-- [ ] `test:unit` script for unit tests (if applicable)
-
-### Test Suite Executed
-
-- [ ] Test suite run locally (if `{run_tests_after_generation}` true)
-- [ ] Test results captured (passing/failing counts)
-- [ ] No flaky patterns detected (tests are deterministic)
-- [ ] Setup requirements documented (if any)
-- [ ] Known issues documented (if any)
-
----
-
-## Step 6: Automation Summary Generated
-
-### Automation Summary Document
-
-- [ ] Output file created at `{output_summary}`
-- [ ] Document includes execution mode (BMad-Integrated, Standalone, Auto-discover)
-- [ ] Feature analysis included (source files, coverage gaps) - Standalone mode
-- [ ] Tests created listed (E2E, API, Component, Unit) with counts and paths
-- [ ] Infrastructure created listed (fixtures, factories, helpers)
-- [ ] Test execution instructions provided
-- [ ] Coverage analysis included:
- - [ ] Total test count
- - [ ] Priority breakdown (P0, P1, P2, P3 counts)
- - [ ] Test level breakdown (E2E, API, Component, Unit counts)
- - [ ] Coverage percentage (if calculated)
- - [ ] Coverage status (acceptance criteria covered, gaps identified)
-- [ ] Definition of Done checklist included
-- [ ] Next steps provided
-- [ ] Recommendations included (if Standalone mode)
-
-### Summary Provided to User
-
-- [ ] Concise summary output provided
-- [ ] Total tests created across test levels
-- [ ] Priority breakdown (P0, P1, P2, P3 counts)
-- [ ] Infrastructure counts (fixtures, factories, helpers)
-- [ ] Test execution command provided
-- [ ] Output file path provided
-- [ ] Next steps listed
-
----
-
-## Quality Checks
-
-### Test Design Quality
-
-- [ ] Tests are readable (clear Given-When-Then structure)
-- [ ] Tests are maintainable (use factories/fixtures, not hardcoded data)
-- [ ] Tests are isolated (no shared state between tests)
-- [ ] Tests are deterministic (no race conditions or flaky patterns)
-- [ ] Tests are atomic (one assertion per test)
-- [ ] Tests are fast (no unnecessary waits or delays)
-- [ ] Tests are lean (files under {max_file_lines} lines)
-
-### Knowledge Base Integration
-
-- [ ] Test level selection framework applied (from `test-levels-framework.md`)
-- [ ] Priority classification applied (from `test-priorities.md`)
-- [ ] Fixture architecture patterns applied (from `fixture-architecture.md`)
-- [ ] Data factory patterns applied (from `data-factories.md`)
-- [ ] Selective testing strategies considered (from `selective-testing.md`)
-- [ ] Flaky test detection patterns considered (from `ci-burn-in.md`)
-- [ ] Test quality principles applied (from `test-quality.md`)
-
-### Code Quality
-
-- [ ] All TypeScript types are correct and complete
-- [ ] No linting errors in generated test files
-- [ ] Consistent naming conventions followed
-- [ ] Imports are organized and correct
-- [ ] Code follows project style guide
-- [ ] No console.log or debug statements in test code
-
----
-
-## Integration Points
-
-### With Framework Workflow
-
-- [ ] Test framework configuration detected and used
-- [ ] Directory structure matches framework setup
-- [ ] Fixtures and helpers follow established patterns
-- [ ] Naming conventions consistent with framework standards
-
-### With BMad Workflows (If Available - OPTIONAL)
-
-**With Story Workflow:**
-
-- [ ] Story ID correctly referenced in output (if story available)
-- [ ] Acceptance criteria from story reflected in tests (if story available)
-- [ ] Technical constraints from story considered (if story available)
-
-**With test-design Workflow:**
-
-- [ ] P0 scenarios from test-design prioritized (if test-design available)
-- [ ] Risk assessment from test-design considered (if test-design available)
-- [ ] Coverage strategy aligned with test-design (if test-design available)
-
-**With atdd Workflow:**
-
-- [ ] ATDD artifacts provided or located (manual handoff; `atdd` not auto-run)
-- [ ] Existing ATDD tests checked (if story had ATDD workflow run)
-- [ ] Expansion beyond ATDD planned (edge cases, negative paths)
-- [ ] No duplicate coverage with ATDD tests
-
-### With CI Pipeline
-
-- [ ] Tests can run in CI environment
-- [ ] Tests are parallelizable (no shared state)
-- [ ] Tests have appropriate timeouts
-- [ ] Tests clean up their data (no CI environment pollution)
-
----
-
-## Completion Criteria
-
-All of the following must be true before marking this workflow as complete:
-
-- [ ] **Execution mode determined** (BMad-Integrated, Standalone, or Auto-discover)
-- [ ] **Framework configuration loaded** and validated
-- [ ] **Coverage analysis completed** (gaps identified if analyze_coverage true)
-- [ ] **Automation targets identified** (what needs testing)
-- [ ] **Test levels selected** appropriately (E2E, API, Component, Unit)
-- [ ] **Duplicate coverage avoided** (same behavior not tested at multiple levels)
-- [ ] **Test priorities assigned** (P0, P1, P2, P3)
-- [ ] **Fixture architecture created/enhanced** with auto-cleanup
-- [ ] **Data factories created/enhanced** using faker (no hardcoded data)
-- [ ] **Helper utilities created/enhanced** (if needed)
-- [ ] **Test files generated** at appropriate levels (E2E, API, Component, Unit)
-- [ ] **Given-When-Then format used** consistently across all tests
-- [ ] **Priority tags added** to all test names ([P0], [P1], [P2], [P3])
-- [ ] **data-testid selectors used** in E2E tests (not CSS classes)
-- [ ] **Network-first pattern applied** (route interception before navigation)
-- [ ] **Quality standards enforced** (no hard waits, no flaky patterns, self-cleaning, deterministic)
-- [ ] **Test README updated** with execution instructions and patterns
-- [ ] **package.json scripts updated** with test execution commands
-- [ ] **Test suite run locally** (if run_tests_after_generation true)
-- [ ] **Tests validated** (if auto_validate enabled)
-- [ ] **Failures healed** (if auto_heal_failures enabled and tests failed)
-- [ ] **Healing report generated** (if healing attempted)
-- [ ] **Unfixable tests marked** with test.fixme() and detailed comments (if any)
-- [ ] **Automation summary created** and saved to correct location
-- [ ] **Output file formatted correctly**
-- [ ] **Knowledge base references applied** and documented (including healing fragments if used)
-- [ ] **No test quality issues** (flaky patterns, race conditions, hardcoded data, page objects)
-
----
-
-## Common Issues and Resolutions
-
-### Issue: BMad artifacts not found
-
-**Problem:** Story, tech-spec, or PRD files not found when variables are set.
-
-**Resolution:**
-
-- **automate does NOT require BMad artifacts** - they are OPTIONAL enhancements
-- If files not found, switch to Standalone Mode automatically
-- Analyze source code directly without BMad context
-- Continue workflow without halting
-
-### Issue: Framework configuration not found
-
-**Problem:** No playwright.config.ts or cypress.config.ts found.
-
-**Resolution:**
-
-- **HALT workflow** - framework is required
-- Message: "Framework scaffolding required. Run `bmad tea *framework` first."
-- User must run framework workflow before automate
-
-### Issue: No automation targets identified
-
-**Problem:** Neither story, target_feature, nor target_files specified, and auto-discover finds nothing.
-
-**Resolution:**
-
-- Check if source_dir variable is correct
-- Verify source code exists in project
-- Ask user to specify target_feature or target_files explicitly
-- Provide examples: `target_feature: "src/auth/"` or `target_files: "src/auth/login.ts,src/auth/session.ts"`
-
-### Issue: Duplicate coverage detected
-
-**Problem:** Same behavior tested at multiple levels (E2E + API + Component).
-
-**Resolution:**
-
-- Review test level selection framework (test-levels-framework.md)
-- Use E2E for critical happy path ONLY
-- Use API for business logic variations
-- Use Component for UI edge cases
-- Remove redundant tests that duplicate coverage
-
-### Issue: Tests have hardcoded data
-
-**Problem:** Tests use hardcoded email addresses, passwords, or other data.
-
-**Resolution:**
-
-- Replace all hardcoded data with factory function calls
-- Use faker for all random data generation
-- Update data-factories to support all required test scenarios
-- Example: `createUser({ email: faker.internet.email() })`
-
-### Issue: Tests are flaky
-
-**Problem:** Tests fail intermittently, pass on retry.
-
-**Resolution:**
-
-- Remove all hard waits (`page.waitForTimeout()`)
-- Use explicit waits (`page.waitForSelector()`)
-- Apply network-first pattern (route interception before navigation)
-- Remove conditional flow (`if (await element.isVisible())`)
-- Ensure tests are deterministic (no race conditions)
-- Run burn-in loop (10 iterations) to detect flakiness
-
-### Issue: Fixtures don't clean up data
-
-**Problem:** Test data persists after test run, causing test pollution.
-
-**Resolution:**
-
-- Ensure all fixtures have cleanup in teardown phase
-- Cleanup happens AFTER `await use(data)`
-- Call deletion/cleanup functions (deleteUser, deleteProduct, etc.)
-- Verify cleanup works by checking database/storage after test run
-
-### Issue: Tests too slow
-
-**Problem:** Tests take longer than 90 seconds (max_test_duration).
-
-**Resolution:**
-
-- Remove unnecessary waits and delays
-- Use parallel execution where possible
-- Mock external services (don't make real API calls)
-- Use API tests instead of E2E for business logic
-- Optimize test data creation (use in-memory database, etc.)
-
----
-
-## Notes for TEA Agent
-
-- **automate is flexible:** Can work with or without BMad artifacts (story, tech-spec, PRD are OPTIONAL)
-- **Standalone mode is powerful:** Analyze any codebase and generate tests independently
-- **Auto-discover mode:** Scan codebase for features needing tests when no targets specified
-- **Framework is the ONLY hard requirement:** HALT if framework config missing, otherwise proceed
-- **Avoid duplicate coverage:** E2E for critical paths only, API/Component for variations
-- **Priority tagging enables selective execution:** P0 tests run on every commit, P1 on PR, P2 nightly
-- **Network-first pattern prevents race conditions:** Route interception BEFORE navigation
-- **No page objects:** Keep tests simple, direct, and maintainable
-- **Use knowledge base:** Load relevant fragments (test-levels, test-priorities, fixture-architecture, data-factories, healing patterns) for guidance
-- **Deterministic tests only:** No hard waits, no conditional flow, no flaky patterns allowed
-- **Optional healing:** auto_heal_failures disabled by default (opt-in for automatic test healing)
-- **Graceful degradation:** Healing works without Playwright MCP (pattern-based fallback)
-- **Unfixable tests handled:** Mark with test.fixme() and detailed comments (not silently broken)
diff --git a/src/bmm/workflows/testarch/automate/instructions.md b/src/bmm/workflows/testarch/automate/instructions.md
deleted file mode 100644
index 7ba8da51..00000000
--- a/src/bmm/workflows/testarch/automate/instructions.md
+++ /dev/null
@@ -1,1324 +0,0 @@
-
-
-# Test Automation Expansion
-
-**Workflow ID**: `_bmad/bmm/testarch/automate`
-**Version**: 4.0 (BMad v6)
-
----
-
-## Overview
-
-Expands test automation coverage by generating comprehensive test suites at appropriate levels (E2E, API, Component, Unit) with supporting infrastructure. This workflow operates in **dual mode**:
-
-1. **BMad-Integrated Mode**: Works WITH BMad artifacts (story, tech-spec, PRD, test-design) to expand coverage after story implementation
-2. **Standalone Mode**: Works WITHOUT BMad artifacts - analyzes existing codebase and generates tests independently
-
-**Core Principle**: Generate prioritized, deterministic tests that avoid duplicate coverage and follow testing best practices.
-
----
-
-## Preflight Requirements
-
-**Flexible:** This workflow can run with minimal prerequisites. Only HALT if framework is completely missing.
-
-### Required (Always)
-
-- β Framework scaffolding configured (run `framework` workflow if missing)
-- β Test framework configuration available (playwright.config.ts or cypress.config.ts)
-
-### Optional (BMad-Integrated Mode)
-
-- Story markdown with acceptance criteria (enhances coverage targeting)
-- Tech spec or PRD (provides architectural context)
-- Test design document (provides risk/priority context)
-
-### Optional (Standalone Mode)
-
-- Source code to analyze (feature implementation)
-- Existing tests (for gap analysis)
-
-**If framework is missing:** HALT with message: "Framework scaffolding required. Run `bmad tea *framework` first."
-
----
-
-## Step 1: Determine Execution Mode and Load Context
-
-### Actions
-
-1. **Detect Execution Mode**
-
- Check if BMad artifacts are available:
- - If `{story_file}` variable is set β BMad-Integrated Mode
- - If `{target_feature}` or `{target_files}` set β Standalone Mode
- - If neither set β Auto-discover mode (scan codebase for features needing tests)
-
-2. **Load BMad Artifacts (If Available)**
-
- **BMad-Integrated Mode:**
- - Read story markdown from `{story_file}`
- - Extract acceptance criteria and technical requirements
- - Load tech-spec.md if `{use_tech_spec}` is true
- - Load test-design.md if `{use_test_design}` is true
- - Load PRD.md if `{use_prd}` is true
- - Note: These are **optional enhancements**, not hard requirements
-
- **Standalone Mode:**
- - Skip BMad artifact loading
- - Proceed directly to source code analysis
-
-3. **Load Framework Configuration**
- - Read test framework config (playwright.config.ts or cypress.config.ts)
- - Identify test directory structure from `{test_dir}`
- - Check existing test patterns in `{test_dir}`
- - Note test runner capabilities (parallel execution, fixtures, etc.)
-
-4. **Analyze Existing Test Coverage**
-
- If `{analyze_coverage}` is true:
- - Search `{test_dir}` for existing test files
- - Identify tested features vs untested features
- - Map tests to source files (coverage gaps)
- - Check existing fixture and factory patterns
-
-5. **Check Playwright Utils Flag**
-
- Read `{config_source}` and check `config.tea_use_playwright_utils`.
-
-6. **Load Knowledge Base Fragments**
-
- **Critical:** Consult `{project-root}/_bmad/bmm/testarch/tea-index.csv` to load:
-
- **Core Testing Patterns (Always load):**
- - `test-levels-framework.md` - Test level selection (E2E vs API vs Component vs Unit with decision matrix, 467 lines, 4 examples)
- - `test-priorities-matrix.md` - Priority classification (P0-P3 with automated scoring, risk mapping, 389 lines, 2 examples)
- - `data-factories.md` - Factory patterns with faker (overrides, nested factories, API seeding, 498 lines, 5 examples)
- - `selective-testing.md` - Targeted test execution strategies (tag-based, spec filters, diff-based, promotion rules, 727 lines, 4 examples)
- - `ci-burn-in.md` - Flaky test detection patterns (10-iteration burn-in, sharding, selective execution, 678 lines, 4 examples)
- - `test-quality.md` - Test design principles (deterministic, isolated, explicit assertions, length/time limits, 658 lines, 5 examples)
-
- **If `config.tea_use_playwright_utils: true` (Playwright Utils Integration - All Utilities):**
- - `overview.md` - Playwright utils installation, design principles, fixture patterns
- - `api-request.md` - Typed HTTP client with schema validation
- - `network-recorder.md` - HAR record/playback for offline testing
- - `auth-session.md` - Token persistence and multi-user support
- - `intercept-network-call.md` - Network spy/stub with automatic JSON parsing
- - `recurse.md` - Cypress-style polling for async conditions
- - `log.md` - Playwright report-integrated logging
- - `file-utils.md` - CSV/XLSX/PDF/ZIP reading and validation
- - `burn-in.md` - Smart test selection (relevant for CI test generation)
- - `network-error-monitor.md` - Automatic HTTP error detection
- - `fixtures-composition.md` - mergeTests composition patterns
-
- **If `config.tea_use_playwright_utils: false` (Traditional Patterns):**
- - `fixture-architecture.md` - Test fixture patterns (pure function β fixture β mergeTests, auto-cleanup, 406 lines, 5 examples)
- - `network-first.md` - Route interception patterns (intercept before navigate, HAR capture, deterministic waiting, 489 lines, 5 examples)
-
- **Healing Knowledge (If `{auto_heal_failures}` is true):**
- - `test-healing-patterns.md` - Common failure patterns and automated fixes (stale selectors, race conditions, dynamic data, network errors, hard waits, 648 lines, 5 examples)
- - `selector-resilience.md` - Selector debugging and refactoring guide (data-testid > ARIA > text > CSS hierarchy, anti-patterns, 541 lines, 4 examples)
- - `timing-debugging.md` - Race condition identification and fixes (network-first, deterministic waiting, async debugging, 370 lines, 3 examples)
-
----
-
-## Step 2: Identify Automation Targets
-
-### Actions
-
-1. **Determine What Needs Testing**
-
- **BMad-Integrated Mode (story available):**
- - Map acceptance criteria from story to test scenarios
- - Identify features implemented in this story
- - Check if story has existing ATDD tests (from `*atdd` workflow)
- - Expand beyond ATDD with edge cases and negative paths
-
- **Standalone Mode (no story):**
- - If `{target_feature}` specified: Analyze that specific feature
- - If `{target_files}` specified: Analyze those specific files
- - If `{auto_discover_features}` is true: Scan `{source_dir}` for features
- - Prioritize features with:
- - No test coverage (highest priority)
- - Complex business logic
- - External integrations (API calls, database, auth)
- - Critical user paths (login, checkout, etc.)
-
-2. **Apply Test Level Selection Framework**
-
- **Knowledge Base Reference**: `test-levels-framework.md`
-
- For each feature or acceptance criterion, determine appropriate test level:
-
- **E2E (End-to-End)**:
- - Critical user journeys (login, checkout, core workflows)
- - Multi-system integration
- - Full user-facing scenarios
- - Characteristics: High confidence, slow, brittle
-
- **API (Integration)**:
- - Business logic validation
- - Service contracts and data transformations
- - Backend integration without UI
- - Characteristics: Fast feedback, stable, good balance
-
- **Component**:
- - UI component behavior (buttons, forms, modals)
- - Interaction testing (click, hover, keyboard)
- - State management within component
- - Characteristics: Fast, isolated, granular
-
- **Unit**:
- - Pure business logic and algorithms
- - Edge cases and error handling
- - Minimal dependencies
- - Characteristics: Fastest, most granular
-
-3. **Avoid Duplicate Coverage**
-
- **Critical principle:** Don't test same behavior at multiple levels unless necessary
- - Use E2E for critical happy path only
- - Use API tests for business logic variations
- - Use component tests for UI interaction edge cases
- - Use unit tests for pure logic edge cases
-
- **Example:**
- - E2E: User can log in with valid credentials β Dashboard loads
- - API: POST /auth/login returns 401 for invalid credentials
- - API: POST /auth/login returns 200 and JWT token for valid credentials
- - Component: LoginForm disables submit button when fields are empty
- - Unit: validateEmail() returns false for malformed email addresses
-
-4. **Assign Test Priorities**
-
- **Knowledge Base Reference**: `test-priorities-matrix.md`
-
- **P0 (Critical - Every commit)**:
- - Critical user paths that must always work
- - Security-critical functionality (auth, permissions)
- - Data integrity scenarios
- - Run in pre-commit hooks or PR checks
-
- **P1 (High - PR to main)**:
- - Important features with high user impact
- - Integration points between systems
- - Error handling for common failures
- - Run before merging to main branch
-
- **P2 (Medium - Nightly)**:
- - Edge cases with moderate impact
- - Less-critical feature variations
- - Performance/load testing
- - Run in nightly CI builds
-
- **P3 (Low - On-demand)**:
- - Nice-to-have validations
- - Rarely-used features
- - Exploratory testing scenarios
- - Run manually or weekly
-
- **Priority Variables:**
- - `{include_p0}` - Always include (default: true)
- - `{include_p1}` - High priority (default: true)
- - `{include_p2}` - Medium priority (default: true)
- - `{include_p3}` - Low priority (default: false)
-
-5. **Create Test Coverage Plan**
-
- Document what will be tested at each level with priorities:
-
- ```markdown
- ## Test Coverage Plan
-
- ### E2E Tests (P0)
-
- - User login with valid credentials β Dashboard loads
- - User logout β Redirects to login page
-
- ### API Tests (P1)
-
- - POST /auth/login - valid credentials β 200 + JWT token
- - POST /auth/login - invalid credentials β 401 + error message
- - POST /auth/login - missing fields β 400 + validation errors
-
- ### Component Tests (P1)
-
- - LoginForm - empty fields β submit button disabled
- - LoginForm - valid input β submit button enabled
-
- ### Unit Tests (P2)
-
- - validateEmail() - valid email β returns true
- - validateEmail() - malformed email β returns false
- ```
-
----
-
-## Step 3: Generate Test Infrastructure
-
-### Actions
-
-1. **Enhance Fixture Architecture**
-
- **Knowledge Base Reference**: `fixture-architecture.md`
-
- Check existing fixtures in `tests/support/fixtures/`:
- - If missing or incomplete, create fixture architecture
- - Use Playwright's `test.extend()` pattern
- - Ensure all fixtures have auto-cleanup in teardown
-
- **Common fixtures to create/enhance:**
- - **authenticatedUser**: User with valid session (auto-deletes user after test)
- - **apiRequest**: Authenticated API client with base URL and headers
- - **mockNetwork**: Network mocking for external services
- - **testDatabase**: Database with test data (auto-cleanup after test)
-
- **Example fixture:**
-
- ```typescript
- // tests/support/fixtures/auth.fixture.ts
- import { test as base } from '@playwright/test';
- import { createUser, deleteUser } from '../factories/user.factory';
-
- export const test = base.extend({
- authenticatedUser: async ({ page }, use) => {
- // Setup: Create and authenticate user
- const user = await createUser();
- await page.goto('/login');
- await page.fill('[data-testid="email"]', user.email);
- await page.fill('[data-testid="password"]', user.password);
- await page.click('[data-testid="login-button"]');
- await page.waitForURL('/dashboard');
-
- // Provide to test
- await use(user);
-
- // Cleanup: Delete user automatically
- await deleteUser(user.id);
- },
- });
- ```
-
-2. **Enhance Data Factories**
-
- **Knowledge Base Reference**: `data-factories.md`
-
- Check existing factories in `tests/support/factories/`:
- - If missing or incomplete, create factory architecture
- - Use `@faker-js/faker` for all random data (no hardcoded values)
- - Support overrides for specific test scenarios
-
- **Common factories to create/enhance:**
- - User factory (email, password, name, role)
- - Product factory (name, price, description, SKU)
- - Order factory (items, total, status, customer)
-
- **Example factory:**
-
- ```typescript
- // tests/support/factories/user.factory.ts
- import { faker } from '@faker-js/faker';
-
- export const createUser = (overrides = {}) => ({
- id: faker.number.int(),
- email: faker.internet.email(),
- password: faker.internet.password(),
- name: faker.person.fullName(),
- role: 'user',
- createdAt: faker.date.recent().toISOString(),
- ...overrides,
- });
-
- export const createUsers = (count: number) => Array.from({ length: count }, () => createUser());
-
- // API helper for cleanup
- export const deleteUser = async (userId: number) => {
- await fetch(`/api/users/${userId}`, { method: 'DELETE' });
- };
- ```
-
-3. **Create/Enhance Helper Utilities**
-
- If `{update_helpers}` is true:
-
- Check `tests/support/helpers/` for common utilities:
- - **waitFor**: Polling helper for complex conditions
- - **retry**: Retry helper for flaky operations
- - **testData**: Test data generation helpers
- - **assertions**: Custom assertion helpers
-
- **Example helper:**
-
- ```typescript
- // tests/support/helpers/wait-for.ts
- export const waitFor = async (condition: () => Promise, timeout = 5000, interval = 100): Promise => {
- const startTime = Date.now();
- while (Date.now() - startTime < timeout) {
- if (await condition()) return;
- await new Promise((resolve) => setTimeout(resolve, interval));
- }
- throw new Error(`Condition not met within ${timeout}ms`);
- };
- ```
-
----
-
-## Step 4: Generate Test Files
-
-### Actions
-
-1. **Create Test File Structure**
-
- ```
- tests/
- βββ e2e/
- β βββ {feature-name}.spec.ts # E2E tests (P0-P1)
- βββ api/
- β βββ {feature-name}.api.spec.ts # API tests (P1-P2)
- βββ component/
- β βββ {ComponentName}.test.tsx # Component tests (P1-P2)
- βββ unit/
- β βββ {module-name}.test.ts # Unit tests (P2-P3)
- βββ support/
- βββ fixtures/ # Test fixtures
- βββ factories/ # Data factories
- βββ helpers/ # Utility functions
- ```
-
-2. **Write E2E Tests (If Applicable)**
-
- **Follow Given-When-Then format:**
-
- ```typescript
- import { test, expect } from '@playwright/test';
-
- test.describe('User Authentication', () => {
- test('[P0] should login with valid credentials and load dashboard', async ({ page }) => {
- // GIVEN: User is on login page
- await page.goto('/login');
-
- // WHEN: User submits valid credentials
- await page.fill('[data-testid="email-input"]', 'user@example.com');
- await page.fill('[data-testid="password-input"]', 'Password123!');
- await page.click('[data-testid="login-button"]');
-
- // THEN: User is redirected to dashboard
- await expect(page).toHaveURL('/dashboard');
- await expect(page.locator('[data-testid="user-name"]')).toBeVisible();
- });
-
- test('[P1] should display error for invalid credentials', async ({ page }) => {
- // GIVEN: User is on login page
- await page.goto('/login');
-
- // WHEN: User submits invalid credentials
- await page.fill('[data-testid="email-input"]', 'invalid@example.com');
- await page.fill('[data-testid="password-input"]', 'wrongpassword');
- await page.click('[data-testid="login-button"]');
-
- // THEN: Error message is displayed
- await expect(page.locator('[data-testid="error-message"]')).toHaveText('Invalid email or password');
- });
- });
- ```
-
- **Critical patterns:**
- - Tag tests with priority: `[P0]`, `[P1]`, `[P2]`, `[P3]` in test name
- - One assertion per test (atomic tests)
- - Explicit waits (no hard waits/sleeps)
- - Network-first approach (route interception before navigation)
- - data-testid selectors for stability
- - Clear Given-When-Then structure
-
-3. **Write API Tests (If Applicable)**
-
- ```typescript
- import { test, expect } from '@playwright/test';
-
- test.describe('User Authentication API', () => {
- test('[P1] POST /api/auth/login - should return token for valid credentials', async ({ request }) => {
- // GIVEN: Valid user credentials
- const credentials = {
- email: 'user@example.com',
- password: 'Password123!',
- };
-
- // WHEN: Logging in via API
- const response = await request.post('/api/auth/login', {
- data: credentials,
- });
-
- // THEN: Returns 200 and JWT token
- expect(response.status()).toBe(200);
- const body = await response.json();
- expect(body).toHaveProperty('token');
- expect(body.token).toMatch(/^[A-Za-z0-9-_]+\.[A-Za-z0-9-_]+\.[A-Za-z0-9-_]+$/); // JWT format
- });
-
- test('[P1] POST /api/auth/login - should return 401 for invalid credentials', async ({ request }) => {
- // GIVEN: Invalid credentials
- const credentials = {
- email: 'invalid@example.com',
- password: 'wrongpassword',
- };
-
- // WHEN: Attempting login
- const response = await request.post('/api/auth/login', {
- data: credentials,
- });
-
- // THEN: Returns 401 with error
- expect(response.status()).toBe(401);
- const body = await response.json();
- expect(body).toMatchObject({
- error: 'Invalid credentials',
- });
- });
- });
- ```
-
-4. **Write Component Tests (If Applicable)**
-
- **Knowledge Base Reference**: `component-tdd.md`
-
- ```typescript
- import { test, expect } from '@playwright/experimental-ct-react';
- import { LoginForm } from './LoginForm';
-
- test.describe('LoginForm Component', () => {
- test('[P1] should disable submit button when fields are empty', async ({ mount }) => {
- // GIVEN: LoginForm is mounted
- const component = await mount();
-
- // WHEN: Form is initially rendered
- const submitButton = component.locator('button[type="submit"]');
-
- // THEN: Submit button is disabled
- await expect(submitButton).toBeDisabled();
- });
-
- test('[P1] should enable submit button when fields are filled', async ({ mount }) => {
- // GIVEN: LoginForm is mounted
- const component = await mount();
-
- // WHEN: User fills in email and password
- await component.locator('[data-testid="email-input"]').fill('user@example.com');
- await component.locator('[data-testid="password-input"]').fill('Password123!');
-
- // THEN: Submit button is enabled
- const submitButton = component.locator('button[type="submit"]');
- await expect(submitButton).toBeEnabled();
- });
- });
- ```
-
-5. **Write Unit Tests (If Applicable)**
-
- ```typescript
- import { validateEmail } from './validation';
-
- describe('Email Validation', () => {
- test('[P2] should return true for valid email', () => {
- // GIVEN: Valid email address
- const email = 'user@example.com';
-
- // WHEN: Validating email
- const result = validateEmail(email);
-
- // THEN: Returns true
- expect(result).toBe(true);
- });
-
- test('[P2] should return false for malformed email', () => {
- // GIVEN: Malformed email addresses
- const invalidEmails = ['notanemail', '@example.com', 'user@', 'user @example.com'];
-
- // WHEN/THEN: Each should fail validation
- invalidEmails.forEach((email) => {
- expect(validateEmail(email)).toBe(false);
- });
- });
- });
- ```
-
-6. **Apply Network-First Pattern (E2E tests)**
-
- **Knowledge Base Reference**: `network-first.md`
-
- **Critical pattern to prevent race conditions:**
-
- ```typescript
- test('should load user dashboard after login', async ({ page }) => {
- // CRITICAL: Intercept routes BEFORE navigation
- await page.route('**/api/user', (route) =>
- route.fulfill({
- status: 200,
- body: JSON.stringify({ id: 1, name: 'Test User' }),
- }),
- );
-
- // NOW navigate
- await page.goto('/dashboard');
-
- await expect(page.locator('[data-testid="user-name"]')).toHaveText('Test User');
- });
- ```
-
-7. **Enforce Quality Standards**
-
- **For every test:**
- - β Uses Given-When-Then format
- - β Has clear, descriptive name with priority tag
- - β One assertion per test (atomic)
- - β No hard waits or sleeps (use explicit waits)
- - β Self-cleaning (uses fixtures with auto-cleanup)
- - β Deterministic (no flaky patterns)
- - β Fast (under {max_test_duration} seconds)
- - β Lean (test file under {max_file_lines} lines)
-
- **Forbidden patterns:**
- - β Hard waits: `await page.waitForTimeout(2000)`
- - β Conditional flow: `if (await element.isVisible()) { ... }`
- - β Try-catch for test logic (use for cleanup only)
- - β Hardcoded test data (use factories)
- - β Page objects (keep tests simple and direct)
- - β Shared state between tests
-
----
-
-## Step 5: Execute, Validate & Heal Generated Tests (NEW - Phase 2.5)
-
-**Purpose**: Automatically validate generated tests and heal common failures before delivery
-
-### Actions
-
-1. **Validate Generated Tests**
-
- Always validate (auto_validate is always true):
- - Run generated tests to verify they work
- - Continue with healing if config.tea_use_mcp_enhancements is true
-
-2. **Run Generated Tests**
-
- Execute the full test suite that was just generated:
-
- ```bash
- npx playwright test {generated_test_files}
- ```
-
- Capture results:
- - Total tests run
- - Passing tests count
- - Failing tests count
- - Error messages and stack traces for failures
-
-3. **Evaluate Results**
-
- **If ALL tests pass:**
- - β Generate report with success summary
- - Proceed to Step 6 (Documentation and Scripts)
-
- **If tests FAIL:**
- - Check config.tea_use_mcp_enhancements setting
- - If true: Enter healing loop (Step 5.4)
- - If false: Document failures for manual review, proceed to Step 6
-
-4. **Healing Loop (If config.tea_use_mcp_enhancements is true)**
-
- **Iteration limit**: 3 attempts per test (constant)
-
- **For each failing test:**
-
- **A. Load Healing Knowledge Fragments**
-
- Consult `tea-index.csv` to load healing patterns:
- - `test-healing-patterns.md` - Common failure patterns and fixes
- - `selector-resilience.md` - Selector debugging and refactoring
- - `timing-debugging.md` - Race condition identification and fixes
-
- **B. Identify Failure Pattern**
-
- Analyze error message and stack trace to classify failure type:
-
- **Stale Selector Failure:**
- - Error contains: "locator resolved to 0 elements", "element not found", "unable to find element"
- - Extract selector from error message
- - Apply selector healing (knowledge from `selector-resilience.md`):
- - If CSS class β Replace with `page.getByTestId()`
- - If nth() β Replace with `filter({ hasText })`
- - If ID β Replace with data-testid
- - If complex XPath β Replace with ARIA role
-
- **Race Condition Failure:**
- - Error contains: "timeout waiting for", "element not visible", "timed out retrying"
- - Detect missing network waits or hard waits in test code
- - Apply timing healing (knowledge from `timing-debugging.md`):
- - Add network-first interception before navigate
- - Replace `waitForTimeout()` with `waitForResponse()`
- - Add explicit element state waits (`waitFor({ state: 'visible' })`)
-
- **Dynamic Data Failure:**
- - Error contains: "Expected 'User 123' but received 'User 456'", timestamp mismatches
- - Identify hardcoded assertions
- - Apply data healing (knowledge from `test-healing-patterns.md`):
- - Replace hardcoded IDs with regex (`/User \d+/`)
- - Replace hardcoded dates with dynamic generation
- - Capture dynamic values and use in assertions
-
- **Network Error Failure:**
- - Error contains: "API call failed", "500 error", "network error"
- - Detect missing route interception
- - Apply network healing (knowledge from `test-healing-patterns.md`):
- - Add `page.route()` or `cy.intercept()` for API mocking
- - Mock error scenarios (500, 429, timeout)
-
- **Hard Wait Detection:**
- - Scan test code for `page.waitForTimeout()`, `cy.wait(number)`, `sleep()`
- - Apply hard wait healing (knowledge from `timing-debugging.md`):
- - Replace with event-based waits
- - Add network response waits
- - Use element state changes
-
- **C. MCP Healing Mode (If MCP Tools Available)**
-
- If Playwright MCP tools are available in your IDE:
-
- Use MCP tools for interactive healing:
- - `playwright_test_debug_test`: Pause on failure for visual inspection
- - `browser_snapshot`: Capture visual context at failure point
- - `browser_console_messages`: Retrieve console logs for JS errors
- - `browser_network_requests`: Analyze network activity
- - `browser_generate_locator`: Generate better selectors interactively
-
- Apply MCP-generated fixes to test code.
-
- **D. Pattern-Based Healing Mode (Fallback)**
-
- If MCP unavailable, use pattern-based analysis:
- - Parse error message and stack trace
- - Match against failure patterns from knowledge base
- - Apply fixes programmatically:
- - Selector fixes: Use suggestions from `selector-resilience.md`
- - Timing fixes: Apply patterns from `timing-debugging.md`
- - Data fixes: Use patterns from `test-healing-patterns.md`
-
- **E. Apply Healing Fix**
- - Modify test file with healed code
- - Re-run test to validate fix
- - If test passes: Mark as healed, move to next failure
- - If test fails: Increment iteration count, try different pattern
-
- **F. Iteration Limit Handling**
-
- After 3 failed healing attempts:
-
- Always mark unfixable tests:
- - Mark test with `test.fixme()` instead of `test()`
- - Add detailed comment explaining:
- - What failure occurred
- - What healing was attempted (3 iterations)
- - Why healing failed
- - Manual investigation needed
-
- ```typescript
- test.fixme('[P1] should handle complex interaction', async ({ page }) => {
- // FIXME: Test healing failed after 3 attempts
- // Failure: "Locator 'button[data-action="submit"]' resolved to 0 elements"
- // Attempted fixes:
- // 1. Replaced with page.getByTestId('submit-button') - still failing
- // 2. Replaced with page.getByRole('button', { name: 'Submit' }) - still failing
- // 3. Added waitForLoadState('networkidle') - still failing
- // Manual investigation needed: Selector may require application code changes
- // TODO: Review with team, may need data-testid added to button component
- // Original test code...
- });
- ```
-
- **Note**: Workflow continues even with unfixable tests (marked as test.fixme() for manual review)
-
-5. **Generate Healing Report**
-
- Document healing outcomes:
-
- ```markdown
- ## Test Healing Report
-
- **Auto-Heal Enabled**: {auto_heal_failures}
- **Healing Mode**: {use_mcp_healing ? "MCP-assisted" : "Pattern-based"}
- **Iterations Allowed**: {max_healing_iterations}
-
- ### Validation Results
-
- - **Total tests**: {total_tests}
- - **Passing**: {passing_tests}
- - **Failing**: {failing_tests}
-
- ### Healing Outcomes
-
- **Successfully Healed ({healed_count} tests):**
-
- - `tests/e2e/login.spec.ts:15` - Stale selector (CSS class β data-testid)
- - `tests/e2e/checkout.spec.ts:42` - Race condition (added network-first interception)
- - `tests/api/users.spec.ts:28` - Dynamic data (hardcoded ID β regex pattern)
-
- **Unable to Heal ({unfixable_count} tests):**
-
- - `tests/e2e/complex-flow.spec.ts:67` - Marked as test.fixme() with manual investigation needed
- - Failure: Locator not found after 3 healing attempts
- - Requires application code changes (add data-testid to component)
-
- ### Healing Patterns Applied
-
- - **Selector fixes**: 2 (CSS class β data-testid, nth() β filter())
- - **Timing fixes**: 1 (added network-first interception)
- - **Data fixes**: 1 (hardcoded ID β regex)
-
- ### Knowledge Base References
-
- - `test-healing-patterns.md` - Common failure patterns
- - `selector-resilience.md` - Selector refactoring guide
- - `timing-debugging.md` - Race condition prevention
- ```
-
-6. **Update Test Files with Healing Results**
- - Save healed test code to files
- - Mark unfixable tests with `test.fixme()` and detailed comments
- - Preserve original test logic in comments (for debugging)
-
----
-
-## Step 6: Update Documentation and Scripts
-
-### Actions
-
-1. **Update Test README**
-
- If `{update_readme}` is true:
-
- Create or update `tests/README.md` with:
- - Overview of test suite structure
- - How to run tests (all, specific files, by priority)
- - Fixture and factory usage examples
- - Priority tagging convention ([P0], [P1], [P2], [P3])
- - How to write new tests
- - Common patterns and anti-patterns
-
- **Example section:**
-
- ````markdown
- ## Running Tests
-
- ```bash
- # Run all tests
- npm run test:e2e
-
- # Run by priority
- npm run test:e2e -- --grep "@P0"
- npm run test:e2e -- --grep "@P1"
-
- # Run specific file
- npm run test:e2e -- user-authentication.spec.ts
-
- # Run in headed mode
- npm run test:e2e -- --headed
-
- # Debug specific test
- npm run test:e2e -- user-authentication.spec.ts --debug
- ```
- ````
-
- ## Priority Tags
- - **[P0]**: Critical paths, run every commit
- - **[P1]**: High priority, run on PR to main
- - **[P2]**: Medium priority, run nightly
- - **[P3]**: Low priority, run on-demand
-
- ```
-
- ```
-
-2. **Update package.json Scripts**
-
- If `{update_package_scripts}` is true:
-
- Add or update test execution scripts:
-
- ```json
- {
- "scripts": {
- "test:e2e": "playwright test",
- "test:e2e:p0": "playwright test --grep '@P0'",
- "test:e2e:p1": "playwright test --grep '@P1|@P0'",
- "test:api": "playwright test tests/api",
- "test:component": "playwright test tests/component",
- "test:unit": "vitest"
- }
- }
- ```
-
-3. **Run Test Suite**
-
- If `{run_tests_after_generation}` is true:
- - Run full test suite locally
- - Capture results (passing/failing counts)
- - Verify no flaky patterns (tests should be deterministic)
- - Document any setup requirements or known issues
-
----
-
-## Step 6: Generate Automation Summary
-
-### Actions
-
-1. **Create Automation Summary Document**
-
- Save to `{output_summary}` with:
-
- **BMad-Integrated Mode:**
-
- ````markdown
- # Automation Summary - {feature_name}
-
- **Date:** {date}
- **Story:** {story_id}
- **Coverage Target:** {coverage_target}
-
- ## Tests Created
-
- ### E2E Tests (P0-P1)
-
- - `tests/e2e/user-authentication.spec.ts` (2 tests, 87 lines)
- - [P0] Login with valid credentials β Dashboard loads
- - [P1] Display error for invalid credentials
-
- ### API Tests (P1-P2)
-
- - `tests/api/auth.api.spec.ts` (3 tests, 102 lines)
- - [P1] POST /auth/login - valid credentials β 200 + token
- - [P1] POST /auth/login - invalid credentials β 401 + error
- - [P2] POST /auth/login - missing fields β 400 + validation
-
- ### Component Tests (P1)
-
- - `tests/component/LoginForm.test.tsx` (2 tests, 45 lines)
- - [P1] Empty fields β submit button disabled
- - [P1] Valid input β submit button enabled
-
- ## Infrastructure Created
-
- ### Fixtures
-
- - `tests/support/fixtures/auth.fixture.ts` - authenticatedUser with auto-cleanup
-
- ### Factories
-
- - `tests/support/factories/user.factory.ts` - createUser(), deleteUser()
-
- ### Helpers
-
- - `tests/support/helpers/wait-for.ts` - Polling helper for complex conditions
-
- ## Test Execution
-
- ```bash
- # Run all new tests
- npm run test:e2e
-
- # Run by priority
- npm run test:e2e:p0 # Critical paths only
- npm run test:e2e:p1 # P0 + P1 tests
- ```
- ````
-
- ## Coverage Analysis
-
- **Total Tests:** 7
- - P0: 1 test (critical path)
- - P1: 5 tests (high priority)
- - P2: 1 test (medium priority)
-
- **Test Levels:**
- - E2E: 2 tests (user journeys)
- - API: 3 tests (business logic)
- - Component: 2 tests (UI behavior)
-
- **Coverage Status:**
- - β All acceptance criteria covered
- - β Happy path covered (E2E + API)
- - β Error cases covered (API)
- - β UI validation covered (Component)
- - β οΈ Edge case: Password reset flow not yet covered (future story)
-
- ## Definition of Done
- - [x] All tests follow Given-When-Then format
- - [x] All tests use data-testid selectors
- - [x] All tests have priority tags
- - [x] All tests are self-cleaning (fixtures with auto-cleanup)
- - [x] No hard waits or flaky patterns
- - [x] Test files under 300 lines
- - [x] All tests run under 1.5 minutes each
- - [x] README updated with test execution instructions
- - [x] package.json scripts updated
-
- ## Next Steps
- 1. Review generated tests with team
- 2. Run tests in CI pipeline: `npm run test:e2e`
- 3. Integrate with quality gate: `bmad tea *gate`
- 4. Monitor for flaky tests in burn-in loop
-
- ````
-
- **Standalone Mode:**
- ```markdown
- # Automation Summary - {target_feature}
-
- **Date:** {date}
- **Target:** {target_feature} (standalone analysis)
- **Coverage Target:** {coverage_target}
-
- ## Feature Analysis
-
- **Source Files Analyzed:**
- - `src/auth/login.ts` - Login logic and validation
- - `src/auth/session.ts` - Session management
- - `src/auth/validation.ts` - Email/password validation
-
- **Existing Coverage:**
- - E2E tests: 0 found
- - API tests: 0 found
- - Component tests: 0 found
- - Unit tests: 0 found
-
- **Coverage Gaps Identified:**
- - β No E2E tests for login flow
- - β No API tests for /auth/login endpoint
- - β No component tests for LoginForm
- - β No unit tests for validateEmail()
-
- ## Tests Created
-
- {Same structure as BMad-Integrated Mode}
-
- ## Recommendations
-
- 1. **High Priority (P0-P1):**
- - Add E2E test for password reset flow
- - Add API tests for token refresh endpoint
- - Add component tests for logout button
-
- 2. **Medium Priority (P2):**
- - Add unit tests for session timeout logic
- - Add E2E test for "remember me" functionality
-
- 3. **Future Enhancements:**
- - Consider contract testing for auth API
- - Add visual regression tests for login page
- - Set up burn-in loop for flaky test detection
-
- ## Definition of Done
-
- {Same checklist as BMad-Integrated Mode}
- ````
-
-2. **Provide Summary to User**
-
- Output concise summary:
-
- ```markdown
- ## Automation Complete
-
- **Coverage:** {total_tests} tests created across {test_levels} levels
- **Priority Breakdown:** P0: {p0_count}, P1: {p1_count}, P2: {p2_count}, P3: {p3_count}
- **Infrastructure:** {fixture_count} fixtures, {factory_count} factories
- **Output:** {output_summary}
-
- **Run tests:** `npm run test:e2e`
- **Next steps:** Review tests, run in CI, integrate with quality gate
- ```
-
----
-
-## Important Notes
-
-### Dual-Mode Operation
-
-**BMad-Integrated Mode** (story available):
-
-- Uses story acceptance criteria for coverage targeting
-- Aligns with test-design risk/priority assessment
-- Expands ATDD tests with edge cases and negative paths
-- Updates BMad status tracking
-
-**Standalone Mode** (no story):
-
-- Analyzes source code independently
-- Identifies coverage gaps automatically
-- Generates tests based on code analysis
-- Works with any project (BMad or non-BMad)
-
-**Auto-discover Mode** (no targets specified):
-
-- Scans codebase for features needing tests
-- Prioritizes features with no coverage
-- Generates comprehensive test plan
-
-### Avoid Duplicate Coverage
-
-**Critical principle:** Don't test same behavior at multiple levels
-
-**Good coverage:**
-
-- E2E: User can login β Dashboard loads (critical happy path)
-- API: POST /auth/login returns correct status codes (variations)
-- Component: LoginForm validates input (UI edge cases)
-
-**Bad coverage (duplicate):**
-
-- E2E: User can login β Dashboard loads
-- E2E: User can login with different emails β Dashboard loads (unnecessary duplication)
-- API: POST /auth/login returns 200 (already covered in E2E)
-
-Use E2E sparingly for critical paths. Use API/Component for variations and edge cases.
-
-### Priority Tagging
-
-**Tag every test with priority in test name:**
-
-```typescript
-test('[P0] should login with valid credentials', async ({ page }) => { ... });
-test('[P1] should display error for invalid credentials', async ({ page }) => { ... });
-test('[P2] should remember login preference', async ({ page }) => { ... });
-```
-
-**Enables selective test execution:**
-
-```bash
-# Run only P0 tests (critical paths)
-npm run test:e2e -- --grep "@P0"
-
-# Run P0 + P1 tests (pre-merge)
-npm run test:e2e -- --grep "@P0|@P1"
-```
-
-### No Page Objects
-
-**Do NOT create page object classes.** Keep tests simple and direct:
-
-```typescript
-// β CORRECT: Direct test
-test('should login', async ({ page }) => {
- await page.goto('/login');
- await page.fill('[data-testid="email"]', 'user@example.com');
- await page.click('[data-testid="login-button"]');
- await expect(page).toHaveURL('/dashboard');
-});
-
-// β WRONG: Page object abstraction
-class LoginPage {
- async login(email, password) { ... }
-}
-```
-
-Use fixtures for setup/teardown, not page objects for actions.
-
-### Deterministic Tests Only
-
-**No flaky patterns allowed:**
-
-```typescript
-// β WRONG: Hard wait
-await page.waitForTimeout(2000);
-
-// β CORRECT: Explicit wait
-await page.waitForSelector('[data-testid="user-name"]');
-await expect(page.locator('[data-testid="user-name"]')).toBeVisible();
-
-// β WRONG: Conditional flow
-if (await element.isVisible()) {
- await element.click();
-}
-
-// β CORRECT: Deterministic assertion
-await expect(element).toBeVisible();
-await element.click();
-
-// β WRONG: Try-catch for test logic
-try {
- await element.click();
-} catch (e) {
- // Test shouldn't catch errors
-}
-
-// β CORRECT: Let test fail if element not found
-await element.click();
-```
-
-### Self-Cleaning Tests
-
-**Every test must clean up its data:**
-
-```typescript
-// β CORRECT: Fixture with auto-cleanup
-export const test = base.extend({
- testUser: async ({ page }, use) => {
- const user = await createUser();
- await use(user);
- await deleteUser(user.id); // Auto-cleanup
- },
-});
-
-// β WRONG: Manual cleanup (can be forgotten)
-test('should login', async ({ page }) => {
- const user = await createUser();
- // ... test logic ...
- // Forgot to delete user!
-});
-```
-
-### File Size Limits
-
-**Keep test files lean (under {max_file_lines} lines):**
-
-- If file exceeds limit, split into multiple files by feature area
-- Group related tests in describe blocks
-- Extract common setup to fixtures
-
-### Knowledge Base Integration
-
-**Core Fragments (Auto-loaded in Step 1):**
-
-- `test-levels-framework.md` - E2E vs API vs Component vs Unit decision framework with characteristics matrix (467 lines, 4 examples)
-- `test-priorities-matrix.md` - P0-P3 classification with automated scoring and risk mapping (389 lines, 2 examples)
-- `fixture-architecture.md` - Pure function β fixture β mergeTests composition with auto-cleanup (406 lines, 5 examples)
-- `data-factories.md` - Factory patterns with faker: overrides, nested factories, API seeding (498 lines, 5 examples)
-- `selective-testing.md` - Tag-based, spec filters, diff-based selection, promotion rules (727 lines, 4 examples)
-- `ci-burn-in.md` - 10-iteration burn-in loop, parallel sharding, selective execution (678 lines, 4 examples)
-- `test-quality.md` - Deterministic tests, isolated with cleanup, explicit assertions, length/time optimization (658 lines, 5 examples)
-- `network-first.md` - Intercept before navigate, HAR capture, deterministic waiting strategies (489 lines, 5 examples)
-
-**Healing Fragments (Auto-loaded if `{auto_heal_failures}` enabled):**
-
-- `test-healing-patterns.md` - Common failure patterns: stale selectors, race conditions, dynamic data, network errors, hard waits (648 lines, 5 examples)
-- `selector-resilience.md` - Selector hierarchy (data-testid > ARIA > text > CSS), dynamic patterns, anti-patterns refactoring (541 lines, 4 examples)
-- `timing-debugging.md` - Race condition prevention, deterministic waiting, async debugging techniques (370 lines, 3 examples)
-
-**Manual Reference (Optional):**
-
-- Use `tea-index.csv` to find additional specialized fragments as needed
-
----
-
-## Output Summary
-
-After completing this workflow, provide a summary:
-
-````markdown
-## Automation Complete
-
-**Mode:** {standalone_mode ? "Standalone" : "BMad-Integrated"}
-**Target:** {story_id || target_feature || "Auto-discovered features"}
-
-**Tests Created:**
-
-- E2E: {e2e_count} tests ({p0_count} P0, {p1_count} P1, {p2_count} P2)
-- API: {api_count} tests ({p0_count} P0, {p1_count} P1, {p2_count} P2)
-- Component: {component_count} tests ({p1_count} P1, {p2_count} P2)
-- Unit: {unit_count} tests ({p2_count} P2, {p3_count} P3)
-
-**Infrastructure:**
-
-- Fixtures: {fixture_count} created/enhanced
-- Factories: {factory_count} created/enhanced
-- Helpers: {helper_count} created/enhanced
-
-**Documentation Updated:**
-
-- β Test README with execution instructions
-- β package.json scripts for test execution
-
-**Test Execution:**
-
-```bash
-# Run all tests
-npm run test:e2e
-
-# Run by priority
-npm run test:e2e:p0 # Critical paths only
-npm run test:e2e:p1 # P0 + P1 tests
-
-# Run specific file
-npm run test:e2e -- {first_test_file}
-```
-````
-
-**Coverage Status:**
-
-- β {coverage_percentage}% of features covered
-- β All P0 scenarios covered
-- β All P1 scenarios covered
-- β οΈ {gap_count} coverage gaps identified (documented in summary)
-
-**Quality Checks:**
-
-- β All tests follow Given-When-Then format
-- β All tests have priority tags
-- β All tests use data-testid selectors
-- β All tests are self-cleaning
-- β No hard waits or flaky patterns
-- β All test files under {max_file_lines} lines
-
-**Output File:** {output_summary}
-
-**Next Steps:**
-
-1. Review generated tests with team
-2. Run tests in CI pipeline
-3. Monitor for flaky tests in burn-in loop
-4. Integrate with quality gate: `bmad tea *gate`
-
-**Knowledge Base References Applied:**
-
-- Test level selection framework (E2E vs API vs Component vs Unit)
-- Priority classification (P0-P3)
-- Fixture architecture patterns with auto-cleanup
-- Data factory patterns using faker
-- Selective testing strategies
-- Test quality principles
-
-```
-
----
-
-## Validation
-
-After completing all steps, verify:
-
-- [ ] Execution mode determined (BMad-Integrated, Standalone, or Auto-discover)
-- [ ] BMad artifacts loaded if available (story, tech-spec, test-design, PRD)
-- [ ] Framework configuration loaded
-- [ ] Existing test coverage analyzed (gaps identified)
-- [ ] Knowledge base fragments loaded (test-levels, test-priorities, fixture-architecture, data-factories, selective-testing)
-- [ ] Automation targets identified (what needs testing)
-- [ ] Test levels selected appropriately (E2E, API, Component, Unit)
-- [ ] Duplicate coverage avoided (same behavior not tested at multiple levels)
-- [ ] Test priorities assigned (P0, P1, P2, P3)
-- [ ] Fixture architecture created/enhanced (with auto-cleanup)
-- [ ] Data factories created/enhanced (using faker)
-- [ ] Helper utilities created/enhanced (if needed)
-- [ ] E2E tests written (Given-When-Then, priority tags, data-testid selectors)
-- [ ] API tests written (Given-When-Then, priority tags, comprehensive coverage)
-- [ ] Component tests written (Given-When-Then, priority tags, UI behavior)
-- [ ] Unit tests written (Given-When-Then, priority tags, pure logic)
-- [ ] Network-first pattern applied (route interception before navigation)
-- [ ] Quality standards enforced (no hard waits, no flaky patterns, self-cleaning, deterministic)
-- [ ] Test README updated (execution instructions, priority tagging, patterns)
-- [ ] package.json scripts updated (test execution commands)
-- [ ] Test suite run locally (results captured)
-- [ ] Tests validated (if auto_validate enabled)
-- [ ] Failures healed (if auto_heal_failures enabled)
-- [ ] Healing report generated (if healing attempted)
-- [ ] Unfixable tests marked with test.fixme() (if any)
-- [ ] Automation summary created (tests, infrastructure, coverage, healing, DoD)
-- [ ] Output file formatted correctly
-
-Refer to `checklist.md` for comprehensive validation criteria.
-```
diff --git a/src/bmm/workflows/testarch/automate/workflow.yaml b/src/bmm/workflows/testarch/automate/workflow.yaml
deleted file mode 100644
index e244c051..00000000
--- a/src/bmm/workflows/testarch/automate/workflow.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Test Architect workflow: automate
-name: testarch-automate
-description: "Expand test automation coverage after implementation or analyze existing codebase to generate comprehensive test suite"
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/testarch/automate"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-template: false
-
-# Variables and inputs
-variables:
- # Execution mode and targeting
- standalone_mode: true # Can work without BMad artifacts (true) or integrate with BMad (false)
- coverage_target: "critical-paths" # critical-paths, comprehensive, selective
-
- # Directory paths
- test_dir: "{project-root}/tests" # Root test directory
- source_dir: "{project-root}/src" # Source code directory
-
-# Output configuration
-default_output_file: "{output_folder}/automation-summary.md"
-
-# Required tools
-required_tools:
- - read_file # Read source code, existing tests, BMad artifacts
- - write_file # Create test files, fixtures, factories, summaries
- - create_directory # Create test directories
- - list_files # Discover features and existing tests
- - search_repo # Find coverage gaps and patterns
- - glob # Find test files and source files
-
-tags:
- - qa
- - automation
- - test-architect
- - regression
- - coverage
-
-execution_hints:
- interactive: false # Minimize prompts
- autonomous: true # Proceed without user input unless blocked
- iterative: true
-
-web_bundle: false
diff --git a/src/bmm/workflows/testarch/ci/checklist.md b/src/bmm/workflows/testarch/ci/checklist.md
deleted file mode 100644
index 984e3308..00000000
--- a/src/bmm/workflows/testarch/ci/checklist.md
+++ /dev/null
@@ -1,247 +0,0 @@
-# CI/CD Pipeline Setup - Validation Checklist
-
-## Prerequisites
-
-- [ ] Git repository initialized (`.git/` exists)
-- [ ] Git remote configured (`git remote -v` shows origin)
-- [ ] Test framework configured (`playwright.config._` or `cypress.config._`)
-- [ ] Local tests pass (`npm run test:e2e` succeeds)
-- [ ] Team agrees on CI platform
-- [ ] Access to CI platform settings (if updating)
-
-Note: CI setup is typically a one-time task per repo and can be run any time after the test framework is configured.
-
-## Process Steps
-
-### Step 1: Preflight Checks
-
-- [ ] Git repository validated
-- [ ] Framework configuration detected
-- [ ] Local test execution successful
-- [ ] CI platform detected or selected
-- [ ] Node version identified (.nvmrc or default)
-- [ ] No blocking issues found
-
-### Step 2: CI Pipeline Configuration
-
-- [ ] CI configuration file created (`.github/workflows/test.yml` or `.gitlab-ci.yml`)
-- [ ] File is syntactically valid (no YAML errors)
-- [ ] Correct framework commands configured
-- [ ] Node version matches project
-- [ ] Test directory paths correct
-
-### Step 3: Parallel Sharding
-
-- [ ] Matrix strategy configured (4 shards default)
-- [ ] Shard syntax correct for framework
-- [ ] fail-fast set to false
-- [ ] Shard count appropriate for test suite size
-
-### Step 4: Burn-In Loop
-
-- [ ] Burn-in job created
-- [ ] 10 iterations configured
-- [ ] Proper exit on failure (`|| exit 1`)
-- [ ] Runs on appropriate triggers (PR, cron)
-- [ ] Failure artifacts uploaded
-
-### Step 5: Caching Configuration
-
-- [ ] Dependency cache configured (npm/yarn)
-- [ ] Cache key uses lockfile hash
-- [ ] Browser cache configured (Playwright/Cypress)
-- [ ] Restore-keys defined for fallback
-- [ ] Cache paths correct for platform
-
-### Step 6: Artifact Collection
-
-- [ ] Artifacts upload on failure only
-- [ ] Correct artifact paths (test-results/, traces/, etc.)
-- [ ] Retention days set (30 default)
-- [ ] Artifact names unique per shard
-- [ ] No sensitive data in artifacts
-
-### Step 7: Retry Logic
-
-- [ ] Retry action/strategy configured
-- [ ] Max attempts: 2-3
-- [ ] Timeout appropriate (30 min)
-- [ ] Retry only on transient errors
-
-### Step 8: Helper Scripts
-
-- [ ] `scripts/test-changed.sh` created
-- [ ] `scripts/ci-local.sh` created
-- [ ] `scripts/burn-in.sh` created (optional)
-- [ ] Scripts are executable (`chmod +x`)
-- [ ] Scripts use correct test commands
-- [ ] Shebang present (`#!/bin/bash`)
-
-### Step 9: Documentation
-
-- [ ] `docs/ci.md` created with pipeline guide
-- [ ] `docs/ci-secrets-checklist.md` created
-- [ ] Required secrets documented
-- [ ] Setup instructions clear
-- [ ] Troubleshooting section included
-- [ ] Badge URLs provided (optional)
-
-## Output Validation
-
-### Configuration Validation
-
-- [ ] CI file loads without errors
-- [ ] All paths resolve correctly
-- [ ] No hardcoded values (use env vars)
-- [ ] Triggers configured (push, pull_request, schedule)
-- [ ] Platform-specific syntax correct
-
-### Execution Validation
-
-- [ ] First CI run triggered (push to remote)
-- [ ] Pipeline starts without errors
-- [ ] All jobs appear in CI dashboard
-- [ ] Caching works (check logs for cache hit)
-- [ ] Tests execute in parallel
-- [ ] Artifacts collected on failure
-
-### Performance Validation
-
-- [ ] Lint stage: <2 minutes
-- [ ] Test stage (per shard): <10 minutes
-- [ ] Burn-in stage: <30 minutes
-- [ ] Total pipeline: <45 minutes
-- [ ] Cache reduces install time by 2-5 minutes
-
-## Quality Checks
-
-### Best Practices Compliance
-
-- [ ] Burn-in loop follows production patterns
-- [ ] Parallel sharding configured optimally
-- [ ] Failure-only artifact collection
-- [ ] Selective testing enabled (optional)
-- [ ] Retry logic handles transient failures only
-- [ ] No secrets in configuration files
-
-### Knowledge Base Alignment
-
-- [ ] Burn-in pattern matches `ci-burn-in.md`
-- [ ] Selective testing matches `selective-testing.md`
-- [ ] Artifact collection matches `visual-debugging.md`
-- [ ] Test quality matches `test-quality.md`
-
-### Security Checks
-
-- [ ] No credentials in CI configuration
-- [ ] Secrets use platform secret management
-- [ ] Environment variables for sensitive data
-- [ ] Artifact retention appropriate (not too long)
-- [ ] No debug output exposing secrets
-
-## Integration Points
-
-### Status File Integration
-
-- [ ] CI setup logged in Quality & Testing Progress section
-- [ ] Status updated with completion timestamp
-- [ ] Platform and configuration noted
-
-### Knowledge Base Integration
-
-- [ ] Relevant knowledge fragments loaded
-- [ ] Patterns applied from knowledge base
-- [ ] Documentation references knowledge base
-- [ ] Knowledge base references in README
-
-### Workflow Dependencies
-
-- [ ] `framework` workflow completed first
-- [ ] Can proceed to `atdd` workflow after CI setup
-- [ ] Can proceed to `automate` workflow
-- [ ] CI integrates with `gate` workflow
-
-## Completion Criteria
-
-**All must be true:**
-
-- [ ] All prerequisites met
-- [ ] All process steps completed
-- [ ] All output validations passed
-- [ ] All quality checks passed
-- [ ] All integration points verified
-- [ ] First CI run successful
-- [ ] Performance targets met
-- [ ] Documentation complete
-
-## Post-Workflow Actions
-
-**User must complete:**
-
-1. [ ] Commit CI configuration
-2. [ ] Push to remote repository
-3. [ ] Configure required secrets in CI platform
-4. [ ] Open PR to trigger first CI run
-5. [ ] Monitor and verify pipeline execution
-6. [ ] Adjust parallelism if needed (based on actual run times)
-7. [ ] Set up notifications (optional)
-
-**Recommended next workflows:**
-
-1. [ ] Run `atdd` workflow for test generation
-2. [ ] Run `automate` workflow for coverage expansion
-3. [ ] Run `gate` workflow for quality gates
-
-## Rollback Procedure
-
-If workflow fails:
-
-1. [ ] Delete CI configuration file
-2. [ ] Remove helper scripts directory
-3. [ ] Remove documentation (docs/ci.md, etc.)
-4. [ ] Clear CI platform secrets (if added)
-5. [ ] Review error logs
-6. [ ] Fix issues and retry workflow
-
-## Notes
-
-### Common Issues
-
-**Issue**: CI file syntax errors
-
-- **Solution**: Validate YAML syntax online or with linter
-
-**Issue**: Tests fail in CI but pass locally
-
-- **Solution**: Use `scripts/ci-local.sh` to mirror CI environment
-
-**Issue**: Caching not working
-
-- **Solution**: Check cache key formula, verify paths
-
-**Issue**: Burn-in too slow
-
-- **Solution**: Reduce iterations or run on cron only
-
-### Platform-Specific
-
-**GitHub Actions:**
-
-- Secrets: Repository Settings β Secrets and variables β Actions
-- Runners: Ubuntu latest recommended
-- Concurrency limits: 20 jobs for free tier
-
-**GitLab CI:**
-
-- Variables: Project Settings β CI/CD β Variables
-- Runners: Shared or project-specific
-- Pipeline quota: 400 minutes/month free tier
-
----
-
-**Checklist Complete**: Sign off when all items validated.
-
-**Completed by:** {name}
-**Date:** {date}
-**Platform:** {GitHub Actions, GitLab CI, Other}
-**Notes:** {notes}
diff --git a/src/bmm/workflows/testarch/ci/github-actions-template.yaml b/src/bmm/workflows/testarch/ci/github-actions-template.yaml
deleted file mode 100644
index 9f09a73f..00000000
--- a/src/bmm/workflows/testarch/ci/github-actions-template.yaml
+++ /dev/null
@@ -1,198 +0,0 @@
-# GitHub Actions CI/CD Pipeline for Test Execution
-# Generated by BMad TEA Agent - Test Architect Module
-# Optimized for: Playwright/Cypress, Parallel Sharding, Burn-In Loop
-
-name: Test Pipeline
-
-on:
- push:
- branches: [main, develop]
- pull_request:
- branches: [main, develop]
- schedule:
- # Weekly burn-in on Sundays at 2 AM UTC
- - cron: "0 2 * * 0"
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.ref }}
- cancel-in-progress: true
-
-jobs:
- # Lint stage - Code quality checks
- lint:
- name: Lint
- runs-on: ubuntu-latest
- timeout-minutes: 5
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Determine Node version
- id: node-version
- run: |
- if [ -f .nvmrc ]; then
- echo "value=$(cat .nvmrc)" >> "$GITHUB_OUTPUT"
- echo "Using Node from .nvmrc"
- else
- echo "value=24" >> "$GITHUB_OUTPUT"
- echo "Using default Node 24 (current LTS)"
- fi
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ steps.node-version.outputs.value }}
- cache: "npm"
-
- - name: Install dependencies
- run: npm ci
-
- - name: Run linter
- run: npm run lint
-
- # Test stage - Parallel execution with sharding
- test:
- name: Test (Shard ${{ matrix.shard }})
- runs-on: ubuntu-latest
- timeout-minutes: 30
- needs: lint
-
- strategy:
- fail-fast: false
- matrix:
- shard: [1, 2, 3, 4]
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Determine Node version
- id: node-version
- run: |
- if [ -f .nvmrc ]; then
- echo "value=$(cat .nvmrc)" >> "$GITHUB_OUTPUT"
- echo "Using Node from .nvmrc"
- else
- echo "value=22" >> "$GITHUB_OUTPUT"
- echo "Using default Node 22 (current LTS)"
- fi
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ steps.node-version.outputs.value }}
- cache: "npm"
-
- - name: Cache Playwright browsers
- uses: actions/cache@v4
- with:
- path: ~/.cache/ms-playwright
- key: ${{ runner.os }}-playwright-${{ hashFiles('**/package-lock.json') }}
- restore-keys: |
- ${{ runner.os }}-playwright-
-
- - name: Install dependencies
- run: npm ci
-
- - name: Install Playwright browsers
- run: npx playwright install --with-deps chromium
-
- - name: Run tests (shard ${{ matrix.shard }}/4)
- run: npm run test:e2e -- --shard=${{ matrix.shard }}/4
-
- - name: Upload test results
- if: failure()
- uses: actions/upload-artifact@v4
- with:
- name: test-results-${{ matrix.shard }}
- path: |
- test-results/
- playwright-report/
- retention-days: 30
-
- # Burn-in stage - Flaky test detection
- burn-in:
- name: Burn-In (Flaky Detection)
- runs-on: ubuntu-latest
- timeout-minutes: 60
- needs: test
- # Only run burn-in on PRs to main/develop or on schedule
- if: github.event_name == 'pull_request' || github.event_name == 'schedule'
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Determine Node version
- id: node-version
- run: |
- if [ -f .nvmrc ]; then
- echo "value=$(cat .nvmrc)" >> "$GITHUB_OUTPUT"
- echo "Using Node from .nvmrc"
- else
- echo "value=22" >> "$GITHUB_OUTPUT"
- echo "Using default Node 22 (current LTS)"
- fi
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ steps.node-version.outputs.value }}
- cache: "npm"
-
- - name: Cache Playwright browsers
- uses: actions/cache@v4
- with:
- path: ~/.cache/ms-playwright
- key: ${{ runner.os }}-playwright-${{ hashFiles('**/package-lock.json') }}
-
- - name: Install dependencies
- run: npm ci
-
- - name: Install Playwright browsers
- run: npx playwright install --with-deps chromium
-
- - name: Run burn-in loop (10 iterations)
- run: |
- echo "π₯ Starting burn-in loop - detecting flaky tests"
- for i in {1..10}; do
- echo "ββββββββββββββββββββββββββββββββββββββββ"
- echo "π₯ Burn-in iteration $i/10"
- echo "ββββββββββββββββββββββββββββββββββββββββ"
- npm run test:e2e || exit 1
- done
- echo "β Burn-in complete - no flaky tests detected"
-
- - name: Upload burn-in failure artifacts
- if: failure()
- uses: actions/upload-artifact@v4
- with:
- name: burn-in-failures
- path: |
- test-results/
- playwright-report/
- retention-days: 30
-
- # Report stage - Aggregate and publish results
- report:
- name: Test Report
- runs-on: ubuntu-latest
- needs: [test, burn-in]
- if: always()
-
- steps:
- - name: Download all artifacts
- uses: actions/download-artifact@v4
- with:
- path: artifacts
-
- - name: Generate summary
- run: |
- echo "## Test Execution Summary" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
- echo "- **Status**: ${{ needs.test.result }}" >> $GITHUB_STEP_SUMMARY
- echo "- **Burn-in**: ${{ needs.burn-in.result }}" >> $GITHUB_STEP_SUMMARY
- echo "- **Shards**: 4" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
-
- if [ "${{ needs.burn-in.result }}" == "failure" ]; then
- echo "β οΈ **Flaky tests detected** - Review burn-in artifacts" >> $GITHUB_STEP_SUMMARY
- fi
diff --git a/src/bmm/workflows/testarch/ci/gitlab-ci-template.yaml b/src/bmm/workflows/testarch/ci/gitlab-ci-template.yaml
deleted file mode 100644
index f5336de4..00000000
--- a/src/bmm/workflows/testarch/ci/gitlab-ci-template.yaml
+++ /dev/null
@@ -1,149 +0,0 @@
-# GitLab CI/CD Pipeline for Test Execution
-# Generated by BMad TEA Agent - Test Architect Module
-# Optimized for: Playwright/Cypress, Parallel Sharding, Burn-In Loop
-
-stages:
- - lint
- - test
- - burn-in
- - report
-
-variables:
- # Disable git depth for accurate change detection
- GIT_DEPTH: 0
- # Use npm ci for faster, deterministic installs
- npm_config_cache: "$CI_PROJECT_DIR/.npm"
- # Playwright browser cache
- PLAYWRIGHT_BROWSERS_PATH: "$CI_PROJECT_DIR/.cache/ms-playwright"
- # Default Node version when .nvmrc is missing
- DEFAULT_NODE_VERSION: "24"
-
-# Caching configuration
-cache:
- key:
- files:
- - package-lock.json
- paths:
- - .npm/
- - .cache/ms-playwright/
- - node_modules/
-
-# Lint stage - Code quality checks
-lint:
- stage: lint
- image: node:$DEFAULT_NODE_VERSION
- before_script:
- - |
- NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "$DEFAULT_NODE_VERSION")
- echo "Using Node $NODE_VERSION"
- npm install -g n
- n "$NODE_VERSION"
- node -v
- - npm ci
- script:
- - npm run lint
- timeout: 5 minutes
-
-# Test stage - Parallel execution with sharding
-.test-template: &test-template
- stage: test
- image: node:$DEFAULT_NODE_VERSION
- needs:
- - lint
- before_script:
- - |
- NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "$DEFAULT_NODE_VERSION")
- echo "Using Node $NODE_VERSION"
- npm install -g n
- n "$NODE_VERSION"
- node -v
- - npm ci
- - npx playwright install --with-deps chromium
- artifacts:
- when: on_failure
- paths:
- - test-results/
- - playwright-report/
- expire_in: 30 days
- timeout: 30 minutes
-
-test:shard-1:
- <<: *test-template
- script:
- - npm run test:e2e -- --shard=1/4
-
-test:shard-2:
- <<: *test-template
- script:
- - npm run test:e2e -- --shard=2/4
-
-test:shard-3:
- <<: *test-template
- script:
- - npm run test:e2e -- --shard=3/4
-
-test:shard-4:
- <<: *test-template
- script:
- - npm run test:e2e -- --shard=4/4
-
-# Burn-in stage - Flaky test detection
-burn-in:
- stage: burn-in
- image: node:$DEFAULT_NODE_VERSION
- needs:
- - test:shard-1
- - test:shard-2
- - test:shard-3
- - test:shard-4
- # Only run burn-in on merge requests to main/develop or on schedule
- rules:
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- - if: '$CI_PIPELINE_SOURCE == "schedule"'
- before_script:
- - |
- NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "$DEFAULT_NODE_VERSION")
- echo "Using Node $NODE_VERSION"
- npm install -g n
- n "$NODE_VERSION"
- node -v
- - npm ci
- - npx playwright install --with-deps chromium
- script:
- - |
- echo "π₯ Starting burn-in loop - detecting flaky tests"
- for i in {1..10}; do
- echo "ββββββββββββββββββββββββββββββββββββββββ"
- echo "π₯ Burn-in iteration $i/10"
- echo "ββββββββββββββββββββββββββββββββββββββββ"
- npm run test:e2e || exit 1
- done
- echo "β Burn-in complete - no flaky tests detected"
- artifacts:
- when: on_failure
- paths:
- - test-results/
- - playwright-report/
- expire_in: 30 days
- timeout: 60 minutes
-
-# Report stage - Aggregate results
-report:
- stage: report
- image: alpine:latest
- needs:
- - test:shard-1
- - test:shard-2
- - test:shard-3
- - test:shard-4
- - burn-in
- when: always
- script:
- - |
- echo "## Test Execution Summary"
- echo ""
- echo "- Pipeline: $CI_PIPELINE_ID"
- echo "- Shards: 4"
- echo "- Branch: $CI_COMMIT_REF_NAME"
- echo ""
- echo "View detailed results in job artifacts"
diff --git a/src/bmm/workflows/testarch/ci/instructions.md b/src/bmm/workflows/testarch/ci/instructions.md
deleted file mode 100644
index a23d2c16..00000000
--- a/src/bmm/workflows/testarch/ci/instructions.md
+++ /dev/null
@@ -1,536 +0,0 @@
-
-
-# CI/CD Pipeline Setup
-
-**Workflow ID**: `_bmad/bmm/testarch/ci`
-**Version**: 4.0 (BMad v6)
-
----
-
-## Overview
-
-Scaffolds a production-ready CI/CD quality pipeline with test execution, burn-in loops for flaky test detection, parallel sharding, artifact collection, and notification configuration. This workflow creates platform-specific CI configuration optimized for fast feedback and reliable test execution.
-
-Note: This is typically a one-time setup per repo; run it any time after the test framework exists, ideally before feature work starts.
-
----
-
-## Preflight Requirements
-
-**Critical:** Verify these requirements before proceeding. If any fail, HALT and notify the user.
-
-- β Git repository is initialized (`.git/` directory exists)
-- β Local test suite passes (`npm run test:e2e` succeeds)
-- β Test framework is configured (from `framework` workflow)
-- β Team agrees on target CI platform (GitHub Actions, GitLab CI, Circle CI, etc.)
-- β Access to CI platform settings/secrets available (if updating existing pipeline)
-
----
-
-## Step 1: Run Preflight Checks
-
-### Actions
-
-1. **Verify Git Repository**
- - Check for `.git/` directory
- - Confirm remote repository configured (`git remote -v`)
- - If not initialized, HALT with message: "Git repository required for CI/CD setup"
-
-2. **Validate Test Framework**
- - Look for `playwright.config.*` or `cypress.config.*`
- - Read framework configuration to extract:
- - Test directory location
- - Test command
- - Reporter configuration
- - Timeout settings
- - If not found, HALT with message: "Run `framework` workflow first to set up test infrastructure"
-
-3. **Run Local Tests**
- - Execute `npm run test:e2e` (or equivalent from package.json)
- - Ensure tests pass before CI setup
- - If tests fail, HALT with message: "Fix failing tests before setting up CI/CD"
-
-4. **Detect CI Platform**
- - Check for existing CI configuration:
- - `.github/workflows/*.yml` (GitHub Actions)
- - `.gitlab-ci.yml` (GitLab CI)
- - `.circleci/config.yml` (Circle CI)
- - `Jenkinsfile` (Jenkins)
- - If found, ask user: "Update existing CI configuration or create new?"
- - If not found, detect platform from git remote:
- - `github.com` β GitHub Actions (default)
- - `gitlab.com` β GitLab CI
- - Ask user if unable to auto-detect
-
-5. **Read Environment Configuration**
- - Use `.nvmrc` for Node version if present
- - If missing, default to a current LTS (Node 24) or newer instead of a fixed old version
- - Read `package.json` to identify dependencies (affects caching strategy)
-
-**Halt Condition:** If preflight checks fail, stop immediately and report which requirement failed.
-
----
-
-## Step 2: Scaffold CI Pipeline
-
-### Actions
-
-1. **Select CI Platform Template**
-
- Based on detection or user preference, use the appropriate template:
-
- **GitHub Actions** (`.github/workflows/test.yml`):
- - Most common platform
- - Excellent caching and matrix support
- - Free for public repos, generous free tier for private
-
- **GitLab CI** (`.gitlab-ci.yml`):
- - Integrated with GitLab
- - Built-in registry and runners
- - Powerful pipeline features
-
- **Circle CI** (`.circleci/config.yml`):
- - Fast execution with parallelism
- - Docker-first approach
- - Enterprise features
-
- **Jenkins** (`Jenkinsfile`):
- - Self-hosted option
- - Maximum customization
- - Requires infrastructure management
-
-2. **Generate Pipeline Configuration**
-
- Use templates from `{installed_path}/` directory:
- - `github-actions-template.yml`
- - `gitlab-ci-template.yml`
-
- **Key pipeline stages:**
-
- ```yaml
- stages:
- - lint # Code quality checks
- - test # Test execution (parallel shards)
- - burn-in # Flaky test detection
- - report # Aggregate results and publish
- ```
-
-3. **Configure Test Execution**
-
- **Parallel Sharding:**
-
- ```yaml
- strategy:
- fail-fast: false
- matrix:
- shard: [1, 2, 3, 4]
-
- steps:
- - name: Run tests
- run: npm run test:e2e -- --shard=${{ matrix.shard }}/${{ strategy.job-total }}
- ```
-
- **Purpose:** Splits tests into N parallel jobs for faster execution (target: <10 min per shard)
-
-4. **Add Burn-In Loop**
-
- **Critical pattern from production systems:**
-
- ```yaml
- burn-in:
- name: Flaky Test Detection
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node
- uses: actions/setup-node@v4
- with:
- node-version-file: '.nvmrc'
-
- - name: Install dependencies
- run: npm ci
-
- - name: Run burn-in loop (10 iterations)
- run: |
- for i in {1..10}; do
- echo "π₯ Burn-in iteration $i/10"
- npm run test:e2e || exit 1
- done
-
- - name: Upload failure artifacts
- if: failure()
- uses: actions/upload-artifact@v4
- with:
- name: burn-in-failures
- path: test-results/
- retention-days: 30
- ```
-
- **Purpose:** Runs tests multiple times to catch non-deterministic failures before they reach main branch.
-
- **When to run:**
- - On pull requests to main/develop
- - Weekly on cron schedule
- - After significant test infrastructure changes
-
-5. **Configure Caching**
-
- **Node modules cache:**
-
- ```yaml
- - name: Cache dependencies
- uses: actions/cache@v4
- with:
- path: ~/.npm
- key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
- restore-keys: |
- ${{ runner.os }}-node-
- ```
-
- **Browser binaries cache (Playwright):**
-
- ```yaml
- - name: Cache Playwright browsers
- uses: actions/cache@v4
- with:
- path: ~/.cache/ms-playwright
- key: ${{ runner.os }}-playwright-${{ hashFiles('**/package-lock.json') }}
- ```
-
- **Purpose:** Reduces CI execution time by 2-5 minutes per run.
-
-6. **Configure Artifact Collection**
-
- **Failure artifacts only:**
-
- ```yaml
- - name: Upload test results
- if: failure()
- uses: actions/upload-artifact@v4
- with:
- name: test-results-${{ matrix.shard }}
- path: |
- test-results/
- playwright-report/
- retention-days: 30
- ```
-
- **Artifacts to collect:**
- - Traces (Playwright) - full debugging context
- - Screenshots - visual evidence of failures
- - Videos - interaction playback
- - HTML reports - detailed test results
- - Console logs - error messages and warnings
-
-7. **Add Retry Logic**
-
- ```yaml
- - name: Run tests with retries
- uses: nick-invision/retry@v2
- with:
- timeout_minutes: 30
- max_attempts: 3
- retry_on: error
- command: npm run test:e2e
- ```
-
- **Purpose:** Handles transient failures (network issues, race conditions)
-
-8. **Configure Notifications** (Optional)
-
- If `notify_on_failure` is enabled:
-
- ```yaml
- - name: Notify on failure
- if: failure()
- uses: 8398a7/action-slack@v3
- with:
- status: ${{ job.status }}
- text: 'Test failures detected in PR #${{ github.event.pull_request.number }}'
- webhook_url: ${{ secrets.SLACK_WEBHOOK }}
- ```
-
-9. **Generate Helper Scripts**
-
- **Selective testing script** (`scripts/test-changed.sh`):
-
- ```bash
- #!/bin/bash
- # Run only tests for changed files
-
- CHANGED_FILES=$(git diff --name-only HEAD~1)
-
- if echo "$CHANGED_FILES" | grep -q "src/.*\.ts$"; then
- echo "Running affected tests..."
- npm run test:e2e -- --grep="$(echo $CHANGED_FILES | sed 's/src\///g' | sed 's/\.ts//g')"
- else
- echo "No test-affecting changes detected"
- fi
- ```
-
- **Local mirror script** (`scripts/ci-local.sh`):
-
- ```bash
- #!/bin/bash
- # Mirror CI execution locally for debugging
-
- echo "π Running CI pipeline locally..."
-
- # Lint
- npm run lint || exit 1
-
- # Tests
- npm run test:e2e || exit 1
-
- # Burn-in (reduced iterations)
- for i in {1..3}; do
- echo "π₯ Burn-in $i/3"
- npm run test:e2e || exit 1
- done
-
- echo "β Local CI pipeline passed"
- ```
-
-10. **Generate Documentation**
-
- **CI README** (`docs/ci.md`):
- - Pipeline stages and purpose
- - How to run locally
- - Debugging failed CI runs
- - Secrets and environment variables needed
- - Notification setup
- - Badge URLs for README
-
- **Secrets checklist** (`docs/ci-secrets-checklist.md`):
- - Required secrets list (SLACK_WEBHOOK, etc.)
- - Where to configure in CI platform
- - Security best practices
-
----
-
-## Step 3: Deliverables
-
-### Primary Artifacts Created
-
-1. **CI Configuration File**
- - `.github/workflows/test.yml` (GitHub Actions)
- - `.gitlab-ci.yml` (GitLab CI)
- - `.circleci/config.yml` (Circle CI)
-
-2. **Pipeline Stages**
- - **Lint**: Code quality checks (ESLint, Prettier)
- - **Test**: Parallel test execution (4 shards)
- - **Burn-in**: Flaky test detection (10 iterations)
- - **Report**: Result aggregation and publishing
-
-3. **Helper Scripts**
- - `scripts/test-changed.sh` - Selective testing
- - `scripts/ci-local.sh` - Local CI mirror
- - `scripts/burn-in.sh` - Standalone burn-in execution
-
-4. **Documentation**
- - `docs/ci.md` - CI pipeline guide
- - `docs/ci-secrets-checklist.md` - Required secrets
- - Inline comments in CI configuration
-
-5. **Optimization Features**
- - Dependency caching (npm, browser binaries)
- - Parallel sharding (4 jobs default)
- - Retry logic (2 retries on failure)
- - Failure-only artifact upload
-
-### Performance Targets
-
-- **Lint stage**: <2 minutes
-- **Test stage** (per shard): <10 minutes
-- **Burn-in stage**: <30 minutes (10 iterations)
-- **Total pipeline**: <45 minutes
-
-**Speedup:** 20Γ faster than sequential execution through parallelism and caching.
-
----
-
-## Important Notes
-
-### Knowledge Base Integration
-
-**Critical:** Check configuration and load appropriate fragments.
-
-Read `{config_source}` and check `config.tea_use_playwright_utils`.
-
-**Core CI Patterns (Always load):**
-
-- `ci-burn-in.md` - Burn-in loop patterns: 10-iteration detection, GitHub Actions workflow, shard orchestration, selective execution (678 lines, 4 examples)
-- `selective-testing.md` - Changed test detection strategies: tag-based, spec filters, diff-based selection, promotion rules (727 lines, 4 examples)
-- `visual-debugging.md` - Artifact collection best practices: trace viewer, HAR recording, custom artifacts, accessibility integration (522 lines, 5 examples)
-- `test-quality.md` - CI-specific test quality criteria: deterministic tests, isolated with cleanup, explicit assertions, length/time optimization (658 lines, 5 examples)
-- `playwright-config.md` - CI-optimized configuration: parallelization, artifact output, project dependencies, sharding (722 lines, 5 examples)
-
-**If `config.tea_use_playwright_utils: true`:**
-
-Load playwright-utils CI-relevant fragments:
-
-- `burn-in.md` - Smart test selection with git diff analysis (very important for CI optimization)
-- `network-error-monitor.md` - Automatic HTTP 4xx/5xx detection (recommend in CI pipelines)
-
-Recommend:
-
-- Add burn-in script for pull request validation
-- Enable network-error-monitor in merged fixtures for catching silent failures
-- Reference full docs in `*framework` and `*automate` workflows
-
-### CI Platform-Specific Guidance
-
-**GitHub Actions:**
-
-- Use `actions/cache` for caching
-- Matrix strategy for parallelism
-- Secrets in repository settings
-- Free 2000 minutes/month for private repos
-
-**GitLab CI:**
-
-- Use `.gitlab-ci.yml` in root
-- `cache:` directive for caching
-- Parallel execution with `parallel: 4`
-- Variables in project CI/CD settings
-
-**Circle CI:**
-
-- Use `.circleci/config.yml`
-- Docker executors recommended
-- Parallelism with `parallelism: 4`
-- Context for shared secrets
-
-### Burn-In Loop Strategy
-
-**When to run:**
-
-- β On PRs to main/develop branches
-- β Weekly on schedule (cron)
-- β After test infrastructure changes
-- β Not on every commit (too slow)
-
-**Iterations:**
-
-- **10 iterations** for thorough detection
-- **3 iterations** for quick feedback
-- **100 iterations** for high-confidence stability
-
-**Failure threshold:**
-
-- Even ONE failure in burn-in β tests are flaky
-- Must fix before merging
-
-### Artifact Retention
-
-**Failure artifacts only:**
-
-- Saves storage costs
-- Maintains debugging capability
-- 30-day retention default
-
-**Artifact types:**
-
-- Traces (Playwright) - 5-10 MB per test
-- Screenshots - 100-500 KB per screenshot
-- Videos - 2-5 MB per test
-- HTML reports - 1-2 MB per run
-
-### Selective Testing
-
-**Detect changed files:**
-
-```bash
-git diff --name-only HEAD~1
-```
-
-**Run affected tests only:**
-
-- Faster feedback for small changes
-- Full suite still runs on main branch
-- Reduces CI time by 50-80% for focused PRs
-
-**Trade-off:**
-
-- May miss integration issues
-- Run full suite at least on merge
-
-### Local CI Mirror
-
-**Purpose:** Debug CI failures locally
-
-**Usage:**
-
-```bash
-./scripts/ci-local.sh
-```
-
-**Mirrors CI environment:**
-
-- Same Node version
-- Same test command
-- Same stages (lint β test β burn-in)
-- Reduced burn-in iterations (3 vs 10)
-
----
-
-## Output Summary
-
-After completing this workflow, provide a summary:
-
-```markdown
-## CI/CD Pipeline Complete
-
-**Platform**: GitHub Actions (or GitLab CI, etc.)
-
-**Artifacts Created**:
-
-- β Pipeline configuration: .github/workflows/test.yml
-- β Burn-in loop: 10 iterations for flaky detection
-- β Parallel sharding: 4 jobs for fast execution
-- β Caching: Dependencies + browser binaries
-- β Artifact collection: Failure-only traces/screenshots/videos
-- β Helper scripts: test-changed.sh, ci-local.sh, burn-in.sh
-- β Documentation: docs/ci.md, docs/ci-secrets-checklist.md
-
-**Performance:**
-
-- Lint: <2 min
-- Test (per shard): <10 min
-- Burn-in: <30 min
-- Total: <45 min (20Γ speedup vs sequential)
-
-**Next Steps**:
-
-1. Commit CI configuration: `git add .github/workflows/test.yml && git commit -m "ci: add test pipeline"`
-2. Push to remote: `git push`
-3. Configure required secrets in CI platform settings (see docs/ci-secrets-checklist.md)
-4. Open a PR to trigger first CI run
-5. Monitor pipeline execution and adjust parallelism if needed
-
-**Knowledge Base References Applied**:
-
-- Burn-in loop pattern (ci-burn-in.md)
-- Selective testing strategy (selective-testing.md)
-- Artifact collection (visual-debugging.md)
-- Test quality criteria (test-quality.md)
-```
-
----
-
-## Validation
-
-After completing all steps, verify:
-
-- [ ] CI configuration file created and syntactically valid
-- [ ] Burn-in loop configured (10 iterations)
-- [ ] Parallel sharding enabled (4 jobs)
-- [ ] Caching configured (dependencies + browsers)
-- [ ] Artifact collection on failure only
-- [ ] Helper scripts created and executable (`chmod +x`)
-- [ ] Documentation complete (ci.md, secrets checklist)
-- [ ] No errors or warnings during scaffold
-
-Refer to `checklist.md` for comprehensive validation criteria.
diff --git a/src/bmm/workflows/testarch/ci/workflow.yaml b/src/bmm/workflows/testarch/ci/workflow.yaml
deleted file mode 100644
index 223af205..00000000
--- a/src/bmm/workflows/testarch/ci/workflow.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-# Test Architect workflow: ci
-name: testarch-ci
-description: "Scaffold CI/CD quality pipeline with test execution, burn-in loops, and artifact collection"
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/testarch/ci"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-
-# Variables and inputs
-variables:
- ci_platform: "auto" # auto, github-actions, gitlab-ci, circle-ci, jenkins - user can override
- test_dir: "{project-root}/tests" # Root test directory
-
-# Output configuration
-default_output_file: "{project-root}/.github/workflows/test.yml" # GitHub Actions default
-
-# Required tools
-required_tools:
- - read_file # Read .nvmrc, package.json, framework config
- - write_file # Create CI config, scripts, documentation
- - create_directory # Create .github/workflows/ or .gitlab-ci/ directories
- - list_files # Detect existing CI configuration
- - search_repo # Find test files for selective testing
-
-tags:
- - qa
- - ci-cd
- - test-architect
- - pipeline
- - automation
-
-execution_hints:
- interactive: false # Minimize prompts, auto-detect when possible
- autonomous: true # Proceed without user input unless blocked
- iterative: true
-
-web_bundle: false
diff --git a/src/bmm/workflows/testarch/framework/checklist.md b/src/bmm/workflows/testarch/framework/checklist.md
deleted file mode 100644
index 07c6fe8d..00000000
--- a/src/bmm/workflows/testarch/framework/checklist.md
+++ /dev/null
@@ -1,320 +0,0 @@
-# Test Framework Setup - Validation Checklist
-
-This checklist ensures the framework workflow completes successfully and all deliverables meet quality standards.
-
----
-
-## Prerequisites
-
-Before starting the workflow:
-
-- [ ] Project root contains valid `package.json`
-- [ ] No existing modern E2E framework detected (`playwright.config.*`, `cypress.config.*`)
-- [ ] Project type identifiable (React, Vue, Angular, Next.js, Node, etc.)
-- [ ] Bundler identifiable (Vite, Webpack, Rollup, esbuild) or not applicable
-- [ ] User has write permissions to create directories and files
-
----
-
-## Process Steps
-
-### Step 1: Preflight Checks
-
-- [ ] package.json successfully read and parsed
-- [ ] Project type extracted correctly
-- [ ] Bundler identified (or marked as N/A for backend projects)
-- [ ] No framework conflicts detected
-- [ ] Architecture documents located (if available)
-
-### Step 2: Framework Selection
-
-- [ ] Framework auto-detection logic executed
-- [ ] Framework choice justified (Playwright vs Cypress)
-- [ ] Framework preference respected (if explicitly set)
-- [ ] User notified of framework selection and rationale
-
-### Step 3: Directory Structure
-
-- [ ] `tests/` root directory created
-- [ ] `tests/e2e/` directory created (or user's preferred structure)
-- [ ] `tests/support/` directory created (critical pattern)
-- [ ] `tests/support/fixtures/` directory created
-- [ ] `tests/support/fixtures/factories/` directory created
-- [ ] `tests/support/helpers/` directory created
-- [ ] `tests/support/page-objects/` directory created (if applicable)
-- [ ] All directories have correct permissions
-
-**Note**: Test organization is flexible (e2e/, api/, integration/). The **support/** folder is the key pattern.
-
-### Step 4: Configuration Files
-
-- [ ] Framework config file created (`playwright.config.ts` or `cypress.config.ts`)
-- [ ] Config file uses TypeScript (if `use_typescript: true`)
-- [ ] Timeouts configured correctly (action: 15s, navigation: 30s, test: 60s)
-- [ ] Base URL configured with environment variable fallback
-- [ ] Trace/screenshot/video set to retain-on-failure
-- [ ] Multiple reporters configured (HTML + JUnit + console)
-- [ ] Parallel execution enabled
-- [ ] CI-specific settings configured (retries, workers)
-- [ ] Config file is syntactically valid (no compilation errors)
-
-### Step 5: Environment Configuration
-
-- [ ] `.env.example` created in project root
-- [ ] `TEST_ENV` variable defined
-- [ ] `BASE_URL` variable defined with default
-- [ ] `API_URL` variable defined (if applicable)
-- [ ] Authentication variables defined (if applicable)
-- [ ] Feature flag variables defined (if applicable)
-- [ ] `.nvmrc` created with appropriate Node version
-
-### Step 6: Fixture Architecture
-
-- [ ] `tests/support/fixtures/index.ts` created
-- [ ] Base fixture extended from Playwright/Cypress
-- [ ] Type definitions for fixtures created
-- [ ] mergeTests pattern implemented (if multiple fixtures)
-- [ ] Auto-cleanup logic included in fixtures
-- [ ] Fixture architecture follows knowledge base patterns
-
-### Step 7: Data Factories
-
-- [ ] At least one factory created (e.g., UserFactory)
-- [ ] Factories use @faker-js/faker for realistic data
-- [ ] Factories track created entities (for cleanup)
-- [ ] Factories implement `cleanup()` method
-- [ ] Factories integrate with fixtures
-- [ ] Factories follow knowledge base patterns
-
-### Step 8: Sample Tests
-
-- [ ] Example test file created (`tests/e2e/example.spec.ts`)
-- [ ] Test uses fixture architecture
-- [ ] Test demonstrates data factory usage
-- [ ] Test uses proper selector strategy (data-testid)
-- [ ] Test follows Given-When-Then structure
-- [ ] Test includes proper assertions
-- [ ] Network interception demonstrated (if applicable)
-
-### Step 9: Helper Utilities
-
-- [ ] API helper created (if API testing needed)
-- [ ] Network helper created (if network mocking needed)
-- [ ] Auth helper created (if authentication needed)
-- [ ] Helpers follow functional patterns
-- [ ] Helpers have proper error handling
-
-### Step 10: Documentation
-
-- [ ] `tests/README.md` created
-- [ ] Setup instructions included
-- [ ] Running tests section included
-- [ ] Architecture overview section included
-- [ ] Best practices section included
-- [ ] CI integration section included
-- [ ] Knowledge base references included
-- [ ] Troubleshooting section included
-
-### Step 11: Package.json Updates
-
-- [ ] Minimal test script added to package.json: `test:e2e`
-- [ ] Test framework dependency added (if not already present)
-- [ ] Type definitions added (if TypeScript)
-- [ ] Users can extend with additional scripts as needed
-
----
-
-## Output Validation
-
-### Configuration Validation
-
-- [ ] Config file loads without errors
-- [ ] Config file passes linting (if linter configured)
-- [ ] Config file uses correct syntax for chosen framework
-- [ ] All paths in config resolve correctly
-- [ ] Reporter output directories exist or are created on test run
-
-### Test Execution Validation
-
-- [ ] Sample test runs successfully
-- [ ] Test execution produces expected output (pass/fail)
-- [ ] Test artifacts generated correctly (traces, screenshots, videos)
-- [ ] Test report generated successfully
-- [ ] No console errors or warnings during test run
-
-### Directory Structure Validation
-
-- [ ] All required directories exist
-- [ ] Directory structure matches framework conventions
-- [ ] No duplicate or conflicting directories
-- [ ] Directories accessible with correct permissions
-
-### File Integrity Validation
-
-- [ ] All generated files are syntactically correct
-- [ ] No placeholder text left in files (e.g., "TODO", "FIXME")
-- [ ] All imports resolve correctly
-- [ ] No hardcoded credentials or secrets in files
-- [ ] All file paths use correct separators for OS
-
----
-
-## Quality Checks
-
-### Code Quality
-
-- [ ] Generated code follows project coding standards
-- [ ] TypeScript types are complete and accurate (no `any` unless necessary)
-- [ ] No unused imports or variables
-- [ ] Consistent code formatting (matches project style)
-- [ ] No linting errors in generated files
-
-### Best Practices Compliance
-
-- [ ] Fixture architecture follows pure function β fixture β mergeTests pattern
-- [ ] Data factories implement auto-cleanup
-- [ ] Network interception occurs before navigation
-- [ ] Selectors use data-testid strategy
-- [ ] Artifacts only captured on failure
-- [ ] Tests follow Given-When-Then structure
-- [ ] No hard-coded waits or sleeps
-
-### Knowledge Base Alignment
-
-- [ ] Fixture pattern matches `fixture-architecture.md`
-- [ ] Data factories match `data-factories.md`
-- [ ] Network handling matches `network-first.md`
-- [ ] Config follows `playwright-config.md` or `test-config.md`
-- [ ] Test quality matches `test-quality.md`
-
-### Security Checks
-
-- [ ] No credentials in configuration files
-- [ ] .env.example contains placeholders, not real values
-- [ ] Sensitive test data handled securely
-- [ ] API keys and tokens use environment variables
-- [ ] No secrets committed to version control
-
----
-
-## Integration Points
-
-### Status File Integration
-
-- [ ] Framework initialization logged in Quality & Testing Progress section
-- [ ] Status file updated with completion timestamp
-- [ ] Status file shows framework: Playwright or Cypress
-
-### Knowledge Base Integration
-
-- [ ] Relevant knowledge fragments identified from tea-index.csv
-- [ ] Knowledge fragments successfully loaded
-- [ ] Patterns from knowledge base applied correctly
-- [ ] Knowledge base references included in documentation
-
-### Workflow Dependencies
-
-- [ ] Can proceed to `ci` workflow after completion
-- [ ] Can proceed to `test-design` workflow after completion
-- [ ] Can proceed to `atdd` workflow after completion
-- [ ] Framework setup compatible with downstream workflows
-
----
-
-## Completion Criteria
-
-**All of the following must be true:**
-
-- [ ] All prerequisite checks passed
-- [ ] All process steps completed without errors
-- [ ] All output validations passed
-- [ ] All quality checks passed
-- [ ] All integration points verified
-- [ ] Sample test executes successfully
-- [ ] User can run `npm run test:e2e` without errors
-- [ ] Documentation is complete and accurate
-- [ ] No critical issues or blockers identified
-
----
-
-## Post-Workflow Actions
-
-**User must complete:**
-
-1. [ ] Copy `.env.example` to `.env`
-2. [ ] Fill in environment-specific values in `.env`
-3. [ ] Run `npm install` to install test dependencies
-4. [ ] Run `npm run test:e2e` to verify setup
-5. [ ] Review `tests/README.md` for project-specific guidance
-
-**Recommended next workflows:**
-
-1. [ ] Run `ci` workflow to set up CI/CD pipeline
-2. [ ] Run `test-design` workflow to plan test coverage
-3. [ ] Run `atdd` workflow when ready to develop stories
-
----
-
-## Rollback Procedure
-
-If workflow fails and needs to be rolled back:
-
-1. [ ] Delete `tests/` directory
-2. [ ] Remove test scripts from package.json
-3. [ ] Delete `.env.example` (if created)
-4. [ ] Delete `.nvmrc` (if created)
-5. [ ] Delete framework config file
-6. [ ] Remove test dependencies from package.json (if added)
-7. [ ] Run `npm install` to clean up node_modules
-
----
-
-## Notes
-
-### Common Issues
-
-**Issue**: Config file has TypeScript errors
-
-- **Solution**: Ensure `@playwright/test` or `cypress` types are installed
-
-**Issue**: Sample test fails to run
-
-- **Solution**: Check BASE_URL in .env, ensure app is running
-
-**Issue**: Fixture cleanup not working
-
-- **Solution**: Verify cleanup() is called in fixture teardown
-
-**Issue**: Network interception not working
-
-- **Solution**: Ensure route setup occurs before page.goto()
-
-### Framework-Specific Considerations
-
-**Playwright:**
-
-- Requires Node.js 18+
-- Browser binaries auto-installed on first run
-- Trace viewer requires running `npx playwright show-trace`
-
-**Cypress:**
-
-- Requires Node.js 18+
-- Cypress app opens on first run
-- Component testing requires additional setup
-
-### Version Compatibility
-
-- [ ] Node.js version matches .nvmrc
-- [ ] Framework version compatible with Node.js version
-- [ ] TypeScript version compatible with framework
-- [ ] All peer dependencies satisfied
-
----
-
-**Checklist Complete**: Sign off when all items checked and validated.
-
-**Completed by:** {name}
-**Date:** {date}
-**Framework:** { Playwright / Cypress or something else}
-**Notes:** {notes}
diff --git a/src/bmm/workflows/testarch/framework/instructions.md b/src/bmm/workflows/testarch/framework/instructions.md
deleted file mode 100644
index 9f7af84e..00000000
--- a/src/bmm/workflows/testarch/framework/instructions.md
+++ /dev/null
@@ -1,481 +0,0 @@
-
-
-# Test Framework Setup
-
-**Workflow ID**: `_bmad/bmm/testarch/framework`
-**Version**: 4.0 (BMad v6)
-
----
-
-## Overview
-
-Initialize a production-ready test framework architecture (Playwright or Cypress) with fixtures, helpers, configuration, and best practices. This workflow scaffolds the complete testing infrastructure for modern web applications.
-
----
-
-## Preflight Requirements
-
-**Critical:** Verify these requirements before proceeding. If any fail, HALT and notify the user.
-
-- β `package.json` exists in project root
-- β No modern E2E test harness is already configured (check for existing `playwright.config.*` or `cypress.config.*`)
-- β Architectural/stack context available (project type, bundler, dependencies)
-
----
-
-## Step 1: Run Preflight Checks
-
-### Actions
-
-1. **Validate package.json**
- - Read `{project-root}/package.json`
- - Extract project type (React, Vue, Angular, Next.js, Node, etc.)
- - Identify bundler (Vite, Webpack, Rollup, esbuild)
- - Note existing test dependencies
-
-2. **Check for Existing Framework**
- - Search for `playwright.config.*`, `cypress.config.*`, `cypress.json`
- - Check `package.json` for `@playwright/test` or `cypress` dependencies
- - If found, HALT with message: "Existing test framework detected. Use workflow `upgrade-framework` instead."
-
-3. **Gather Context**
- - Look for architecture documents (`architecture.md`, `tech-spec*.md`)
- - Check for API documentation or endpoint lists
- - Identify authentication requirements
-
-**Halt Condition:** If preflight checks fail, stop immediately and report which requirement failed.
-
----
-
-## Step 2: Scaffold Framework
-
-### Actions
-
-1. **Framework Selection**
-
- **Default Logic:**
- - **Playwright** (recommended for):
- - Large repositories (100+ files)
- - Performance-critical applications
- - Multi-browser support needed
- - Complex user flows requiring video/trace debugging
- - Projects requiring worker parallelism
-
- - **Cypress** (recommended for):
- - Small teams prioritizing developer experience
- - Component testing focus
- - Real-time reloading during test development
- - Simpler setup requirements
-
- **Detection Strategy:**
- - Check `package.json` for existing preference
- - Consider `project_size` variable from workflow config
- - Use `framework_preference` variable if set
- - Default to **Playwright** if uncertain
-
-2. **Create Directory Structure**
-
- ```
- {project-root}/
- βββ tests/ # Root test directory
- β βββ e2e/ # Test files (users organize as needed)
- β βββ support/ # Framework infrastructure (key pattern)
- β β βββ fixtures/ # Test fixtures (data, mocks)
- β β βββ helpers/ # Utility functions
- β β βββ page-objects/ # Page object models (optional)
- β βββ README.md # Test suite documentation
- ```
-
- **Note**: Users organize test files (e2e/, api/, integration/, component/) as needed. The **support/** folder is the critical pattern for fixtures and helpers used across tests.
-
-3. **Generate Configuration File**
-
- **For Playwright** (`playwright.config.ts` or `playwright.config.js`):
-
- ```typescript
- import { defineConfig, devices } from '@playwright/test';
-
- export default defineConfig({
- testDir: './tests/e2e',
- fullyParallel: true,
- forbidOnly: !!process.env.CI,
- retries: process.env.CI ? 2 : 0,
- workers: process.env.CI ? 1 : undefined,
-
- timeout: 60 * 1000, // Test timeout: 60s
- expect: {
- timeout: 15 * 1000, // Assertion timeout: 15s
- },
-
- use: {
- baseURL: process.env.BASE_URL || 'http://localhost:3000',
- trace: 'retain-on-failure',
- screenshot: 'only-on-failure',
- video: 'retain-on-failure',
- actionTimeout: 15 * 1000, // Action timeout: 15s
- navigationTimeout: 30 * 1000, // Navigation timeout: 30s
- },
-
- reporter: [['html', { outputFolder: 'test-results/html' }], ['junit', { outputFile: 'test-results/junit.xml' }], ['list']],
-
- projects: [
- { name: 'chromium', use: { ...devices['Desktop Chrome'] } },
- { name: 'firefox', use: { ...devices['Desktop Firefox'] } },
- { name: 'webkit', use: { ...devices['Desktop Safari'] } },
- ],
- });
- ```
-
- **For Cypress** (`cypress.config.ts` or `cypress.config.js`):
-
- ```typescript
- import { defineConfig } from 'cypress';
-
- export default defineConfig({
- e2e: {
- baseUrl: process.env.BASE_URL || 'http://localhost:3000',
- specPattern: 'tests/e2e/**/*.cy.{js,jsx,ts,tsx}',
- supportFile: 'tests/support/e2e.ts',
- video: false,
- screenshotOnRunFailure: true,
-
- setupNodeEvents(on, config) {
- // implement node event listeners here
- },
- },
-
- retries: {
- runMode: 2,
- openMode: 0,
- },
-
- defaultCommandTimeout: 15000,
- requestTimeout: 30000,
- responseTimeout: 30000,
- pageLoadTimeout: 60000,
- });
- ```
-
-4. **Generate Environment Configuration**
-
- Create `.env.example`:
-
- ```bash
- # Test Environment Configuration
- TEST_ENV=local
- BASE_URL=http://localhost:3000
- API_URL=http://localhost:3001/api
-
- # Authentication (if applicable)
- TEST_USER_EMAIL=test@example.com
- TEST_USER_PASSWORD=
-
- # Feature Flags (if applicable)
- FEATURE_FLAG_NEW_UI=true
-
- # API Keys (if applicable)
- TEST_API_KEY=
- ```
-
-5. **Generate Node Version File**
-
- Create `.nvmrc`:
-
- ```
- 20.11.0
- ```
-
- (Use Node version from existing `.nvmrc` or default to current LTS)
-
-6. **Implement Fixture Architecture**
-
- **Knowledge Base Reference**: `testarch/knowledge/fixture-architecture.md`
-
- Create `tests/support/fixtures/index.ts`:
-
- ```typescript
- import { test as base } from '@playwright/test';
- import { UserFactory } from './factories/user-factory';
-
- type TestFixtures = {
- userFactory: UserFactory;
- };
-
- export const test = base.extend({
- userFactory: async ({}, use) => {
- const factory = new UserFactory();
- await use(factory);
- await factory.cleanup(); // Auto-cleanup
- },
- });
-
- export { expect } from '@playwright/test';
- ```
-
-7. **Implement Data Factories**
-
- **Knowledge Base Reference**: `testarch/knowledge/data-factories.md`
-
- Create `tests/support/fixtures/factories/user-factory.ts`:
-
- ```typescript
- import { faker } from '@faker-js/faker';
-
- export class UserFactory {
- private createdUsers: string[] = [];
-
- async createUser(overrides = {}) {
- const user = {
- email: faker.internet.email(),
- name: faker.person.fullName(),
- password: faker.internet.password({ length: 12 }),
- ...overrides,
- };
-
- // API call to create user
- const response = await fetch(`${process.env.API_URL}/users`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify(user),
- });
-
- const created = await response.json();
- this.createdUsers.push(created.id);
- return created;
- }
-
- async cleanup() {
- // Delete all created users
- for (const userId of this.createdUsers) {
- await fetch(`${process.env.API_URL}/users/${userId}`, {
- method: 'DELETE',
- });
- }
- this.createdUsers = [];
- }
- }
- ```
-
-8. **Generate Sample Tests**
-
- Create `tests/e2e/example.spec.ts`:
-
- ```typescript
- import { test, expect } from '../support/fixtures';
-
- test.describe('Example Test Suite', () => {
- test('should load homepage', async ({ page }) => {
- await page.goto('/');
- await expect(page).toHaveTitle(/Home/i);
- });
-
- test('should create user and login', async ({ page, userFactory }) => {
- // Create test user
- const user = await userFactory.createUser();
-
- // Login
- await page.goto('/login');
- await page.fill('[data-testid="email-input"]', user.email);
- await page.fill('[data-testid="password-input"]', user.password);
- await page.click('[data-testid="login-button"]');
-
- // Assert login success
- await expect(page.locator('[data-testid="user-menu"]')).toBeVisible();
- });
- });
- ```
-
-9. **Update package.json Scripts**
-
- Add minimal test script to `package.json`:
-
- ```json
- {
- "scripts": {
- "test:e2e": "playwright test"
- }
- }
- ```
-
- **Note**: Users can add additional scripts as needed (e.g., `--ui`, `--headed`, `--debug`, `show-report`).
-
-10. **Generate Documentation**
-
- Create `tests/README.md` with setup instructions (see Step 3 deliverables).
-
----
-
-## Step 3: Deliverables
-
-### Primary Artifacts Created
-
-1. **Configuration File**
- - `playwright.config.ts` or `cypress.config.ts`
- - Timeouts: action 15s, navigation 30s, test 60s
- - Reporters: HTML + JUnit XML
-
-2. **Directory Structure**
- - `tests/` with `e2e/`, `api/`, `support/` subdirectories
- - `support/fixtures/` for test fixtures
- - `support/helpers/` for utility functions
-
-3. **Environment Configuration**
- - `.env.example` with `TEST_ENV`, `BASE_URL`, `API_URL`
- - `.nvmrc` with Node version
-
-4. **Test Infrastructure**
- - Fixture architecture (`mergeTests` pattern)
- - Data factories (faker-based, with auto-cleanup)
- - Sample tests demonstrating patterns
-
-5. **Documentation**
- - `tests/README.md` with setup instructions
- - Comments in config files explaining options
-
-### README Contents
-
-The generated `tests/README.md` should include:
-
-- **Setup Instructions**: How to install dependencies, configure environment
-- **Running Tests**: Commands for local execution, headed mode, debug mode
-- **Architecture Overview**: Fixture pattern, data factories, page objects
-- **Best Practices**: Selector strategy (data-testid), test isolation, cleanup
-- **CI Integration**: How tests run in CI/CD pipeline
-- **Knowledge Base References**: Links to relevant TEA knowledge fragments
-
----
-
-## Important Notes
-
-### Knowledge Base Integration
-
-**Critical:** Check configuration and load appropriate fragments.
-
-Read `{config_source}` and check `config.tea_use_playwright_utils`.
-
-**If `config.tea_use_playwright_utils: true` (Playwright Utils Integration):**
-
-Consult `{project-root}/_bmad/bmm/testarch/tea-index.csv` and load:
-
-- `overview.md` - Playwright utils installation and design principles
-- `fixtures-composition.md` - mergeTests composition with playwright-utils
-- `auth-session.md` - Token persistence setup (if auth needed)
-- `api-request.md` - API testing utilities (if API tests planned)
-- `burn-in.md` - Smart test selection for CI (recommend during framework setup)
-- `network-error-monitor.md` - Automatic HTTP error detection (recommend in merged fixtures)
-- `data-factories.md` - Factory patterns with faker (498 lines, 5 examples)
-
-Recommend installing playwright-utils:
-
-```bash
-npm install -D @seontechnologies/playwright-utils
-```
-
-Recommend adding burn-in and network-error-monitor to merged fixtures for enhanced reliability.
-
-**If `config.tea_use_playwright_utils: false` (Traditional Patterns):**
-
-Consult `{project-root}/_bmad/bmm/testarch/tea-index.csv` and load:
-
-- `fixture-architecture.md` - Pure function β fixture β `mergeTests` composition with auto-cleanup (406 lines, 5 examples)
-- `data-factories.md` - Faker-based factories with overrides, nested factories, API seeding, auto-cleanup (498 lines, 5 examples)
-- `network-first.md` - Network-first testing safeguards: intercept before navigate, HAR capture, deterministic waiting (489 lines, 5 examples)
-- `playwright-config.md` - Playwright-specific configuration: environment-based, timeout standards, artifact output, parallelization, project config (722 lines, 5 examples)
-- `test-quality.md` - Test design principles: deterministic, isolated with cleanup, explicit assertions, length/time limits (658 lines, 5 examples)
-
-### Framework-Specific Guidance
-
-**Playwright Advantages:**
-
-- Worker parallelism (significantly faster for large suites)
-- Trace viewer (powerful debugging with screenshots, network, console)
-- Multi-language support (TypeScript, JavaScript, Python, C#, Java)
-- Built-in API testing capabilities
-- Better handling of multiple browser contexts
-
-**Cypress Advantages:**
-
-- Superior developer experience (real-time reloading)
-- Excellent for component testing (Cypress CT or use Vitest)
-- Simpler setup for small teams
-- Better suited for watch mode during development
-
-**Avoid Cypress when:**
-
-- API chains are heavy and complex
-- Multi-tab/window scenarios are common
-- Worker parallelism is critical for CI performance
-
-### Selector Strategy
-
-**Always recommend**:
-
-- `data-testid` attributes for UI elements
-- `data-cy` attributes if Cypress is chosen
-- Avoid brittle CSS selectors or XPath
-
-### Contract Testing
-
-For microservices architectures, **recommend Pact** for consumer-driven contract testing alongside E2E tests.
-
-### Failure Artifacts
-
-Configure **failure-only** capture:
-
-- Screenshots: only on failure
-- Videos: retain on failure (delete on success)
-- Traces: retain on failure (Playwright)
-
-This reduces storage overhead while maintaining debugging capability.
-
----
-
-## Output Summary
-
-After completing this workflow, provide a summary:
-
-```markdown
-## Framework Scaffold Complete
-
-**Framework Selected**: Playwright (or Cypress)
-
-**Artifacts Created**:
-
-- β Configuration file: `playwright.config.ts`
-- β Directory structure: `tests/e2e/`, `tests/support/`
-- β Environment config: `.env.example`
-- β Node version: `.nvmrc`
-- β Fixture architecture: `tests/support/fixtures/`
-- β Data factories: `tests/support/fixtures/factories/`
-- β Sample tests: `tests/e2e/example.spec.ts`
-- β Documentation: `tests/README.md`
-
-**Next Steps**:
-
-1. Copy `.env.example` to `.env` and fill in environment variables
-2. Run `npm install` to install test dependencies
-3. Run `npm run test:e2e` to execute sample tests
-4. Review `tests/README.md` for detailed setup instructions
-
-**Knowledge Base References Applied**:
-
-- Fixture architecture pattern (pure functions + mergeTests)
-- Data factories with auto-cleanup (faker-based)
-- Network-first testing safeguards
-- Failure-only artifact capture
-```
-
----
-
-## Validation
-
-After completing all steps, verify:
-
-- [ ] Configuration file created and valid
-- [ ] Directory structure exists
-- [ ] Environment configuration generated
-- [ ] Sample tests run successfully
-- [ ] Documentation complete and accurate
-- [ ] No errors or warnings during scaffold
-
-Refer to `checklist.md` for comprehensive validation criteria.
diff --git a/src/bmm/workflows/testarch/framework/workflow.yaml b/src/bmm/workflows/testarch/framework/workflow.yaml
deleted file mode 100644
index 07fcea0c..00000000
--- a/src/bmm/workflows/testarch/framework/workflow.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Test Architect workflow: framework
-name: testarch-framework
-description: "Initialize production-ready test framework architecture (Playwright or Cypress) with fixtures, helpers, and configuration"
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/testarch/framework"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-
-# Variables and inputs
-variables:
- test_dir: "{project-root}/tests" # Root test directory
- use_typescript: true # Prefer TypeScript configuration
- framework_preference: "auto" # auto, playwright, cypress - user can override auto-detection
- project_size: "auto" # auto, small, large - influences framework recommendation
-
-# Output configuration
-default_output_file: "{test_dir}/README.md" # Main deliverable is test setup README
-
-# Required tools
-required_tools:
- - read_file # Read package.json, existing configs
- - write_file # Create config files, helpers, fixtures, tests
- - create_directory # Create test directory structure
- - list_files # Check for existing framework
- - search_repo # Find architecture docs
-
-tags:
- - qa
- - setup
- - test-architect
- - framework
- - initialization
-
-execution_hints:
- interactive: false # Minimize prompts; auto-detect when possible
- autonomous: true # Proceed without user input unless blocked
- iterative: true
-
-web_bundle: false
diff --git a/src/bmm/workflows/testarch/nfr-assess/checklist.md b/src/bmm/workflows/testarch/nfr-assess/checklist.md
deleted file mode 100644
index 1e76f366..00000000
--- a/src/bmm/workflows/testarch/nfr-assess/checklist.md
+++ /dev/null
@@ -1,407 +0,0 @@
-# Non-Functional Requirements Assessment - Validation Checklist
-
-**Workflow:** `testarch-nfr`
-**Purpose:** Ensure comprehensive and evidence-based NFR assessment with actionable recommendations
-
----
-
-Note: `nfr-assess` evaluates existing evidence; it does not run tests or CI workflows.
-
-## Prerequisites Validation
-
-- [ ] Implementation is deployed and accessible for evaluation
-- [ ] Evidence sources are available (test results, metrics, logs, CI results)
-- [ ] NFR categories are determined (performance, security, reliability, maintainability, custom)
-- [ ] Evidence directories exist and are accessible (`test_results_dir`, `metrics_dir`, `logs_dir`)
-- [ ] Knowledge base is loaded (nfr-criteria, ci-burn-in, test-quality)
-
----
-
-## Context Loading
-
-- [ ] Tech-spec.md loaded successfully (if available)
-- [ ] PRD.md loaded (if available)
-- [ ] Story file loaded (if applicable)
-- [ ] Relevant knowledge fragments loaded from `tea-index.csv`:
- - [ ] `nfr-criteria.md`
- - [ ] `ci-burn-in.md`
- - [ ] `test-quality.md`
- - [ ] `playwright-config.md` (if using Playwright)
-
----
-
-## NFR Categories and Thresholds
-
-### Performance
-
-- [ ] Response time threshold defined or marked as UNKNOWN
-- [ ] Throughput threshold defined or marked as UNKNOWN
-- [ ] Resource usage thresholds defined or marked as UNKNOWN
-- [ ] Scalability requirements defined or marked as UNKNOWN
-
-### Security
-
-- [ ] Authentication requirements defined or marked as UNKNOWN
-- [ ] Authorization requirements defined or marked as UNKNOWN
-- [ ] Data protection requirements defined or marked as UNKNOWN
-- [ ] Vulnerability management thresholds defined or marked as UNKNOWN
-- [ ] Compliance requirements identified (GDPR, HIPAA, PCI-DSS, etc.)
-
-### Reliability
-
-- [ ] Availability (uptime) threshold defined or marked as UNKNOWN
-- [ ] Error rate threshold defined or marked as UNKNOWN
-- [ ] MTTR (Mean Time To Recovery) threshold defined or marked as UNKNOWN
-- [ ] Fault tolerance requirements defined or marked as UNKNOWN
-- [ ] Disaster recovery requirements defined (RTO, RPO) or marked as UNKNOWN
-
-### Maintainability
-
-- [ ] Test coverage threshold defined or marked as UNKNOWN
-- [ ] Code quality threshold defined or marked as UNKNOWN
-- [ ] Technical debt threshold defined or marked as UNKNOWN
-- [ ] Documentation completeness threshold defined or marked as UNKNOWN
-
-### Custom NFR Categories (if applicable)
-
-- [ ] Custom NFR category 1: Thresholds defined or marked as UNKNOWN
-- [ ] Custom NFR category 2: Thresholds defined or marked as UNKNOWN
-- [ ] Custom NFR category 3: Thresholds defined or marked as UNKNOWN
-
----
-
-## Evidence Gathering
-
-### Performance Evidence
-
-- [ ] Load test results collected (JMeter, k6, Gatling, etc.)
-- [ ] Application metrics collected (response times, throughput, resource usage)
-- [ ] APM data collected (New Relic, Datadog, Dynatrace, etc.)
-- [ ] Lighthouse reports collected (if web app)
-- [ ] Playwright performance traces collected (if applicable)
-
-### Security Evidence
-
-- [ ] SAST results collected (SonarQube, Checkmarx, Veracode, etc.)
-- [ ] DAST results collected (OWASP ZAP, Burp Suite, etc.)
-- [ ] Dependency scanning results collected (Snyk, Dependabot, npm audit)
-- [ ] Penetration test reports collected (if available)
-- [ ] Security audit logs collected
-- [ ] Compliance audit results collected (if applicable)
-
-### Reliability Evidence
-
-- [ ] Uptime monitoring data collected (Pingdom, UptimeRobot, StatusCake)
-- [ ] Error logs collected
-- [ ] Error rate metrics collected
-- [ ] CI burn-in results collected (stability over time)
-- [ ] Chaos engineering test results collected (if available)
-- [ ] Failover/recovery test results collected (if available)
-- [ ] Incident reports and postmortems collected (if applicable)
-
-### Maintainability Evidence
-
-- [ ] Code coverage reports collected (Istanbul, NYC, c8, JaCoCo)
-- [ ] Static analysis results collected (ESLint, SonarQube, CodeClimate)
-- [ ] Technical debt metrics collected
-- [ ] Documentation audit results collected
-- [ ] Test review report collected (from test-review workflow, if available)
-- [ ] Git metrics collected (code churn, commit frequency, etc.)
-
----
-
-## NFR Assessment with Deterministic Rules
-
-### Performance Assessment
-
-- [ ] Response time assessed against threshold
-- [ ] Throughput assessed against threshold
-- [ ] Resource usage assessed against threshold
-- [ ] Scalability assessed against requirements
-- [ ] Status classified (PASS/CONCERNS/FAIL) with justification
-- [ ] Evidence source documented (file path, metric name)
-
-### Security Assessment
-
-- [ ] Authentication strength assessed against requirements
-- [ ] Authorization controls assessed against requirements
-- [ ] Data protection assessed against requirements
-- [ ] Vulnerability management assessed against thresholds
-- [ ] Compliance assessed against requirements
-- [ ] Status classified (PASS/CONCERNS/FAIL) with justification
-- [ ] Evidence source documented (file path, scan result)
-
-### Reliability Assessment
-
-- [ ] Availability (uptime) assessed against threshold
-- [ ] Error rate assessed against threshold
-- [ ] MTTR assessed against threshold
-- [ ] Fault tolerance assessed against requirements
-- [ ] Disaster recovery assessed against requirements (RTO, RPO)
-- [ ] CI burn-in assessed (stability over time)
-- [ ] Status classified (PASS/CONCERNS/FAIL) with justification
-- [ ] Evidence source documented (file path, monitoring data)
-
-### Maintainability Assessment
-
-- [ ] Test coverage assessed against threshold
-- [ ] Code quality assessed against threshold
-- [ ] Technical debt assessed against threshold
-- [ ] Documentation completeness assessed against threshold
-- [ ] Test quality assessed (from test-review, if available)
-- [ ] Status classified (PASS/CONCERNS/FAIL) with justification
-- [ ] Evidence source documented (file path, coverage report)
-
-### Custom NFR Assessment (if applicable)
-
-- [ ] Custom NFR 1 assessed against threshold with justification
-- [ ] Custom NFR 2 assessed against threshold with justification
-- [ ] Custom NFR 3 assessed against threshold with justification
-
----
-
-## Status Classification Validation
-
-### PASS Criteria Verified
-
-- [ ] Evidence exists for PASS status
-- [ ] Evidence meets or exceeds threshold
-- [ ] No concerns flagged in evidence
-- [ ] Quality is acceptable
-
-### CONCERNS Criteria Verified
-
-- [ ] Threshold is UNKNOWN (documented) OR
-- [ ] Evidence is MISSING or INCOMPLETE (documented) OR
-- [ ] Evidence is close to threshold (within 10%, documented) OR
-- [ ] Evidence shows intermittent issues (documented)
-
-### FAIL Criteria Verified
-
-- [ ] Evidence exists BUT does not meet threshold (documented) OR
-- [ ] Critical evidence is MISSING (documented) OR
-- [ ] Evidence shows consistent failures (documented) OR
-- [ ] Quality is unacceptable (documented)
-
-### No Threshold Guessing
-
-- [ ] All thresholds are either defined or marked as UNKNOWN
-- [ ] No thresholds were guessed or inferred
-- [ ] All UNKNOWN thresholds result in CONCERNS status
-
----
-
-## Quick Wins and Recommended Actions
-
-### Quick Wins Identified
-
-- [ ] Low-effort, high-impact improvements identified for CONCERNS/FAIL
-- [ ] Configuration changes (no code changes) identified
-- [ ] Optimization opportunities identified (caching, indexing, compression)
-- [ ] Monitoring additions identified (detect issues before failures)
-
-### Recommended Actions
-
-- [ ] Specific remediation steps provided (not generic advice)
-- [ ] Priority assigned (CRITICAL, HIGH, MEDIUM, LOW)
-- [ ] Estimated effort provided (hours, days)
-- [ ] Owner suggestions provided (dev, ops, security)
-
-### Monitoring Hooks
-
-- [ ] Performance monitoring suggested (APM, synthetic monitoring)
-- [ ] Error tracking suggested (Sentry, Rollbar, error logs)
-- [ ] Security monitoring suggested (intrusion detection, audit logs)
-- [ ] Alerting thresholds suggested (notify before breach)
-
-### Fail-Fast Mechanisms
-
-- [ ] Circuit breakers suggested for reliability
-- [ ] Rate limiting suggested for performance
-- [ ] Validation gates suggested for security
-- [ ] Smoke tests suggested for maintainability
-
----
-
-## Deliverables Generated
-
-### NFR Assessment Report
-
-- [ ] File created at `{output_folder}/nfr-assessment.md`
-- [ ] Template from `nfr-report-template.md` used
-- [ ] Executive summary included (overall status, critical issues)
-- [ ] Assessment by category included (performance, security, reliability, maintainability)
-- [ ] Evidence for each NFR documented
-- [ ] Status classifications documented (PASS/CONCERNS/FAIL)
-- [ ] Findings summary included (PASS count, CONCERNS count, FAIL count)
-- [ ] Quick wins section included
-- [ ] Recommended actions section included
-- [ ] Evidence gaps checklist included
-
-### Gate YAML Snippet (if enabled)
-
-- [ ] YAML snippet generated
-- [ ] Date included
-- [ ] Categories status included (performance, security, reliability, maintainability)
-- [ ] Overall status included (PASS/CONCERNS/FAIL)
-- [ ] Issue counts included (critical, high, medium, concerns)
-- [ ] Blockers flag included (true/false)
-- [ ] Recommendations included
-
-### Evidence Checklist (if enabled)
-
-- [ ] All NFRs with MISSING or INCOMPLETE evidence listed
-- [ ] Owners assigned for evidence collection
-- [ ] Suggested evidence sources provided
-- [ ] Deadlines set for evidence collection
-
-### Updated Story File (if enabled and requested)
-
-- [ ] "NFR Assessment" section added to story markdown
-- [ ] Link to NFR assessment report included
-- [ ] Overall status and critical issues included
-- [ ] Gate status included
-
----
-
-## Quality Assurance
-
-### Accuracy Checks
-
-- [ ] All NFR categories assessed (none skipped)
-- [ ] All thresholds documented (defined or UNKNOWN)
-- [ ] All evidence sources documented (file paths, metric names)
-- [ ] Status classifications are deterministic and consistent
-- [ ] No false positives (status correctly assigned)
-- [ ] No false negatives (all issues identified)
-
-### Completeness Checks
-
-- [ ] All NFR categories covered (performance, security, reliability, maintainability, custom)
-- [ ] All evidence sources checked (test results, metrics, logs, CI results)
-- [ ] All status types used appropriately (PASS, CONCERNS, FAIL)
-- [ ] All NFRs with CONCERNS/FAIL have recommendations
-- [ ] All evidence gaps have owners and deadlines
-
-### Actionability Checks
-
-- [ ] Recommendations are specific (not generic)
-- [ ] Remediation steps are clear and actionable
-- [ ] Priorities are assigned (CRITICAL, HIGH, MEDIUM, LOW)
-- [ ] Effort estimates are provided (hours, days)
-- [ ] Owners are suggested (dev, ops, security)
-
----
-
-## Integration with BMad Artifacts
-
-### With tech-spec.md
-
-- [ ] Tech spec loaded for NFR requirements and thresholds
-- [ ] Performance targets extracted
-- [ ] Security requirements extracted
-- [ ] Reliability SLAs extracted
-- [ ] Architectural decisions considered
-
-### With test-design.md
-
-- [ ] Test design loaded for NFR test plan
-- [ ] Test priorities referenced (P0/P1/P2/P3)
-- [ ] Assessment aligned with planned NFR validation
-
-### With PRD.md
-
-- [ ] PRD loaded for product-level NFR context
-- [ ] User experience goals considered
-- [ ] Unstated requirements checked
-- [ ] Product-level SLAs referenced
-
----
-
-## Quality Gates Validation
-
-### Release Blocker (FAIL)
-
-- [ ] Critical NFR status checked (security, reliability)
-- [ ] Performance failures assessed for user impact
-- [ ] Release blocker flagged if critical NFR has FAIL status
-
-### PR Blocker (HIGH CONCERNS)
-
-- [ ] High-priority NFR status checked
-- [ ] Multiple CONCERNS assessed
-- [ ] PR blocker flagged if HIGH priority issues exist
-
-### Warning (CONCERNS)
-
-- [ ] Any NFR with CONCERNS status flagged
-- [ ] Missing or incomplete evidence documented
-- [ ] Warning issued to address before next release
-
-### Pass (PASS)
-
-- [ ] All NFRs have PASS status
-- [ ] No blockers or concerns exist
-- [ ] Ready for release confirmed
-
----
-
-## Non-Prescriptive Validation
-
-- [ ] NFR categories adapted to team needs
-- [ ] Thresholds appropriate for project context
-- [ ] Assessment criteria customized as needed
-- [ ] Teams can extend with custom NFR categories
-- [ ] Integration with external tools supported (New Relic, Datadog, SonarQube, JIRA)
-
----
-
-## Documentation and Communication
-
-- [ ] NFR assessment report is readable and well-formatted
-- [ ] Tables render correctly in markdown
-- [ ] Code blocks have proper syntax highlighting
-- [ ] Links are valid and accessible
-- [ ] Recommendations are clear and prioritized
-- [ ] Overall status is prominent and unambiguous
-- [ ] Executive summary provides quick understanding
-
----
-
-## Final Validation
-
-- [ ] All prerequisites met
-- [ ] All NFR categories assessed with evidence (or gaps documented)
-- [ ] No thresholds were guessed (all defined or UNKNOWN)
-- [ ] Status classifications are deterministic and justified
-- [ ] Quick wins identified for all CONCERNS/FAIL
-- [ ] Recommended actions are specific and actionable
-- [ ] Evidence gaps documented with owners and deadlines
-- [ ] NFR assessment report generated and saved
-- [ ] Gate YAML snippet generated (if enabled)
-- [ ] Evidence checklist generated (if enabled)
-- [ ] Workflow completed successfully
-
----
-
-## Sign-Off
-
-**NFR Assessment Status:**
-
-- [ ] β PASS - All NFRs meet requirements, ready for release
-- [ ] β οΈ CONCERNS - Some NFRs have concerns, address before next release
-- [ ] β FAIL - Critical NFRs not met, BLOCKER for release
-
-**Next Actions:**
-
-- If PASS β : Proceed to `*gate` workflow or release
-- If CONCERNS β οΈ: Address HIGH/CRITICAL issues, re-run `*nfr-assess`
-- If FAIL β: Resolve FAIL status NFRs, re-run `*nfr-assess`
-
-**Critical Issues:** {COUNT}
-**High Priority Issues:** {COUNT}
-**Concerns:** {COUNT}
-
----
-
-
diff --git a/src/bmm/workflows/testarch/nfr-assess/instructions.md b/src/bmm/workflows/testarch/nfr-assess/instructions.md
deleted file mode 100644
index f23e6b10..00000000
--- a/src/bmm/workflows/testarch/nfr-assess/instructions.md
+++ /dev/null
@@ -1,726 +0,0 @@
-# Non-Functional Requirements Assessment - Instructions v4.0
-
-**Workflow:** `testarch-nfr`
-**Purpose:** Assess non-functional requirements (performance, security, reliability, maintainability) before release with evidence-based validation
-**Agent:** Test Architect (TEA)
-**Format:** Pure Markdown v4.0 (no XML blocks)
-
----
-
-## Overview
-
-This workflow performs a comprehensive assessment of non-functional requirements (NFRs) to validate that the implementation meets performance, security, reliability, and maintainability standards before release. It uses evidence-based validation with deterministic PASS/CONCERNS/FAIL rules and provides actionable recommendations for remediation.
-
-**Key Capabilities:**
-
-- Assess multiple NFR categories (performance, security, reliability, maintainability, custom)
-- Validate NFRs against defined thresholds from tech specs, PRD, or defaults
-- Classify status deterministically (PASS/CONCERNS/FAIL) based on evidence
-- Never guess thresholds - mark as CONCERNS if unknown
-- Generate gate-ready YAML snippets for CI/CD integration
-- Provide quick wins and recommended actions for remediation
-- Create evidence checklists for gaps
-
----
-
-## Prerequisites
-
-**Required:**
-
-- Implementation deployed locally or accessible for evaluation
-- Evidence sources available (test results, metrics, logs, CI results)
-
-**Recommended:**
-
-- NFR requirements defined in tech-spec.md, PRD.md, or story
-- Test results from performance, security, reliability tests
-- Application metrics (response times, error rates, throughput)
-- CI/CD pipeline results for burn-in validation
-
-**Halt Conditions:**
-
-- If NFR targets are undefined and cannot be obtained, halt and request definition
-- If implementation is not accessible for evaluation, halt and request deployment
-
----
-
-## Workflow Steps
-
-### Step 1: Load Context and Knowledge Base
-
-**Actions:**
-
-1. Load relevant knowledge fragments from `{project-root}/_bmad/bmm/testarch/tea-index.csv`:
- - `adr-quality-readiness-checklist.md` - 8-category 29-criteria NFR framework (testability, test data, scalability, DR, security, monitorability, QoS/QoE, deployability, ~450 lines)
- - `ci-burn-in.md` - CI/CD burn-in patterns for reliability validation (10-iteration detection, sharding, selective execution, 678 lines, 4 examples)
- - `test-quality.md` - Test quality expectations for maintainability (deterministic, isolated, explicit assertions, length/time limits, 658 lines, 5 examples)
- - `playwright-config.md` - Performance configuration patterns: parallelization, timeout standards, artifact output (722 lines, 5 examples)
- - `error-handling.md` - Reliability validation patterns: scoped exceptions, retry validation, telemetry logging, graceful degradation (736 lines, 4 examples)
-
-2. Read story file (if provided):
- - Extract NFR requirements
- - Identify specific thresholds or SLAs
- - Note any custom NFR categories
-
-3. Read related BMad artifacts (if available):
- - `tech-spec.md` - Technical NFR requirements and targets
- - `PRD.md` - Product-level NFR context (user expectations)
- - `test-design.md` - NFR test plan and priorities
-
-**Output:** Complete understanding of NFR targets, evidence sources, and validation criteria
-
----
-
-### Step 2: Identify NFR Categories and Thresholds
-
-**Actions:**
-
-1. Determine which NFR categories to assess using ADR Quality Readiness Checklist (8 standard categories):
- - **1. Testability & Automation**: Isolation, headless interaction, state control, sample requests (4 criteria)
- - **2. Test Data Strategy**: Segregation, generation, teardown (3 criteria)
- - **3. Scalability & Availability**: Statelessness, bottlenecks, SLA definitions, circuit breakers (4 criteria)
- - **4. Disaster Recovery**: RTO/RPO, failover, backups (3 criteria)
- - **5. Security**: AuthN/AuthZ, encryption, secrets, input validation (4 criteria)
- - **6. Monitorability, Debuggability & Manageability**: Tracing, logs, metrics, config (4 criteria)
- - **7. QoS & QoE**: Latency, throttling, perceived performance, degradation (4 criteria)
- - **8. Deployability**: Zero downtime, backward compatibility, rollback (3 criteria)
-
-2. Add custom NFR categories if specified (e.g., accessibility, internationalization, compliance) beyond the 8 standard categories
-
-3. Gather thresholds for each NFR:
- - From tech-spec.md (primary source)
- - From PRD.md (product-level SLAs)
- - From story file (feature-specific requirements)
- - From workflow variables (default thresholds)
- - Mark thresholds as UNKNOWN if not defined
-
-4. Never guess thresholds - if a threshold is unknown, mark the NFR as CONCERNS
-
-**Output:** Complete list of NFRs to assess with defined (or UNKNOWN) thresholds
-
----
-
-### Step 3: Gather Evidence
-
-**Actions:**
-
-1. For each NFR category, discover evidence sources:
-
- **Performance Evidence:**
- - Load test results (JMeter, k6, Lighthouse)
- - Application metrics (response times, throughput, resource usage)
- - Performance monitoring data (New Relic, Datadog, APM)
- - Playwright performance traces (if applicable)
-
- **Security Evidence:**
- - Security scan results (SAST, DAST, dependency scanning)
- - Authentication/authorization test results
- - Penetration test reports
- - Vulnerability assessment reports
- - Compliance audit results
-
- **Reliability Evidence:**
- - Error logs and error rates
- - Uptime monitoring data
- - Chaos engineering test results
- - Failover/recovery test results
- - CI burn-in results (stability over time)
-
- **Maintainability Evidence:**
- - Code coverage reports (Istanbul, NYC, c8)
- - Static analysis results (ESLint, SonarQube)
- - Technical debt metrics
- - Documentation completeness
- - Test quality assessment (from test-review workflow)
-
-2. Read relevant files from evidence directories:
- - `{test_results_dir}` for test execution results
- - `{metrics_dir}` for application metrics
- - `{logs_dir}` for application logs
- - CI/CD pipeline results (if `include_ci_results` is true)
-
-3. Mark NFRs without evidence as "NO EVIDENCE" - never infer or assume
-
-**Output:** Comprehensive evidence inventory for each NFR
-
----
-
-### Step 4: Assess NFRs with Deterministic Rules
-
-**Actions:**
-
-1. For each NFR, apply deterministic PASS/CONCERNS/FAIL rules:
-
- **PASS Criteria:**
- - Evidence exists AND meets defined threshold
- - No concerns flagged in evidence
- - Example: Response time is 350ms (threshold: 500ms) β PASS
-
- **CONCERNS Criteria:**
- - Threshold is UNKNOWN (not defined)
- - Evidence is MISSING or INCOMPLETE
- - Evidence is close to threshold (within 10%)
- - Evidence shows intermittent issues
- - Example: Response time is 480ms (threshold: 500ms, 96% of threshold) β CONCERNS
-
- **FAIL Criteria:**
- - Evidence exists BUT does not meet threshold
- - Critical evidence is MISSING
- - Evidence shows consistent failures
- - Example: Response time is 750ms (threshold: 500ms) β FAIL
-
-2. Document findings for each NFR:
- - Status (PASS/CONCERNS/FAIL)
- - Evidence source (file path, test name, metric name)
- - Actual value vs threshold
- - Justification for status classification
-
-3. Classify severity based on category:
- - **CRITICAL**: Security failures, reliability failures (affect users immediately)
- - **HIGH**: Performance failures, maintainability failures (affect users soon)
- - **MEDIUM**: Concerns without failures (may affect users eventually)
- - **LOW**: Missing evidence for non-critical NFRs
-
-**Output:** Complete NFR assessment with deterministic status classifications
-
----
-
-### Step 5: Identify Quick Wins and Recommended Actions
-
-**Actions:**
-
-1. For each NFR with CONCERNS or FAIL status, identify quick wins:
- - Low-effort, high-impact improvements
- - Configuration changes (no code changes needed)
- - Optimization opportunities (caching, indexing, compression)
- - Monitoring additions (detect issues before they become failures)
-
-2. Provide recommended actions for each issue:
- - Specific steps to remediate (not generic advice)
- - Priority (CRITICAL, HIGH, MEDIUM, LOW)
- - Estimated effort (hours, days)
- - Owner suggestion (dev, ops, security)
-
-3. Suggest monitoring hooks for gaps:
- - Add performance monitoring (APM, synthetic monitoring)
- - Add error tracking (Sentry, Rollbar, error logs)
- - Add security monitoring (intrusion detection, audit logs)
- - Add alerting thresholds (notify before thresholds are breached)
-
-4. Suggest fail-fast mechanisms:
- - Add circuit breakers for reliability
- - Add rate limiting for performance
- - Add validation gates for security
- - Add smoke tests for maintainability
-
-**Output:** Actionable remediation plan with prioritized recommendations
-
----
-
-### Step 6: Generate Deliverables
-
-**Actions:**
-
-1. Create NFR assessment markdown file:
- - Use template from `nfr-report-template.md`
- - Include executive summary (overall status, critical issues)
- - Add NFR-by-NFR assessment (status, evidence, thresholds)
- - Add findings summary (PASS count, CONCERNS count, FAIL count)
- - Add quick wins section
- - Add recommended actions section
- - Add evidence gaps checklist
- - Save to `{output_folder}/nfr-assessment.md`
-
-2. Generate gate YAML snippet (if enabled):
-
- ```yaml
- nfr_assessment:
- date: '2025-10-14'
- categories:
- performance: 'PASS'
- security: 'CONCERNS'
- reliability: 'PASS'
- maintainability: 'PASS'
- overall_status: 'CONCERNS'
- critical_issues: 0
- high_priority_issues: 1
- concerns: 2
- blockers: false
- ```
-
-3. Generate evidence checklist (if enabled):
- - List all NFRs with MISSING or INCOMPLETE evidence
- - Assign owners for evidence collection
- - Suggest evidence sources (tests, metrics, logs)
- - Set deadlines for evidence collection
-
-4. Update story file (if enabled and requested):
- - Add "NFR Assessment" section to story markdown
- - Link to NFR assessment report
- - Include overall status and critical issues
- - Add gate status
-
-**Output:** Complete NFR assessment documentation ready for review and CI/CD integration
-
----
-
-## Non-Prescriptive Approach
-
-**Minimal Examples:** This workflow provides principles and patterns, not rigid templates. Teams should adapt NFR categories, thresholds, and assessment criteria to their needs.
-
-**Key Patterns to Follow:**
-
-- Use evidence-based validation (no guessing or inference)
-- Apply deterministic rules (consistent PASS/CONCERNS/FAIL classification)
-- Never guess thresholds (mark as CONCERNS if unknown)
-- Provide actionable recommendations (specific steps, not generic advice)
-- Generate gate-ready artifacts (YAML snippets for CI/CD)
-
-**Extend as Needed:**
-
-- Add custom NFR categories (accessibility, internationalization, compliance)
-- Integrate with external tools (New Relic, Datadog, SonarQube, JIRA)
-- Add custom thresholds and rules
-- Link to external assessment systems
-
----
-
-## NFR Categories and Criteria
-
-### Performance
-
-**Criteria:**
-
-- Response time (p50, p95, p99 percentiles)
-- Throughput (requests per second, transactions per second)
-- Resource usage (CPU, memory, disk, network)
-- Scalability (horizontal, vertical)
-
-**Thresholds (Default):**
-
-- Response time p95: 500ms
-- Throughput: 100 RPS
-- CPU usage: < 70% average
-- Memory usage: < 80% max
-
-**Evidence Sources:**
-
-- Load test results (JMeter, k6, Gatling)
-- APM data (New Relic, Datadog, Dynatrace)
-- Lighthouse reports (for web apps)
-- Playwright performance traces
-
----
-
-### Security
-
-**Criteria:**
-
-- Authentication (login security, session management)
-- Authorization (access control, permissions)
-- Data protection (encryption, PII handling)
-- Vulnerability management (SAST, DAST, dependency scanning)
-- Compliance (GDPR, HIPAA, PCI-DSS)
-
-**Thresholds (Default):**
-
-- Security score: >= 85/100
-- Critical vulnerabilities: 0
-- High vulnerabilities: < 3
-- Authentication strength: MFA enabled
-
-**Evidence Sources:**
-
-- SAST results (SonarQube, Checkmarx, Veracode)
-- DAST results (OWASP ZAP, Burp Suite)
-- Dependency scanning (Snyk, Dependabot, npm audit)
-- Penetration test reports
-- Security audit logs
-
----
-
-### Reliability
-
-**Criteria:**
-
-- Availability (uptime percentage)
-- Error handling (graceful degradation, error recovery)
-- Fault tolerance (redundancy, failover)
-- Disaster recovery (backup, restore, RTO/RPO)
-- Stability (CI burn-in, chaos engineering)
-
-**Thresholds (Default):**
-
-- Uptime: >= 99.9% (three nines)
-- Error rate: < 0.1% (1 in 1000 requests)
-- MTTR (Mean Time To Recovery): < 15 minutes
-- CI burn-in: 100 consecutive successful runs
-
-**Evidence Sources:**
-
-- Uptime monitoring (Pingdom, UptimeRobot, StatusCake)
-- Error logs and error rates
-- CI burn-in results (see `ci-burn-in.md`)
-- Chaos engineering test results (Chaos Monkey, Gremlin)
-- Incident reports and postmortems
-
----
-
-### Maintainability
-
-**Criteria:**
-
-- Code quality (complexity, duplication, code smells)
-- Test coverage (unit, integration, E2E)
-- Documentation (code comments, README, architecture docs)
-- Technical debt (debt ratio, code churn)
-- Test quality (from test-review workflow)
-
-**Thresholds (Default):**
-
-- Test coverage: >= 80%
-- Code quality score: >= 85/100
-- Technical debt ratio: < 5%
-- Documentation completeness: >= 90%
-
-**Evidence Sources:**
-
-- Coverage reports (Istanbul, NYC, c8, JaCoCo)
-- Static analysis (ESLint, SonarQube, CodeClimate)
-- Documentation audit (manual or automated)
-- Test review report (from test-review workflow)
-- Git metrics (code churn, commit frequency)
-
----
-
-## Deterministic Assessment Rules
-
-### PASS Rules
-
-- Evidence exists
-- Evidence meets or exceeds threshold
-- No concerns flagged
-- Quality is acceptable
-
-**Example:**
-
-```markdown
-NFR: Response Time p95
-Threshold: 500ms
-Evidence: Load test result shows 350ms p95
-Status: PASS β
-```
-
----
-
-### CONCERNS Rules
-
-- Threshold is UNKNOWN
-- Evidence is MISSING or INCOMPLETE
-- Evidence is close to threshold (within 10%)
-- Evidence shows intermittent issues
-- Quality is marginal
-
-**Example:**
-
-```markdown
-NFR: Response Time p95
-Threshold: 500ms
-Evidence: Load test result shows 480ms p95 (96% of threshold)
-Status: CONCERNS β οΈ
-Recommendation: Optimize before production - very close to threshold
-```
-
----
-
-### FAIL Rules
-
-- Evidence exists BUT does not meet threshold
-- Critical evidence is MISSING
-- Evidence shows consistent failures
-- Quality is unacceptable
-
-**Example:**
-
-```markdown
-NFR: Response Time p95
-Threshold: 500ms
-Evidence: Load test result shows 750ms p95 (150% of threshold)
-Status: FAIL β
-Recommendation: BLOCKER - optimize performance before release
-```
-
----
-
-## Integration with BMad Artifacts
-
-### With tech-spec.md
-
-- Primary source for NFR requirements and thresholds
-- Load performance targets, security requirements, reliability SLAs
-- Use architectural decisions to understand NFR trade-offs
-
-### With test-design.md
-
-- Understand NFR test plan and priorities
-- Reference test priorities (P0/P1/P2/P3) for severity classification
-- Align assessment with planned NFR validation
-
-### With PRD.md
-
-- Understand product-level NFR expectations
-- Verify NFRs align with user experience goals
-- Check for unstated NFR requirements (implied by product goals)
-
----
-
-## Quality Gates
-
-### Release Blocker (FAIL)
-
-- Critical NFR has FAIL status (security, reliability)
-- Performance failure affects user experience severely
-- Do not release until FAIL is resolved
-
-### PR Blocker (HIGH CONCERNS)
-
-- High-priority NFR has FAIL status
-- Multiple CONCERNS exist
-- Block PR merge until addressed
-
-### Warning (CONCERNS)
-
-- Any NFR has CONCERNS status
-- Evidence is missing or incomplete
-- Address before next release
-
-### Pass (PASS)
-
-- All NFRs have PASS status
-- No blockers or concerns
-- Ready for release
-
----
-
-## Example NFR Assessment
-
-````markdown
-# NFR Assessment - Story 1.3
-
-**Feature:** User Authentication
-**Date:** 2025-10-14
-**Overall Status:** CONCERNS β οΈ (1 HIGH issue)
-
-## Executive Summary
-
-**Assessment:** 3 PASS, 1 CONCERNS, 0 FAIL
-**Blockers:** None
-**High Priority Issues:** 1 (Security - MFA not enforced)
-**Recommendation:** Address security concern before release
-
-## Performance Assessment
-
-### Response Time (p95)
-
-- **Status:** PASS β
-- **Threshold:** 500ms
-- **Actual:** 320ms (64% of threshold)
-- **Evidence:** Load test results (test-results/load-2025-10-14.json)
-- **Findings:** Response time well below threshold across all percentiles
-
-### Throughput
-
-- **Status:** PASS β
-- **Threshold:** 100 RPS
-- **Actual:** 250 RPS (250% of threshold)
-- **Evidence:** Load test results (test-results/load-2025-10-14.json)
-- **Findings:** System handles 2.5x target load without degradation
-
-## Security Assessment
-
-### Authentication Strength
-
-- **Status:** CONCERNS β οΈ
-- **Threshold:** MFA enabled for all users
-- **Actual:** MFA optional (not enforced)
-- **Evidence:** Security audit (security-audit-2025-10-14.md)
-- **Findings:** MFA is implemented but not enforced by default
-- **Recommendation:** HIGH - Enforce MFA for all new accounts, provide migration path for existing users
-
-### Data Protection
-
-- **Status:** PASS β
-- **Threshold:** PII encrypted at rest and in transit
-- **Actual:** AES-256 at rest, TLS 1.3 in transit
-- **Evidence:** Security scan (security-scan-2025-10-14.json)
-- **Findings:** All PII properly encrypted
-
-## Reliability Assessment
-
-### Uptime
-
-- **Status:** PASS β
-- **Threshold:** 99.9% (three nines)
-- **Actual:** 99.95% over 30 days
-- **Evidence:** Uptime monitoring (uptime-report-2025-10-14.csv)
-- **Findings:** Exceeds target with margin
-
-### Error Rate
-
-- **Status:** PASS β
-- **Threshold:** < 0.1% (1 in 1000)
-- **Actual:** 0.05% (1 in 2000)
-- **Evidence:** Error logs (logs/errors-2025-10.log)
-- **Findings:** Error rate well below threshold
-
-## Maintainability Assessment
-
-### Test Coverage
-
-- **Status:** PASS β
-- **Threshold:** >= 80%
-- **Actual:** 87%
-- **Evidence:** Coverage report (coverage/lcov-report/index.html)
-- **Findings:** Coverage exceeds threshold with good distribution
-
-### Code Quality
-
-- **Status:** PASS β
-- **Threshold:** >= 85/100
-- **Actual:** 92/100
-- **Evidence:** SonarQube analysis (sonarqube-report-2025-10-14.pdf)
-- **Findings:** High code quality score with low technical debt
-
-## Quick Wins
-
-1. **Enforce MFA (Security)** - HIGH - 4 hours
- - Add configuration flag to enforce MFA for new accounts
- - No code changes needed, only config adjustment
-
-## Recommended Actions
-
-### Immediate (Before Release)
-
-1. **Enforce MFA for all new accounts** - HIGH - 4 hours - Security Team
- - Add `ENFORCE_MFA=true` to production config
- - Update user onboarding flow to require MFA setup
- - Test MFA enforcement in staging environment
-
-### Short-term (Next Sprint)
-
-1. **Migrate existing users to MFA** - MEDIUM - 3 days - Product + Engineering
- - Design migration UX (prompt, incentives, deadline)
- - Implement migration flow with grace period
- - Communicate migration to existing users
-
-## Evidence Gaps
-
-- [ ] Chaos engineering test results (reliability)
- - Owner: DevOps Team
- - Deadline: 2025-10-21
- - Suggested evidence: Run chaos monkey tests in staging
-
-- [ ] Penetration test report (security)
- - Owner: Security Team
- - Deadline: 2025-10-28
- - Suggested evidence: Schedule third-party pentest
-
-## Gate YAML Snippet
-
-```yaml
-nfr_assessment:
- date: '2025-10-14'
- story_id: '1.3'
- categories:
- performance: 'PASS'
- security: 'CONCERNS'
- reliability: 'PASS'
- maintainability: 'PASS'
- overall_status: 'CONCERNS'
- critical_issues: 0
- high_priority_issues: 1
- medium_priority_issues: 0
- concerns: 1
- blockers: false
- recommendations:
- - 'Enforce MFA for all new accounts (HIGH - 4 hours)'
- evidence_gaps: 2
-```
-````
-
-## Recommendations Summary
-
-- **Release Blocker:** None β
-- **High Priority:** 1 (Enforce MFA before release)
-- **Medium Priority:** 1 (Migrate existing users to MFA)
-- **Next Steps:** Address HIGH priority item, then proceed to gate workflow
-
-```
-
----
-
-## Validation Checklist
-
-Before completing this workflow, verify:
-
-- β All NFR categories assessed (performance, security, reliability, maintainability, custom)
-- β Thresholds defined or marked as UNKNOWN
-- β Evidence gathered for each NFR (or marked as MISSING)
-- β Status classified deterministically (PASS/CONCERNS/FAIL)
-- β No thresholds were guessed (marked as CONCERNS if unknown)
-- β Quick wins identified for CONCERNS/FAIL
-- β Recommended actions are specific and actionable
-- β Evidence gaps documented with owners and deadlines
-- β NFR assessment report generated and saved
-- β Gate YAML snippet generated (if enabled)
-- β Evidence checklist generated (if enabled)
-
----
-
-## Notes
-
-- **Never Guess Thresholds:** If a threshold is unknown, mark as CONCERNS and recommend defining it
-- **Evidence-Based:** Every assessment must be backed by evidence (tests, metrics, logs, CI results)
-- **Deterministic Rules:** Use consistent PASS/CONCERNS/FAIL classification based on evidence
-- **Actionable Recommendations:** Provide specific steps, not generic advice
-- **Gate Integration:** Generate YAML snippets that can be consumed by CI/CD pipelines
-
----
-
-## Troubleshooting
-
-### "NFR thresholds not defined"
-- Check tech-spec.md for NFR requirements
-- Check PRD.md for product-level SLAs
-- Check story file for feature-specific requirements
-- If thresholds truly unknown, mark as CONCERNS and recommend defining them
-
-### "No evidence found"
-- Check evidence directories (test-results, metrics, logs)
-- Check CI/CD pipeline for test results
-- If evidence truly missing, mark NFR as "NO EVIDENCE" and recommend generating it
-
-### "CONCERNS status but no threshold exceeded"
-- CONCERNS is correct when threshold is UNKNOWN or evidence is MISSING/INCOMPLETE
-- CONCERNS is also correct when evidence is close to threshold (within 10%)
-- Document why CONCERNS was assigned
-
-### "FAIL status blocks release"
-- This is intentional - FAIL means critical NFR not met
-- Recommend remediation actions with specific steps
-- Re-run assessment after remediation
-
----
-
-## Related Workflows
-
-- **testarch-test-design** - Define NFR requirements and test plan
-- **testarch-framework** - Set up performance/security testing frameworks
-- **testarch-ci** - Configure CI/CD for NFR validation
-- **testarch-gate** - Use NFR assessment as input for quality gate decisions
-- **testarch-test-review** - Review test quality (maintainability NFR)
-
----
-
-
-```
diff --git a/src/bmm/workflows/testarch/nfr-assess/nfr-report-template.md b/src/bmm/workflows/testarch/nfr-assess/nfr-report-template.md
deleted file mode 100644
index 115ee969..00000000
--- a/src/bmm/workflows/testarch/nfr-assess/nfr-report-template.md
+++ /dev/null
@@ -1,461 +0,0 @@
-# NFR Assessment - {FEATURE_NAME}
-
-**Date:** {DATE}
-**Story:** {STORY_ID} (if applicable)
-**Overall Status:** {OVERALL_STATUS} {STATUS_ICON}
-
----
-
-Note: This assessment summarizes existing evidence; it does not run tests or CI workflows.
-
-## Executive Summary
-
-**Assessment:** {PASS_COUNT} PASS, {CONCERNS_COUNT} CONCERNS, {FAIL_COUNT} FAIL
-
-**Blockers:** {BLOCKER_COUNT} {BLOCKER_DESCRIPTION}
-
-**High Priority Issues:** {HIGH_PRIORITY_COUNT} {HIGH_PRIORITY_DESCRIPTION}
-
-**Recommendation:** {OVERALL_RECOMMENDATION}
-
----
-
-## Performance Assessment
-
-### Response Time (p95)
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE}
-- **Actual:** {ACTUAL_VALUE}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Throughput
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE}
-- **Actual:** {ACTUAL_VALUE}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Resource Usage
-
-- **CPU Usage**
- - **Status:** {STATUS} {STATUS_ICON}
- - **Threshold:** {THRESHOLD_VALUE}
- - **Actual:** {ACTUAL_VALUE}
- - **Evidence:** {EVIDENCE_SOURCE}
-
-- **Memory Usage**
- - **Status:** {STATUS} {STATUS_ICON}
- - **Threshold:** {THRESHOLD_VALUE}
- - **Actual:** {ACTUAL_VALUE}
- - **Evidence:** {EVIDENCE_SOURCE}
-
-### Scalability
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_DESCRIPTION}
-- **Actual:** {ACTUAL_DESCRIPTION}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-
----
-
-## Security Assessment
-
-### Authentication Strength
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_DESCRIPTION}
-- **Actual:** {ACTUAL_DESCRIPTION}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-- **Recommendation:** {RECOMMENDATION} (if CONCERNS or FAIL)
-
-### Authorization Controls
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_DESCRIPTION}
-- **Actual:** {ACTUAL_DESCRIPTION}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Data Protection
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_DESCRIPTION}
-- **Actual:** {ACTUAL_DESCRIPTION}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Vulnerability Management
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_DESCRIPTION} (e.g., "0 critical, <3 high vulnerabilities")
-- **Actual:** {ACTUAL_DESCRIPTION} (e.g., "0 critical, 1 high, 5 medium vulnerabilities")
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "Snyk scan results - scan-2025-10-14.json")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Compliance (if applicable)
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Standards:** {COMPLIANCE_STANDARDS} (e.g., "GDPR, HIPAA, PCI-DSS")
-- **Actual:** {ACTUAL_COMPLIANCE_STATUS}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-
----
-
-## Reliability Assessment
-
-### Availability (Uptime)
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE} (e.g., "99.9%")
-- **Actual:** {ACTUAL_VALUE} (e.g., "99.95%")
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "Uptime monitoring - uptime-report-2025-10-14.csv")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Error Rate
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE} (e.g., "<0.1%")
-- **Actual:** {ACTUAL_VALUE} (e.g., "0.05%")
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "Error logs - logs/errors-2025-10.log")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### MTTR (Mean Time To Recovery)
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE} (e.g., "<15 minutes")
-- **Actual:** {ACTUAL_VALUE} (e.g., "12 minutes")
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "Incident reports - incidents/")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Fault Tolerance
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_DESCRIPTION}
-- **Actual:** {ACTUAL_DESCRIPTION}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### CI Burn-In (Stability)
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE} (e.g., "100 consecutive successful runs")
-- **Actual:** {ACTUAL_VALUE} (e.g., "150 consecutive successful runs")
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "CI burn-in results - ci-burn-in-2025-10-14.log")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Disaster Recovery (if applicable)
-
-- **RTO (Recovery Time Objective)**
- - **Status:** {STATUS} {STATUS_ICON}
- - **Threshold:** {THRESHOLD_VALUE}
- - **Actual:** {ACTUAL_VALUE}
- - **Evidence:** {EVIDENCE_SOURCE}
-
-- **RPO (Recovery Point Objective)**
- - **Status:** {STATUS} {STATUS_ICON}
- - **Threshold:** {THRESHOLD_VALUE}
- - **Actual:** {ACTUAL_VALUE}
- - **Evidence:** {EVIDENCE_SOURCE}
-
----
-
-## Maintainability Assessment
-
-### Test Coverage
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE} (e.g., ">=80%")
-- **Actual:** {ACTUAL_VALUE} (e.g., "87%")
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "Coverage report - coverage/lcov-report/index.html")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Code Quality
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE} (e.g., ">=85/100")
-- **Actual:** {ACTUAL_VALUE} (e.g., "92/100")
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "SonarQube analysis - sonarqube-report-2025-10-14.pdf")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Technical Debt
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE} (e.g., "<5% debt ratio")
-- **Actual:** {ACTUAL_VALUE} (e.g., "3.2% debt ratio")
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "CodeClimate analysis - codeclimate-2025-10-14.json")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Documentation Completeness
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_VALUE} (e.g., ">=90%")
-- **Actual:** {ACTUAL_VALUE} (e.g., "95%")
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "Documentation audit - docs-audit-2025-10-14.md")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### Test Quality (from test-review, if available)
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_DESCRIPTION}
-- **Actual:** {ACTUAL_DESCRIPTION}
-- **Evidence:** {EVIDENCE_SOURCE} (e.g., "Test review report - test-review-2025-10-14.md")
-- **Findings:** {FINDINGS_DESCRIPTION}
-
----
-
-## Custom NFR Assessments (if applicable)
-
-### {CUSTOM_NFR_NAME_1}
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_DESCRIPTION}
-- **Actual:** {ACTUAL_DESCRIPTION}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-
-### {CUSTOM_NFR_NAME_2}
-
-- **Status:** {STATUS} {STATUS_ICON}
-- **Threshold:** {THRESHOLD_DESCRIPTION}
-- **Actual:** {ACTUAL_DESCRIPTION}
-- **Evidence:** {EVIDENCE_SOURCE}
-- **Findings:** {FINDINGS_DESCRIPTION}
-
----
-
-## Quick Wins
-
-{QUICK_WIN_COUNT} quick wins identified for immediate implementation:
-
-1. **{QUICK_WIN_TITLE_1}** ({NFR_CATEGORY}) - {PRIORITY} - {ESTIMATED_EFFORT}
- - {QUICK_WIN_DESCRIPTION}
- - No code changes needed / Minimal code changes
-
-2. **{QUICK_WIN_TITLE_2}** ({NFR_CATEGORY}) - {PRIORITY} - {ESTIMATED_EFFORT}
- - {QUICK_WIN_DESCRIPTION}
-
----
-
-## Recommended Actions
-
-### Immediate (Before Release) - CRITICAL/HIGH Priority
-
-1. **{ACTION_TITLE_1}** - {PRIORITY} - {ESTIMATED_EFFORT} - {OWNER}
- - {ACTION_DESCRIPTION}
- - {SPECIFIC_STEPS}
- - {VALIDATION_CRITERIA}
-
-2. **{ACTION_TITLE_2}** - {PRIORITY} - {ESTIMATED_EFFORT} - {OWNER}
- - {ACTION_DESCRIPTION}
- - {SPECIFIC_STEPS}
- - {VALIDATION_CRITERIA}
-
-### Short-term (Next Sprint) - MEDIUM Priority
-
-1. **{ACTION_TITLE_3}** - {PRIORITY} - {ESTIMATED_EFFORT} - {OWNER}
- - {ACTION_DESCRIPTION}
-
-2. **{ACTION_TITLE_4}** - {PRIORITY} - {ESTIMATED_EFFORT} - {OWNER}
- - {ACTION_DESCRIPTION}
-
-### Long-term (Backlog) - LOW Priority
-
-1. **{ACTION_TITLE_5}** - {PRIORITY} - {ESTIMATED_EFFORT} - {OWNER}
- - {ACTION_DESCRIPTION}
-
----
-
-## Monitoring Hooks
-
-{MONITORING_HOOK_COUNT} monitoring hooks recommended to detect issues before failures:
-
-### Performance Monitoring
-
-- [ ] {MONITORING_TOOL_1} - {MONITORING_DESCRIPTION}
- - **Owner:** {OWNER}
- - **Deadline:** {DEADLINE}
-
-- [ ] {MONITORING_TOOL_2} - {MONITORING_DESCRIPTION}
- - **Owner:** {OWNER}
- - **Deadline:** {DEADLINE}
-
-### Security Monitoring
-
-- [ ] {MONITORING_TOOL_3} - {MONITORING_DESCRIPTION}
- - **Owner:** {OWNER}
- - **Deadline:** {DEADLINE}
-
-### Reliability Monitoring
-
-- [ ] {MONITORING_TOOL_4} - {MONITORING_DESCRIPTION}
- - **Owner:** {OWNER}
- - **Deadline:** {DEADLINE}
-
-### Alerting Thresholds
-
-- [ ] {ALERT_DESCRIPTION} - Notify when {THRESHOLD_CONDITION}
- - **Owner:** {OWNER}
- - **Deadline:** {DEADLINE}
-
----
-
-## Fail-Fast Mechanisms
-
-{FAIL_FAST_COUNT} fail-fast mechanisms recommended to prevent failures:
-
-### Circuit Breakers (Reliability)
-
-- [ ] {CIRCUIT_BREAKER_DESCRIPTION}
- - **Owner:** {OWNER}
- - **Estimated Effort:** {EFFORT}
-
-### Rate Limiting (Performance)
-
-- [ ] {RATE_LIMITING_DESCRIPTION}
- - **Owner:** {OWNER}
- - **Estimated Effort:** {EFFORT}
-
-### Validation Gates (Security)
-
-- [ ] {VALIDATION_GATE_DESCRIPTION}
- - **Owner:** {OWNER}
- - **Estimated Effort:** {EFFORT}
-
-### Smoke Tests (Maintainability)
-
-- [ ] {SMOKE_TEST_DESCRIPTION}
- - **Owner:** {OWNER}
- - **Estimated Effort:** {EFFORT}
-
----
-
-## Evidence Gaps
-
-{EVIDENCE_GAP_COUNT} evidence gaps identified - action required:
-
-- [ ] **{NFR_NAME_1}** ({NFR_CATEGORY})
- - **Owner:** {OWNER}
- - **Deadline:** {DEADLINE}
- - **Suggested Evidence:** {SUGGESTED_EVIDENCE_SOURCE}
- - **Impact:** {IMPACT_DESCRIPTION}
-
-- [ ] **{NFR_NAME_2}** ({NFR_CATEGORY})
- - **Owner:** {OWNER}
- - **Deadline:** {DEADLINE}
- - **Suggested Evidence:** {SUGGESTED_EVIDENCE_SOURCE}
- - **Impact:** {IMPACT_DESCRIPTION}
-
----
-
-## Findings Summary
-
-**Based on ADR Quality Readiness Checklist (8 categories, 29 criteria)**
-
-| Category | Criteria Met | PASS | CONCERNS | FAIL | Overall Status |
-|----------|--------------|------|----------|------|----------------|
-| 1. Testability & Automation | {T_MET}/4 | {T_PASS} | {T_CONCERNS} | {T_FAIL} | {T_STATUS} {T_ICON} |
-| 2. Test Data Strategy | {TD_MET}/3 | {TD_PASS} | {TD_CONCERNS} | {TD_FAIL} | {TD_STATUS} {TD_ICON} |
-| 3. Scalability & Availability | {SA_MET}/4 | {SA_PASS} | {SA_CONCERNS} | {SA_FAIL} | {SA_STATUS} {SA_ICON} |
-| 4. Disaster Recovery | {DR_MET}/3 | {DR_PASS} | {DR_CONCERNS} | {DR_FAIL} | {DR_STATUS} {DR_ICON} |
-| 5. Security | {SEC_MET}/4 | {SEC_PASS} | {SEC_CONCERNS} | {SEC_FAIL} | {SEC_STATUS} {SEC_ICON} |
-| 6. Monitorability, Debuggability & Manageability | {MON_MET}/4 | {MON_PASS} | {MON_CONCERNS} | {MON_FAIL} | {MON_STATUS} {MON_ICON} |
-| 7. QoS & QoE | {QOS_MET}/4 | {QOS_PASS} | {QOS_CONCERNS} | {QOS_FAIL} | {QOS_STATUS} {QOS_ICON} |
-| 8. Deployability | {DEP_MET}/3 | {DEP_PASS} | {DEP_CONCERNS} | {DEP_FAIL} | {DEP_STATUS} {DEP_ICON} |
-| **Total** | **{TOTAL_MET}/29** | **{TOTAL_PASS}** | **{TOTAL_CONCERNS}** | **{TOTAL_FAIL}** | **{OVERALL_STATUS} {OVERALL_ICON}** |
-
-**Criteria Met Scoring:**
-- β₯26/29 (90%+) = Strong foundation
-- 20-25/29 (69-86%) = Room for improvement
-- <20/29 (<69%) = Significant gaps
-
----
-
-## Gate YAML Snippet
-
-```yaml
-nfr_assessment:
- date: '{DATE}'
- story_id: '{STORY_ID}'
- feature_name: '{FEATURE_NAME}'
- adr_checklist_score: '{TOTAL_MET}/29' # ADR Quality Readiness Checklist
- categories:
- testability_automation: '{T_STATUS}'
- test_data_strategy: '{TD_STATUS}'
- scalability_availability: '{SA_STATUS}'
- disaster_recovery: '{DR_STATUS}'
- security: '{SEC_STATUS}'
- monitorability: '{MON_STATUS}'
- qos_qoe: '{QOS_STATUS}'
- deployability: '{DEP_STATUS}'
- overall_status: '{OVERALL_STATUS}'
- critical_issues: { CRITICAL_COUNT }
- high_priority_issues: { HIGH_COUNT }
- medium_priority_issues: { MEDIUM_COUNT }
- concerns: { CONCERNS_COUNT }
- blockers: { BLOCKER_BOOLEAN } # true/false
- quick_wins: { QUICK_WIN_COUNT }
- evidence_gaps: { EVIDENCE_GAP_COUNT }
- recommendations:
- - '{RECOMMENDATION_1}'
- - '{RECOMMENDATION_2}'
- - '{RECOMMENDATION_3}'
-```
-
----
-
-## Related Artifacts
-
-- **Story File:** {STORY_FILE_PATH} (if applicable)
-- **Tech Spec:** {TECH_SPEC_PATH} (if available)
-- **PRD:** {PRD_PATH} (if available)
-- **Test Design:** {TEST_DESIGN_PATH} (if available)
-- **Evidence Sources:**
- - Test Results: {TEST_RESULTS_DIR}
- - Metrics: {METRICS_DIR}
- - Logs: {LOGS_DIR}
- - CI Results: {CI_RESULTS_PATH}
-
----
-
-## Recommendations Summary
-
-**Release Blocker:** {RELEASE_BLOCKER_SUMMARY}
-
-**High Priority:** {HIGH_PRIORITY_SUMMARY}
-
-**Medium Priority:** {MEDIUM_PRIORITY_SUMMARY}
-
-**Next Steps:** {NEXT_STEPS_DESCRIPTION}
-
----
-
-## Sign-Off
-
-**NFR Assessment:**
-
-- Overall Status: {OVERALL_STATUS} {OVERALL_ICON}
-- Critical Issues: {CRITICAL_COUNT}
-- High Priority Issues: {HIGH_COUNT}
-- Concerns: {CONCERNS_COUNT}
-- Evidence Gaps: {EVIDENCE_GAP_COUNT}
-
-**Gate Status:** {GATE_STATUS} {GATE_ICON}
-
-**Next Actions:**
-
-- If PASS β : Proceed to `*gate` workflow or release
-- If CONCERNS β οΈ: Address HIGH/CRITICAL issues, re-run `*nfr-assess`
-- If FAIL β: Resolve FAIL status NFRs, re-run `*nfr-assess`
-
-**Generated:** {DATE}
-**Workflow:** testarch-nfr v4.0
-
----
-
-
diff --git a/src/bmm/workflows/testarch/nfr-assess/workflow.yaml b/src/bmm/workflows/testarch/nfr-assess/workflow.yaml
deleted file mode 100644
index ce3f7381..00000000
--- a/src/bmm/workflows/testarch/nfr-assess/workflow.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Test Architect workflow: nfr-assess
-name: testarch-nfr
-description: "Assess non-functional requirements (performance, security, reliability, maintainability) before release with evidence-based validation"
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/testarch/nfr-assess"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-template: "{installed_path}/nfr-report-template.md"
-
-# Variables and inputs
-variables:
- # NFR category assessment (defaults to all categories)
- custom_nfr_categories: "" # Optional additional categories beyond standard (security, performance, reliability, maintainability)
-
-# Output configuration
-default_output_file: "{output_folder}/nfr-assessment.md"
-
-# Required tools
-required_tools:
- - read_file # Read story, test results, metrics, logs, BMad artifacts
- - write_file # Create NFR assessment, gate YAML, evidence checklist
- - list_files # Discover test results, metrics, logs
- - search_repo # Find NFR-related tests and evidence
- - glob # Find result files matching patterns
-
-tags:
- - qa
- - nfr
- - test-architect
- - performance
- - security
- - reliability
-
-execution_hints:
- interactive: false # Minimize prompts
- autonomous: true # Proceed without user input unless blocked
- iterative: true
-
-web_bundle: false
diff --git a/src/bmm/workflows/testarch/test-design/checklist.md b/src/bmm/workflows/testarch/test-design/checklist.md
deleted file mode 100644
index 7c4475ca..00000000
--- a/src/bmm/workflows/testarch/test-design/checklist.md
+++ /dev/null
@@ -1,407 +0,0 @@
-# Test Design and Risk Assessment - Validation Checklist
-
-## Prerequisites (Mode-Dependent)
-
-**System-Level Mode (Phase 3):**
-- [ ] PRD exists with functional and non-functional requirements
-- [ ] ADR (Architecture Decision Record) exists
-- [ ] Architecture document available (architecture.md or tech-spec)
-- [ ] Requirements are testable and unambiguous
-
-**Epic-Level Mode (Phase 4):**
-- [ ] Story markdown with clear acceptance criteria exists
-- [ ] PRD or epic documentation available
-- [ ] Architecture documents available (test-design-architecture.md + test-design-qa.md from Phase 3, if exists)
-- [ ] Requirements are testable and unambiguous
-
-## Process Steps
-
-### Step 1: Context Loading
-
-- [ ] PRD.md read and requirements extracted
-- [ ] Epics.md or specific epic documentation loaded
-- [ ] Story markdown with acceptance criteria analyzed
-- [ ] Architecture documents reviewed (if available)
-- [ ] Existing test coverage analyzed
-- [ ] Knowledge base fragments loaded (risk-governance, probability-impact, test-levels, test-priorities)
-
-### Step 2: Risk Assessment
-
-- [ ] Genuine risks identified (not just features)
-- [ ] Risks classified by category (TECH/SEC/PERF/DATA/BUS/OPS)
-- [ ] Probability scored (1-3 for each risk)
-- [ ] Impact scored (1-3 for each risk)
-- [ ] Risk scores calculated (probability Γ impact)
-- [ ] High-priority risks (score β₯6) flagged
-- [ ] Mitigation plans defined for high-priority risks
-- [ ] Owners assigned for each mitigation
-- [ ] Timelines set for mitigations
-- [ ] Residual risk documented
-
-### Step 3: Coverage Design
-
-- [ ] Acceptance criteria broken into atomic scenarios
-- [ ] Test levels selected (E2E/API/Component/Unit)
-- [ ] No duplicate coverage across levels
-- [ ] Priority levels assigned (P0/P1/P2/P3)
-- [ ] P0 scenarios meet strict criteria (blocks core + high risk + no workaround)
-- [ ] Data prerequisites identified
-- [ ] Tooling requirements documented
-- [ ] Execution order defined (smoke β P0 β P1 β P2/P3)
-
-### Step 4: Deliverables Generation
-
-- [ ] Risk assessment matrix created
-- [ ] Coverage matrix created
-- [ ] Execution order documented
-- [ ] Resource estimates calculated
-- [ ] Quality gate criteria defined
-- [ ] Output file written to correct location
-- [ ] Output file uses template structure
-
-## Output Validation
-
-### Risk Assessment Matrix
-
-- [ ] All risks have unique IDs (R-001, R-002, etc.)
-- [ ] Each risk has category assigned
-- [ ] Probability values are 1, 2, or 3
-- [ ] Impact values are 1, 2, or 3
-- [ ] Scores calculated correctly (P Γ I)
-- [ ] High-priority risks (β₯6) clearly marked
-- [ ] Mitigation strategies specific and actionable
-
-### Coverage Matrix
-
-- [ ] All requirements mapped to test levels
-- [ ] Priorities assigned to all scenarios
-- [ ] Risk linkage documented
-- [ ] Test counts realistic
-- [ ] Owners assigned where applicable
-- [ ] No duplicate coverage (same behavior at multiple levels)
-
-### Execution Strategy
-
-**CRITICAL: Keep execution strategy simple, avoid redundancy**
-
-- [ ] **Simple structure**: PR / Nightly / Weekly (NOT complex smoke/P0/P1/P2 tiers)
-- [ ] **PR execution**: All functional tests unless significant infrastructure overhead
-- [ ] **Nightly/Weekly**: Only performance, chaos, long-running, manual tests
-- [ ] **No redundancy**: Don't re-list all tests (already in coverage plan)
-- [ ] **Philosophy stated**: "Run everything in PRs if <15 min, defer only if expensive/long"
-- [ ] **Playwright parallelization noted**: 100s of tests in 10-15 min
-
-### Resource Estimates
-
-**CRITICAL: Use intervals/ranges, NOT exact numbers**
-
-- [ ] P0 effort provided as interval range (e.g., "~25-40 hours" NOT "36 hours")
-- [ ] P1 effort provided as interval range (e.g., "~20-35 hours" NOT "27 hours")
-- [ ] P2 effort provided as interval range (e.g., "~10-30 hours" NOT "15.5 hours")
-- [ ] P3 effort provided as interval range (e.g., "~2-5 hours" NOT "2.5 hours")
-- [ ] Total effort provided as interval range (e.g., "~55-110 hours" NOT "81 hours")
-- [ ] Timeline provided as week range (e.g., "~1.5-3 weeks" NOT "11 days")
-- [ ] Estimates include setup time and account for complexity variations
-- [ ] **No false precision**: Avoid exact calculations like "18 tests Γ 2 hours = 36 hours"
-
-### Quality Gate Criteria
-
-- [ ] P0 pass rate threshold defined (should be 100%)
-- [ ] P1 pass rate threshold defined (typically β₯95%)
-- [ ] High-risk mitigation completion required
-- [ ] Coverage targets specified (β₯80% recommended)
-
-## Quality Checks
-
-### Evidence-Based Assessment
-
-- [ ] Risk assessment based on documented evidence
-- [ ] No speculation on business impact
-- [ ] Assumptions clearly documented
-- [ ] Clarifications requested where needed
-- [ ] Historical data referenced where available
-
-### Risk Classification Accuracy
-
-- [ ] TECH risks are architecture/integration issues
-- [ ] SEC risks are security vulnerabilities
-- [ ] PERF risks are performance/scalability concerns
-- [ ] DATA risks are data integrity issues
-- [ ] BUS risks are business/revenue impacts
-- [ ] OPS risks are deployment/operational issues
-
-### Priority Assignment Accuracy
-
-**CRITICAL: Priority classification is separate from execution timing**
-
-- [ ] **Priority sections (P0/P1/P2/P3) do NOT include execution context** (e.g., no "Run on every commit" in headers)
-- [ ] **Priority sections have only "Criteria" and "Purpose"** (no "Execution:" field)
-- [ ] **Execution Strategy section** is separate and handles timing based on infrastructure overhead
-- [ ] P0: Truly blocks core functionality + High-risk (β₯6) + No workaround
-- [ ] P1: Important features + Medium-risk (3-4) + Common workflows
-- [ ] P2: Secondary features + Low-risk (1-2) + Edge cases
-- [ ] P3: Nice-to-have + Exploratory + Benchmarks
-- [ ] **Note at top of Test Coverage Plan**: Clarifies P0/P1/P2/P3 = priority/risk, NOT execution timing
-
-### Test Level Selection
-
-- [ ] E2E used only for critical paths
-- [ ] API tests cover complex business logic
-- [ ] Component tests for UI interactions
-- [ ] Unit tests for edge cases and algorithms
-- [ ] No redundant coverage
-
-## Integration Points
-
-### Knowledge Base Integration
-
-- [ ] risk-governance.md consulted
-- [ ] probability-impact.md applied
-- [ ] test-levels-framework.md referenced
-- [ ] test-priorities-matrix.md used
-- [ ] Additional fragments loaded as needed
-
-### Status File Integration
-
-- [ ] Test design logged in Quality & Testing Progress
-- [ ] Epic number and scope documented
-- [ ] Completion timestamp recorded
-
-### Workflow Dependencies
-
-- [ ] Can proceed to `*atdd` workflow with P0 scenarios
-- [ ] `*atdd` is a separate workflow and must be run explicitly (not auto-run)
-- [ ] Can proceed to `automate` workflow with full coverage plan
-- [ ] Risk assessment informs `gate` workflow criteria
-- [ ] Integrates with `ci` workflow execution order
-
-## System-Level Mode: Two-Document Validation
-
-**When in system-level mode (PRD + ADR input), validate BOTH documents:**
-
-### test-design-architecture.md
-
-- [ ] **Purpose statement** at top (serves as contract with Architecture team)
-- [ ] **Executive Summary** with scope, business context, architecture decisions, risk summary
-- [ ] **Quick Guide** section with three tiers:
- - [ ] π¨ BLOCKERS - Team Must Decide (Sprint 0 critical path items)
- - [ ] β οΈ HIGH PRIORITY - Team Should Validate (recommendations for approval)
- - [ ] π INFO ONLY - Solutions Provided (no decisions needed)
-- [ ] **Risk Assessment** section - **ACTIONABLE**
- - [ ] Total risks identified count
- - [ ] High-priority risks table (score β₯6) with all columns: Risk ID, Category, Description, Probability, Impact, Score, Mitigation, Owner, Timeline
- - [ ] Medium and low-priority risks tables
- - [ ] Risk category legend included
-- [ ] **Testability Concerns and Architectural Gaps** section - **ACTIONABLE**
- - [ ] **Sub-section: π¨ ACTIONABLE CONCERNS** at TOP
- - [ ] Blockers to Fast Feedback table (WHAT architecture must provide)
- - [ ] Architectural Improvements Needed (WHAT must be changed)
- - [ ] Each concern has: Owner, Timeline, Impact
- - [ ] **Sub-section: Testability Assessment Summary** at BOTTOM (FYI)
- - [ ] What Works Well (passing items)
- - [ ] Accepted Trade-offs (no action required)
- - [ ] This section only included if worth mentioning; otherwise omitted
-- [ ] **Risk Mitigation Plans** for all high-priority risks (β₯6)
- - [ ] Each plan has: Strategy (numbered steps), Owner, Timeline, Status, Verification
- - [ ] **Only Backend/DevOps/Arch/Security mitigations** (production code changes)
- - [ ] QA-owned mitigations belong in QA doc instead
-- [ ] **Assumptions and Dependencies** section
- - [ ] **Architectural assumptions only** (SLO targets, replication lag, system design)
- - [ ] Assumptions list (numbered)
- - [ ] Dependencies list with required dates
- - [ ] Risks to plan with impact and contingency
- - [ ] QA execution assumptions belong in QA doc instead
-- [ ] **NO test implementation code** (long examples belong in QA doc)
-- [ ] **NO test scripts** (no Playwright test(...) blocks, no assertions, no test setup code)
-- [ ] **NO NFR test examples** (NFR sections describe WHAT to test, not HOW to test)
-- [ ] **NO test scenario checklists** (belong in QA doc)
-- [ ] **NO bloat or repetition** (consolidate repeated notes, avoid over-explanation)
-- [ ] **Cross-references to QA doc** where appropriate (instead of duplication)
-- [ ] **RECIPE SECTIONS NOT IN ARCHITECTURE DOC:**
- - [ ] NO "Test Levels Strategy" section (unit/integration/E2E split belongs in QA doc only)
- - [ ] NO "NFR Testing Approach" section with detailed test procedures (belongs in QA doc only)
- - [ ] NO "Test Environment Requirements" section (belongs in QA doc only)
- - [ ] NO "Recommendations for Sprint 0" section with test framework setup (belongs in QA doc only)
- - [ ] NO "Quality Gate Criteria" section (pass rates, coverage targets belong in QA doc only)
- - [ ] NO "Tool Selection" section (Playwright, k6, etc. belongs in QA doc only)
-
-### test-design-qa.md
-
-**REQUIRED SECTIONS:**
-
-- [ ] **Purpose statement** at top (test execution recipe)
-- [ ] **Executive Summary** with risk summary and coverage summary
-- [ ] **Dependencies & Test Blockers** section in POSITION 2 (right after Executive Summary)
- - [ ] Backend/Architecture dependencies listed (what QA needs from other teams)
- - [ ] QA infrastructure setup listed (factories, fixtures, environments)
- - [ ] Code example with playwright-utils if config.tea_use_playwright_utils is true
- - [ ] Test from '@seontechnologies/playwright-utils/api-request/fixtures'
- - [ ] Expect from '@playwright/test' (playwright-utils does not re-export expect)
- - [ ] Code examples include assertions (no unused imports)
-- [ ] **Risk Assessment** section (brief, references Architecture doc)
- - [ ] High-priority risks table
- - [ ] Medium/low-priority risks table
- - [ ] Each risk shows "QA Test Coverage" column (how QA validates)
-- [ ] **Test Coverage Plan** with P0/P1/P2/P3 sections
- - [ ] Priority sections have ONLY "Criteria" (no execution context)
- - [ ] Note at top: "P0/P1/P2/P3 = priority, NOT execution timing"
- - [ ] Test tables with columns: Test ID | Requirement | Test Level | Risk Link | Notes
-- [ ] **Execution Strategy** section (organized by TOOL TYPE)
- - [ ] Every PR: Playwright tests (~10-15 min)
- - [ ] Nightly: k6 performance tests (~30-60 min)
- - [ ] Weekly: Chaos & long-running (~hours)
- - [ ] Philosophy: "Run everything in PRs unless expensive/long-running"
-- [ ] **QA Effort Estimate** section (QA effort ONLY)
- - [ ] Interval-based estimates (e.g., "~1-2 weeks" NOT "36 hours")
- - [ ] NO DevOps, Backend, Data Eng, Finance effort
- - [ ] NO Sprint breakdowns (too prescriptive)
-- [ ] **Appendix A: Code Examples & Tagging**
-- [ ] **Appendix B: Knowledge Base References**
-
-**DON'T INCLUDE (bloat):**
-- [ ] β NO Quick Reference section
-- [ ] β NO System Architecture Summary
-- [ ] β NO Test Environment Requirements as separate section (integrate into Dependencies)
-- [ ] β NO Testability Assessment section (covered in Dependencies)
-- [ ] β NO Test Levels Strategy section (obvious from test scenarios)
-- [ ] β NO NFR Readiness Summary
-- [ ] β NO Quality Gate Criteria section (teams decide for themselves)
-- [ ] β NO Follow-on Workflows section (BMAD commands self-explanatory)
-- [ ] β NO Approval section
-- [ ] β NO Infrastructure/DevOps/Finance effort tables (out of scope)
-- [ ] β NO Sprint 0/1/2/3 breakdown tables
-- [ ] β NO Next Steps section
-
-### Cross-Document Consistency
-
-- [ ] Both documents reference same risks by ID (R-001, R-002, etc.)
-- [ ] Both documents use consistent priority levels (P0, P1, P2, P3)
-- [ ] Both documents reference same Sprint 0 blockers
-- [ ] No duplicate content (cross-reference instead)
-- [ ] Dates and authors match across documents
-- [ ] ADR and PRD references consistent
-
-### Document Quality (Anti-Bloat Check)
-
-**CRITICAL: Check for bloat and repetition across BOTH documents**
-
-- [ ] **No repeated notes 10+ times** (e.g., "Timing is pessimistic until R-005 fixed" on every section)
-- [ ] **Repeated information consolidated** (write once at top, reference briefly if needed)
-- [ ] **No excessive detail** that doesn't add value (obvious concepts, redundant examples)
-- [ ] **Focus on unique/critical info** (only document what's different from standard practice)
-- [ ] **Architecture doc**: Concerns-focused, NOT implementation-focused
-- [ ] **QA doc**: Implementation-focused, NOT theory-focused
-- [ ] **Clear separation**: Architecture = WHAT and WHY, QA = HOW
-- [ ] **Professional tone**: No AI slop markers
- - [ ] Avoid excessive β /β emojis (use sparingly, only when adding clarity)
- - [ ] Avoid "absolutely", "excellent", "fantastic", overly enthusiastic language
- - [ ] Write professionally and directly
-- [ ] **Architecture doc length**: Target ~150-200 lines max (focus on actionable concerns only)
-- [ ] **QA doc length**: Keep concise, remove bloat sections
-
-### Architecture Doc Structure (Actionable-First Principle)
-
-**CRITICAL: Validate structure follows actionable-first, FYI-last principle**
-
-- [ ] **Actionable sections at TOP:**
- - [ ] Quick Guide (π¨ BLOCKERS first, then β οΈ HIGH PRIORITY, then π INFO ONLY last)
- - [ ] Risk Assessment (high-priority risks β₯6 at top)
- - [ ] Testability Concerns (concerns/blockers at top, passing items at bottom)
- - [ ] Risk Mitigation Plans (for high-priority risks β₯6)
-- [ ] **FYI sections at BOTTOM:**
- - [ ] Testability Assessment Summary (what works well - only if worth mentioning)
- - [ ] Assumptions and Dependencies
-- [ ] **ASRs categorized correctly:**
- - [ ] Actionable ASRs included in π¨ or β οΈ sections
- - [ ] FYI ASRs included in π section or omitted if obvious
-
-## Completion Criteria
-
-**All must be true:**
-
-- [ ] All prerequisites met
-- [ ] All process steps completed
-- [ ] All output validations passed
-- [ ] All quality checks passed
-- [ ] All integration points verified
-- [ ] Output file(s) complete and well-formatted
-- [ ] **System-level mode:** Both documents validated (if applicable)
-- [ ] **Epic-level mode:** Single document validated (if applicable)
-- [ ] Team review scheduled (if required)
-
-## Post-Workflow Actions
-
-**User must complete:**
-
-1. [ ] Review risk assessment with team
-2. [ ] Prioritize mitigation for high-priority risks (score β₯6)
-3. [ ] Allocate resources per estimates
-4. [ ] Run `*atdd` workflow to generate P0 tests (separate workflow; not auto-run)
-5. [ ] Set up test data factories and fixtures
-6. [ ] Schedule team review of test design document
-
-**Recommended next workflows:**
-
-1. [ ] Run `atdd` workflow for P0 test generation
-2. [ ] Run `framework` workflow if not already done
-3. [ ] Run `ci` workflow to configure pipeline stages
-
-## Rollback Procedure
-
-If workflow fails:
-
-1. [ ] Delete output file
-2. [ ] Review error logs
-3. [ ] Fix missing context (PRD, architecture docs)
-4. [ ] Clarify ambiguous requirements
-5. [ ] Retry workflow
-
-## Notes
-
-### Common Issues
-
-**Issue**: Too many P0 tests
-
-- **Solution**: Apply strict P0 criteria - must block core AND high risk AND no workaround
-
-**Issue**: Risk scores all high
-
-- **Solution**: Differentiate between high-impact (3) and degraded (2) impacts
-
-**Issue**: Duplicate coverage across levels
-
-- **Solution**: Use test pyramid - E2E for critical paths only
-
-**Issue**: Resource estimates too high or too precise
-
-- **Solution**:
- - Invest in fixtures/factories to reduce per-test setup time
- - Use interval ranges (e.g., "~55-110 hours") instead of exact numbers (e.g., "81 hours")
- - Widen intervals if high uncertainty exists
-
-**Issue**: Execution order section too complex or redundant
-
-- **Solution**:
- - Default: Run everything in PRs (<15 min with Playwright parallelization)
- - Only defer to nightly/weekly if expensive (k6, chaos, 4+ hour tests)
- - Don't create smoke/P0/P1/P2/P3 tier structure
- - Don't re-list all tests (already in coverage plan)
-
-### Best Practices
-
-- Base risk assessment on evidence, not assumptions
-- High-priority risks (β₯6) require immediate mitigation
-- P0 tests should cover <10% of total scenarios
-- Avoid testing same behavior at multiple levels
-- **Use interval-based estimates** (e.g., "~25-40 hours") instead of exact numbers to avoid false precision and provide flexibility
-- **Keep execution strategy simple**: Default to "run everything in PRs" (<15 min with Playwright), only defer if expensive/long-running
-- **Avoid execution order redundancy**: Don't create complex tier structures or re-list tests
-
----
-
-**Checklist Complete**: Sign off when all items validated.
-
-**Completed by:** {name}
-**Date:** {date}
-**Epic:** {epic title}
-**Notes:** {additional notes}
diff --git a/src/bmm/workflows/testarch/test-design/instructions.md b/src/bmm/workflows/testarch/test-design/instructions.md
deleted file mode 100644
index 1eae05be..00000000
--- a/src/bmm/workflows/testarch/test-design/instructions.md
+++ /dev/null
@@ -1,1158 +0,0 @@
-
-
-# Test Design and Risk Assessment
-
-**Workflow ID**: `_bmad/bmm/testarch/test-design`
-**Version**: 4.0 (BMad v6)
-
----
-
-## Overview
-
-Plans comprehensive test coverage strategy with risk assessment, priority classification, and execution ordering. This workflow operates in **two modes**:
-
-- **System-Level Mode (Phase 3)**: Testability review of architecture before solutioning gate check
-- **Epic-Level Mode (Phase 4)**: Per-epic test planning with risk assessment (current behavior)
-
-The workflow auto-detects which mode to use based on project phase.
-
----
-
-## Preflight: Detect Mode and Load Context
-
-**Critical:** Determine mode before proceeding.
-
-### Mode Detection (Flexible for Standalone Use)
-
-TEA test-design workflow supports TWO modes, detected automatically:
-
-1. **Check User Intent Explicitly (Priority 1)**
-
- **Deterministic Rules:**
- - User provided **PRD+ADR only** (no Epic+Stories) β **System-Level Mode**
- - User provided **Epic+Stories only** (no PRD+ADR) β **Epic-Level Mode**
- - User provided **BOTH PRD+ADR AND Epic+Stories** β **Prefer System-Level Mode** (architecture review comes first in Phase 3, then epic planning in Phase 4). If mode preference is unclear, ask user: "Should I create (A) System-level test design (PRD + ADR β Architecture doc + QA doc) or (B) Epic-level test design (Epic β Single test plan)?"
- - If user intent is clear from context, use that mode regardless of file structure
-
-2. **Fallback to File-Based Detection (Priority 2 - BMad-Integrated)**
- - Check for `{implementation_artifacts}/sprint-status.yaml`
- - If exists β **Epic-Level Mode** (Phase 4, single document output)
- - If NOT exists β **System-Level Mode** (Phase 3, TWO document outputs)
-
-3. **If Ambiguous, ASK USER (Priority 3)**
- - "I see you have [PRD/ADR/Epic/Stories]. Should I create:
- - (A) System-level test design (PRD + ADR β Architecture doc + QA doc)?
- - (B) Epic-level test design (Epic β Single test plan)?"
-
-**Mode Descriptions:**
-
-**System-Level Mode (PRD + ADR Input)**
-- **When to use:** Early in project (Phase 3 Solutioning), architecture being designed
-- **Input:** PRD, ADR, architecture.md (optional)
-- **Output:** TWO documents
- - `test-design-architecture.md` (for Architecture/Dev teams)
- - `test-design-qa.md` (for QA team)
-- **Focus:** Testability assessment, ASRs, NFR requirements, Sprint 0 setup
-
-**Epic-Level Mode (Epic + Stories Input)**
-- **When to use:** During implementation (Phase 4), per-epic planning
-- **Input:** Epic, Stories, tech-specs (optional)
-- **Output:** ONE document
- - `test-design-epic-{N}.md` (combined risk assessment + test plan)
-- **Focus:** Risk assessment, coverage plan, execution order, quality gates
-
-**Key Insight: TEA Works Standalone OR Integrated**
-
-**Standalone (No BMad artifacts):**
-- User provides PRD + ADR β System-Level Mode
-- User provides Epic description β Epic-Level Mode
-- TEA doesn't mandate full BMad workflow
-
-**BMad-Integrated (Full workflow):**
-- BMad creates `sprint-status.yaml` β Automatic Epic-Level detection
-- BMad creates PRD, ADR, architecture.md β Automatic System-Level detection
-- TEA leverages BMad artifacts for richer context
-
-**Message to User:**
-> You don't need to follow full BMad methodology to use TEA test-design.
-> Just provide PRD + ADR for system-level, or Epic for epic-level.
-> TEA will auto-detect and produce appropriate documents.
-
-**Halt Condition:** If mode cannot be determined AND user intent unclear AND required files missing, HALT and notify user:
-- "Please provide either: (A) PRD + ADR for system-level test design, OR (B) Epic + Stories for epic-level test design"
-
----
-
-## Step 1: Load Context (Mode-Aware)
-
-**Mode-Specific Loading:**
-
-### System-Level Mode (Phase 3)
-
-1. **Read Architecture Documentation**
- - Load architecture.md or tech-spec (REQUIRED)
- - Load PRD.md for functional and non-functional requirements
- - Load epics.md for feature scope
- - Identify technology stack decisions (frameworks, databases, deployment targets)
- - Note integration points and external system dependencies
- - Extract NFR requirements (performance SLOs, security requirements, etc.)
-
-2. **Check Playwright Utils Flag**
-
- Read `{config_source}` and check `config.tea_use_playwright_utils`.
-
- If true, note that `@seontechnologies/playwright-utils` provides utilities for test implementation. Reference in test design where relevant.
-
-3. **Load Knowledge Base Fragments (System-Level)**
-
- **Critical:** Consult `src/bmm/testarch/tea-index.csv` to load:
- - `adr-quality-readiness-checklist.md` - 8-category 29-criteria NFR framework (testability, security, scalability, DR, QoS, deployability, etc.)
- - `test-levels-framework.md` - Test levels strategy guidance
- - `risk-governance.md` - Testability risk identification
- - `test-quality.md` - Quality standards and Definition of Done
-
-4. **Analyze Existing Test Setup (if brownfield)**
- - Search for existing test directories
- - Identify current test framework (if any)
- - Note testability concerns in existing codebase
-
-### Epic-Level Mode (Phase 4)
-
-1. **Read Requirements Documentation**
- - Load PRD.md for high-level product requirements
- - Read epics.md or specific epic for feature scope
- - Read story markdown for detailed acceptance criteria
- - Identify all testable requirements
-
-2. **Load Architecture Context**
- - Read architecture.md for system design
- - Read tech-spec for implementation details
- - Read test-design-architecture.md and test-design-qa.md (if exist from Phase 3 system-level test design)
- - Identify technical constraints and dependencies
- - Note integration points and external systems
-
-3. **Analyze Existing Test Coverage**
- - Search for existing test files in `{test_dir}`
- - Identify coverage gaps
- - Note areas with insufficient testing
- - Check for flaky or outdated tests
-
-4. **Load Knowledge Base Fragments (Epic-Level)**
-
- **Critical:** Consult `src/bmm/testarch/tea-index.csv` to load:
- - `risk-governance.md` - Risk classification framework (6 categories: TECH, SEC, PERF, DATA, BUS, OPS), automated scoring, gate decision engine, owner tracking (625 lines, 4 examples)
- - `probability-impact.md` - Risk scoring methodology (probability Γ impact matrix, automated classification, dynamic re-assessment, gate integration, 604 lines, 4 examples)
- - `test-levels-framework.md` - Test level selection guidance (E2E vs API vs Component vs Unit with decision matrix, characteristics, when to use each, 467 lines, 4 examples)
- - `test-priorities-matrix.md` - P0-P3 prioritization criteria (automated priority calculation, risk-based mapping, tagging strategy, time budgets, 389 lines, 2 examples)
-
-**Halt Condition (Epic-Level only):** If story data or acceptance criteria are missing, check if brownfield exploration is needed. If neither requirements NOR exploration possible, HALT with message: "Epic-level test design requires clear requirements, acceptance criteria, or brownfield app URL for exploration"
-
----
-
-## Step 1.5: System-Level Testability Review (Phase 3 Only)
-
-**Skip this step if Epic-Level Mode.** This step only executes in System-Level Mode.
-
-### Actions
-
-1. **Review Architecture for Testability**
-
- **STRUCTURE PRINCIPLE: CONCERNS FIRST, PASSING ITEMS LAST**
-
- Evaluate architecture against these criteria and structure output as:
- 1. **Testability Concerns** (ACTIONABLE - what's broken/missing)
- 2. **Testability Assessment Summary** (FYI - what works well)
-
- **Testability Criteria:**
-
- **Controllability:**
- - Can we control system state for testing? (API seeding, factories, database reset)
- - Are external dependencies mockable? (interfaces, dependency injection)
- - Can we trigger error conditions? (chaos engineering, fault injection)
-
- **Observability:**
- - Can we inspect system state? (logging, metrics, traces)
- - Are test results deterministic? (no race conditions, clear success/failure)
- - Can we validate NFRs? (performance metrics, security audit logs)
-
- **Reliability:**
- - Are tests isolated? (parallel-safe, stateless, cleanup discipline)
- - Can we reproduce failures? (deterministic waits, HAR capture, seed data)
- - Are components loosely coupled? (mockable, testable boundaries)
-
- **In Architecture Doc Output:**
- - **Section A: Testability Concerns** (TOP) - List what's BROKEN or MISSING
- - Example: "No API for test data seeding β Cannot parallelize tests"
- - Example: "Hardcoded DB connection β Cannot test in CI"
- - **Section B: Testability Assessment Summary** (BOTTOM) - List what PASSES
- - Example: "β API-first design supports test isolation"
- - Only include if worth mentioning; otherwise omit this section entirely
-
-2. **Identify Architecturally Significant Requirements (ASRs)**
-
- **CRITICAL: ASRs must indicate if ACTIONABLE or FYI**
-
- From PRD NFRs and architecture decisions, identify quality requirements that:
- - Drive architecture decisions (e.g., "Must handle 10K concurrent users" β caching architecture)
- - Pose testability challenges (e.g., "Sub-second response time" β performance test infrastructure)
- - Require special test environments (e.g., "Multi-region deployment" β regional test instances)
-
- Score each ASR using risk matrix (probability Γ impact).
-
- **In Architecture Doc, categorize ASRs:**
- - **ACTIONABLE ASRs** (require architecture changes): Include in "Quick Guide" π¨ or β οΈ sections
- - **FYI ASRs** (already satisfied by architecture): Include in "Quick Guide" π section OR omit if obvious
-
- **Example:**
- - ASR-001 (Score 9): "Multi-region deployment requires region-specific test infrastructure" β **ACTIONABLE** (goes in π¨ BLOCKERS)
- - ASR-002 (Score 4): "OAuth 2.1 authentication already implemented in ADR-5" β **FYI** (goes in π INFO ONLY or omit)
-
- **Structure Principle:** Actionable ASRs at TOP, FYI ASRs at BOTTOM (or omit)
-
-3. **Define Test Levels Strategy**
-
- **IMPORTANT: This section goes in QA doc ONLY, NOT in Architecture doc**
-
- Based on architecture (mobile, web, API, microservices, monolith):
- - Recommend unit/integration/E2E split (e.g., 70/20/10 for API-heavy, 40/30/30 for UI-heavy)
- - Identify test environment needs (local, staging, ephemeral, production-like)
- - Define testing approach per technology (Playwright for web, Maestro for mobile, k6 for performance)
-
- **In Architecture doc:** Only mention test level split if it's an ACTIONABLE concern
- - Example: "API response time <100ms requires load testing infrastructure" (concern)
- - DO NOT include full test level strategy table in Architecture doc
-
-4. **Assess NFR Requirements (MINIMAL in Architecture Doc)**
-
- **CRITICAL: NFR testing approach is a RECIPE - belongs in QA doc ONLY**
-
- **In Architecture Doc:**
- - Only mention NFRs if they create testability CONCERNS
- - Focus on WHAT architecture must provide, not HOW to test
- - Keep it brief - 1-2 sentences per NFR category at most
-
- **Example - Security NFR in Architecture doc (if there's a concern):**
- β CORRECT (concern-focused, brief, WHAT/WHY only):
- - "System must prevent cross-customer data access (GDPR requirement). Requires test infrastructure for multi-tenant isolation in Sprint 0."
- - "OAuth tokens must expire after 1 hour (ADR-5). Requires test harness for token expiration validation."
-
- β INCORRECT (too detailed, belongs in QA doc):
- - Full table of security test scenarios
- - Test scripts with code examples
- - Detailed test procedures
- - Tool selection (e.g., "use Playwright E2E + OWASP ZAP")
- - Specific test approaches (e.g., "Test approach: Playwright E2E for auth/authz")
-
- **In QA Doc (full NFR testing approach):**
- - **Security**: Full test scenarios, tooling (Playwright + OWASP ZAP), test procedures
- - **Performance**: Load/stress/spike test scenarios, k6 scripts, SLO thresholds
- - **Reliability**: Error handling tests, retry logic validation, circuit breaker tests
- - **Maintainability**: Coverage targets, code quality gates, observability validation
-
- **Rule of Thumb:**
- - Architecture doc: "What NFRs exist and what concerns they create" (1-2 sentences)
- - QA doc: "How to test those NFRs" (full sections with tables, code, procedures)
-
-5. **Flag Testability Concerns**
-
- Identify architecture decisions that harm testability:
- - β Tight coupling (no interfaces, hard dependencies)
- - β No dependency injection (can't mock external services)
- - β Hardcoded configurations (can't test different envs)
- - β Missing observability (can't validate NFRs)
- - β Stateful designs (can't parallelize tests)
-
- **Critical:** If testability concerns are blockers (e.g., "Architecture makes performance testing impossible"), document as CONCERNS or FAIL recommendation for gate check.
-
-6. **Output System-Level Test Design (TWO Documents)**
-
- **IMPORTANT:** System-level mode produces TWO documents instead of one:
-
- **Document 1: test-design-architecture.md** (for Architecture/Dev teams)
- - Purpose: Architectural concerns, testability gaps, NFR requirements
- - Audience: Architects, Backend Devs, Frontend Devs, DevOps, Security Engineers
- - Focus: What architecture must deliver for testability
- - Template: `test-design-architecture-template.md`
-
- **Document 2: test-design-qa.md** (for QA team)
- - Purpose: Test execution recipe, coverage plan, Sprint 0 setup
- - Audience: QA Engineers, Test Automation Engineers, QA Leads
- - Focus: How QA will execute tests
- - Template: `test-design-qa-template.md`
-
- **Standard Structures (REQUIRED):**
-
- **test-design-architecture.md sections (in this order):**
-
- **STRUCTURE PRINCIPLE: Actionable items FIRST, FYI items LAST**
-
- 1. Executive Summary (scope, business context, architecture, risk summary)
- 2. Quick Guide (π¨ BLOCKERS / β οΈ HIGH PRIORITY / π INFO ONLY)
- 3. Risk Assessment (high/medium/low-priority risks with scoring) - **ACTIONABLE**
- 4. Testability Concerns and Architectural Gaps - **ACTIONABLE** (what arch team must do)
- - Sub-section: Blockers to Fast Feedback (ACTIONABLE - concerns FIRST)
- - Sub-section: Architectural Improvements Needed (ACTIONABLE)
- - Sub-section: Testability Assessment Summary (FYI - passing items LAST, only if worth mentioning)
- 5. Risk Mitigation Plans (detailed for high-priority risks β₯6) - **ACTIONABLE**
- 6. Assumptions and Dependencies - **FYI**
-
- **SECTIONS THAT DO NOT BELONG IN ARCHITECTURE DOC:**
- - β Test Levels Strategy (unit/integration/E2E split) - This is a RECIPE, belongs in QA doc ONLY
- - β NFR Testing Approach with test examples - This is a RECIPE, belongs in QA doc ONLY
- - β Test Environment Requirements - This is a RECIPE, belongs in QA doc ONLY
- - β Recommendations for Sprint 0 (test framework setup, factories) - This is a RECIPE, belongs in QA doc ONLY
- - β Quality Gate Criteria (pass rates, coverage targets) - This is a RECIPE, belongs in QA doc ONLY
- - β Tool Selection (Playwright, k6, etc.) - This is a RECIPE, belongs in QA doc ONLY
-
- **WHAT BELONGS IN ARCHITECTURE DOC:**
- - β Testability CONCERNS (what makes it hard to test)
- - β Architecture GAPS (what's missing for testability)
- - β What architecture team must DO (blockers, improvements)
- - β Risks and mitigation plans
- - β ASRs (Architecturally Significant Requirements) - but clarify if FYI or actionable
-
- **test-design-qa.md sections (in this order):**
- 1. Executive Summary (risk summary, coverage summary)
- 2. **Dependencies & Test Blockers** (CRITICAL: RIGHT AFTER SUMMARY - what QA needs from other teams)
- 3. Risk Assessment (scored risks with categories - reference Arch doc, don't duplicate)
- 4. Test Coverage Plan (P0/P1/P2/P3 with detailed scenarios + checkboxes)
- 5. **Execution Strategy** (SIMPLE: Organized by TOOL TYPE: PR (Playwright) / Nightly (k6) / Weekly (chaos/manual))
- 6. QA Effort Estimate (QA effort ONLY - no DevOps, Data Eng, Finance, Backend)
- 7. Appendices (code examples with playwright-utils, tagging strategy, knowledge base refs)
-
- **SECTIONS TO EXCLUDE FROM QA DOC:**
- - β Quality Gate Criteria (pass/fail thresholds - teams decide for themselves)
- - β Follow-on Workflows (bloat - BMAD commands are self-explanatory)
- - β Approval section (unnecessary formality)
- - β Test Environment Requirements (remove as separate section - integrate into Dependencies if needed)
- - β NFR Readiness Summary (bloat - covered in Risk Assessment)
- - β Testability Assessment (bloat - covered in Dependencies)
- - β Test Levels Strategy (bloat - obvious from test scenarios)
- - β Sprint breakdowns (too prescriptive)
- - β Infrastructure/DevOps/Data Eng effort tables (out of scope)
- - β Mitigation plans for non-QA work (belongs in Arch doc)
-
- **Content Guidelines:**
-
- **Architecture doc (DO):**
- - β Risk scoring visible (Probability Γ Impact = Score)
- - β Clear ownership (each blocker/ASR has owner + timeline)
- - β Testability requirements (what architecture must support)
- - β Mitigation plans (for each high-risk item β₯6)
- - β Brief conceptual examples ONLY if needed to clarify architecture concerns (5-10 lines max)
- - β **Target length**: ~150-200 lines max (focus on actionable concerns only)
- - β **Professional tone**: Avoid AI slop (excessive β /β emojis, "absolutely", "excellent", overly enthusiastic language)
-
- **Architecture doc (DON'T) - CRITICAL:**
- - β NO test scripts or test implementation code AT ALL - This is a communication doc for architects, not a testing guide
- - β NO Playwright test examples (e.g., test('...', async ({ request }) => ...))
- - β NO assertion logic (e.g., expect(...).toBe(...))
- - β NO test scenario checklists with checkboxes (belongs in QA doc)
- - β NO implementation details about HOW QA will test
- - β Focus on CONCERNS, not IMPLEMENTATION
-
- **QA doc (DO):**
- - β Test scenario recipes (clear P0/P1/P2/P3 with checkboxes)
- - β Full test implementation code samples when helpful
- - β **IMPORTANT: If config.tea_use_playwright_utils is true, ALL code samples MUST use @seontechnologies/playwright-utils fixtures and utilities**
- - β Import test fixtures from '@seontechnologies/playwright-utils/api-request/fixtures'
- - β Import expect from '@playwright/test' (playwright-utils does not re-export expect)
- - β Use apiRequest fixture with schema validation, retry logic, and structured responses
- - β Dependencies & Test Blockers section RIGHT AFTER Executive Summary (what QA needs from other teams)
- - β **QA effort estimates ONLY** (no DevOps, Data Eng, Finance, Backend effort - out of scope)
- - β Cross-references to Architecture doc (not duplication)
- - β **Professional tone**: Avoid AI slop (excessive β /β emojis, "absolutely", "excellent", overly enthusiastic language)
-
- **QA doc (DON'T):**
- - β NO architectural theory (just reference Architecture doc)
- - β NO ASR explanations (link to Architecture doc instead)
- - β NO duplicate risk assessments (reference Architecture doc)
- - β NO Quality Gate Criteria section (teams decide pass/fail thresholds for themselves)
- - β NO Follow-on Workflows section (bloat - BMAD commands are self-explanatory)
- - β NO Approval section (unnecessary formality)
- - β NO effort estimates for other teams (DevOps, Backend, Data Eng, Finance - out of scope, QA effort only)
- - β NO Sprint breakdowns (too prescriptive - e.g., "Sprint 0: 40 hours, Sprint 1: 48 hours")
- - β NO mitigation plans for Backend/Arch/DevOps work (those belong in Architecture doc)
- - β NO architectural assumptions or debates (those belong in Architecture doc)
-
- **Anti-Patterns to Avoid (Cross-Document Redundancy):**
-
- **CRITICAL: NO BLOAT, NO REPETITION, NO OVERINFO**
-
- β **DON'T duplicate OAuth requirements:**
- - Architecture doc: Explain OAuth 2.1 flow in detail
- - QA doc: Re-explain why OAuth 2.1 is required
-
- β **DO cross-reference instead:**
- - Architecture doc: "ASR-1: OAuth 2.1 required (see QA doc for 12 test scenarios)"
- - QA doc: "OAuth tests: 12 P0 scenarios (see Architecture doc R-001 for risk details)"
-
- β **DON'T repeat the same note 10+ times:**
- - Example: "Timing is pessimistic until R-005 is fixed" repeated on every P0, P1, P2 section
- - This creates bloat and makes docs hard to read
-
- β **DO consolidate repeated information:**
- - Write once at the top: "**Note**: All timing estimates are pessimistic pending R-005 resolution"
- - Reference briefly if needed: "(pessimistic timing)"
-
- β **DON'T include excessive detail that doesn't add value:**
- - Long explanations of obvious concepts
- - Redundant examples showing the same pattern
- - Over-documentation of standard practices
-
- β **DO focus on what's unique or critical:**
- - Document only what's different from standard practice
- - Highlight critical decisions and risks
- - Keep explanations concise and actionable
-
- **Markdown Cross-Reference Syntax Examples:**
-
- ```markdown
- # In test-design-architecture.md
-
- ### π¨ R-001: Multi-Tenant Isolation (Score: 9)
-
- **Test Coverage:** 8 P0 tests (see [QA doc - Multi-Tenant Isolation](test-design-qa.md#multi-tenant-isolation-8-tests-security-critical) for detailed scenarios)
-
- ---
-
- # In test-design-qa.md
-
- ## Testability Assessment
-
- **Prerequisites from Architecture Doc:**
- - [ ] R-001: Multi-tenant isolation validated (see [Architecture doc R-001](test-design-architecture.md#r-001-multi-tenant-isolation-score-9) for mitigation plan)
- - [ ] R-002: Test customer provisioned (see [Architecture doc π¨ BLOCKERS](test-design-architecture.md#blockers---team-must-decide-cant-proceed-without))
-
- ## Sprint 0 Setup Requirements
-
- **Source:** See [Architecture doc "Quick Guide"](test-design-architecture.md#quick-guide) for detailed mitigation plans
- ```
-
- **Key Points:**
- - Use relative links: `[Link Text](test-design-qa.md#section-anchor)`
- - Anchor format: lowercase, hyphens for spaces, remove emojis/special chars
- - Example anchor: `### π¨ R-001: Title` β `#r-001-title`
-
- β **DON'T put long code examples in Architecture doc:**
- - Example: 50+ lines of test implementation
-
- β **DO keep examples SHORT in Architecture doc:**
- - Example: 5-10 lines max showing what architecture must support
- - Full implementation goes in QA doc
-
- β **DON'T repeat same note 10+ times:**
- - Example: "Pessimistic timing until R-005 fixed" on every P0/P1/P2 section
-
- β **DO consolidate repeated notes:**
- - Single timing note at top
- - Reference briefly throughout: "(pessimistic)"
-
- **Write Both Documents:**
- - Use `test-design-architecture-template.md` for Architecture doc
- - Use `test-design-qa-template.md` for QA doc
- - Follow standard structures defined above
- - Cross-reference between docs (no duplication)
- - Validate against checklist.md (System-Level Mode section)
-
-**Common Over-Engineering to Avoid:**
-
- **In QA Doc:**
- 1. β Quality gate thresholds ("P0 must be 100%, P1 β₯95%") - Let teams decide for themselves
- 2. β Effort estimates for other teams - QA doc should only estimate QA effort
- 3. β Sprint breakdowns ("Sprint 0: 40 hours, Sprint 1: 48 hours") - Too prescriptive
- 4. β Approval sections - Unnecessary formality
- 5. β Assumptions about architecture (SLO targets, replication lag) - These are architectural concerns, belong in Arch doc
- 6. β Mitigation plans for Backend/Arch/DevOps - Those belong in Arch doc
- 7. β Follow-on workflows section - Bloat, BMAD commands are self-explanatory
- 8. β NFR Readiness Summary - Bloat, covered in Risk Assessment
-
- **Test Coverage Numbers Reality Check:**
- - With Playwright parallelization, running ALL Playwright tests is as fast as running just P0
- - Don't split Playwright tests by priority into different CI gates - it adds no value
- - Tool type matters, not priority labels
- - Defer based on infrastructure cost, not importance
-
-**After System-Level Mode:** Workflow COMPLETE. System-level outputs (test-design-architecture.md + test-design-qa.md) are written in this step. Steps 2-4 are epic-level only - do NOT execute them in system-level mode.
-
----
-
-## Step 1.6: Exploratory Mode Selection (Epic-Level Only)
-
-### Actions
-
-1. **Detect Planning Mode**
-
- Determine mode based on context:
-
- **Requirements-Based Mode (DEFAULT)**:
- - Have clear story/PRD with acceptance criteria
- - Uses: Existing workflow (Steps 2-4)
- - Appropriate for: Documented features, greenfield projects
-
- **Exploratory Mode (OPTIONAL - Brownfield)**:
- - Missing/incomplete requirements AND brownfield application exists
- - Uses: UI exploration to discover functionality
- - Appropriate for: Undocumented brownfield apps, legacy systems
-
-2. **Requirements-Based Mode (DEFAULT - Skip to Step 2)**
-
- If requirements are clear:
- - Continue with existing workflow (Step 2: Assess and Classify Risks)
- - Use loaded requirements from Step 1
- - Proceed with risk assessment based on documented requirements
-
-3. **Exploratory Mode (OPTIONAL - Brownfield Apps)**
-
- If exploring brownfield application:
-
- **A. Check MCP Availability**
-
- If config.tea_use_mcp_enhancements is true AND Playwright MCP tools available:
- - Use MCP-assisted exploration (Step 3.B)
-
- If MCP unavailable OR config.tea_use_mcp_enhancements is false:
- - Use manual exploration fallback (Step 3.C)
-
- **B. MCP-Assisted Exploration (If MCP Tools Available)**
-
- Use Playwright MCP browser tools to explore UI:
-
- **Setup:**
-
- ```
- 1. Use planner_setup_page to initialize browser
- 2. Navigate to {exploration_url}
- 3. Capture initial state with browser_snapshot
- ```
-
- **Exploration Process:**
-
- ```
- 4. Use browser_navigate to explore different pages
- 5. Use browser_click to interact with buttons, links, forms
- 6. Use browser_hover to reveal hidden menus/tooltips
- 7. Capture browser_snapshot at each significant state
- 8. Take browser_screenshot for documentation
- 9. Monitor browser_console_messages for JavaScript errors
- 10. Track browser_network_requests to identify API calls
- 11. Map user flows and interactive elements
- 12. Document discovered functionality
- ```
-
- **Discovery Documentation:**
- - Create list of discovered features (pages, workflows, forms)
- - Identify user journeys (navigation paths)
- - Map API endpoints (from network requests)
- - Note error states (from console messages)
- - Capture screenshots for visual reference
-
- **Convert to Test Scenarios:**
- - Transform discoveries into testable requirements
- - Prioritize based on user flow criticality
- - Identify risks from discovered functionality
- - Continue with Step 2 (Assess and Classify Risks) using discovered requirements
-
- **C. Manual Exploration Fallback (If MCP Unavailable)**
-
- If Playwright MCP is not available:
-
- **Notify User:**
-
- ```markdown
- Exploratory mode enabled but Playwright MCP unavailable.
-
- **Manual exploration required:**
-
- 1. Open application at: {exploration_url}
- 2. Explore all pages, workflows, and features
- 3. Document findings in markdown:
- - List of pages/features discovered
- - User journeys identified
- - API endpoints observed (DevTools Network tab)
- - JavaScript errors noted (DevTools Console)
- - Critical workflows mapped
-
- 4. Provide exploration findings to continue workflow
-
- **Alternative:** Disable exploratory_mode and provide requirements documentation
- ```
-
- Wait for user to provide exploration findings, then:
- - Parse user-provided discovery documentation
- - Convert to testable requirements
- - Continue with Step 2 (risk assessment)
-
-4. **Proceed to Risk Assessment**
-
- After mode selection (Requirements-Based OR Exploratory):
- - Continue to Step 2: Assess and Classify Risks
- - Use requirements from documentation (Requirements-Based) OR discoveries (Exploratory)
-
----
-
-## Step 2: Assess and Classify Risks
-
-### Actions
-
-1. **Identify Genuine Risks**
-
- Filter requirements to isolate actual risks (not just features):
- - Unresolved technical gaps
- - Security vulnerabilities
- - Performance bottlenecks
- - Data loss or corruption potential
- - Business impact failures
- - Operational deployment issues
-
-2. **Classify Risks by Category**
-
- Use these standard risk categories:
-
- **TECH** (Technical/Architecture):
- - Architecture flaws
- - Integration failures
- - Scalability issues
- - Technical debt
-
- **SEC** (Security):
- - Missing access controls
- - Authentication bypass
- - Data exposure
- - Injection vulnerabilities
-
- **PERF** (Performance):
- - SLA violations
- - Response time degradation
- - Resource exhaustion
- - Scalability limits
-
- **DATA** (Data Integrity):
- - Data loss
- - Data corruption
- - Inconsistent state
- - Migration failures
-
- **BUS** (Business Impact):
- - User experience degradation
- - Business logic errors
- - Revenue impact
- - Compliance violations
-
- **OPS** (Operations):
- - Deployment failures
- - Configuration errors
- - Monitoring gaps
- - Rollback issues
-
-3. **Score Risk Probability**
-
- Rate likelihood (1-3):
- - **1 (Unlikely)**: <10% chance, edge case
- - **2 (Possible)**: 10-50% chance, known scenario
- - **3 (Likely)**: >50% chance, common occurrence
-
-4. **Score Risk Impact**
-
- Rate severity (1-3):
- - **1 (Minor)**: Cosmetic, workaround exists, limited users
- - **2 (Degraded)**: Feature impaired, workaround difficult, affects many users
- - **3 (Critical)**: System failure, data loss, no workaround, blocks usage
-
-5. **Calculate Risk Score**
-
- ```
- Risk Score = Probability Γ Impact
-
- Scores:
- 1-2: Low risk (monitor)
- 3-4: Medium risk (plan mitigation)
- 6-9: High risk (immediate mitigation required)
- ```
-
-6. **Highlight High-Priority Risks**
-
- Flag all risks with score β₯6 for immediate attention.
-
-7. **Request Clarification**
-
- If evidence is missing or assumptions required:
- - Document assumptions clearly
- - Request user clarification
- - Do NOT speculate on business impact
-
-8. **Plan Mitigations**
-
- **CRITICAL: Mitigation placement depends on WHO does the work**
-
- For each high-priority risk:
- - Define mitigation strategy
- - Assign owner (dev, QA, ops)
- - Set timeline
- - Update residual risk expectation
-
- **Mitigation Plan Placement:**
-
- **Architecture Doc:**
- - Mitigations owned by Backend, DevOps, Architecture, Security, Data Eng
- - Example: "Add authorization layer for customer-scoped access" (Backend work)
- - Example: "Configure AWS Fault Injection Simulator" (DevOps work)
- - Example: "Define CloudWatch log schema for backfill events" (Architecture work)
-
- **QA Doc:**
- - Mitigations owned by QA (test development work)
- - Example: "Create factories for test data with randomization" (QA work)
- - Example: "Implement polling with retry for async validation" (QA test code)
- - Brief reference to Architecture doc mitigations (don't duplicate)
-
- **Rule of Thumb:**
- - If mitigation requires production code changes β Architecture doc
- - If mitigation is test infrastructure/code β QA doc
- - If mitigation involves multiple teams β Architecture doc with QA validation approach
-
- **Assumptions Placement:**
-
- **Architecture Doc:**
- - Architectural assumptions (SLO targets, replication lag, system design assumptions)
- - Example: "P95 <500ms inferred from <2s timeout (requires Product approval)"
- - Example: "Multi-region replication lag <1s assumed (ADR doesn't specify SLA)"
- - Example: "Recent Cache hit ratio >80% assumed (not in PRD/ADR)"
-
- **QA Doc:**
- - Test execution assumptions (test infrastructure readiness, test data availability)
- - Example: "Assumes test factories already created"
- - Example: "Assumes CI/CD pipeline configured"
- - Brief reference to Architecture doc for architectural assumptions
-
- **Rule of Thumb:**
- - If assumption is about system architecture/design β Architecture doc
- - If assumption is about test infrastructure/execution β QA doc
-
----
-
-## Step 3: Design Test Coverage
-
-### Actions
-
-1. **Break Down Acceptance Criteria**
-
- Convert each acceptance criterion into atomic test scenarios:
- - One scenario per testable behavior
- - Scenarios are independent
- - Scenarios are repeatable
- - Scenarios tie back to risk mitigations
-
-2. **Select Appropriate Test Levels**
-
- **Knowledge Base Reference**: `test-levels-framework.md`
-
- Map requirements to optimal test levels (avoid duplication):
-
- **E2E (End-to-End)**:
- - Critical user journeys
- - Multi-system integration
- - Production-like environment
- - Highest confidence, slowest execution
-
- **API (Integration)**:
- - Service contracts
- - Business logic validation
- - Fast feedback
- - Good for complex scenarios
-
- **Component**:
- - UI component behavior
- - Interaction testing
- - Visual regression
- - Fast, isolated
-
- **Unit**:
- - Business logic
- - Edge cases
- - Error handling
- - Fastest, most granular
-
- **Avoid duplicate coverage**: Don't test same behavior at multiple levels unless necessary.
-
-3. **Assign Priority Levels**
-
- **CRITICAL: P0/P1/P2/P3 indicates priority and risk level, NOT execution timing**
-
- **Knowledge Base Reference**: `test-priorities-matrix.md`
-
- **P0 (Critical)**:
- - Blocks core user journey
- - High-risk areas (score β₯6)
- - Revenue-impacting
- - Security-critical
- - No workaround exists
- - Affects majority of users
-
- **P1 (High)**:
- - Important user features
- - Medium-risk areas (score 3-4)
- - Common workflows
- - Workaround exists but difficult
-
- **P2 (Medium)**:
- - Secondary features
- - Low-risk areas (score 1-2)
- - Edge cases
- - Regression prevention
-
- **P3 (Low)**:
- - Nice-to-have
- - Exploratory
- - Performance benchmarks
- - Documentation validation
-
- **NOTE:** Priority classification is separate from execution timing. A P1 test might run in PRs if it's fast, or nightly if it requires expensive infrastructure (e.g., k6 performance test). See "Execution Strategy" section for timing guidance.
-
-4. **Outline Data and Tooling Prerequisites**
-
- For each test scenario, identify:
- - Test data requirements (factories, fixtures)
- - External services (mocks, stubs)
- - Environment setup
- - Tools and dependencies
-
-5. **Define Execution Strategy** (Keep It Simple)
-
- **IMPORTANT: Avoid over-engineering execution order**
-
- **Default Philosophy:**
- - Run **everything** in PRs if total duration <15 minutes
- - Playwright is fast with parallelization (100s of tests in ~10-15 min)
- - Only defer to nightly/weekly if there's significant overhead:
- - Performance tests (k6, load testing) - expensive infrastructure
- - Chaos engineering - requires special setup (AWS FIS)
- - Long-running tests - endurance (4+ hours), disaster recovery
- - Manual tests - require human intervention
-
- **Simple Execution Strategy (Organized by TOOL TYPE):**
-
- ```markdown
- ## Execution Strategy
-
- **Philosophy**: Run everything in PRs unless significant infrastructure overhead.
- Playwright with parallelization is extremely fast (100s of tests in ~10-15 min).
-
- **Organized by TOOL TYPE:**
-
- ### Every PR: Playwright Tests (~10-15 min)
- All functional tests (from any priority level):
- - All E2E, API, integration, unit tests using Playwright
- - Parallelized across {N} shards
- - Total: ~{N} tests (includes P0, P1, P2, P3)
-
- ### Nightly: k6 Performance Tests (~30-60 min)
- All performance tests (from any priority level):
- - Load, stress, spike, endurance
- - Reason: Expensive infrastructure, long-running (10-40 min per test)
-
- ### Weekly: Chaos & Long-Running (~hours)
- Special infrastructure tests (from any priority level):
- - Multi-region failover, disaster recovery, endurance
- - Reason: Very expensive, very long (4+ hours)
- ```
-
- **KEY INSIGHT: Organize by TOOL TYPE, not priority**
- - Playwright (fast, cheap) β PR
- - k6 (expensive, long) β Nightly
- - Chaos/Manual (very expensive, very long) β Weekly
-
- **Avoid:**
- - β Don't organize by priority (smoke β P0 β P1 β P2 β P3)
- - β Don't say "P1 runs on PR to main" (some P1 are Playwright/PR, some are k6/Nightly)
- - β Don't create artificial tiers - organize by tool type and infrastructure overhead
-
----
-
-## Step 4: Generate Deliverables
-
-### Actions
-
-1. **Create Risk Assessment Matrix**
-
- Use template structure:
-
- ```markdown
- | Risk ID | Category | Description | Probability | Impact | Score | Mitigation |
- | ------- | -------- | ----------- | ----------- | ------ | ----- | --------------- |
- | R-001 | SEC | Auth bypass | 2 | 3 | 6 | Add authz check |
- ```
-
-2. **Create Coverage Matrix**
-
- ```markdown
- | Requirement | Test Level | Priority | Risk Link | Test Count | Owner |
- | ----------- | ---------- | -------- | --------- | ---------- | ----- |
- | Login flow | E2E | P0 | R-001 | 3 | QA |
- ```
-
-3. **Document Execution Strategy** (Simple, Not Redundant)
-
- **IMPORTANT: Keep execution strategy simple and avoid redundancy**
-
- ```markdown
- ## Execution Strategy
-
- **Default: Run all functional tests in PRs (~10-15 min)**
- - All Playwright tests (parallelized across 4 shards)
- - Includes E2E, API, integration, unit tests
- - Total: ~{N} tests
-
- **Nightly: Performance & Infrastructure tests**
- - k6 load/stress/spike tests (~30-60 min)
- - Reason: Expensive infrastructure, long-running
-
- **Weekly: Chaos & Disaster Recovery**
- - Endurance tests (4+ hours)
- - Multi-region failover (requires AWS FIS)
- - Backup restore validation
- - Reason: Special infrastructure, very long-running
- ```
-
- **DO NOT:**
- - β Create redundant smoke/P0/P1/P2/P3 tier structure
- - β List all tests again in execution order (already in coverage plan)
- - β Split tests by priority unless there's infrastructure overhead
-
-4. **Include Resource Estimates**
-
- **IMPORTANT: Use intervals/ranges, not exact numbers**
-
- Provide rough estimates with intervals to avoid false precision:
-
- ```markdown
- ### Test Effort Estimates
-
- - P0 scenarios: 15 tests (~1.5-2.5 hours each) = **~25-40 hours**
- - P1 scenarios: 25 tests (~0.75-1.5 hours each) = **~20-35 hours**
- - P2 scenarios: 40 tests (~0.25-0.75 hours each) = **~10-30 hours**
- - **Total:** **~55-105 hours** (~1.5-3 weeks with 1 QA engineer)
- ```
-
- **Why intervals:**
- - Avoids false precision (estimates are never exact)
- - Provides flexibility for complexity variations
- - Accounts for unknowns and dependencies
- - More realistic and less prescriptive
-
- **Guidelines:**
- - P0 tests: 1.5-2.5 hours each (complex setup, security, performance)
- - P1 tests: 0.75-1.5 hours each (standard integration, API tests)
- - P2 tests: 0.25-0.75 hours each (edge cases, simple validation)
- - P3 tests: 0.1-0.5 hours each (exploratory, documentation)
-
- **Express totals as:**
- - Hour ranges: "~55-105 hours"
- - Week ranges: "~1.5-3 weeks"
- - Avoid: Exact numbers like "75 hours" or "11 days"
-
-5. **Add Gate Criteria**
-
- ```markdown
- ### Quality Gate Criteria
-
- - All P0 tests pass (100%)
- - P1 tests pass rate β₯95%
- - No high-risk (score β₯6) items unmitigated
- - Test coverage β₯80% for critical paths
- ```
-
-6. **Write to Output File**
-
- Save to `{output_folder}/test-design-epic-{epic_num}.md` using template structure.
-
----
-
-## Important Notes
-
-### Risk Category Definitions
-
-**TECH** (Technical/Architecture):
-
-- Architecture flaws or technical debt
-- Integration complexity
-- Scalability concerns
-
-**SEC** (Security):
-
-- Missing security controls
-- Authentication/authorization gaps
-- Data exposure risks
-
-**PERF** (Performance):
-
-- SLA risk or performance degradation
-- Resource constraints
-- Scalability bottlenecks
-
-**DATA** (Data Integrity):
-
-- Data loss or corruption potential
-- State consistency issues
-- Migration risks
-
-**BUS** (Business Impact):
-
-- User experience harm
-- Business logic errors
-- Revenue or compliance impact
-
-**OPS** (Operations):
-
-- Deployment or runtime failures
-- Configuration issues
-- Monitoring/observability gaps
-
-### Risk Scoring Methodology
-
-**Probability Γ Impact = Risk Score**
-
-Examples:
-
-- High likelihood (3) Γ Critical impact (3) = **Score 9** (highest priority)
-- Possible (2) Γ Critical (3) = **Score 6** (high priority threshold)
-- Unlikely (1) Γ Minor (1) = **Score 1** (low priority)
-
-**Threshold**: Scores β₯6 require immediate mitigation.
-
-### Test Level Selection Strategy
-
-**Avoid duplication:**
-
-- Don't test same behavior at E2E and API level
-- Use E2E for critical paths only
-- Use API tests for complex business logic
-- Use unit tests for edge cases
-
-**Tradeoffs:**
-
-- E2E: High confidence, slow execution, brittle
-- API: Good balance, fast, stable
-- Unit: Fastest feedback, narrow scope
-
-### Priority Assignment Guidelines
-
-**P0 criteria** (all must be true):
-
-- Blocks core functionality
-- High-risk (score β₯6)
-- No workaround exists
-- Affects majority of users
-
-**P1 criteria**:
-
-- Important feature
-- Medium risk (score 3-5)
-- Workaround exists but difficult
-
-**P2/P3**: Everything else, prioritized by value
-
-### Knowledge Base Integration
-
-**Core Fragments (Auto-loaded in Step 1):**
-
-- `risk-governance.md` - Risk classification (6 categories), automated scoring, gate decision engine, coverage traceability, owner tracking (625 lines, 4 examples)
-- `probability-impact.md` - Probability Γ impact matrix, automated classification thresholds, dynamic re-assessment, gate integration (604 lines, 4 examples)
-- `test-levels-framework.md` - E2E vs API vs Component vs Unit decision framework with characteristics matrix (467 lines, 4 examples)
-- `test-priorities-matrix.md` - P0-P3 automated priority calculation, risk-based mapping, tagging strategy, time budgets (389 lines, 2 examples)
-
-**Reference for Test Planning:**
-
-- `selective-testing.md` - Execution strategy: tag-based, spec filters, diff-based selection, promotion rules (727 lines, 4 examples)
-- `fixture-architecture.md` - Data setup patterns: pure function β fixture β mergeTests, auto-cleanup (406 lines, 5 examples)
-
-**Manual Reference (Optional):**
-
-- Use `tea-index.csv` to find additional specialized fragments as needed
-
-### Evidence-Based Assessment
-
-**Critical principle:** Base risk assessment on evidence, not speculation.
-
-**Evidence sources:**
-
-- PRD and user research
-- Architecture documentation
-- Historical bug data
-- User feedback
-- Security audit results
-
-**Avoid:**
-
-- Guessing business impact
-- Assuming user behavior
-- Inventing requirements
-
-**When uncertain:** Document assumptions and request clarification from user.
-
----
-
-## Output Summary
-
-After completing this workflow, provide a summary:
-
-```markdown
-## Test Design Complete
-
-**Epic**: {epic_num}
-**Scope**: {design_level}
-
-**Risk Assessment**:
-
-- Total risks identified: {count}
-- High-priority risks (β₯6): {high_count}
-- Categories: {categories}
-
-**Coverage Plan**:
-
-- P0 scenarios: {p0_count} ({p0_hours} hours)
-- P1 scenarios: {p1_count} ({p1_hours} hours)
-- P2/P3 scenarios: {p2p3_count} ({p2p3_hours} hours)
-- **Total effort**: {total_hours} hours (~{total_days} days)
-
-**Test Levels**:
-
-- E2E: {e2e_count}
-- API: {api_count}
-- Component: {component_count}
-- Unit: {unit_count}
-
-**Quality Gate Criteria**:
-
-- P0 pass rate: 100%
-- P1 pass rate: β₯95%
-- High-risk mitigations: 100%
-- Coverage: β₯80%
-
-**Output File**: {output_file}
-
-**Next Steps**:
-
-1. Review risk assessment with team
-2. Prioritize mitigation for high-risk items (score β₯6)
-3. Run `*atdd` to generate failing tests for P0 scenarios (separate workflow; not auto-run by `*test-design`)
-4. Allocate resources per effort estimates
-5. Set up test data factories and fixtures
-```
-
----
-
-## Validation
-
-After completing all steps, verify:
-
-- [ ] Risk assessment complete with all categories
-- [ ] All risks scored (probability Γ impact)
-- [ ] High-priority risks (β₯6) flagged
-- [ ] Coverage matrix maps requirements to test levels
-- [ ] Priority levels assigned (P0-P3)
-- [ ] Execution order defined
-- [ ] Resource estimates provided
-- [ ] Quality gate criteria defined
-- [ ] Output file created and formatted correctly
-
-Refer to `checklist.md` for comprehensive validation criteria.
diff --git a/src/bmm/workflows/testarch/test-design/test-design-architecture-template.md b/src/bmm/workflows/testarch/test-design/test-design-architecture-template.md
deleted file mode 100644
index 571f6f20..00000000
--- a/src/bmm/workflows/testarch/test-design/test-design-architecture-template.md
+++ /dev/null
@@ -1,213 +0,0 @@
-# Test Design for Architecture: {Feature Name}
-
-**Purpose:** Architectural concerns, testability gaps, and NFR requirements for review by Architecture/Dev teams. Serves as a contract between QA and Engineering on what must be addressed before test development begins.
-
-**Date:** {date}
-**Author:** {author}
-**Status:** Architecture Review Pending
-**Project:** {project_name}
-**PRD Reference:** {prd_link}
-**ADR Reference:** {adr_link}
-
----
-
-## Executive Summary
-
-**Scope:** {Brief description of feature scope}
-
-**Business Context** (from PRD):
-- **Revenue/Impact:** {Business metrics if applicable}
-- **Problem:** {Problem being solved}
-- **GA Launch:** {Target date or timeline}
-
-**Architecture** (from ADR {adr_number}):
-- **Key Decision 1:** {e.g., OAuth 2.1 authentication}
-- **Key Decision 2:** {e.g., Centralized MCP Server pattern}
-- **Key Decision 3:** {e.g., Stack: TypeScript, SDK v1.x}
-
-**Expected Scale** (from ADR):
-- {RPS, volume, users, etc.}
-
-**Risk Summary:**
-- **Total risks**: {N}
-- **High-priority (β₯6)**: {N} risks requiring immediate mitigation
-- **Test effort**: ~{N} tests (~{X} weeks for 1 QA, ~{Y} weeks for 2 QAs)
-
----
-
-## Quick Guide
-
-### π¨ BLOCKERS - Team Must Decide (Can't Proceed Without)
-
-**Sprint 0 Critical Path** - These MUST be completed before QA can write integration tests:
-
-1. **{Blocker ID}: {Blocker Title}** - {What architecture must provide} (recommended owner: {Team/Role})
-2. **{Blocker ID}: {Blocker Title}** - {What architecture must provide} (recommended owner: {Team/Role})
-3. **{Blocker ID}: {Blocker Title}** - {What architecture must provide} (recommended owner: {Team/Role})
-
-**What we need from team:** Complete these {N} items in Sprint 0 or test development is blocked.
-
----
-
-### β οΈ HIGH PRIORITY - Team Should Validate (We Provide Recommendation, You Approve)
-
-1. **{Risk ID}: {Title}** - {Recommendation + who should approve} (Sprint {N})
-2. **{Risk ID}: {Title}** - {Recommendation + who should approve} (Sprint {N})
-3. **{Risk ID}: {Title}** - {Recommendation + who should approve} (Sprint {N})
-
-**What we need from team:** Review recommendations and approve (or suggest changes).
-
----
-
-### π INFO ONLY - Solutions Provided (Review, No Decisions Needed)
-
-1. **Test strategy**: {Test level split} ({Rationale})
-2. **Tooling**: {Test frameworks and utilities}
-3. **Tiered CI/CD**: {Execution tiers with timing}
-4. **Coverage**: ~{N} test scenarios prioritized P0-P3 with risk-based classification
-5. **Quality gates**: {Pass criteria}
-
-**What we need from team:** Just review and acknowledge (we already have the solution).
-
----
-
-## For Architects and Devs - Open Topics π·
-
-### Risk Assessment
-
-**Total risks identified**: {N} ({X} high-priority score β₯6, {Y} medium, {Z} low)
-
-#### High-Priority Risks (Score β₯6) - IMMEDIATE ATTENTION
-
-| Risk ID | Category | Description | Probability | Impact | Score | Mitigation | Owner | Timeline |
-|---------|----------|-------------|-------------|--------|-------|------------|-------|----------|
-| **{R-ID}** | **{CAT}** | {Description} | {1-3} | {1-3} | **{Score}** | {Mitigation strategy} | {Owner} | {Date} |
-
-#### Medium-Priority Risks (Score 3-5)
-
-| Risk ID | Category | Description | Probability | Impact | Score | Mitigation | Owner |
-|---------|----------|-------------|-------------|--------|-------|------------|-------|
-| {R-ID} | {CAT} | {Description} | {1-3} | {1-3} | {Score} | {Mitigation} | {Owner} |
-
-#### Low-Priority Risks (Score 1-2)
-
-| Risk ID | Category | Description | Probability | Impact | Score | Action |
-|---------|----------|-------------|-------------|--------|-------|--------|
-| {R-ID} | {CAT} | {Description} | {1-3} | {1-3} | {Score} | Monitor |
-
-#### Risk Category Legend
-
-- **TECH**: Technical/Architecture (flaws, integration, scalability)
-- **SEC**: Security (access controls, auth, data exposure)
-- **PERF**: Performance (SLA violations, degradation, resource limits)
-- **DATA**: Data Integrity (loss, corruption, inconsistency)
-- **BUS**: Business Impact (UX harm, logic errors, revenue)
-- **OPS**: Operations (deployment, config, monitoring)
-
----
-
-### Testability Concerns and Architectural Gaps
-
-**π¨ ACTIONABLE CONCERNS - Architecture Team Must Address**
-
-{If system has critical testability concerns, list them here. If architecture supports testing well, state "No critical testability concerns identified" and skip to Testability Assessment Summary}
-
-#### 1. Blockers to Fast Feedback (WHAT WE NEED FROM ARCHITECTURE)
-
-| Concern | Impact | What Architecture Must Provide | Owner | Timeline |
-|---------|--------|--------------------------------|-------|----------|
-| **{Concern name}** | {Impact on testing} | {Specific architectural change needed} | {Team} | {Sprint} |
-
-**Example:**
-- **No API for test data seeding** β Cannot parallelize tests β Provide POST /test/seed endpoint (Backend, Sprint 0)
-
-#### 2. Architectural Improvements Needed (WHAT SHOULD BE CHANGED)
-
-{List specific improvements that would make the system more testable}
-
-1. **{Improvement name}**
- - **Current problem**: {What's wrong}
- - **Required change**: {What architecture must do}
- - **Impact if not fixed**: {Consequences}
- - **Owner**: {Team}
- - **Timeline**: {Sprint}
-
----
-
-### Testability Assessment Summary
-
-**π CURRENT STATE - FYI**
-
-{Only include this section if there are passing items worth mentioning. Otherwise omit.}
-
-#### What Works Well
-
-- β {Passing item 1} (e.g., "API-first design supports parallel test execution")
-- β {Passing item 2} (e.g., "Feature flags enable test isolation")
-- β {Passing item 3}
-
-#### Accepted Trade-offs (No Action Required)
-
-For {Feature} Phase 1, the following trade-offs are acceptable:
-- **{Trade-off 1}** - {Why acceptable for now}
-- **{Trade-off 2}** - {Why acceptable for now}
-
-{This is technical debt OR acceptable for Phase 1} that {should be revisited post-GA OR maintained as-is}
-
----
-
-### Risk Mitigation Plans (High-Priority Risks β₯6)
-
-**Purpose**: Detailed mitigation strategies for all {N} high-priority risks (score β₯6). These risks MUST be addressed before {GA launch date or milestone}.
-
-#### {R-ID}: {Risk Description} (Score: {Score}) - {CRITICALITY LEVEL}
-
-**Mitigation Strategy:**
-1. {Step 1}
-2. {Step 2}
-3. {Step 3}
-
-**Owner:** {Owner}
-**Timeline:** {Sprint or date}
-**Status:** Planned / In Progress / Complete
-**Verification:** {How to verify mitigation is effective}
-
----
-
-{Repeat for all high-priority risks}
-
----
-
-### Assumptions and Dependencies
-
-#### Assumptions
-
-1. {Assumption about architecture or requirements}
-2. {Assumption about team or timeline}
-3. {Assumption about scope or constraints}
-
-#### Dependencies
-
-1. {Dependency} - Required by {date/sprint}
-2. {Dependency} - Required by {date/sprint}
-
-#### Risks to Plan
-
-- **Risk**: {Risk to the test plan itself}
- - **Impact**: {How it affects testing}
- - **Contingency**: {Backup plan}
-
----
-
-**End of Architecture Document**
-
-**Next Steps for Architecture Team:**
-1. Review Quick Guide (π¨/β οΈ/π) and prioritize blockers
-2. Assign owners and timelines for high-priority risks (β₯6)
-3. Validate assumptions and dependencies
-4. Provide feedback to QA on testability gaps
-
-**Next Steps for QA Team:**
-1. Wait for Sprint 0 blockers to be resolved
-2. Refer to companion QA doc (test-design-qa.md) for test scenarios
-3. Begin test infrastructure setup (factories, fixtures, environments)
diff --git a/src/bmm/workflows/testarch/test-design/test-design-qa-template.md b/src/bmm/workflows/testarch/test-design/test-design-qa-template.md
deleted file mode 100644
index 037856b7..00000000
--- a/src/bmm/workflows/testarch/test-design/test-design-qa-template.md
+++ /dev/null
@@ -1,286 +0,0 @@
-# Test Design for QA: {Feature Name}
-
-**Purpose:** Test execution recipe for QA team. Defines what to test, how to test it, and what QA needs from other teams.
-
-**Date:** {date}
-**Author:** {author}
-**Status:** Draft
-**Project:** {project_name}
-
-**Related:** See Architecture doc (test-design-architecture.md) for testability concerns and architectural blockers.
-
----
-
-## Executive Summary
-
-**Scope:** {Brief description of testing scope}
-
-**Risk Summary:**
-- Total Risks: {N} ({X} high-priority score β₯6, {Y} medium, {Z} low)
-- Critical Categories: {Categories with most high-priority risks}
-
-**Coverage Summary:**
-- P0 tests: ~{N} (critical paths, security)
-- P1 tests: ~{N} (important features, integration)
-- P2 tests: ~{N} (edge cases, regression)
-- P3 tests: ~{N} (exploratory, benchmarks)
-- **Total**: ~{N} tests (~{X}-{Y} weeks with 1 QA)
-
----
-
-## Dependencies & Test Blockers
-
-**CRITICAL:** QA cannot proceed without these items from other teams.
-
-### Backend/Architecture Dependencies (Sprint 0)
-
-**Source:** See Architecture doc "Quick Guide" for detailed mitigation plans
-
-1. **{Dependency 1}** - {Team} - {Timeline}
- - {What QA needs}
- - {Why it blocks testing}
-
-2. **{Dependency 2}** - {Team} - {Timeline}
- - {What QA needs}
- - {Why it blocks testing}
-
-### QA Infrastructure Setup (Sprint 0)
-
-1. **Test Data Factories** - QA
- - {Entity} factory with faker-based randomization
- - Auto-cleanup fixtures for parallel safety
-
-2. **Test Environments** - QA
- - Local: {Setup details}
- - CI/CD: {Setup details}
- - Staging: {Setup details}
-
-**Example factory pattern:**
-
-```typescript
-import { test } from '@seontechnologies/playwright-utils/api-request/fixtures';
-import { expect } from '@playwright/test';
-import { faker } from '@faker-js/faker';
-
-test('example test @p0', async ({ apiRequest }) => {
- const testData = {
- id: `test-${faker.string.uuid()}`,
- email: faker.internet.email(),
- };
-
- const { status } = await apiRequest({
- method: 'POST',
- path: '/api/resource',
- body: testData,
- });
-
- expect(status).toBe(201);
-});
-```
-
----
-
-## Risk Assessment
-
-**Note:** Full risk details in Architecture doc. This section summarizes risks relevant to QA test planning.
-
-### High-Priority Risks (Score β₯6)
-
-| Risk ID | Category | Description | Score | QA Test Coverage |
-|---------|----------|-------------|-------|------------------|
-| **{R-ID}** | {CAT} | {Brief description} | **{Score}** | {How QA validates this risk} |
-
-### Medium/Low-Priority Risks
-
-| Risk ID | Category | Description | Score | QA Test Coverage |
-|---------|----------|-------------|-------|------------------|
-| {R-ID} | {CAT} | {Brief description} | {Score} | {How QA validates this risk} |
-
----
-
-## Test Coverage Plan
-
-**IMPORTANT:** P0/P1/P2/P3 = **priority and risk level** (what to focus on if time-constrained), NOT execution timing. See "Execution Strategy" for when tests run.
-
-### P0 (Critical)
-
-**Criteria:** Blocks core functionality + High risk (β₯6) + No workaround + Affects majority of users
-
-| Test ID | Requirement | Test Level | Risk Link | Notes |
-|---------|-------------|------------|-----------|-------|
-| **P0-001** | {Requirement} | {Level} | {R-ID} | {Notes} |
-| **P0-002** | {Requirement} | {Level} | {R-ID} | {Notes} |
-
-**Total P0:** ~{N} tests
-
----
-
-### P1 (High)
-
-**Criteria:** Important features + Medium risk (3-4) + Common workflows + Workaround exists but difficult
-
-| Test ID | Requirement | Test Level | Risk Link | Notes |
-|---------|-------------|------------|-----------|-------|
-| **P1-001** | {Requirement} | {Level} | {R-ID} | {Notes} |
-| **P1-002** | {Requirement} | {Level} | {R-ID} | {Notes} |
-
-**Total P1:** ~{N} tests
-
----
-
-### P2 (Medium)
-
-**Criteria:** Secondary features + Low risk (1-2) + Edge cases + Regression prevention
-
-| Test ID | Requirement | Test Level | Risk Link | Notes |
-|---------|-------------|------------|-----------|-------|
-| **P2-001** | {Requirement} | {Level} | {R-ID} | {Notes} |
-
-**Total P2:** ~{N} tests
-
----
-
-### P3 (Low)
-
-**Criteria:** Nice-to-have + Exploratory + Performance benchmarks + Documentation validation
-
-| Test ID | Requirement | Test Level | Notes |
-|---------|-------------|------------|-------|
-| **P3-001** | {Requirement} | {Level} | {Notes} |
-
-**Total P3:** ~{N} tests
-
----
-
-## Execution Strategy
-
-**Philosophy:** Run everything in PRs unless there's significant infrastructure overhead. Playwright with parallelization is extremely fast (100s of tests in ~10-15 min).
-
-**Organized by TOOL TYPE:**
-
-### Every PR: Playwright Tests (~10-15 min)
-
-**All functional tests** (from any priority level):
-- All E2E, API, integration, unit tests using Playwright
-- Parallelized across {N} shards
-- Total: ~{N} Playwright tests (includes P0, P1, P2, P3)
-
-**Why run in PRs:** Fast feedback, no expensive infrastructure
-
-### Nightly: k6 Performance Tests (~30-60 min)
-
-**All performance tests** (from any priority level):
-- Load, stress, spike, endurance tests
-- Total: ~{N} k6 tests (may include P0, P1, P2)
-
-**Why defer to nightly:** Expensive infrastructure (k6 Cloud), long-running (10-40 min per test)
-
-### Weekly: Chaos & Long-Running (~hours)
-
-**Special infrastructure tests** (from any priority level):
-- Multi-region failover (requires AWS Fault Injection Simulator)
-- Disaster recovery (backup restore, 4+ hours)
-- Endurance tests (4+ hours runtime)
-
-**Why defer to weekly:** Very expensive infrastructure, very long-running, infrequent validation sufficient
-
-**Manual tests** (excluded from automation):
-- DevOps validation (deployment, monitoring)
-- Finance validation (cost alerts)
-- Documentation validation
-
----
-
-## QA Effort Estimate
-
-**QA test development effort only** (excludes DevOps, Backend, Data Eng, Finance work):
-
-| Priority | Count | Effort Range | Notes |
-|----------|-------|--------------|-------|
-| P0 | ~{N} | ~{X}-{Y} weeks | Complex setup (security, performance, multi-step) |
-| P1 | ~{N} | ~{X}-{Y} weeks | Standard coverage (integration, API tests) |
-| P2 | ~{N} | ~{X}-{Y} days | Edge cases, simple validation |
-| P3 | ~{N} | ~{X}-{Y} days | Exploratory, benchmarks |
-| **Total** | ~{N} | **~{X}-{Y} weeks** | **1 QA engineer, full-time** |
-
-**Assumptions:**
-- Includes test design, implementation, debugging, CI integration
-- Excludes ongoing maintenance (~10% effort)
-- Assumes test infrastructure (factories, fixtures) ready
-
-**Dependencies from other teams:**
-- See "Dependencies & Test Blockers" section for what QA needs from Backend, DevOps, Data Eng
-
----
-
-## Appendix A: Code Examples & Tagging
-
-**Playwright Tags for Selective Execution:**
-
-```typescript
-import { test } from '@seontechnologies/playwright-utils/api-request/fixtures';
-import { expect } from '@playwright/test';
-
-// P0 critical test
-test('@P0 @API @Security unauthenticated request returns 401', async ({ apiRequest }) => {
- const { status, body } = await apiRequest({
- method: 'POST',
- path: '/api/endpoint',
- body: { data: 'test' },
- skipAuth: true,
- });
-
- expect(status).toBe(401);
- expect(body.error).toContain('unauthorized');
-});
-
-// P1 integration test
-test('@P1 @Integration data syncs correctly', async ({ apiRequest }) => {
- // Seed data
- await apiRequest({
- method: 'POST',
- path: '/api/seed',
- body: { /* test data */ },
- });
-
- // Validate
- const { status, body } = await apiRequest({
- method: 'GET',
- path: '/api/resource',
- });
-
- expect(status).toBe(200);
- expect(body).toHaveProperty('data');
-});
-```
-
-**Run specific tags:**
-
-```bash
-# Run only P0 tests
-npx playwright test --grep @P0
-
-# Run P0 + P1 tests
-npx playwright test --grep "@P0|@P1"
-
-# Run only security tests
-npx playwright test --grep @Security
-
-# Run all Playwright tests in PR (default)
-npx playwright test
-```
-
----
-
-## Appendix B: Knowledge Base References
-
-- **Risk Governance**: `risk-governance.md` - Risk scoring methodology
-- **Test Priorities Matrix**: `test-priorities-matrix.md` - P0-P3 criteria
-- **Test Levels Framework**: `test-levels-framework.md` - E2E vs API vs Unit selection
-- **Test Quality**: `test-quality.md` - Definition of Done (no hard waits, <300 lines, <1.5 min)
-
----
-
-**Generated by:** BMad TEA Agent
-**Workflow:** `_bmad/bmm/testarch/test-design`
-**Version:** 4.0 (BMad v6)
diff --git a/src/bmm/workflows/testarch/test-design/test-design-template.md b/src/bmm/workflows/testarch/test-design/test-design-template.md
deleted file mode 100644
index a064fe58..00000000
--- a/src/bmm/workflows/testarch/test-design/test-design-template.md
+++ /dev/null
@@ -1,294 +0,0 @@
-# Test Design: Epic {epic_num} - {epic_title}
-
-**Date:** {date}
-**Author:** {user_name}
-**Status:** Draft / Approved
-
----
-
-## Executive Summary
-
-**Scope:** {design_level} test design for Epic {epic_num}
-
-**Risk Summary:**
-
-- Total risks identified: {total_risks}
-- High-priority risks (β₯6): {high_priority_count}
-- Critical categories: {top_categories}
-
-**Coverage Summary:**
-
-- P0 scenarios: {p0_count} ({p0_hours} hours)
-- P1 scenarios: {p1_count} ({p1_hours} hours)
-- P2/P3 scenarios: {p2p3_count} ({p2p3_hours} hours)
-- **Total effort**: {total_hours} hours (~{total_days} days)
-
----
-
-## Risk Assessment
-
-### High-Priority Risks (Score β₯6)
-
-| Risk ID | Category | Description | Probability | Impact | Score | Mitigation | Owner | Timeline |
-| ------- | -------- | ------------- | ----------- | ------ | ----- | ------------ | ------- | -------- |
-| R-001 | SEC | {description} | 2 | 3 | 6 | {mitigation} | {owner} | {date} |
-| R-002 | PERF | {description} | 3 | 2 | 6 | {mitigation} | {owner} | {date} |
-
-### Medium-Priority Risks (Score 3-4)
-
-| Risk ID | Category | Description | Probability | Impact | Score | Mitigation | Owner |
-| ------- | -------- | ------------- | ----------- | ------ | ----- | ------------ | ------- |
-| R-003 | TECH | {description} | 2 | 2 | 4 | {mitigation} | {owner} |
-| R-004 | DATA | {description} | 1 | 3 | 3 | {mitigation} | {owner} |
-
-### Low-Priority Risks (Score 1-2)
-
-| Risk ID | Category | Description | Probability | Impact | Score | Action |
-| ------- | -------- | ------------- | ----------- | ------ | ----- | ------- |
-| R-005 | OPS | {description} | 1 | 2 | 2 | Monitor |
-| R-006 | BUS | {description} | 1 | 1 | 1 | Monitor |
-
-### Risk Category Legend
-
-- **TECH**: Technical/Architecture (flaws, integration, scalability)
-- **SEC**: Security (access controls, auth, data exposure)
-- **PERF**: Performance (SLA violations, degradation, resource limits)
-- **DATA**: Data Integrity (loss, corruption, inconsistency)
-- **BUS**: Business Impact (UX harm, logic errors, revenue)
-- **OPS**: Operations (deployment, config, monitoring)
-
----
-
-## Test Coverage Plan
-
-### P0 (Critical) - Run on every commit
-
-**Criteria**: Blocks core journey + High risk (β₯6) + No workaround
-
-| Requirement | Test Level | Risk Link | Test Count | Owner | Notes |
-| ------------- | ---------- | --------- | ---------- | ----- | ------- |
-| {requirement} | E2E | R-001 | 3 | QA | {notes} |
-| {requirement} | API | R-002 | 5 | QA | {notes} |
-
-**Total P0**: {p0_count} tests, {p0_hours} hours
-
-### P1 (High) - Run on PR to main
-
-**Criteria**: Important features + Medium risk (3-4) + Common workflows
-
-| Requirement | Test Level | Risk Link | Test Count | Owner | Notes |
-| ------------- | ---------- | --------- | ---------- | ----- | ------- |
-| {requirement} | API | R-003 | 4 | QA | {notes} |
-| {requirement} | Component | - | 6 | DEV | {notes} |
-
-**Total P1**: {p1_count} tests, {p1_hours} hours
-
-### P2 (Medium) - Run nightly/weekly
-
-**Criteria**: Secondary features + Low risk (1-2) + Edge cases
-
-| Requirement | Test Level | Risk Link | Test Count | Owner | Notes |
-| ------------- | ---------- | --------- | ---------- | ----- | ------- |
-| {requirement} | API | R-004 | 8 | QA | {notes} |
-| {requirement} | Unit | - | 15 | DEV | {notes} |
-
-**Total P2**: {p2_count} tests, {p2_hours} hours
-
-### P3 (Low) - Run on-demand
-
-**Criteria**: Nice-to-have + Exploratory + Performance benchmarks
-
-| Requirement | Test Level | Test Count | Owner | Notes |
-| ------------- | ---------- | ---------- | ----- | ------- |
-| {requirement} | E2E | 2 | QA | {notes} |
-| {requirement} | Unit | 8 | DEV | {notes} |
-
-**Total P3**: {p3_count} tests, {p3_hours} hours
-
----
-
-## Execution Order
-
-### Smoke Tests (<5 min)
-
-**Purpose**: Fast feedback, catch build-breaking issues
-
-- [ ] {scenario} (30s)
-- [ ] {scenario} (45s)
-- [ ] {scenario} (1min)
-
-**Total**: {smoke_count} scenarios
-
-### P0 Tests (<10 min)
-
-**Purpose**: Critical path validation
-
-- [ ] {scenario} (E2E)
-- [ ] {scenario} (API)
-- [ ] {scenario} (API)
-
-**Total**: {p0_count} scenarios
-
-### P1 Tests (<30 min)
-
-**Purpose**: Important feature coverage
-
-- [ ] {scenario} (API)
-- [ ] {scenario} (Component)
-
-**Total**: {p1_count} scenarios
-
-### P2/P3 Tests (<60 min)
-
-**Purpose**: Full regression coverage
-
-- [ ] {scenario} (Unit)
-- [ ] {scenario} (API)
-
-**Total**: {p2p3_count} scenarios
-
----
-
-## Resource Estimates
-
-### Test Development Effort
-
-| Priority | Count | Hours/Test | Total Hours | Notes |
-| --------- | ----------------- | ---------- | ----------------- | ----------------------- |
-| P0 | {p0_count} | 2.0 | {p0_hours} | Complex setup, security |
-| P1 | {p1_count} | 1.0 | {p1_hours} | Standard coverage |
-| P2 | {p2_count} | 0.5 | {p2_hours} | Simple scenarios |
-| P3 | {p3_count} | 0.25 | {p3_hours} | Exploratory |
-| **Total** | **{total_count}** | **-** | **{total_hours}** | **~{total_days} days** |
-
-### Prerequisites
-
-**Test Data:**
-
-- {factory_name} factory (faker-based, auto-cleanup)
-- {fixture_name} fixture (setup/teardown)
-
-**Tooling:**
-
-- {tool} for {purpose}
-- {tool} for {purpose}
-
-**Environment:**
-
-- {env_requirement}
-- {env_requirement}
-
----
-
-## Quality Gate Criteria
-
-### Pass/Fail Thresholds
-
-- **P0 pass rate**: 100% (no exceptions)
-- **P1 pass rate**: β₯95% (waivers required for failures)
-- **P2/P3 pass rate**: β₯90% (informational)
-- **High-risk mitigations**: 100% complete or approved waivers
-
-### Coverage Targets
-
-- **Critical paths**: β₯80%
-- **Security scenarios**: 100%
-- **Business logic**: β₯70%
-- **Edge cases**: β₯50%
-
-### Non-Negotiable Requirements
-
-- [ ] All P0 tests pass
-- [ ] No high-risk (β₯6) items unmitigated
-- [ ] Security tests (SEC category) pass 100%
-- [ ] Performance targets met (PERF category)
-
----
-
-## Mitigation Plans
-
-### R-001: {Risk Description} (Score: 6)
-
-**Mitigation Strategy:** {detailed_mitigation}
-**Owner:** {owner}
-**Timeline:** {date}
-**Status:** Planned / In Progress / Complete
-**Verification:** {how_to_verify}
-
-### R-002: {Risk Description} (Score: 6)
-
-**Mitigation Strategy:** {detailed_mitigation}
-**Owner:** {owner}
-**Timeline:** {date}
-**Status:** Planned / In Progress / Complete
-**Verification:** {how_to_verify}
-
----
-
-## Assumptions and Dependencies
-
-### Assumptions
-
-1. {assumption}
-2. {assumption}
-3. {assumption}
-
-### Dependencies
-
-1. {dependency} - Required by {date}
-2. {dependency} - Required by {date}
-
-### Risks to Plan
-
-- **Risk**: {risk_to_plan}
- - **Impact**: {impact}
- - **Contingency**: {contingency}
-
----
-
----
-
-## Follow-on Workflows (Manual)
-
-- Run `*atdd` to generate failing P0 tests (separate workflow; not auto-run).
-- Run `*automate` for broader coverage once implementation exists.
-
----
-
-## Approval
-
-**Test Design Approved By:**
-
-- [ ] Product Manager: {name} Date: {date}
-- [ ] Tech Lead: {name} Date: {date}
-- [ ] QA Lead: {name} Date: {date}
-
-**Comments:**
-
----
-
----
-
----
-
-## Appendix
-
-### Knowledge Base References
-
-- `risk-governance.md` - Risk classification framework
-- `probability-impact.md` - Risk scoring methodology
-- `test-levels-framework.md` - Test level selection
-- `test-priorities-matrix.md` - P0-P3 prioritization
-
-### Related Documents
-
-- PRD: {prd_link}
-- Epic: {epic_link}
-- Architecture: {arch_link}
-- Tech Spec: {tech_spec_link}
-
----
-
-**Generated by**: BMad TEA Agent - Test Architect Module
-**Workflow**: `_bmad/bmm/testarch/test-design`
-**Version**: 4.0 (BMad v6)
diff --git a/src/bmm/workflows/testarch/test-design/workflow.yaml b/src/bmm/workflows/testarch/test-design/workflow.yaml
deleted file mode 100644
index 961eff34..00000000
--- a/src/bmm/workflows/testarch/test-design/workflow.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-# Test Architect workflow: test-design
-name: testarch-test-design
-description: "Dual-mode workflow: (1) System-level testability review in Solutioning phase, or (2) Epic-level test planning in Implementation phase. Auto-detects mode based on project phase."
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/testarch/test-design"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-# Note: Template selection is mode-based (see instructions.md Step 1.5):
-# - System-level: test-design-architecture-template.md + test-design-qa-template.md
-# - Epic-level: test-design-template.md (unchanged)
-template: "{installed_path}/test-design-template.md"
-
-# Variables and inputs
-variables:
- design_level: "full" # full, targeted, minimal - scope of design effort
- mode: "auto-detect" # auto-detect (default), system-level, epic-level
-
-# Output configuration
-# Note: Actual output file determined dynamically based on mode detection
-# Declared outputs for new workflow format
-outputs:
- # System-Level Mode (Phase 3) - TWO documents
- - id: test-design-architecture
- description: "System-level test architecture: Architectural concerns, testability gaps, NFR requirements for Architecture/Dev teams"
- path: "{output_folder}/test-design-architecture.md"
- mode: system-level
- audience: architecture
-
- - id: test-design-qa
- description: "System-level test design: Test execution recipe, coverage plan, Sprint 0 setup for QA team"
- path: "{output_folder}/test-design-qa.md"
- mode: system-level
- audience: qa
-
- # Epic-Level Mode (Phase 4) - ONE document (unchanged)
- - id: epic-level
- description: "Epic-level test plan (Phase 4)"
- path: "{output_folder}/test-design-epic-{epic_num}.md"
- mode: epic-level
-# Note: No default_output_file - mode detection determines which outputs to write
-
-# Required tools
-required_tools:
- - read_file # Read PRD, epics, stories, architecture docs
- - write_file # Create test design document
- - list_files # Find related documentation
- - search_repo # Search for existing tests and patterns
-
-tags:
- - qa
- - planning
- - test-architect
- - risk-assessment
- - coverage
-
-execution_hints:
- interactive: false # Minimize prompts
- autonomous: true # Proceed without user input unless blocked
- iterative: true
-
-web_bundle: false
diff --git a/src/bmm/workflows/testarch/test-review/checklist.md b/src/bmm/workflows/testarch/test-review/checklist.md
deleted file mode 100644
index f4fca8af..00000000
--- a/src/bmm/workflows/testarch/test-review/checklist.md
+++ /dev/null
@@ -1,472 +0,0 @@
-# Test Quality Review - Validation Checklist
-
-Use this checklist to validate that the test quality review workflow completed successfully and all quality criteria were properly evaluated.
-
----
-
-## Prerequisites
-
-Note: `test-review` is optional and only audits existing tests; it does not generate tests.
-
-### Test File Discovery
-
-- [ ] Test file(s) identified for review (single/directory/suite scope)
-- [ ] Test files exist and are readable
-- [ ] Test framework detected (Playwright, Jest, Cypress, Vitest, etc.)
-- [ ] Test framework configuration found (playwright.config.ts, jest.config.js, etc.)
-
-### Knowledge Base Loading
-
-- [ ] tea-index.csv loaded successfully
-- [ ] `test-quality.md` loaded (Definition of Done)
-- [ ] `fixture-architecture.md` loaded (Pure function β Fixture patterns)
-- [ ] `network-first.md` loaded (Route intercept before navigate)
-- [ ] `data-factories.md` loaded (Factory patterns)
-- [ ] `test-levels-framework.md` loaded (E2E vs API vs Component vs Unit)
-- [ ] All other enabled fragments loaded successfully
-
-### Context Gathering
-
-- [ ] Story file discovered or explicitly provided (if available)
-- [ ] Test design document discovered or explicitly provided (if available)
-- [ ] Acceptance criteria extracted from story (if available)
-- [ ] Priority context (P0/P1/P2/P3) extracted from test-design (if available)
-
----
-
-## Process Steps
-
-### Step 1: Context Loading
-
-- [ ] Review scope determined (single/directory/suite)
-- [ ] Test file paths collected
-- [ ] Related artifacts discovered (story, test-design)
-- [ ] Knowledge base fragments loaded successfully
-- [ ] Quality criteria flags read from workflow variables
-
-### Step 2: Test File Parsing
-
-**For Each Test File:**
-
-- [ ] File read successfully
-- [ ] File size measured (lines, KB)
-- [ ] File structure parsed (describe blocks, it blocks)
-- [ ] Test IDs extracted (if present)
-- [ ] Priority markers extracted (if present)
-- [ ] Imports analyzed
-- [ ] Dependencies identified
-
-**Test Structure Analysis:**
-
-- [ ] Describe block count calculated
-- [ ] It/test block count calculated
-- [ ] BDD structure identified (Given-When-Then)
-- [ ] Fixture usage detected
-- [ ] Data factory usage detected
-- [ ] Network interception patterns identified
-- [ ] Assertions counted
-- [ ] Waits and timeouts cataloged
-- [ ] Conditionals (if/else) detected
-- [ ] Try/catch blocks detected
-- [ ] Shared state or globals detected
-
-### Step 3: Quality Criteria Validation
-
-**For Each Enabled Criterion:**
-
-#### BDD Format (if `check_given_when_then: true`)
-
-- [ ] Given-When-Then structure evaluated
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Violations recorded with line numbers
-- [ ] Examples of good/bad patterns noted
-
-#### Test IDs (if `check_test_ids: true`)
-
-- [ ] Test ID presence validated
-- [ ] Test ID format checked (e.g., 1.3-E2E-001)
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Missing IDs cataloged
-
-#### Priority Markers (if `check_priority_markers: true`)
-
-- [ ] P0/P1/P2/P3 classification validated
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Missing priorities cataloged
-
-#### Hard Waits (if `check_hard_waits: true`)
-
-- [ ] sleep(), waitForTimeout(), hardcoded delays detected
-- [ ] Justification comments checked
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Violations recorded with line numbers and recommended fixes
-
-#### Determinism (if `check_determinism: true`)
-
-- [ ] Conditionals (if/else/switch) detected
-- [ ] Try/catch abuse detected
-- [ ] Random values (Math.random, Date.now) detected
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Violations recorded with recommended fixes
-
-#### Isolation (if `check_isolation: true`)
-
-- [ ] Cleanup hooks (afterEach/afterAll) validated
-- [ ] Shared state detected
-- [ ] Global variable mutations detected
-- [ ] Resource cleanup verified
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Violations recorded with recommended fixes
-
-#### Fixture Patterns (if `check_fixture_patterns: true`)
-
-- [ ] Fixtures detected (test.extend)
-- [ ] Pure functions validated
-- [ ] mergeTests usage checked
-- [ ] beforeEach complexity analyzed
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Violations recorded with recommended fixes
-
-#### Data Factories (if `check_data_factories: true`)
-
-- [ ] Factory functions detected
-- [ ] Hardcoded data (magic strings/numbers) detected
-- [ ] Faker.js or similar usage validated
-- [ ] API-first setup pattern checked
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Violations recorded with recommended fixes
-
-#### Network-First (if `check_network_first: true`)
-
-- [ ] page.route() before page.goto() validated
-- [ ] Race conditions detected (route after navigate)
-- [ ] waitForResponse patterns checked
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Violations recorded with recommended fixes
-
-#### Assertions (if `check_assertions: true`)
-
-- [ ] Explicit assertions counted
-- [ ] Implicit waits without assertions detected
-- [ ] Assertion specificity validated
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Violations recorded with recommended fixes
-
-#### Test Length (if `check_test_length: true`)
-
-- [ ] File line count calculated
-- [ ] Threshold comparison (β€300 lines ideal)
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Splitting recommendations generated (if >300 lines)
-
-#### Test Duration (if `check_test_duration: true`)
-
-- [ ] Test complexity analyzed (as proxy for duration if no execution data)
-- [ ] Threshold comparison (β€1.5 min target)
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Optimization recommendations generated
-
-#### Flakiness Patterns (if `check_flakiness_patterns: true`)
-
-- [ ] Tight timeouts detected (e.g., { timeout: 1000 })
-- [ ] Race conditions detected
-- [ ] Timing-dependent assertions detected
-- [ ] Retry logic detected
-- [ ] Environment-dependent assumptions detected
-- [ ] Status assigned (PASS/WARN/FAIL)
-- [ ] Violations recorded with recommended fixes
-
----
-
-### Step 4: Quality Score Calculation
-
-**Violation Counting:**
-
-- [ ] Critical (P0) violations counted
-- [ ] High (P1) violations counted
-- [ ] Medium (P2) violations counted
-- [ ] Low (P3) violations counted
-- [ ] Violation breakdown by criterion recorded
-
-**Score Calculation:**
-
-- [ ] Starting score: 100
-- [ ] Critical violations deducted (-10 each)
-- [ ] High violations deducted (-5 each)
-- [ ] Medium violations deducted (-2 each)
-- [ ] Low violations deducted (-1 each)
-- [ ] Bonus points added (max +30):
- - [ ] Excellent BDD structure (+5 if applicable)
- - [ ] Comprehensive fixtures (+5 if applicable)
- - [ ] Comprehensive data factories (+5 if applicable)
- - [ ] Network-first pattern (+5 if applicable)
- - [ ] Perfect isolation (+5 if applicable)
- - [ ] All test IDs present (+5 if applicable)
-- [ ] Final score calculated: max(0, min(100, Starting - Violations + Bonus))
-
-**Quality Grade:**
-
-- [ ] Grade assigned based on score:
- - 90-100: A+ (Excellent)
- - 80-89: A (Good)
- - 70-79: B (Acceptable)
- - 60-69: C (Needs Improvement)
- - <60: F (Critical Issues)
-
----
-
-### Step 5: Review Report Generation
-
-**Report Sections Created:**
-
-- [ ] **Header Section**:
- - [ ] Test file(s) reviewed listed
- - [ ] Review date recorded
- - [ ] Review scope noted (single/directory/suite)
- - [ ] Quality score and grade displayed
-
-- [ ] **Executive Summary**:
- - [ ] Overall assessment (Excellent/Good/Needs Improvement/Critical)
- - [ ] Key strengths listed (3-5 bullet points)
- - [ ] Key weaknesses listed (3-5 bullet points)
- - [ ] Recommendation stated (Approve/Approve with comments/Request changes/Block)
-
-- [ ] **Quality Criteria Assessment**:
- - [ ] Table with all criteria evaluated
- - [ ] Status for each criterion (PASS/WARN/FAIL)
- - [ ] Violation count per criterion
-
-- [ ] **Critical Issues (Must Fix)**:
- - [ ] P0/P1 violations listed
- - [ ] Code location provided for each (file:line)
- - [ ] Issue explanation clear
- - [ ] Recommended fix provided with code example
- - [ ] Knowledge base reference provided
-
-- [ ] **Recommendations (Should Fix)**:
- - [ ] P2/P3 violations listed
- - [ ] Code location provided for each (file:line)
- - [ ] Issue explanation clear
- - [ ] Recommended improvement provided with code example
- - [ ] Knowledge base reference provided
-
-- [ ] **Best Practices Examples** (if good patterns found):
- - [ ] Good patterns highlighted from tests
- - [ ] Knowledge base fragments referenced
- - [ ] Examples provided for others to follow
-
-- [ ] **Knowledge Base References**:
- - [ ] All fragments consulted listed
- - [ ] Links to detailed guidance provided
-
----
-
-### Step 6: Optional Outputs Generation
-
-**Inline Comments** (if `generate_inline_comments: true`):
-
-- [ ] Inline comments generated at violation locations
-- [ ] Comment format: `// TODO (TEA Review): [Issue] - See test-review-{filename}.md`
-- [ ] Comments added to test files (no logic changes)
-- [ ] Test files remain valid and executable
-
-**Quality Badge** (if `generate_quality_badge: true`):
-
-- [ ] Badge created with quality score (e.g., "Test Quality: 87/100 (A)")
-- [ ] Badge format suitable for README or documentation
-- [ ] Badge saved to output folder
-
-**Story Update** (if `append_to_story: true` and story file exists):
-
-- [ ] "Test Quality Review" section created
-- [ ] Quality score included
-- [ ] Critical issues summarized
-- [ ] Link to full review report provided
-- [ ] Story file updated successfully
-
----
-
-### Step 7: Save and Notify
-
-**Outputs Saved:**
-
-- [ ] Review report saved to `{output_file}`
-- [ ] Inline comments written to test files (if enabled)
-- [ ] Quality badge saved (if enabled)
-- [ ] Story file updated (if enabled)
-- [ ] All outputs are valid and readable
-
-**Summary Message Generated:**
-
-- [ ] Quality score and grade included
-- [ ] Critical issue count stated
-- [ ] Recommendation provided (Approve/Request changes/Block)
-- [ ] Next steps clarified
-- [ ] Message displayed to user
-
----
-
-## Output Validation
-
-### Review Report Completeness
-
-- [ ] All required sections present
-- [ ] No placeholder text or TODOs in report
-- [ ] All code locations are accurate (file:line)
-- [ ] All code examples are valid and demonstrate fix
-- [ ] All knowledge base references are correct
-
-### Review Report Accuracy
-
-- [ ] Quality score matches violation breakdown
-- [ ] Grade matches score range
-- [ ] Violations correctly categorized by severity (P0/P1/P2/P3)
-- [ ] Violations correctly attributed to quality criteria
-- [ ] No false positives (violations are legitimate issues)
-- [ ] No false negatives (critical issues not missed)
-
-### Review Report Clarity
-
-- [ ] Executive summary is clear and actionable
-- [ ] Issue explanations are understandable
-- [ ] Recommended fixes are implementable
-- [ ] Code examples are correct and runnable
-- [ ] Recommendation (Approve/Request changes) is clear
-
----
-
-## Quality Checks
-
-### Knowledge-Based Validation
-
-- [ ] All feedback grounded in knowledge base fragments
-- [ ] Recommendations follow proven patterns
-- [ ] No arbitrary or opinion-based feedback
-- [ ] Knowledge fragment references accurate and relevant
-
-### Actionable Feedback
-
-- [ ] Every issue includes recommended fix
-- [ ] Every fix includes code example
-- [ ] Code examples demonstrate correct pattern
-- [ ] Fixes reference knowledge base for more detail
-
-### Severity Classification
-
-- [ ] Critical (P0) issues are genuinely critical (hard waits, race conditions, no assertions)
-- [ ] High (P1) issues impact maintainability/reliability (missing IDs, hardcoded data)
-- [ ] Medium (P2) issues are nice-to-have improvements (long files, missing priorities)
-- [ ] Low (P3) issues are minor style/preference (verbose tests)
-
-### Context Awareness
-
-- [ ] Review considers project context (some patterns may be justified)
-- [ ] Violations with justification comments noted as acceptable
-- [ ] Edge cases acknowledged
-- [ ] Recommendations are pragmatic, not dogmatic
-
----
-
-## Integration Points
-
-### Story File Integration
-
-- [ ] Story file discovered correctly (if available)
-- [ ] Acceptance criteria extracted and used for context
-- [ ] Test quality section appended to story (if enabled)
-- [ ] Link to review report added to story
-
-### Test Design Integration
-
-- [ ] Test design document discovered correctly (if available)
-- [ ] Priority context (P0/P1/P2/P3) extracted and used
-- [ ] Review validates tests align with prioritization
-- [ ] Misalignment flagged (e.g., P0 scenario missing tests)
-
-### Knowledge Base Integration
-
-- [ ] tea-index.csv loaded successfully
-- [ ] All required fragments loaded
-- [ ] Fragments applied correctly to validation
-- [ ] Fragment references in report are accurate
-
----
-
-## Edge Cases and Special Situations
-
-### Empty or Minimal Tests
-
-- [ ] If test file is empty, report notes "No tests found"
-- [ ] If test file has only boilerplate, report notes "No meaningful tests"
-- [ ] Score reflects lack of content appropriately
-
-### Legacy Tests
-
-- [ ] Legacy tests acknowledged in context
-- [ ] Review provides practical recommendations for improvement
-- [ ] Recognizes that complete refactor may not be feasible
-- [ ] Prioritizes critical issues (flakiness) over style
-
-### Test Framework Variations
-
-- [ ] Review adapts to test framework (Playwright vs Jest vs Cypress)
-- [ ] Framework-specific patterns recognized (e.g., Playwright fixtures)
-- [ ] Framework-specific violations detected (e.g., Cypress anti-patterns)
-- [ ] Knowledge fragments applied appropriately for framework
-
-### Justified Violations
-
-- [ ] Violations with justification comments in code noted as acceptable
-- [ ] Justifications evaluated for legitimacy
-- [ ] Report acknowledges justified patterns
-- [ ] Score not penalized for justified violations
-
----
-
-## Final Validation
-
-### Review Completeness
-
-- [ ] All enabled quality criteria evaluated
-- [ ] All test files in scope reviewed
-- [ ] All violations cataloged
-- [ ] All recommendations provided
-- [ ] Review report is comprehensive
-
-### Review Accuracy
-
-- [ ] Quality score is accurate
-- [ ] Violations are correct (no false positives)
-- [ ] Critical issues not missed (no false negatives)
-- [ ] Code locations are correct
-- [ ] Knowledge base references are accurate
-
-### Review Usefulness
-
-- [ ] Feedback is actionable
-- [ ] Recommendations are implementable
-- [ ] Code examples are correct
-- [ ] Review helps developer improve tests
-- [ ] Review educates on best practices
-
-### Workflow Complete
-
-- [ ] All checklist items completed
-- [ ] All outputs validated and saved
-- [ ] User notified with summary
-- [ ] Review ready for developer consumption
-- [ ] Follow-up actions identified (if any)
-
----
-
-## Notes
-
-Record any issues, observations, or important context during workflow execution:
-
-- **Test Framework**: [Playwright, Jest, Cypress, etc.]
-- **Review Scope**: [single file, directory, full suite]
-- **Quality Score**: [0-100 score, letter grade]
-- **Critical Issues**: [Count of P0/P1 violations]
-- **Recommendation**: [Approve / Approve with comments / Request changes / Block]
-- **Special Considerations**: [Legacy code, justified patterns, edge cases]
-- **Follow-up Actions**: [Re-review after fixes, pair programming, etc.]
diff --git a/src/bmm/workflows/testarch/test-review/instructions.md b/src/bmm/workflows/testarch/test-review/instructions.md
deleted file mode 100644
index d817d2a6..00000000
--- a/src/bmm/workflows/testarch/test-review/instructions.md
+++ /dev/null
@@ -1,628 +0,0 @@
-# Test Quality Review - Instructions v4.0
-
-**Workflow:** `testarch-test-review`
-**Purpose:** Review test quality using TEA's comprehensive knowledge base and validate against best practices for maintainability, determinism, isolation, and flakiness prevention
-**Agent:** Test Architect (TEA)
-**Format:** Pure Markdown v4.0 (no XML blocks)
-
----
-
-## Overview
-
-This workflow performs comprehensive test quality reviews using TEA's knowledge base of best practices. It validates tests against proven patterns for fixture architecture, network-first safeguards, data factories, determinism, isolation, and flakiness prevention. The review generates actionable feedback with quality scoring.
-
-**Key Capabilities:**
-
-- **Knowledge-Based Review**: Applies patterns from tea-index.csv fragments
-- **Quality Scoring**: 0-100 score based on violations and best practices
-- **Multi-Scope**: Review single file, directory, or entire test suite
-- **Pattern Detection**: Identifies flaky patterns, hard waits, race conditions
-- **Best Practice Validation**: BDD format, test IDs, priorities, assertions
-- **Actionable Feedback**: Critical issues (must fix) vs recommendations (should fix)
-- **Integration**: Works with story files, test-design, acceptance criteria
-
----
-
-## Prerequisites
-
-**Required:**
-
-- Test file(s) to review (auto-discovered or explicitly provided)
-- Test framework configuration (playwright.config.ts, jest.config.js, etc.)
-
-**Recommended:**
-
-- Story file with acceptance criteria (for context)
-- Test design document (for priority context)
-- Knowledge base fragments available in tea-index.csv
-
-**Halt Conditions:**
-
-- If test file path is invalid or file doesn't exist, halt and request correction
-- If test_dir is empty (no tests found), halt and notify user
-
----
-
-## Workflow Steps
-
-### Step 1: Load Context and Knowledge Base
-
-**Actions:**
-
-1. Check playwright-utils flag:
- - Read `{config_source}` and check `config.tea_use_playwright_utils`
-
-2. Load relevant knowledge fragments from `{project-root}/_bmad/bmm/testarch/tea-index.csv`:
-
- **Core Patterns (Always load):**
- - `test-quality.md` - Definition of Done (deterministic tests, isolated with cleanup, explicit assertions, <300 lines, <1.5 min, 658 lines, 5 examples)
- - `data-factories.md` - Factory functions with faker: overrides, nested factories, API-first setup (498 lines, 5 examples)
- - `test-levels-framework.md` - E2E vs API vs Component vs Unit appropriateness with decision matrix (467 lines, 4 examples)
- - `selective-testing.md` - Duplicate coverage detection with tag-based, spec filter, diff-based selection (727 lines, 4 examples)
- - `test-healing-patterns.md` - Common failure patterns: stale selectors, race conditions, dynamic data, network errors, hard waits (648 lines, 5 examples)
- - `selector-resilience.md` - Selector best practices (data-testid > ARIA > text > CSS hierarchy, anti-patterns, 541 lines, 4 examples)
- - `timing-debugging.md` - Race condition prevention and async debugging techniques (370 lines, 3 examples)
-
- **If `config.tea_use_playwright_utils: true` (All Utilities):**
- - `overview.md` - Playwright utils best practices
- - `api-request.md` - Validate apiRequest usage patterns
- - `network-recorder.md` - Review HAR record/playback implementation
- - `auth-session.md` - Check auth token management
- - `intercept-network-call.md` - Validate network interception
- - `recurse.md` - Review polling patterns
- - `log.md` - Check logging best practices
- - `file-utils.md` - Validate file operation patterns
- - `burn-in.md` - Review burn-in configuration
- - `network-error-monitor.md` - Check error monitoring setup
- - `fixtures-composition.md` - Validate mergeTests usage
-
- **If `config.tea_use_playwright_utils: false`:**
- - `fixture-architecture.md` - Pure function β Fixture β mergeTests composition with auto-cleanup (406 lines, 5 examples)
- - `network-first.md` - Route intercept before navigate to prevent race conditions (489 lines, 5 examples)
- - `playwright-config.md` - Environment-based configuration with fail-fast validation (722 lines, 5 examples)
- - `component-tdd.md` - Red-Green-Refactor patterns with provider isolation (480 lines, 4 examples)
- - `ci-burn-in.md` - Flaky test detection with 10-iteration burn-in loop (678 lines, 4 examples)
-
-3. Determine review scope:
- - **single**: Review one test file (`test_file_path` provided)
- - **directory**: Review all tests in directory (`test_dir` provided)
- - **suite**: Review entire test suite (discover all test files)
-
-4. Auto-discover related artifacts (if `auto_discover_story: true`):
- - Extract test ID from filename (e.g., `1.3-E2E-001.spec.ts` β story 1.3)
- - Search for story file (`story-1.3.md`)
- - Search for test design (`test-design-story-1.3.md` or `test-design-epic-1.md`)
-
-5. Read story file for context (if available):
- - Extract acceptance criteria
- - Extract priority classification
- - Extract expected test IDs
-
-**Output:** Complete knowledge base loaded, review scope determined, context gathered
-
----
-
-### Step 2: Discover and Parse Test Files
-
-**Actions:**
-
-1. **Discover test files** based on scope:
- - **single**: Use `test_file_path` variable
- - **directory**: Use `glob` to find all test files in `test_dir` (e.g., `*.spec.ts`, `*.test.js`)
- - **suite**: Use `glob` to find all test files recursively from project root
-
-2. **Parse test file metadata**:
- - File path and name
- - File size (warn if >15 KB or >300 lines)
- - Test framework detected (Playwright, Jest, Cypress, Vitest, etc.)
- - Imports and dependencies
- - Test structure (describe/context/it blocks)
-
-3. **Extract test structure**:
- - Count of describe blocks (test suites)
- - Count of it/test blocks (individual tests)
- - Test IDs (if present, e.g., `test.describe('1.3-E2E-001')`)
- - Priority markers (if present, e.g., `test.describe.only` for P0)
- - BDD structure (Given-When-Then comments or steps)
-
-4. **Identify test patterns**:
- - Fixtures used
- - Data factories used
- - Network interception patterns
- - Assertions used (expect, assert, toHaveText, etc.)
- - Waits and timeouts (page.waitFor, sleep, hardcoded delays)
- - Conditionals (if/else, switch, ternary)
- - Try/catch blocks
- - Shared state or globals
-
-**Output:** Complete test file inventory with structure and pattern analysis
-
----
-
-### Step 3: Validate Against Quality Criteria
-
-**Actions:**
-
-For each test file, validate against quality criteria (configurable via workflow variables):
-
-#### 1. BDD Format Validation (if `check_given_when_then: true`)
-
-- β **PASS**: Tests use Given-When-Then structure (comments or step organization)
-- β οΈ **WARN**: Tests have some structure but not explicit GWT
-- β **FAIL**: Tests lack clear structure, hard to understand intent
-
-**Knowledge Fragment**: test-quality.md, tdd-cycles.md
-
----
-
-#### 2. Test ID Conventions (if `check_test_ids: true`)
-
-- β **PASS**: Test IDs present and follow convention (e.g., `1.3-E2E-001`, `2.1-API-005`)
-- β οΈ **WARN**: Some test IDs missing or inconsistent
-- β **FAIL**: No test IDs, can't trace tests to requirements
-
-**Knowledge Fragment**: traceability.md, test-quality.md
-
----
-
-#### 3. Priority Markers (if `check_priority_markers: true`)
-
-- β **PASS**: Tests classified as P0/P1/P2/P3 (via markers or test-design reference)
-- β οΈ **WARN**: Some priority classifications missing
-- β **FAIL**: No priority classification, can't determine criticality
-
-**Knowledge Fragment**: test-priorities.md, risk-governance.md
-
----
-
-#### 4. Hard Waits Detection (if `check_hard_waits: true`)
-
-- β **PASS**: No hard waits detected (no `sleep()`, `wait(5000)`, hardcoded delays)
-- β οΈ **WARN**: Some hard waits used but with justification comments
-- β **FAIL**: Hard waits detected without justification (flakiness risk)
-
-**Patterns to detect:**
-
-- `sleep(1000)`, `setTimeout()`, `delay()`
-- `page.waitForTimeout(5000)` without explicit reason
-- `await new Promise(resolve => setTimeout(resolve, 3000))`
-
-**Knowledge Fragment**: test-quality.md, network-first.md
-
----
-
-#### 5. Determinism Check (if `check_determinism: true`)
-
-- β **PASS**: Tests are deterministic (no conditionals, no try/catch abuse, no random values)
-- β οΈ **WARN**: Some conditionals but with clear justification
-- β **FAIL**: Tests use if/else, switch, or try/catch to control flow (flakiness risk)
-
-**Patterns to detect:**
-
-- `if (condition) { test logic }` - tests should work deterministically
-- `try { test } catch { fallback }` - tests shouldn't swallow errors
-- `Math.random()`, `Date.now()` without factory abstraction
-
-**Knowledge Fragment**: test-quality.md, data-factories.md
-
----
-
-#### 6. Isolation Validation (if `check_isolation: true`)
-
-- β **PASS**: Tests clean up resources, no shared state, can run in any order
-- β οΈ **WARN**: Some cleanup missing but isolated enough
-- β **FAIL**: Tests share state, depend on execution order, leave resources
-
-**Patterns to check:**
-
-- afterEach/afterAll cleanup hooks present
-- No global variables mutated
-- Database/API state cleaned up after tests
-- Test data deleted or marked inactive
-
-**Knowledge Fragment**: test-quality.md, data-factories.md
-
----
-
-#### 7. Fixture Patterns (if `check_fixture_patterns: true`)
-
-- β **PASS**: Uses pure function β Fixture β mergeTests pattern
-- β οΈ **WARN**: Some fixtures used but not consistently
-- β **FAIL**: No fixtures, tests repeat setup code (maintainability risk)
-
-**Patterns to check:**
-
-- Fixtures defined (e.g., `test.extend({ customFixture: async ({}, use) => { ... }})`)
-- Pure functions used for fixture logic
-- mergeTests used to combine fixtures
-- No beforeEach with complex setup (should be in fixtures)
-
-**Knowledge Fragment**: fixture-architecture.md
-
----
-
-#### 8. Data Factories (if `check_data_factories: true`)
-
-- β **PASS**: Uses factory functions with overrides, API-first setup
-- β οΈ **WARN**: Some factories used but also hardcoded data
-- β **FAIL**: Hardcoded test data, magic strings/numbers (maintainability risk)
-
-**Patterns to check:**
-
-- Factory functions defined (e.g., `createUser()`, `generateInvoice()`)
-- Factories use faker.js or similar for realistic data
-- Factories accept overrides (e.g., `createUser({ email: 'custom@example.com' })`)
-- API-first setup (create via API, test via UI)
-
-**Knowledge Fragment**: data-factories.md
-
----
-
-#### 9. Network-First Pattern (if `check_network_first: true`)
-
-- β **PASS**: Route interception set up BEFORE navigation (race condition prevention)
-- β οΈ **WARN**: Some routes intercepted correctly, others after navigation
-- β **FAIL**: Route interception after navigation (race condition risk)
-
-**Patterns to check:**
-
-- `page.route()` called before `page.goto()`
-- `page.waitForResponse()` used with explicit URL pattern
-- No navigation followed immediately by route setup
-
-**Knowledge Fragment**: network-first.md
-
----
-
-#### 10. Assertions (if `check_assertions: true`)
-
-- β **PASS**: Explicit assertions present (expect, assert, toHaveText)
-- β οΈ **WARN**: Some tests rely on implicit waits instead of assertions
-- β **FAIL**: Missing assertions, tests don't verify behavior
-
-**Patterns to check:**
-
-- Each test has at least one assertion
-- Assertions are specific (not just truthy checks)
-- Assertions use framework-provided matchers (toHaveText, toBeVisible)
-
-**Knowledge Fragment**: test-quality.md
-
----
-
-#### 11. Test Length (if `check_test_length: true`)
-
-- β **PASS**: Test file β€200 lines (ideal), β€300 lines (acceptable)
-- β οΈ **WARN**: Test file 301-500 lines (consider splitting)
-- β **FAIL**: Test file >500 lines (too large, maintainability risk)
-
-**Knowledge Fragment**: test-quality.md
-
----
-
-#### 12. Test Duration (if `check_test_duration: true`)
-
-- β **PASS**: Individual tests β€1.5 minutes (target: <30 seconds)
-- β οΈ **WARN**: Some tests 1.5-3 minutes (consider optimization)
-- β **FAIL**: Tests >3 minutes (too slow, impacts CI/CD)
-
-**Note:** Duration estimation based on complexity analysis if execution data unavailable
-
-**Knowledge Fragment**: test-quality.md, selective-testing.md
-
----
-
-#### 13. Flakiness Patterns (if `check_flakiness_patterns: true`)
-
-- β **PASS**: No known flaky patterns detected
-- β οΈ **WARN**: Some potential flaky patterns (e.g., tight timeouts, race conditions)
-- β **FAIL**: Multiple flaky patterns detected (high flakiness risk)
-
-**Patterns to detect:**
-
-- Tight timeouts (e.g., `{ timeout: 1000 }`)
-- Race conditions (navigation before route interception)
-- Timing-dependent assertions (e.g., checking timestamps)
-- Retry logic in tests (hides flakiness)
-- Environment-dependent assumptions (hardcoded URLs, ports)
-
-**Knowledge Fragment**: test-quality.md, network-first.md, ci-burn-in.md
-
----
-
-### Step 4: Calculate Quality Score
-
-**Actions:**
-
-1. **Count violations** by severity:
- - **Critical (P0)**: Hard waits without justification, no assertions, race conditions, shared state
- - **High (P1)**: Missing test IDs, no BDD structure, hardcoded data, missing fixtures
- - **Medium (P2)**: Long test files (>300 lines), missing priorities, some conditionals
- - **Low (P3)**: Minor style issues, incomplete cleanup, verbose tests
-
-2. **Calculate quality score** (if `quality_score_enabled: true`):
-
-```
-Starting Score: 100
-
-Critical Violations: -10 points each
-High Violations: -5 points each
-Medium Violations: -2 points each
-Low Violations: -1 point each
-
-Bonus Points:
-+ Excellent BDD structure: +5
-+ Comprehensive fixtures: +5
-+ Comprehensive data factories: +5
-+ Network-first pattern: +5
-+ Perfect isolation: +5
-+ All test IDs present: +5
-
-Quality Score: max(0, min(100, Starting Score - Violations + Bonus))
-```
-
-3. **Quality Grade**:
- - **90-100**: Excellent (A+)
- - **80-89**: Good (A)
- - **70-79**: Acceptable (B)
- - **60-69**: Needs Improvement (C)
- - **<60**: Critical Issues (F)
-
-**Output:** Quality score calculated with violation breakdown
-
----
-
-### Step 5: Generate Review Report
-
-**Actions:**
-
-1. **Create review report** using `test-review-template.md`:
-
- **Header Section:**
- - Test file(s) reviewed
- - Review date
- - Review scope (single/directory/suite)
- - Quality score and grade
-
- **Executive Summary:**
- - Overall assessment (Excellent/Good/Needs Improvement/Critical)
- - Key strengths
- - Key weaknesses
- - Recommendation (Approve/Approve with comments/Request changes)
-
- **Quality Criteria Assessment:**
- - Table with all criteria evaluated
- - Status for each (PASS/WARN/FAIL)
- - Violation count per criterion
-
- **Critical Issues (Must Fix):**
- - Priority P0/P1 violations
- - Code location (file:line)
- - Explanation of issue
- - Recommended fix
- - Knowledge base reference
-
- **Recommendations (Should Fix):**
- - Priority P2/P3 violations
- - Code location (file:line)
- - Explanation of issue
- - Recommended improvement
- - Knowledge base reference
-
- **Best Practices Examples:**
- - Highlight good patterns found in tests
- - Reference knowledge base fragments
- - Provide examples for others to follow
-
- **Knowledge Base References:**
- - List all fragments consulted
- - Provide links to detailed guidance
-
-2. **Generate inline comments** (if `generate_inline_comments: true`):
- - Add TODO comments in test files at violation locations
- - Format: `// TODO (TEA Review): [Issue description] - See test-review-{filename}.md`
- - Never modify test logic, only add comments
-
-3. **Generate quality badge** (if `generate_quality_badge: true`):
- - Create badge with quality score (e.g., "Test Quality: 87/100 (A)")
- - Format for inclusion in README or documentation
-
-4. **Append to story file** (if `append_to_story: true` and story file exists):
- - Add "Test Quality Review" section to story
- - Include quality score and critical issues
- - Link to full review report
-
-**Output:** Comprehensive review report with actionable feedback
-
----
-
-### Step 6: Save Outputs and Notify
-
-**Actions:**
-
-1. **Save review report** to `{output_file}`
-2. **Save inline comments** to test files (if enabled)
-3. **Save quality badge** to output folder (if enabled)
-4. **Update story file** (if enabled)
-5. **Generate summary message** for user:
- - Quality score and grade
- - Critical issue count
- - Recommendation
-
-**Output:** All review artifacts saved and user notified
-
----
-
-## Quality Criteria Decision Matrix
-
-| Criterion | PASS | WARN | FAIL | Knowledge Fragment |
-| ------------------ | ------------------------- | -------------- | ------------------- | ----------------------- |
-| BDD Format | Given-When-Then present | Some structure | No structure | test-quality.md |
-| Test IDs | All tests have IDs | Some missing | No IDs | traceability.md |
-| Priority Markers | All classified | Some missing | No classification | test-priorities.md |
-| Hard Waits | No hard waits | Some justified | Hard waits present | test-quality.md |
-| Determinism | No conditionals/random | Some justified | Conditionals/random | test-quality.md |
-| Isolation | Clean up, no shared state | Some gaps | Shared state | test-quality.md |
-| Fixture Patterns | Pure fn β Fixture | Some fixtures | No fixtures | fixture-architecture.md |
-| Data Factories | Factory functions | Some factories | Hardcoded data | data-factories.md |
-| Network-First | Intercept before navigate | Some correct | Race conditions | network-first.md |
-| Assertions | Explicit assertions | Some implicit | Missing assertions | test-quality.md |
-| Test Length | β€300 lines | 301-500 lines | >500 lines | test-quality.md |
-| Test Duration | β€1.5 min | 1.5-3 min | >3 min | test-quality.md |
-| Flakiness Patterns | No flaky patterns | Some potential | Multiple patterns | ci-burn-in.md |
-
----
-
-## Example Review Summary
-
-````markdown
-# Test Quality Review: auth-login.spec.ts
-
-**Quality Score**: 78/100 (B - Acceptable)
-**Review Date**: 2025-10-14
-**Recommendation**: Approve with Comments
-
-## Executive Summary
-
-Overall, the test demonstrates good structure and coverage of the login flow. However, there are several areas for improvement to enhance maintainability and prevent flakiness.
-
-**Strengths:**
-
-- Excellent BDD structure with clear Given-When-Then comments
-- Good use of test IDs (1.3-E2E-001, 1.3-E2E-002)
-- Comprehensive assertions on authentication state
-
-**Weaknesses:**
-
-- Hard wait detected (page.waitForTimeout(2000)) - flakiness risk
-- Hardcoded test data (email: 'test@example.com') - use factories instead
-- Missing fixture for common login setup - DRY violation
-
-**Recommendation**: Address critical issue (hard wait) before merging. Other improvements can be addressed in follow-up PR.
-
-## Critical Issues (Must Fix)
-
-### 1. Hard Wait Detected (Line 45)
-
-**Severity**: P0 (Critical)
-**Issue**: `await page.waitForTimeout(2000)` introduces flakiness
-**Fix**: Use explicit wait for element or network request instead
-**Knowledge**: See test-quality.md, network-first.md
-
-```typescript
-// β Bad (current)
-await page.waitForTimeout(2000);
-await expect(page.locator('[data-testid="user-menu"]')).toBeVisible();
-
-// β Good (recommended)
-await expect(page.locator('[data-testid="user-menu"]')).toBeVisible({ timeout: 10000 });
-```
-````
-
-## Recommendations (Should Fix)
-
-### 1. Use Data Factory for Test User (Lines 23, 32, 41)
-
-**Severity**: P1 (High)
-**Issue**: Hardcoded email `test@example.com` - maintainability risk
-**Fix**: Create factory function for test users
-**Knowledge**: See data-factories.md
-
-```typescript
-// β Good (recommended)
-import { createTestUser } from './factories/user-factory';
-
-const testUser = createTestUser({ role: 'admin' });
-await loginPage.login(testUser.email, testUser.password);
-```
-
-### 2. Extract Login Setup to Fixture (Lines 18-28)
-
-**Severity**: P1 (High)
-**Issue**: Login setup repeated across tests - DRY violation
-**Fix**: Create fixture for authenticated state
-**Knowledge**: See fixture-architecture.md
-
-```typescript
-// β Good (recommended)
-const test = base.extend({
- authenticatedPage: async ({ page }, use) => {
- const user = createTestUser();
- await loginPage.login(user.email, user.password);
- await use(page);
- },
-});
-
-test('user can access dashboard', async ({ authenticatedPage }) => {
- // Test starts already logged in
-});
-```
-
-## Quality Score Breakdown
-
-- Starting Score: 100
-- Critical Violations (1 Γ -10): -10
-- High Violations (2 Γ -5): -10
-- Medium Violations (0 Γ -2): 0
-- Low Violations (1 Γ -1): -1
-- Bonus (BDD +5, Test IDs +5): +10
-- **Final Score**: 78/100 (B)
-
-```
-
----
-
-## Integration with Other Workflows
-
-### Before Test Review
-
-- **atdd**: Generate acceptance tests (TEA reviews them for quality)
-- **automate**: Expand regression suite (TEA reviews new tests)
-- **dev story**: Developer writes implementation tests (TEA reviews them)
-
-### After Test Review
-
-- **Developer**: Addresses critical issues, improves based on recommendations
-- **gate**: Test quality review feeds into gate decision (high-quality tests increase confidence)
-
-### Coordinates With
-
-- **Story File**: Review links to acceptance criteria context
-- **Test Design**: Review validates tests align with prioritization
-- **Knowledge Base**: Review references fragments for detailed guidance
-
----
-
-## Important Notes
-
-1. **Non-Prescriptive**: Review provides guidance, not rigid rules
-2. **Context Matters**: Some violations may be justified for specific scenarios
-3. **Knowledge-Based**: All feedback grounded in proven patterns from tea-index.csv
-4. **Actionable**: Every issue includes recommended fix with code examples
-5. **Quality Score**: Use as indicator, not absolute measure
-6. **Continuous Improvement**: Review same tests periodically as patterns evolve
-
----
-
-## Troubleshooting
-
-**Problem: No test files found**
-- Verify test_dir path is correct
-- Check test file extensions match glob pattern
-- Ensure test files exist in expected location
-
-**Problem: Quality score seems too low/high**
-- Review violation counts - may need to adjust thresholds
-- Consider context - some projects have different standards
-- Focus on critical issues first, not just score
-
-**Problem: Inline comments not generated**
-- Check generate_inline_comments: true in variables
-- Verify write permissions on test files
-- Review append_to_file: false (separate report mode)
-
-**Problem: Knowledge fragments not loading**
-- Verify tea-index.csv exists in testarch/ directory
-- Check fragment file paths are correct
-- Ensure auto_load_knowledge: true in variables
-```
diff --git a/src/bmm/workflows/testarch/test-review/test-review-template.md b/src/bmm/workflows/testarch/test-review/test-review-template.md
deleted file mode 100644
index 54127a5a..00000000
--- a/src/bmm/workflows/testarch/test-review/test-review-template.md
+++ /dev/null
@@ -1,390 +0,0 @@
-# Test Quality Review: {test_filename}
-
-**Quality Score**: {score}/100 ({grade} - {assessment})
-**Review Date**: {YYYY-MM-DD}
-**Review Scope**: {single | directory | suite}
-**Reviewer**: {user_name or TEA Agent}
-
----
-
-Note: This review audits existing tests; it does not generate tests.
-
-## Executive Summary
-
-**Overall Assessment**: {Excellent | Good | Acceptable | Needs Improvement | Critical Issues}
-
-**Recommendation**: {Approve | Approve with Comments | Request Changes | Block}
-
-### Key Strengths
-
-β {strength_1}
-β {strength_2}
-β {strength_3}
-
-### Key Weaknesses
-
-β {weakness_1}
-β {weakness_2}
-β {weakness_3}
-
-### Summary
-
-{1-2 paragraph summary of overall test quality, highlighting major findings and recommendation rationale}
-
----
-
-## Quality Criteria Assessment
-
-| Criterion | Status | Violations | Notes |
-| ------------------------------------ | ------------------------------- | ---------- | ------------ |
-| BDD Format (Given-When-Then) | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Test IDs | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Priority Markers (P0/P1/P2/P3) | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Hard Waits (sleep, waitForTimeout) | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Determinism (no conditionals) | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Isolation (cleanup, no shared state) | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Fixture Patterns | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Data Factories | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Network-First Pattern | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Explicit Assertions | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-| Test Length (β€300 lines) | {β PASS \| β οΈ WARN \| β FAIL} | {lines} | {brief_note} |
-| Test Duration (β€1.5 min) | {β PASS \| β οΈ WARN \| β FAIL} | {duration} | {brief_note} |
-| Flakiness Patterns | {β PASS \| β οΈ WARN \| β FAIL} | {count} | {brief_note} |
-
-**Total Violations**: {critical_count} Critical, {high_count} High, {medium_count} Medium, {low_count} Low
-
----
-
-## Quality Score Breakdown
-
-```
-Starting Score: 100
-Critical Violations: -{critical_count} Γ 10 = -{critical_deduction}
-High Violations: -{high_count} Γ 5 = -{high_deduction}
-Medium Violations: -{medium_count} Γ 2 = -{medium_deduction}
-Low Violations: -{low_count} Γ 1 = -{low_deduction}
-
-Bonus Points:
- Excellent BDD: +{0|5}
- Comprehensive Fixtures: +{0|5}
- Data Factories: +{0|5}
- Network-First: +{0|5}
- Perfect Isolation: +{0|5}
- All Test IDs: +{0|5}
- --------
-Total Bonus: +{bonus_total}
-
-Final Score: {final_score}/100
-Grade: {grade}
-```
-
----
-
-## Critical Issues (Must Fix)
-
-{If no critical issues: "No critical issues detected. β "}
-
-{For each critical issue:}
-
-### {issue_number}. {Issue Title}
-
-**Severity**: P0 (Critical)
-**Location**: `{filename}:{line_number}`
-**Criterion**: {criterion_name}
-**Knowledge Base**: [{fragment_name}]({fragment_path})
-
-**Issue Description**:
-{Detailed explanation of what the problem is and why it's critical}
-
-**Current Code**:
-
-```typescript
-// β Bad (current implementation)
-{
- code_snippet_showing_problem;
-}
-```
-
-**Recommended Fix**:
-
-```typescript
-// β Good (recommended approach)
-{
- code_snippet_showing_solution;
-}
-```
-
-**Why This Matters**:
-{Explanation of impact - flakiness risk, maintainability, reliability}
-
-**Related Violations**:
-{If similar issue appears elsewhere, note line numbers}
-
----
-
-## Recommendations (Should Fix)
-
-{If no recommendations: "No additional recommendations. Test quality is excellent. β "}
-
-{For each recommendation:}
-
-### {rec_number}. {Recommendation Title}
-
-**Severity**: {P1 (High) | P2 (Medium) | P3 (Low)}
-**Location**: `{filename}:{line_number}`
-**Criterion**: {criterion_name}
-**Knowledge Base**: [{fragment_name}]({fragment_path})
-
-**Issue Description**:
-{Detailed explanation of what could be improved and why}
-
-**Current Code**:
-
-```typescript
-// β οΈ Could be improved (current implementation)
-{
- code_snippet_showing_current_approach;
-}
-```
-
-**Recommended Improvement**:
-
-```typescript
-// β Better approach (recommended)
-{
- code_snippet_showing_improvement;
-}
-```
-
-**Benefits**:
-{Explanation of benefits - maintainability, readability, reusability}
-
-**Priority**:
-{Why this is P1/P2/P3 - urgency and impact}
-
----
-
-## Best Practices Found
-
-{If good patterns found, highlight them}
-
-{For each best practice:}
-
-### {practice_number}. {Best Practice Title}
-
-**Location**: `{filename}:{line_number}`
-**Pattern**: {pattern_name}
-**Knowledge Base**: [{fragment_name}]({fragment_path})
-
-**Why This Is Good**:
-{Explanation of why this pattern is excellent}
-
-**Code Example**:
-
-```typescript
-// β Excellent pattern demonstrated in this test
-{
- code_snippet_showing_best_practice;
-}
-```
-
-**Use as Reference**:
-{Encourage using this pattern in other tests}
-
----
-
-## Test File Analysis
-
-### File Metadata
-
-- **File Path**: `{relative_path_from_project_root}`
-- **File Size**: {line_count} lines, {kb_size} KB
-- **Test Framework**: {Playwright | Jest | Cypress | Vitest | Other}
-- **Language**: {TypeScript | JavaScript}
-
-### Test Structure
-
-- **Describe Blocks**: {describe_count}
-- **Test Cases (it/test)**: {test_count}
-- **Average Test Length**: {avg_lines_per_test} lines per test
-- **Fixtures Used**: {fixture_count} ({fixture_names})
-- **Data Factories Used**: {factory_count} ({factory_names})
-
-### Test Coverage Scope
-
-- **Test IDs**: {test_id_list}
-- **Priority Distribution**:
- - P0 (Critical): {p0_count} tests
- - P1 (High): {p1_count} tests
- - P2 (Medium): {p2_count} tests
- - P3 (Low): {p3_count} tests
- - Unknown: {unknown_count} tests
-
-### Assertions Analysis
-
-- **Total Assertions**: {assertion_count}
-- **Assertions per Test**: {avg_assertions_per_test} (avg)
-- **Assertion Types**: {assertion_types_used}
-
----
-
-## Context and Integration
-
-### Related Artifacts
-
-{If story file found:}
-
-- **Story File**: [{story_filename}]({story_path})
-- **Acceptance Criteria Mapped**: {ac_mapped}/{ac_total} ({ac_coverage}%)
-
-{If test-design found:}
-
-- **Test Design**: [{test_design_filename}]({test_design_path})
-- **Risk Assessment**: {risk_level}
-- **Priority Framework**: P0-P3 applied
-
-### Acceptance Criteria Validation
-
-{If story file available, map tests to ACs:}
-
-| Acceptance Criterion | Test ID | Status | Notes |
-| -------------------- | --------- | -------------------------- | ------- |
-| {AC_1} | {test_id} | {β Covered \| β Missing} | {notes} |
-| {AC_2} | {test_id} | {β Covered \| β Missing} | {notes} |
-| {AC_3} | {test_id} | {β Covered \| β Missing} | {notes} |
-
-**Coverage**: {covered_count}/{total_count} criteria covered ({coverage_percentage}%)
-
----
-
-## Knowledge Base References
-
-This review consulted the following knowledge base fragments:
-
-- **[test-quality.md](../../../testarch/knowledge/test-quality.md)** - Definition of Done for tests (no hard waits, <300 lines, <1.5 min, self-cleaning)
-- **[fixture-architecture.md](../../../testarch/knowledge/fixture-architecture.md)** - Pure function β Fixture β mergeTests pattern
-- **[network-first.md](../../../testarch/knowledge/network-first.md)** - Route intercept before navigate (race condition prevention)
-- **[data-factories.md](../../../testarch/knowledge/data-factories.md)** - Factory functions with overrides, API-first setup
-- **[test-levels-framework.md](../../../testarch/knowledge/test-levels-framework.md)** - E2E vs API vs Component vs Unit appropriateness
-- **[tdd-cycles.md](../../../testarch/knowledge/tdd-cycles.md)** - Red-Green-Refactor patterns
-- **[selective-testing.md](../../../testarch/knowledge/selective-testing.md)** - Duplicate coverage detection
-- **[ci-burn-in.md](../../../testarch/knowledge/ci-burn-in.md)** - Flakiness detection patterns (10-iteration loop)
-- **[test-priorities.md](../../../testarch/knowledge/test-priorities.md)** - P0/P1/P2/P3 classification framework
-- **[traceability.md](../../../testarch/knowledge/traceability.md)** - Requirements-to-tests mapping
-
-See [tea-index.csv](../../../testarch/tea-index.csv) for complete knowledge base.
-
----
-
-## Next Steps
-
-### Immediate Actions (Before Merge)
-
-1. **{action_1}** - {description}
- - Priority: {P0 | P1 | P2}
- - Owner: {team_or_person}
- - Estimated Effort: {time_estimate}
-
-2. **{action_2}** - {description}
- - Priority: {P0 | P1 | P2}
- - Owner: {team_or_person}
- - Estimated Effort: {time_estimate}
-
-### Follow-up Actions (Future PRs)
-
-1. **{action_1}** - {description}
- - Priority: {P2 | P3}
- - Target: {next_sprint | backlog}
-
-2. **{action_2}** - {description}
- - Priority: {P2 | P3}
- - Target: {next_sprint | backlog}
-
-### Re-Review Needed?
-
-{β No re-review needed - approve as-is}
-{β οΈ Re-review after critical fixes - request changes, then re-review}
-{β Major refactor required - block merge, pair programming recommended}
-
----
-
-## Decision
-
-**Recommendation**: {Approve | Approve with Comments | Request Changes | Block}
-
-**Rationale**:
-{1-2 paragraph explanation of recommendation based on findings}
-
-**For Approve**:
-
-> Test quality is excellent/good with {score}/100 score. {Minor issues noted can be addressed in follow-up PRs.} Tests are production-ready and follow best practices.
-
-**For Approve with Comments**:
-
-> Test quality is acceptable with {score}/100 score. {High-priority recommendations should be addressed but don't block merge.} Critical issues resolved, but improvements would enhance maintainability.
-
-**For Request Changes**:
-
-> Test quality needs improvement with {score}/100 score. {Critical issues must be fixed before merge.} {X} critical violations detected that pose flakiness/maintainability risks.
-
-**For Block**:
-
-> Test quality is insufficient with {score}/100 score. {Multiple critical issues make tests unsuitable for production.} Recommend pairing session with QA engineer to apply patterns from knowledge base.
-
----
-
-## Appendix
-
-### Violation Summary by Location
-
-{Table of all violations sorted by line number:}
-
-| Line | Severity | Criterion | Issue | Fix |
-| ------ | ------------- | ----------- | ------------- | ----------- |
-| {line} | {P0/P1/P2/P3} | {criterion} | {brief_issue} | {brief_fix} |
-| {line} | {P0/P1/P2/P3} | {criterion} | {brief_issue} | {brief_fix} |
-
-### Quality Trends
-
-{If reviewing same file multiple times, show trend:}
-
-| Review Date | Score | Grade | Critical Issues | Trend |
-| ------------ | ------------- | --------- | --------------- | ----------- |
-| {YYYY-MM-DD} | {score_1}/100 | {grade_1} | {count_1} | β¬οΈ Improved |
-| {YYYY-MM-DD} | {score_2}/100 | {grade_2} | {count_2} | β¬οΈ Declined |
-| {YYYY-MM-DD} | {score_3}/100 | {grade_3} | {count_3} | β‘οΈ Stable |
-
-### Related Reviews
-
-{If reviewing multiple files in directory/suite:}
-
-| File | Score | Grade | Critical | Status |
-| -------- | ----------- | ------- | -------- | ------------------ |
-| {file_1} | {score}/100 | {grade} | {count} | {Approved/Blocked} |
-| {file_2} | {score}/100 | {grade} | {count} | {Approved/Blocked} |
-| {file_3} | {score}/100 | {grade} | {count} | {Approved/Blocked} |
-
-**Suite Average**: {avg_score}/100 ({avg_grade})
-
----
-
-## Review Metadata
-
-**Generated By**: BMad TEA Agent (Test Architect)
-**Workflow**: testarch-test-review v4.0
-**Review ID**: test-review-{filename}-{YYYYMMDD}
-**Timestamp**: {YYYY-MM-DD HH:MM:SS}
-**Version**: 1.0
-
----
-
-## Feedback on This Review
-
-If you have questions or feedback on this review:
-
-1. Review patterns in knowledge base: `testarch/knowledge/`
-2. Consult tea-index.csv for detailed guidance
-3. Request clarification on specific violations
-4. Pair with QA engineer to apply patterns
-
-This review is guidance, not rigid rules. Context matters - if a pattern is justified, document it with a comment.
diff --git a/src/bmm/workflows/testarch/test-review/workflow.yaml b/src/bmm/workflows/testarch/test-review/workflow.yaml
deleted file mode 100644
index 58dad5ee..00000000
--- a/src/bmm/workflows/testarch/test-review/workflow.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# Test Architect workflow: test-review
-name: testarch-test-review
-description: "Review test quality using comprehensive knowledge base and best practices validation"
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/testarch/test-review"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-template: "{installed_path}/test-review-template.md"
-
-# Variables and inputs
-variables:
- test_dir: "{project-root}/tests" # Root test directory
- review_scope: "single" # single (one file), directory (folder), suite (all tests)
-
-# Output configuration
-default_output_file: "{output_folder}/test-review.md"
-
-# Required tools
-required_tools:
- - read_file # Read test files, story, test-design
- - write_file # Create review report
- - list_files # Discover test files in directory
- - search_repo # Find tests by patterns
- - glob # Find test files matching patterns
-
-tags:
- - qa
- - test-architect
- - code-review
- - quality
- - best-practices
-
-execution_hints:
- interactive: false # Minimize prompts
- autonomous: true # Proceed without user input unless blocked
- iterative: true # Can review multiple files
-
-web_bundle: false
diff --git a/src/bmm/workflows/testarch/trace/checklist.md b/src/bmm/workflows/testarch/trace/checklist.md
deleted file mode 100644
index 78f345a1..00000000
--- a/src/bmm/workflows/testarch/trace/checklist.md
+++ /dev/null
@@ -1,642 +0,0 @@
-# Requirements Traceability & Gate Decision - Validation Checklist
-
-**Workflow:** `testarch-trace`
-**Purpose:** Ensure complete traceability matrix with actionable gap analysis AND make deployment readiness decision (PASS/CONCERNS/FAIL/WAIVED)
-
-This checklist covers **two sequential phases**:
-
-- **PHASE 1**: Requirements Traceability (always executed)
-- **PHASE 2**: Quality Gate Decision (executed if `enable_gate_decision: true`)
-
----
-
-# PHASE 1: REQUIREMENTS TRACEABILITY
-
-## Prerequisites Validation
-
-- [ ] Acceptance criteria are available (from story file OR inline)
-- [ ] Test suite exists (or gaps are acknowledged and documented)
-- [ ] If tests are missing, recommend `*atdd` (trace does not run it automatically)
-- [ ] Test directory path is correct (`test_dir` variable)
-- [ ] Story file is accessible (if using BMad mode)
-- [ ] Knowledge base is loaded (test-priorities, traceability, risk-governance)
-
----
-
-## Context Loading
-
-- [ ] Story file read successfully (if applicable)
-- [ ] Acceptance criteria extracted correctly
-- [ ] Story ID identified (e.g., 1.3)
-- [ ] `test-design.md` loaded (if available)
-- [ ] `tech-spec.md` loaded (if available)
-- [ ] `PRD.md` loaded (if available)
-- [ ] Relevant knowledge fragments loaded from `tea-index.csv`
-
----
-
-## Test Discovery and Cataloging
-
-- [ ] Tests auto-discovered using multiple strategies (test IDs, describe blocks, file paths)
-- [ ] Tests categorized by level (E2E, API, Component, Unit)
-- [ ] Test metadata extracted:
- - [ ] Test IDs (e.g., 1.3-E2E-001)
- - [ ] Describe/context blocks
- - [ ] It blocks (individual test cases)
- - [ ] Given-When-Then structure (if BDD)
- - [ ] Priority markers (P0/P1/P2/P3)
-- [ ] All relevant test files found (no tests missed due to naming conventions)
-
----
-
-## Criteria-to-Test Mapping
-
-- [ ] Each acceptance criterion mapped to tests (or marked as NONE)
-- [ ] Explicit references found (test IDs, describe blocks mentioning criterion)
-- [ ] Test level documented (E2E, API, Component, Unit)
-- [ ] Given-When-Then narrative verified for alignment
-- [ ] Traceability matrix table generated:
- - [ ] Criterion ID
- - [ ] Description
- - [ ] Test ID
- - [ ] Test File
- - [ ] Test Level
- - [ ] Coverage Status
-
----
-
-## Coverage Classification
-
-- [ ] Coverage status classified for each criterion:
- - [ ] **FULL** - All scenarios validated at appropriate level(s)
- - [ ] **PARTIAL** - Some coverage but missing edge cases or levels
- - [ ] **NONE** - No test coverage at any level
- - [ ] **UNIT-ONLY** - Only unit tests (missing integration/E2E validation)
- - [ ] **INTEGRATION-ONLY** - Only API/Component tests (missing unit confidence)
-- [ ] Classification justifications provided
-- [ ] Edge cases considered in FULL vs PARTIAL determination
-
----
-
-## Duplicate Coverage Detection
-
-- [ ] Duplicate coverage checked across test levels
-- [ ] Acceptable overlap identified (defense in depth for critical paths)
-- [ ] Unacceptable duplication flagged (same validation at multiple levels)
-- [ ] Recommendations provided for consolidation
-- [ ] Selective testing principles applied
-
----
-
-## Gap Analysis
-
-- [ ] Coverage gaps identified:
- - [ ] Criteria with NONE status
- - [ ] Criteria with PARTIAL status
- - [ ] Criteria with UNIT-ONLY status
- - [ ] Criteria with INTEGRATION-ONLY status
-- [ ] Gaps prioritized by risk level using test-priorities framework:
- - [ ] **CRITICAL** - P0 criteria without FULL coverage (BLOCKER)
- - [ ] **HIGH** - P1 criteria without FULL coverage (PR blocker)
- - [ ] **MEDIUM** - P2 criteria without FULL coverage (nightly gap)
- - [ ] **LOW** - P3 criteria without FULL coverage (acceptable)
-- [ ] Specific test recommendations provided for each gap:
- - [ ] Suggested test level (E2E, API, Component, Unit)
- - [ ] Test description (Given-When-Then)
- - [ ] Recommended test ID (e.g., 1.3-E2E-004)
- - [ ] Explanation of why test is needed
-
----
-
-## Coverage Metrics
-
-- [ ] Overall coverage percentage calculated (FULL coverage / total criteria)
-- [ ] P0 coverage percentage calculated
-- [ ] P1 coverage percentage calculated
-- [ ] P2 coverage percentage calculated (if applicable)
-- [ ] Coverage by level calculated:
- - [ ] E2E coverage %
- - [ ] API coverage %
- - [ ] Component coverage %
- - [ ] Unit coverage %
-
----
-
-## Test Quality Verification
-
-For each mapped test, verify:
-
-- [ ] Explicit assertions are present (not hidden in helpers)
-- [ ] Test follows Given-When-Then structure
-- [ ] No hard waits or sleeps (deterministic waiting only)
-- [ ] Self-cleaning (test cleans up its data)
-- [ ] File size < 300 lines
-- [ ] Test duration < 90 seconds
-
-Quality issues flagged:
-
-- [ ] **BLOCKER** issues identified (missing assertions, hard waits, flaky patterns)
-- [ ] **WARNING** issues identified (large files, slow tests, unclear structure)
-- [ ] **INFO** issues identified (style inconsistencies, missing documentation)
-
-Knowledge fragments referenced:
-
-- [ ] `test-quality.md` for Definition of Done
-- [ ] `fixture-architecture.md` for self-cleaning patterns
-- [ ] `network-first.md` for Playwright best practices
-- [ ] `data-factories.md` for test data patterns
-
----
-
-## Phase 1 Deliverables Generated
-
-### Traceability Matrix Markdown
-
-- [ ] File created at `{output_folder}/traceability-matrix.md`
-- [ ] Template from `trace-template.md` used
-- [ ] Full mapping table included
-- [ ] Coverage status section included
-- [ ] Gap analysis section included
-- [ ] Quality assessment section included
-- [ ] Recommendations section included
-
-### Coverage Badge/Metric (if enabled)
-
-- [ ] Badge markdown generated
-- [ ] Metrics exported to JSON for CI/CD integration
-
-### Updated Story File (if enabled)
-
-- [ ] "Traceability" section added to story markdown
-- [ ] Link to traceability matrix included
-- [ ] Coverage summary included
-
----
-
-## Phase 1 Quality Assurance
-
-### Accuracy Checks
-
-- [ ] All acceptance criteria accounted for (none skipped)
-- [ ] Test IDs correctly formatted (e.g., 1.3-E2E-001)
-- [ ] File paths are correct and accessible
-- [ ] Coverage percentages calculated correctly
-- [ ] No false positives (tests incorrectly mapped to criteria)
-- [ ] No false negatives (existing tests missed in mapping)
-
-### Completeness Checks
-
-- [ ] All test levels considered (E2E, API, Component, Unit)
-- [ ] All priorities considered (P0, P1, P2, P3)
-- [ ] All coverage statuses used appropriately (FULL, PARTIAL, NONE, UNIT-ONLY, INTEGRATION-ONLY)
-- [ ] All gaps have recommendations
-- [ ] All quality issues have severity and remediation guidance
-
-### Actionability Checks
-
-- [ ] Recommendations are specific (not generic)
-- [ ] Test IDs suggested for new tests
-- [ ] Given-When-Then provided for recommended tests
-- [ ] Impact explained for each gap
-- [ ] Priorities clear (CRITICAL, HIGH, MEDIUM, LOW)
-
----
-
-## Phase 1 Documentation
-
-- [ ] Traceability matrix is readable and well-formatted
-- [ ] Tables render correctly in markdown
-- [ ] Code blocks have proper syntax highlighting
-- [ ] Links are valid and accessible
-- [ ] Recommendations are clear and prioritized
-
----
-
-# PHASE 2: QUALITY GATE DECISION
-
-**Note**: Phase 2 executes only if `enable_gate_decision: true` in workflow.yaml
-
----
-
-## Prerequisites
-
-### Evidence Gathering
-
-- [ ] Test execution results obtained (CI/CD pipeline, test framework reports)
-- [ ] Story/epic/release file identified and read
-- [ ] Test design document discovered or explicitly provided (if available)
-- [ ] Traceability matrix discovered or explicitly provided (available from Phase 1)
-- [ ] NFR assessment discovered or explicitly provided (if available)
-- [ ] Code coverage report discovered or explicitly provided (if available)
-- [ ] Burn-in results discovered or explicitly provided (if available)
-
-### Evidence Validation
-
-- [ ] Evidence freshness validated (warn if >7 days old, recommend re-running workflows)
-- [ ] All required assessments available or user acknowledged gaps
-- [ ] Test results are complete (not partial or interrupted runs)
-- [ ] Test results match current codebase (not from outdated branch)
-
-### Knowledge Base Loading
-
-- [ ] `risk-governance.md` loaded successfully
-- [ ] `probability-impact.md` loaded successfully
-- [ ] `test-quality.md` loaded successfully
-- [ ] `test-priorities.md` loaded successfully
-- [ ] `ci-burn-in.md` loaded (if burn-in results available)
-
----
-
-## Process Steps
-
-### Step 1: Context Loading
-
-- [ ] Gate type identified (story/epic/release/hotfix)
-- [ ] Target ID extracted (story_id, epic_num, or release_version)
-- [ ] Decision thresholds loaded from workflow variables
-- [ ] Risk tolerance configuration loaded
-- [ ] Waiver policy loaded
-
-### Step 2: Evidence Parsing
-
-**Test Results:**
-
-- [ ] Total test count extracted
-- [ ] Passed test count extracted
-- [ ] Failed test count extracted
-- [ ] Skipped test count extracted
-- [ ] Test duration extracted
-- [ ] P0 test pass rate calculated
-- [ ] P1 test pass rate calculated
-- [ ] Overall test pass rate calculated
-
-**Quality Assessments:**
-
-- [ ] P0/P1/P2/P3 scenarios extracted from test-design.md (if available)
-- [ ] Risk scores extracted from test-design.md (if available)
-- [ ] Coverage percentages extracted from traceability-matrix.md (available from Phase 1)
-- [ ] Coverage gaps extracted from traceability-matrix.md (available from Phase 1)
-- [ ] NFR status extracted from nfr-assessment.md (if available)
-- [ ] Security issues count extracted from nfr-assessment.md (if available)
-
-**Code Coverage:**
-
-- [ ] Line coverage percentage extracted (if available)
-- [ ] Branch coverage percentage extracted (if available)
-- [ ] Function coverage percentage extracted (if available)
-- [ ] Critical path coverage validated (if available)
-
-**Burn-in Results:**
-
-- [ ] Burn-in iterations count extracted (if available)
-- [ ] Flaky tests count extracted (if available)
-- [ ] Stability score calculated (if available)
-
-### Step 3: Decision Rules Application
-
-**P0 Criteria Evaluation:**
-
-- [ ] P0 test pass rate evaluated (must be 100%)
-- [ ] P0 acceptance criteria coverage evaluated (must be 100%)
-- [ ] Security issues count evaluated (must be 0)
-- [ ] Critical NFR failures evaluated (must be 0)
-- [ ] Flaky tests evaluated (must be 0 if burn-in enabled)
-- [ ] P0 decision recorded: PASS or FAIL
-
-**P1 Criteria Evaluation:**
-
-- [ ] P1 test pass rate evaluated (threshold: min_p1_pass_rate)
-- [ ] P1 acceptance criteria coverage evaluated (threshold: 95%)
-- [ ] Overall test pass rate evaluated (threshold: min_overall_pass_rate)
-- [ ] Code coverage evaluated (threshold: min_coverage)
-- [ ] P1 decision recorded: PASS or CONCERNS
-
-**P2/P3 Criteria Evaluation:**
-
-- [ ] P2 failures tracked (informational, don't block if allow_p2_failures: true)
-- [ ] P3 failures tracked (informational, don't block if allow_p3_failures: true)
-- [ ] Residual risks documented
-
-**Final Decision:**
-
-- [ ] Decision determined: PASS / CONCERNS / FAIL / WAIVED
-- [ ] Decision rationale documented
-- [ ] Decision is deterministic (follows rules, not arbitrary)
-
-### Step 4: Documentation
-
-**Gate Decision Document Created:**
-
-- [ ] Story/epic/release info section complete (ID, title, description, links)
-- [ ] Decision clearly stated (PASS / CONCERNS / FAIL / WAIVED)
-- [ ] Decision date recorded
-- [ ] Evaluator recorded (user or agent name)
-
-**Evidence Summary Documented:**
-
-- [ ] Test results summary complete (total, passed, failed, pass rates)
-- [ ] Coverage summary complete (P0/P1 criteria, code coverage)
-- [ ] NFR validation summary complete (security, performance, reliability, maintainability)
-- [ ] Flakiness summary complete (burn-in iterations, flaky test count)
-
-**Rationale Documented:**
-
-- [ ] Decision rationale clearly explained
-- [ ] Key evidence highlighted
-- [ ] Assumptions and caveats noted (if any)
-
-**Residual Risks Documented (if CONCERNS or WAIVED):**
-
-- [ ] Unresolved P1/P2 issues listed
-- [ ] Probability Γ impact estimated for each risk
-- [ ] Mitigations or workarounds described
-
-**Waivers Documented (if WAIVED):**
-
-- [ ] Waiver reason documented (business justification)
-- [ ] Waiver approver documented (name, role)
-- [ ] Waiver expiry date documented
-- [ ] Remediation plan documented (fix in next release, due date)
-- [ ] Monitoring plan documented
-
-**Critical Issues Documented (if FAIL or CONCERNS):**
-
-- [ ] Top 5-10 critical issues listed
-- [ ] Priority assigned to each issue (P0/P1/P2)
-- [ ] Owner assigned to each issue
-- [ ] Due date assigned to each issue
-
-**Recommendations Documented:**
-
-- [ ] Next steps clearly stated for decision type
-- [ ] Deployment recommendation provided
-- [ ] Monitoring recommendations provided (if applicable)
-- [ ] Remediation recommendations provided (if applicable)
-
-### Step 5: Status Updates and Notifications
-
-**Gate YAML Created:**
-
-- [ ] Gate YAML snippet generated with decision and criteria
-- [ ] Evidence references included in YAML
-- [ ] Next steps included in YAML
-- [ ] YAML file saved to output folder
-
-**Stakeholder Notification Generated:**
-
-- [ ] Notification subject line created
-- [ ] Notification body created with summary
-- [ ] Recipients identified (PM, SM, DEV lead, stakeholders)
-- [ ] Notification ready for delivery (if notify_stakeholders: true)
-
-**Outputs Saved:**
-
-- [ ] Gate decision document saved to `{output_file}`
-- [ ] Gate YAML saved to `{output_folder}/gate-decision-{target}.yaml`
-- [ ] All outputs are valid and readable
-
----
-
-## Phase 2 Output Validation
-
-### Gate Decision Document
-
-**Completeness:**
-
-- [ ] All required sections present (info, decision, evidence, rationale, next steps)
-- [ ] No placeholder text or TODOs left in document
-- [ ] All evidence references are accurate and complete
-- [ ] All links to artifacts are valid
-
-**Accuracy:**
-
-- [ ] Decision matches applied criteria rules
-- [ ] Test results match CI/CD pipeline output
-- [ ] Coverage percentages match reports
-- [ ] NFR status matches assessment document
-- [ ] No contradictions or inconsistencies
-
-**Clarity:**
-
-- [ ] Decision rationale is clear and unambiguous
-- [ ] Technical jargon is explained or avoided
-- [ ] Stakeholders can understand next steps
-- [ ] Recommendations are actionable
-
-### Gate YAML
-
-**Format:**
-
-- [ ] YAML is valid (no syntax errors)
-- [ ] All required fields present (target, decision, date, evaluator, criteria, evidence)
-- [ ] Field values are correct data types (numbers, strings, dates)
-
-**Content:**
-
-- [ ] Criteria values match decision document
-- [ ] Evidence references are accurate
-- [ ] Next steps align with decision type
-
----
-
-## Phase 2 Quality Checks
-
-### Decision Integrity
-
-- [ ] Decision is deterministic (follows rules, not arbitrary)
-- [ ] P0 failures result in FAIL decision (unless waived)
-- [ ] Security issues result in FAIL decision (unless waived - but should never be waived)
-- [ ] Waivers have business justification and approver (if WAIVED)
-- [ ] Residual risks are documented (if CONCERNS or WAIVED)
-
-### Evidence-Based
-
-- [ ] Decision is based on actual test results (not guesses)
-- [ ] All claims are supported by evidence
-- [ ] No assumptions without documentation
-- [ ] Evidence sources are cited (CI run IDs, report URLs)
-
-### Transparency
-
-- [ ] Decision rationale is transparent and auditable
-- [ ] Criteria evaluation is documented step-by-step
-- [ ] Any deviations from standard process are explained
-- [ ] Waiver justifications are clear (if applicable)
-
-### Consistency
-
-- [ ] Decision aligns with risk-governance knowledge fragment
-- [ ] Priority framework (P0/P1/P2/P3) applied consistently
-- [ ] Terminology consistent with test-quality knowledge fragment
-- [ ] Decision matrix followed correctly
-
----
-
-## Phase 2 Integration Points
-
-### CI/CD Pipeline
-
-- [ ] Gate YAML is CI/CD-compatible
-- [ ] YAML can be parsed by pipeline automation
-- [ ] Decision can be used to block/allow deployments
-- [ ] Evidence references are accessible to pipeline
-
-### Stakeholders
-
-- [ ] Notification message is clear and actionable
-- [ ] Decision is explained in non-technical terms
-- [ ] Next steps are specific and time-bound
-- [ ] Recipients are appropriate for decision type
-
----
-
-## Phase 2 Compliance and Audit
-
-### Audit Trail
-
-- [ ] Decision date and time recorded
-- [ ] Evaluator identified (user or agent)
-- [ ] All evidence sources cited
-- [ ] Decision criteria documented
-- [ ] Rationale clearly explained
-
-### Traceability
-
-- [ ] Gate decision traceable to story/epic/release
-- [ ] Evidence traceable to specific test runs
-- [ ] Assessments traceable to workflows that created them
-- [ ] Waiver traceable to approver (if applicable)
-
-### Compliance
-
-- [ ] Security requirements validated (no unresolved vulnerabilities)
-- [ ] Quality standards met or waived with justification
-- [ ] Regulatory requirements addressed (if applicable)
-- [ ] Documentation sufficient for external audit
-
----
-
-## Phase 2 Edge Cases and Exceptions
-
-### Missing Evidence
-
-- [ ] If test-design.md missing, decision still possible with test results + trace
-- [ ] If traceability-matrix.md missing, decision still possible with test results (but Phase 1 should provide it)
-- [ ] If nfr-assessment.md missing, NFR validation marked as NOT ASSESSED
-- [ ] If code coverage missing, coverage criterion marked as NOT ASSESSED
-- [ ] User acknowledged gaps in evidence or provided alternative proof
-
-### Stale Evidence
-
-- [ ] Evidence freshness checked (if validate_evidence_freshness: true)
-- [ ] Warnings issued for assessments >7 days old
-- [ ] User acknowledged stale evidence or re-ran workflows
-- [ ] Decision document notes any stale evidence used
-
-### Conflicting Evidence
-
-- [ ] Conflicts between test results and assessments resolved
-- [ ] Most recent/authoritative source identified
-- [ ] Conflict resolution documented in decision rationale
-- [ ] User consulted if conflict cannot be resolved
-
-### Waiver Scenarios
-
-- [ ] Waiver only used for FAIL decision (not PASS or CONCERNS)
-- [ ] Waiver has business justification (not technical convenience)
-- [ ] Waiver has named approver with authority (VP/CTO/PO)
-- [ ] Waiver has expiry date (does NOT apply to future releases)
-- [ ] Waiver has remediation plan with concrete due date
-- [ ] Security vulnerabilities are NOT waived (enforced)
-
----
-
-# FINAL VALIDATION (Both Phases)
-
-## Non-Prescriptive Validation
-
-- [ ] Traceability format adapted to team needs (not rigid template)
-- [ ] Examples are minimal and focused on patterns
-- [ ] Teams can extend with custom classifications
-- [ ] Integration with external systems supported (JIRA, Azure DevOps)
-- [ ] Compliance requirements considered (if applicable)
-
----
-
-## Documentation and Communication
-
-- [ ] All documents are readable and well-formatted
-- [ ] Tables render correctly in markdown
-- [ ] Code blocks have proper syntax highlighting
-- [ ] Links are valid and accessible
-- [ ] Recommendations are clear and prioritized
-- [ ] Gate decision is prominent and unambiguous (Phase 2)
-
----
-
-## Final Validation
-
-**Phase 1 (Traceability):**
-
-- [ ] All prerequisites met
-- [ ] All acceptance criteria mapped or gaps documented
-- [ ] P0 coverage is 100% OR documented as BLOCKER
-- [ ] Gap analysis is complete and prioritized
-- [ ] Test quality issues identified and flagged
-- [ ] Deliverables generated and saved
-
-**Phase 2 (Gate Decision):**
-
-- [ ] All quality evidence gathered
-- [ ] Decision criteria applied correctly
-- [ ] Decision rationale documented
-- [ ] Gate YAML ready for CI/CD integration
-- [ ] Status file updated (if enabled)
-- [ ] Stakeholders notified (if enabled)
-
-**Workflow Complete:**
-
-- [ ] Phase 1 completed successfully
-- [ ] Phase 2 completed successfully (if enabled)
-- [ ] All outputs validated and saved
-- [ ] Ready to proceed based on gate decision
-
----
-
-## Sign-Off
-
-**Phase 1 - Traceability Status:**
-
-- [ ] β PASS - All quality gates met, no critical gaps
-- [ ] β οΈ WARN - P1 gaps exist, address before PR merge
-- [ ] β FAIL - P0 gaps exist, BLOCKER for release
-
-**Phase 2 - Gate Decision Status (if enabled):**
-
-- [ ] β PASS - Deploy to production
-- [ ] β οΈ CONCERNS - Deploy with monitoring
-- [ ] β FAIL - Block deployment, fix issues
-- [ ] π WAIVED - Deploy with business approval and remediation plan
-
-**Next Actions:**
-
-- If PASS (both phases): Proceed to deployment
-- If WARN/CONCERNS: Address gaps/issues, proceed with monitoring
-- If FAIL (either phase): Run `*atdd` for missing tests, fix issues, re-run `*trace`
-- If WAIVED: Deploy with approved waiver, schedule remediation
-
----
-
-## Notes
-
-Record any issues, deviations, or important observations during workflow execution:
-
-- **Phase 1 Issues**: [Note any traceability mapping challenges, missing tests, quality concerns]
-- **Phase 2 Issues**: [Note any missing, stale, or conflicting evidence]
-- **Decision Rationale**: [Document any nuanced reasoning or edge cases]
-- **Waiver Details**: [Document waiver negotiations or approvals]
-- **Follow-up Actions**: [List any actions required after gate decision]
-
----
-
-
diff --git a/src/bmm/workflows/testarch/trace/instructions.md b/src/bmm/workflows/testarch/trace/instructions.md
deleted file mode 100644
index deafb36c..00000000
--- a/src/bmm/workflows/testarch/trace/instructions.md
+++ /dev/null
@@ -1,1030 +0,0 @@
-# Test Architect Workflow: Requirements Traceability & Quality Gate Decision
-
-**Workflow:** `testarch-trace`
-**Purpose:** Generate requirements-to-tests traceability matrix, analyze coverage gaps, and make quality gate decisions (PASS/CONCERNS/FAIL/WAIVED)
-**Agent:** Test Architect (TEA)
-**Format:** Pure Markdown v4.0 (no XML blocks)
-
----
-
-## Overview
-
-This workflow operates in two sequential phases to validate test coverage and deployment readiness:
-
-**PHASE 1 - REQUIREMENTS TRACEABILITY:** Create comprehensive traceability matrix mapping acceptance criteria to implemented tests, identify coverage gaps, and provide actionable recommendations.
-
-**PHASE 2 - QUALITY GATE DECISION:** Use traceability results combined with test execution evidence to make gate decisions (PASS/CONCERNS/FAIL/WAIVED) that determine deployment readiness.
-
-**Key Capabilities:**
-
-- Map acceptance criteria to specific test cases across all levels (E2E, API, Component, Unit)
-- Classify coverage status (FULL, PARTIAL, NONE, UNIT-ONLY, INTEGRATION-ONLY)
-- Prioritize gaps by risk level (P0/P1/P2/P3) using test-priorities framework
-- Apply deterministic decision rules based on coverage and test execution results
-- Generate gate decisions with evidence and rationale
-- Support waivers for business-approved exceptions
-- Update workflow status and notify stakeholders
-
----
-
-## Prerequisites
-
-**Required (Phase 1):**
-
-- Acceptance criteria (from story file OR provided inline)
-- Implemented test suite (or acknowledge gaps to be addressed)
-
-**Required (Phase 2 - if `enable_gate_decision: true`):**
-
-- Test execution results (CI/CD test reports, pass/fail rates)
-- Test design with risk priorities (P0/P1/P2/P3)
-
-**Recommended:**
-
-- `test-design.md` (for risk assessment and priority context)
-- `nfr-assessment.md` (for release-level gates)
-- `tech-spec.md` (for technical implementation context)
-- Test framework configuration (playwright.config.ts, jest.config.js, etc.)
-
-**Halt Conditions:**
-
-- If story lacks any implemented tests AND no gaps are acknowledged, recommend running `*atdd` workflow first
-- If acceptance criteria are completely missing, halt and request them
-- If Phase 2 enabled but test execution results missing, warn and skip gate decision
-
-Note: `*trace` never runs `*atdd` automatically; it only recommends running it when tests are missing.
-
----
-
-## PHASE 1: REQUIREMENTS TRACEABILITY
-
-This phase focuses on mapping requirements to tests, analyzing coverage, and identifying gaps.
-
----
-
-### Step 1: Load Context and Knowledge Base
-
-**Actions:**
-
-1. Load relevant knowledge fragments from `{project-root}/_bmad/bmm/testarch/tea-index.csv`:
- - `test-priorities-matrix.md` - P0/P1/P2/P3 risk framework with automated priority calculation, risk-based mapping, tagging strategy (389 lines, 2 examples)
- - `risk-governance.md` - Risk-based testing approach: 6 categories (TECH, SEC, PERF, DATA, BUS, OPS), automated scoring, gate decision engine, coverage traceability (625 lines, 4 examples)
- - `probability-impact.md` - Risk scoring methodology: probability Γ impact matrix, automated classification, dynamic re-assessment, gate integration (604 lines, 4 examples)
- - `test-quality.md` - Definition of Done for tests: deterministic, isolated with cleanup, explicit assertions, length/time limits (658 lines, 5 examples)
- - `selective-testing.md` - Duplicate coverage patterns: tag-based, spec filters, diff-based selection, promotion rules (727 lines, 4 examples)
-
-2. Read story file (if provided):
- - Extract acceptance criteria
- - Identify story ID (e.g., 1.3)
- - Note any existing test design or priority information
-
-3. Read related BMad artifacts (if available):
- - `test-design.md` - Risk assessment and test priorities
- - `tech-spec.md` - Technical implementation details
- - `PRD.md` - Product requirements context
-
-**Output:** Complete understanding of requirements, priorities, and existing context
-
----
-
-### Step 2: Discover and Catalog Tests
-
-**Actions:**
-
-1. Auto-discover test files related to the story:
- - Search for test IDs (e.g., `1.3-E2E-001`, `1.3-UNIT-005`)
- - Search for describe blocks mentioning feature name
- - Search for file paths matching feature directory
- - Use `glob` to find test files in `{test_dir}`
-
-2. Categorize tests by level:
- - **E2E Tests**: Full user journeys through UI
- - **API Tests**: HTTP contract and integration tests
- - **Component Tests**: UI component behavior in isolation
- - **Unit Tests**: Business logic and pure functions
-
-3. Extract test metadata:
- - Test ID (if present)
- - Describe/context blocks
- - It blocks (individual test cases)
- - Given-When-Then structure (if BDD)
- - Assertions used
- - Priority markers (P0/P1/P2/P3)
-
-**Output:** Complete catalog of all tests for this feature
-
----
-
-### Step 3: Map Criteria to Tests
-
-**Actions:**
-
-1. For each acceptance criterion:
- - Search for explicit references (test IDs, describe blocks mentioning criterion)
- - Map to specific test files and it blocks
- - Use Given-When-Then narrative to verify alignment
- - Document test level (E2E, API, Component, Unit)
-
-2. Build traceability matrix:
-
- ```
- | Criterion ID | Description | Test ID | Test File | Test Level | Coverage Status |
- | ------------ | ----------- | ----------- | ---------------- | ---------- | --------------- |
- | AC-1 | User can... | 1.3-E2E-001 | e2e/auth.spec.ts | E2E | FULL |
- ```
-
-3. Classify coverage status for each criterion:
- - **FULL**: All scenarios validated at appropriate level(s)
- - **PARTIAL**: Some coverage but missing edge cases or levels
- - **NONE**: No test coverage at any level
- - **UNIT-ONLY**: Only unit tests (missing integration/E2E validation)
- - **INTEGRATION-ONLY**: Only API/Component tests (missing unit confidence)
-
-4. Check for duplicate coverage:
- - Same behavior tested at multiple levels unnecessarily
- - Flag violations of selective testing principles
- - Recommend consolidation where appropriate
-
-**Output:** Complete traceability matrix with coverage classifications
-
----
-
-### Step 4: Analyze Gaps and Prioritize
-
-**Actions:**
-
-1. Identify coverage gaps:
- - List criteria with NONE, PARTIAL, UNIT-ONLY, or INTEGRATION-ONLY status
- - Assign severity based on test-priorities framework:
- - **CRITICAL**: P0 criteria without FULL coverage (blocks release)
- - **HIGH**: P1 criteria without FULL coverage (PR blocker)
- - **MEDIUM**: P2 criteria without FULL coverage (nightly test gap)
- - **LOW**: P3 criteria without FULL coverage (acceptable gap)
-
-2. Recommend specific tests to add:
- - Suggest test level (E2E, API, Component, Unit)
- - Provide test description (Given-When-Then)
- - Recommend test ID (e.g., `1.3-E2E-004`)
- - Explain why this test is needed
-
-3. Calculate coverage metrics:
- - Overall coverage percentage (criteria with FULL coverage / total criteria)
- - P0 coverage percentage (critical paths)
- - P1 coverage percentage (high priority)
- - Coverage by level (E2E%, API%, Component%, Unit%)
-
-4. Check against quality gates:
- - P0 coverage >= 100% (required)
- - P1 coverage >= 90% (recommended)
- - Overall coverage >= 80% (recommended)
-
-**Output:** Prioritized gap analysis with actionable recommendations and coverage metrics
-
----
-
-### Step 5: Verify Test Quality
-
-**Actions:**
-
-1. For each mapped test, verify:
- - Explicit assertions are present (not hidden in helpers)
- - Test follows Given-When-Then structure
- - No hard waits or sleeps
- - Self-cleaning (test cleans up its data)
- - File size < 300 lines
- - Test duration < 90 seconds
-
-2. Flag quality issues:
- - **BLOCKER**: Missing assertions, hard waits, flaky patterns
- - **WARNING**: Large files, slow tests, unclear structure
- - **INFO**: Style inconsistencies, missing documentation
-
-3. Reference knowledge fragments:
- - `test-quality.md` for Definition of Done
- - `fixture-architecture.md` for self-cleaning patterns
- - `network-first.md` for Playwright best practices
- - `data-factories.md` for test data patterns
-
-**Output:** Quality assessment for each test with improvement recommendations
-
----
-
-### Step 6: Generate Deliverables (Phase 1)
-
-**Actions:**
-
-1. Create traceability matrix markdown file:
- - Use template from `trace-template.md`
- - Include full mapping table
- - Add coverage status section
- - Add gap analysis section
- - Add quality assessment section
- - Add recommendations section
- - Save to `{output_folder}/traceability-matrix.md`
-
-2. Generate gate YAML snippet (if enabled):
-
- ```yaml
- traceability:
- story_id: '1.3'
- coverage:
- overall: 85%
- p0: 100%
- p1: 90%
- p2: 75%
- gaps:
- critical: 0
- high: 1
- medium: 2
- status: 'PASS' # or "FAIL" if P0 < 100%
- ```
-
-3. Create coverage badge/metric (if enabled):
- - Generate badge markdown: ``
- - Export metrics to JSON for CI/CD integration
-
-4. Update story file (if enabled):
- - Add "Traceability" section to story markdown
- - Link to traceability matrix
- - Include coverage summary
- - Add gate status
-
-**Output:** Complete Phase 1 traceability deliverables
-
-**Next:** If `enable_gate_decision: true`, proceed to Phase 2. Otherwise, workflow complete.
-
----
-
-## PHASE 2: QUALITY GATE DECISION
-
-This phase uses traceability results to make a quality gate decision (PASS/CONCERNS/FAIL/WAIVED) based on evidence and decision rules.
-
-**When Phase 2 Runs:** Automatically after Phase 1 if `enable_gate_decision: true` (default: true)
-
-**Skip Conditions:** If test execution results (`test_results`) not provided, warn and skip Phase 2.
-
----
-
-### Step 7: Gather Quality Evidence
-
-**Actions:**
-
-1. **Load Phase 1 traceability results** (inherited context):
- - Coverage metrics (P0/P1/overall percentages)
- - Gap analysis (missing/partial tests)
- - Quality concerns (test quality flags)
- - Traceability matrix
-
-2. **Load test execution results** (if `test_results` provided):
- - Read CI/CD test reports (JUnit XML, TAP, JSON)
- - Extract pass/fail counts by priority
- - Calculate pass rates:
- - **P0 pass rate**: `(P0 passed / P0 total) * 100`
- - **P1 pass rate**: `(P1 passed / P1 total) * 100`
- - **Overall pass rate**: `(All passed / All total) * 100`
- - Identify failing tests and map to criteria
-
-3. **Load NFR assessment** (if `nfr_file` provided):
- - Read `nfr-assessment.md` or similar
- - Check critical NFR status (performance, security, scalability)
- - Flag any critical NFR failures
-
-4. **Load supporting artifacts**:
- - `test-design.md` β Risk priorities, DoD checklist
- - `story-*.md` or `Epics.md` β Requirements context
-
-5. **Validate evidence freshness** (if `validate_evidence_freshness: true`):
- - Check timestamps of test-design, traceability, NFR assessments
- - Warn if artifacts are >7 days old
-
-6. **Check prerequisite workflows** (if `check_all_workflows_complete: true`):
- - Verify test-design workflow complete
- - Verify trace workflow complete (Phase 1)
- - Verify nfr-assess workflow complete (if release-level gate)
-
-**Output:** Consolidated evidence bundle with all quality signals
-
----
-
-### Step 8: Apply Decision Rules
-
-**If `decision_mode: "deterministic"`** (rule-based - default):
-
-**Decision rules** (based on `workflow.yaml` thresholds):
-
-1. **PASS** if ALL of the following are true:
- - P0 coverage β₯ `min_p0_coverage` (default: 100%)
- - P1 coverage β₯ `min_p1_coverage` (default: 90%)
- - Overall coverage β₯ `min_overall_coverage` (default: 80%)
- - P0 test pass rate = `min_p0_pass_rate` (default: 100%)
- - P1 test pass rate β₯ `min_p1_pass_rate` (default: 95%)
- - Overall test pass rate β₯ `min_overall_pass_rate` (default: 90%)
- - Critical NFRs passed (if `nfr_file` provided)
- - No unresolved security issues β€ `max_security_issues` (default: 0)
- - No test quality red flags (hard waits, no assertions)
-
-2. **CONCERNS** if ANY of the following are true:
- - P1 coverage 80-89% (below threshold but not critical)
- - P1 test pass rate 90-94% (below threshold but not critical)
- - Overall pass rate 85-89%
- - P2 coverage <50% (informational)
- - Some non-critical NFRs failing
- - Minor test quality concerns (large test files, inferred mappings)
- - **Note**: CONCERNS does NOT block deployment but requires acknowledgment
-
-3. **FAIL** if ANY of the following are true:
- - P0 coverage <100% (missing critical tests)
- - P0 test pass rate <100% (failing critical tests)
- - P1 coverage <80% (significant gap)
- - P1 test pass rate <90% (significant failures)
- - Overall coverage <80%
- - Overall pass rate <85%
- - Critical NFRs failing (`max_critical_nfrs_fail` exceeded)
- - Unresolved security issues (`max_security_issues` exceeded)
- - Major test quality issues (tests with no assertions, pervasive hard waits)
-
-4. **WAIVED** (only if `allow_waivers: true`):
- - Decision would be FAIL based on rules above
- - Business stakeholder has approved waiver
- - Waiver documented with:
- - Justification (time constraint, known limitation, acceptable risk)
- - Approver name and date
- - Mitigation plan (follow-up stories, manual testing)
- - Waiver evidence linked (email, Slack thread, ticket)
-
-**Risk tolerance adjustments:**
-
-- If `allow_p2_failures: true` β P2 test failures do NOT affect gate decision
-- If `allow_p3_failures: true` β P3 test failures do NOT affect gate decision
-- If `escalate_p1_failures: true` β P1 failures require explicit manager/lead approval
-
-**If `decision_mode: "manual"`:**
-
-- Present evidence summary to team
-- Recommend decision based on rules above
-- Team makes final call in meeting/chat
-- Document decision with approver names
-
-**Output:** Gate decision (PASS/CONCERNS/FAIL/WAIVED) with rule-based rationale
-
----
-
-### Step 9: Document Decision and Evidence
-
-**Actions:**
-
-1. **Create gate decision document**:
- - Save to `gate_output_file` (default: `{output_folder}/gate-decision-{gate_type}-{story_id}.md`)
- - Use structure below
-
-2. **Document structure**:
-
-```markdown
-# Quality Gate Decision: {gate_type} {story_id/epic_num/release_version}
-
-**Decision**: [PASS / CONCERNS / FAIL / WAIVED]
-**Date**: {date}
-**Decider**: {decision_mode} (deterministic | manual)
-**Evidence Date**: {test_results_date}
-
----
-
-## Summary
-
-[1-2 sentence summary of decision and key factors]
-
----
-
-## Decision Criteria
-
-| Criterion | Threshold | Actual | Status |
-| ----------------- | --------- | -------- | ------ |
-| P0 Coverage | β₯100% | 100% | β PASS |
-| P1 Coverage | β₯90% | 88% | β οΈ FAIL |
-| Overall Coverage | β₯80% | 92% | β PASS |
-| P0 Pass Rate | 100% | 100% | β PASS |
-| P1 Pass Rate | β₯95% | 98% | β PASS |
-| Overall Pass Rate | β₯90% | 96% | β PASS |
-| Critical NFRs | All Pass | All Pass | β PASS |
-| Security Issues | 0 | 0 | β PASS |
-
-**Overall Status**: 7/8 criteria met β Decision: **CONCERNS**
-
----
-
-## Evidence Summary
-
-### Test Coverage (from Phase 1 Traceability)
-
-- **P0 Coverage**: 100% (5/5 criteria fully covered)
-- **P1 Coverage**: 88% (7/8 criteria fully covered)
-- **Overall Coverage**: 92% (12/13 criteria covered)
-- **Gap**: AC-5 (P1) missing E2E test
-
-### Test Execution Results
-
-- **P0 Pass Rate**: 100% (12/12 tests passed)
-- **P1 Pass Rate**: 98% (45/46 tests passed)
-- **Overall Pass Rate**: 96% (67/70 tests passed)
-- **Failures**: 3 P2 tests (non-blocking)
-
-### Non-Functional Requirements
-
-- Performance: β PASS (response time <500ms)
-- Security: β PASS (no vulnerabilities)
-- Scalability: β PASS (handles 10K users)
-
-### Test Quality
-
-- All tests have explicit assertions β
-- No hard waits detected β
-- Test files <300 lines β
-- Test IDs follow convention β
-
----
-
-## Decision Rationale
-
-**Why CONCERNS (not PASS)**:
-
-- P1 coverage at 88% is below 90% threshold
-- AC-5 (P1 priority) missing E2E test for error handling scenario
-- This is a known gap from test-design phase
-
-**Why CONCERNS (not FAIL)**:
-
-- P0 coverage is 100% (critical paths validated)
-- Overall coverage is 92% (above 80% threshold)
-- Test pass rate is excellent (96% overall)
-- Gap is isolated to one P1 criterion (not systemic)
-
-**Recommendation**:
-
-- Acknowledge gap and proceed with deployment
-- Add missing AC-5 E2E test in next sprint
-- Create follow-up story: "Add E2E test for AC-5 error handling"
-
----
-
-## Next Steps
-
-- [ ] Create follow-up story for AC-5 E2E test
-- [ ] Deploy to staging environment
-- [ ] Monitor production for edge cases related to AC-5
-- [ ] Update traceability matrix after follow-up test added
-
----
-
-## References
-
-- Traceability Matrix: `_bmad/output/traceability-matrix.md`
-- Test Design: `_bmad/output/test-design-epic-2.md`
-- Test Results: `ci-artifacts/test-report-2025-01-15.xml`
-- NFR Assessment: `_bmad/output/nfr-assessment-release-1.2.md`
-```
-
-3. **Include evidence links** (if `require_evidence: true`):
- - Link to traceability matrix
- - Link to test execution reports (CI artifacts)
- - Link to NFR assessment
- - Link to test-design document
- - Link to relevant PRs, commits, deployments
-
-4. **Waiver documentation** (if decision is WAIVED):
- - Approver name and role (e.g., "Jane Doe, Engineering Manager")
- - Approval date and method (e.g., "2025-01-15, Slack thread")
- - Justification (e.g., "Time-boxed MVP, missing tests will be added in v1.1")
- - Mitigation plan (e.g., "Manual testing by QA, follow-up stories created")
- - Evidence link (e.g., "Slack: #engineering 2025-01-15 3:42pm")
-
-**Output:** Complete gate decision document with evidence and rationale
-
----
-
-### Step 10: Update Status Tracking and Notify
-
-**Actions:**
-
-1. **Generate stakeholder notification** (if `notify_stakeholders: true`):
- - Create concise summary message for team communication
- - Include: Decision, key metrics, action items
- - Format for Slack/email/chat:
-
- ```
- π¦ Quality Gate Decision: Story 1.3 - User Login
-
- Decision: β οΈ CONCERNS
- - P0 Coverage: β 100%
- - P1 Coverage: β οΈ 88% (below 90%)
- - Test Pass Rate: β 96%
-
- Action Required:
- - Create follow-up story for AC-5 E2E test
- - Deploy to staging for validation
-
- Full Report: _bmad/output/gate-decision-story-1.3.md
- ```
-
-2. **Request sign-off** (if `require_sign_off: true`):
- - Prompt for named approver (tech lead, QA lead, PM)
- - Document approver name and timestamp in gate decision
- - Block until sign-off received (interactive prompt)
-
-**Output:** Status tracking updated, stakeholders notified, sign-off obtained (if required)
-
-**Workflow Complete**: Both Phase 1 (traceability) and Phase 2 (gate decision) deliverables generated.
-
----
-
-## Decision Matrix (Quick Reference)
-
-| Scenario | P0 Cov | P1 Cov | Overall Cov | P0 Pass | P1 Pass | Overall Pass | NFRs | Decision |
-| --------------- | ----------------- | ------ | ----------- | ------- | ------- | ------------ | ---- | ------------ |
-| All green | 100% | β₯90% | β₯80% | 100% | β₯95% | β₯90% | Pass | **PASS** |
-| Minor gap | 100% | 80-89% | β₯80% | 100% | 90-94% | 85-89% | Pass | **CONCERNS** |
-| Missing P0 | <100% | - | - | - | - | - | - | **FAIL** |
-| P0 test fail | 100% | - | - | <100% | - | - | - | **FAIL** |
-| P1 gap | 100% | <80% | - | 100% | - | - | - | **FAIL** |
-| NFR fail | 100% | β₯90% | β₯80% | 100% | β₯95% | β₯90% | Fail | **FAIL** |
-| Security issue | - | - | - | - | - | - | Yes | **FAIL** |
-| Business waiver | [FAIL conditions] | - | - | - | - | - | - | **WAIVED** |
-
----
-
-## Waiver Management
-
-**When to use waivers:**
-
-- Time-boxed MVP releases (known gaps, follow-up planned)
-- Low-risk P1 gaps with mitigation (manual testing, monitoring)
-- Technical debt acknowledged by product/engineering leadership
-- External dependencies blocking test automation
-
-**Waiver approval process:**
-
-1. Document gap and risk in gate decision
-2. Propose mitigation plan (manual testing, follow-up stories, monitoring)
-3. Request approval from stakeholder (EM, PM, QA lead)
-4. Link approval evidence (email, chat thread, meeting notes)
-5. Add waiver to gate decision document
-6. Create follow-up stories to close gaps
-
-**Waiver does NOT apply to:**
-
-- P0 gaps (always blocking)
-- Critical security issues (always blocking)
-- Critical NFR failures (performance, data integrity)
-
----
-
-## Example Gate Decisions
-
-### Example 1: PASS (All Criteria Met)
-
-```
-Decision: β PASS
-
-Summary: All quality criteria met. Story 1.3 is ready for production deployment.
-
-Evidence:
-- P0 Coverage: 100% (5/5 criteria)
-- P1 Coverage: 95% (19/20 criteria)
-- Overall Coverage: 92% (24/26 criteria)
-- P0 Pass Rate: 100% (12/12 tests)
-- P1 Pass Rate: 98% (45/46 tests)
-- Overall Pass Rate: 96% (67/70 tests)
-- NFRs: All pass (performance, security, scalability)
-
-Action: Deploy to production β
-```
-
-### Example 2: CONCERNS (Minor Gap, Non-Blocking)
-
-```
-Decision: β οΈ CONCERNS
-
-Summary: P1 coverage slightly below threshold (88% vs 90%). Recommend deploying with follow-up story.
-
-Evidence:
-- P0 Coverage: 100% β
-- P1 Coverage: 88% β οΈ (below 90%)
-- Overall Coverage: 92% β
-- Test Pass Rate: 96% β
-- Gap: AC-5 (P1) missing E2E test
-
-Action:
-- Deploy to staging for validation
-- Create follow-up story for AC-5 E2E test
-- Monitor production for edge cases related to AC-5
-```
-
-### Example 3: FAIL (P0 Gap, Blocking)
-
-```
-Decision: β FAIL
-
-Summary: P0 coverage incomplete. Missing critical validation test. BLOCKING deployment.
-
-Evidence:
-- P0 Coverage: 80% β (4/5 criteria, AC-2 missing)
-- AC-2: "User cannot login with invalid credentials" (P0 priority)
-- No tests validate login security for invalid credentials
-- This is a critical security gap
-
-Action:
-- Add P0 test for AC-2: 1.3-E2E-004 (invalid credentials)
-- Re-run traceability after test added
-- Re-evaluate gate decision after P0 coverage = 100%
-
-Deployment BLOCKED until P0 gap resolved β
-```
-
-### Example 4: WAIVED (Business Decision)
-
-```
-Decision: β οΈ WAIVED
-
-Summary: P1 coverage below threshold (75% vs 90%), but waived for MVP launch.
-
-Evidence:
-- P0 Coverage: 100% β
-- P1 Coverage: 75% β (below 90%)
-- Gap: 5 P1 criteria missing E2E tests (error handling, edge cases)
-
-Waiver:
-- Approver: Jane Doe, Engineering Manager
-- Date: 2025-01-15
-- Justification: Time-boxed MVP for investor demo. Core functionality (P0) fully validated. P1 gaps are low-risk edge cases.
-- Mitigation: Manual QA testing for P1 scenarios, follow-up stories created for automated tests in v1.1
-- Evidence: Slack #engineering 2025-01-15 3:42pm
-
-Action:
-- Deploy to production with manual QA validation β
-- Add 5 E2E tests for P1 gaps in v1.1 sprint
-- Monitor production logs for edge case occurrences
-```
-
----
-
-## Non-Prescriptive Approach
-
-**Minimal Examples:** This workflow provides principles and patterns, not rigid templates. Teams should adapt the traceability and gate decision formats to their needs.
-
-**Key Patterns to Follow:**
-
-- Map criteria to tests explicitly (don't rely on inference alone)
-- Prioritize by risk (P0 gaps are critical, P3 gaps are acceptable)
-- Check coverage at appropriate levels (E2E for journeys, Unit for logic)
-- Verify test quality (explicit assertions, no flakiness)
-- Apply deterministic gate rules for consistency
-- Document gate decisions with clear evidence
-- Use waivers judiciously (business approved, mitigation planned)
-
-**Extend as Needed:**
-
-- Add custom coverage classifications
-- Integrate with code coverage tools (Istanbul, NYC)
-- Link to external traceability systems (JIRA, Azure DevOps)
-- Add compliance or regulatory requirements
-- Customize gate decision thresholds per project
-- Add manual approval workflows for gate decisions
-
----
-
-## Coverage Classification Details
-
-### FULL Coverage
-
-- All scenarios validated at appropriate test level(s)
-- Edge cases considered
-- Both happy path and error paths tested
-- Assertions are explicit and complete
-
-### PARTIAL Coverage
-
-- Some scenarios validated but missing edge cases
-- Only happy path tested (missing error paths)
-- Assertions present but incomplete
-- Coverage exists but needs enhancement
-
-### NONE Coverage
-
-- No tests found for this criterion
-- Complete gap requiring new tests
-- Critical if P0/P1, acceptable if P3
-
-### UNIT-ONLY Coverage
-
-- Only unit tests exist (business logic validated)
-- Missing integration or E2E validation
-- Risk: Implementation may not work end-to-end
-- Recommendation: Add integration or E2E tests for critical paths
-
-### INTEGRATION-ONLY Coverage
-
-- Only API or Component tests exist
-- Missing unit test confidence for business logic
-- Risk: Logic errors may not be caught quickly
-- Recommendation: Add unit tests for complex algorithms or state machines
-
----
-
-## Duplicate Coverage Detection
-
-Use selective testing principles from `selective-testing.md`:
-
-**Acceptable Overlap:**
-
-- Unit tests for business logic + E2E tests for user journey (different aspects)
-- API tests for contract + E2E tests for full workflow (defense in depth for critical paths)
-
-**Unacceptable Duplication:**
-
-- Same validation at multiple levels (e.g., E2E testing math logic better suited for unit tests)
-- Multiple E2E tests covering identical user path
-- Component tests duplicating unit test logic
-
-**Recommendation Pattern:**
-
-- Test logic at unit level
-- Test integration at API/Component level
-- Test user experience at E2E level
-- Avoid testing framework behavior at any level
-
----
-
-## Integration with BMad Artifacts
-
-### With test-design.md
-
-- Use risk assessment to prioritize gap remediation
-- Reference test priorities (P0/P1/P2/P3) for severity classification and gate decision
-- Align traceability with originally planned test coverage
-
-### With tech-spec.md
-
-- Understand technical implementation details
-- Map criteria to specific code modules
-- Verify tests cover technical edge cases
-
-### With PRD.md
-
-- Understand full product context
-- Verify acceptance criteria align with product goals
-- Check for unstated requirements that need coverage
-
-### With nfr-assessment.md
-
-- Load non-functional validation results for gate decision
-- Check critical NFR status (performance, security, scalability)
-- Include NFR pass/fail in gate decision criteria
-
----
-
-## Quality Gates (Phase 1 Recommendations)
-
-### P0 Coverage (Critical Paths)
-
-- **Requirement:** 100% FULL coverage
-- **Severity:** BLOCKER if not met
-- **Action:** Do not release until P0 coverage is complete
-
-### P1 Coverage (High Priority)
-
-- **Requirement:** 90% FULL coverage
-- **Severity:** HIGH if not met
-- **Action:** Block PR merge until addressed
-
-### P2 Coverage (Medium Priority)
-
-- **Requirement:** No strict requirement (recommended 80%)
-- **Severity:** MEDIUM if gaps exist
-- **Action:** Address in nightly test improvements
-
-### P3 Coverage (Low Priority)
-
-- **Requirement:** No requirement
-- **Severity:** LOW if gaps exist
-- **Action:** Optional - add if time permits
-
----
-
-## Example Traceability Matrix
-
-````markdown
-# Traceability Matrix - Story 1.3
-
-**Story:** User Authentication
-**Date:** 2025-10-14
-**Status:** 85% Coverage (1 HIGH gap)
-
-## Coverage Summary
-
-| Priority | Total Criteria | FULL Coverage | Coverage % | Status |
-| --------- | -------------- | ------------- | ---------- | ------ |
-| P0 | 3 | 3 | 100% | β PASS |
-| P1 | 5 | 4 | 80% | β οΈ WARN |
-| P2 | 4 | 3 | 75% | β PASS |
-| P3 | 2 | 1 | 50% | β PASS |
-| **Total** | **14** | **11** | **79%** | β οΈ WARN |
-
-## Detailed Mapping
-
-### AC-1: User can login with email and password (P0)
-
-- **Coverage:** FULL β
-- **Tests:**
- - `1.3-E2E-001` - tests/e2e/auth.spec.ts:12
- - Given: User has valid credentials
- - When: User submits login form
- - Then: User is redirected to dashboard
- - `1.3-UNIT-001` - tests/unit/auth-service.spec.ts:8
- - Given: Valid email and password hash
- - When: validateCredentials is called
- - Then: Returns user object
-
-### AC-2: User sees error for invalid credentials (P0)
-
-- **Coverage:** FULL β
-- **Tests:**
- - `1.3-E2E-002` - tests/e2e/auth.spec.ts:28
- - Given: User has invalid password
- - When: User submits login form
- - Then: Error message is displayed
- - `1.3-UNIT-002` - tests/unit/auth-service.spec.ts:18
- - Given: Invalid password hash
- - When: validateCredentials is called
- - Then: Throws AuthenticationError
-
-### AC-3: User can reset password via email (P1)
-
-- **Coverage:** PARTIAL β οΈ
-- **Tests:**
- - `1.3-E2E-003` - tests/e2e/auth.spec.ts:44
- - Given: User requests password reset
- - When: User clicks reset link
- - Then: User can set new password
-- **Gaps:**
- - Missing: Email delivery validation
- - Missing: Expired token handling
- - Missing: Unit test for token generation
-- **Recommendation:** Add `1.3-API-001` for email service integration and `1.3-UNIT-003` for token logic
-
-## Gap Analysis
-
-### Critical Gaps (BLOCKER)
-
-- None β
-
-### High Priority Gaps (PR BLOCKER)
-
-1. **AC-3: Password reset email edge cases**
- - Missing tests for expired tokens, invalid tokens, email failures
- - Recommend: `1.3-API-001` (email service integration) and `1.3-E2E-004` (error paths)
- - Impact: Users may not be able to recover accounts in error scenarios
-
-### Medium Priority Gaps (Nightly)
-
-1. **AC-7: Session timeout handling** - UNIT-ONLY coverage (missing E2E validation)
-
-## Quality Assessment
-
-### Tests with Issues
-
-- `1.3-E2E-001` β οΈ - 145 seconds (exceeds 90s target) - Optimize fixture setup
-- `1.3-UNIT-005` β οΈ - 320 lines (exceeds 300 line limit) - Split into multiple test files
-
-### Tests Passing Quality Gates
-
-- 11/13 tests (85%) meet all quality criteria β
-
-## Gate YAML Snippet
-
-```yaml
-traceability:
- story_id: '1.3'
- coverage:
- overall: 79%
- p0: 100%
- p1: 80%
- p2: 75%
- p3: 50%
- gaps:
- critical: 0
- high: 1
- medium: 1
- low: 1
- status: 'WARN' # P1 coverage below 90% threshold
- recommendations:
- - 'Add 1.3-API-001 for email service integration'
- - 'Add 1.3-E2E-004 for password reset error paths'
- - 'Optimize 1.3-E2E-001 performance (145s β <90s)'
-```
-````
-
-## Recommendations
-
-1. **Address High Priority Gap:** Add password reset edge case tests before PR merge
-2. **Optimize Slow Test:** Refactor `1.3-E2E-001` to use faster fixture setup
-3. **Split Large Test:** Break `1.3-UNIT-005` into focused test files
-4. **Enhance P2 Coverage:** Add E2E validation for session timeout (currently UNIT-ONLY)
-
-```
-
----
-
-## Validation Checklist
-
-Before completing this workflow, verify:
-
-**Phase 1 (Traceability):**
-- β All acceptance criteria are mapped to tests (or gaps are documented)
-- β Coverage status is classified (FULL, PARTIAL, NONE, UNIT-ONLY, INTEGRATION-ONLY)
-- β Gaps are prioritized by risk level (P0/P1/P2/P3)
-- β P0 coverage is 100% or blockers are documented
-- β Duplicate coverage is identified and flagged
-- β Test quality is assessed (assertions, structure, performance)
-- β Traceability matrix is generated and saved
-
-**Phase 2 (Gate Decision - if enabled):**
-- β Test execution results loaded and pass rates calculated
-- β NFR assessment results loaded (if applicable)
-- β Decision rules applied consistently (PASS/CONCERNS/FAIL/WAIVED)
-- β Gate decision document created with evidence
-- β Waiver documented if decision is WAIVED (approver, justification, mitigation)
-- β Stakeholders notified (if enabled)
-
----
-
-## Notes
-
-**Phase 1 (Traceability):**
-- **Explicit Mapping:** Require tests to reference criteria explicitly (test IDs, describe blocks) for maintainability
-- **Risk-Based Prioritization:** Use test-priorities framework (P0/P1/P2/P3) to determine gap severity
-- **Quality Over Quantity:** Better to have fewer high-quality tests with FULL coverage than many low-quality tests with PARTIAL coverage
-- **Selective Testing:** Avoid duplicate coverage - test each behavior at the appropriate level only
-
-**Phase 2 (Gate Decision):**
-- **Deterministic Rules:** Use consistent thresholds (P0=100%, P1β₯90%, overallβ₯80%) for objectivity
-- **Evidence-Based:** Every decision must cite specific metrics (coverage %, pass rates, NFRs)
-- **Waiver Discipline:** Waivers require approver name, justification, mitigation plan, and evidence link
-- **Non-Blocking CONCERNS:** Use CONCERNS for minor gaps that don't justify blocking deployment (e.g., P1 at 88% vs 90%)
-- **Automate in CI/CD:** Generate YAML snippets that can be consumed by CI/CD pipelines for automated quality gates
-
----
-
-## Troubleshooting
-
-### "No tests found for this story"
-- Run `*atdd` workflow first to generate failing acceptance tests
-- Check test file naming conventions (may not match story ID pattern)
-- Verify test directory path is correct
-
-### "Cannot determine coverage status"
-- Tests may lack explicit mapping to criteria (no test IDs, unclear describe blocks)
-- Review test structure and add Given-When-Then narrative
-- Add test IDs in format: `{STORY_ID}-{LEVEL}-{SEQ}` (e.g., 1.3-E2E-001)
-
-### "P0 coverage below 100%"
-- This is a **BLOCKER** - do not release
-- Identify missing P0 tests in gap analysis
-- Run `*atdd` workflow to generate missing tests
-- Verify with stakeholders that P0 classification is correct
-
-### "Duplicate coverage detected"
-- Review selective testing principles in `selective-testing.md`
-- Determine if overlap is acceptable (defense in depth) or wasteful (same validation at multiple levels)
-- Consolidate tests at appropriate level (logic β unit, integration β API, journey β E2E)
-
-### "Test execution results missing" (Phase 2)
-- Phase 2 gate decision requires `test_results` (CI/CD test reports)
-- If missing, Phase 2 will be skipped with warning
-- Provide JUnit XML, TAP, or JSON test report path via `test_results` variable
-
-### "Gate decision is FAIL but deployment needed urgently"
-- Request business waiver (if `allow_waivers: true`)
-- Document approver, justification, mitigation plan
-- Create follow-up stories to address gaps
-- Use WAIVED decision only for non-P0 gaps
-
----
-
-## Related Workflows
-
-**Prerequisites:**
-- `testarch-test-design` - Define test priorities (P0/P1/P2/P3) before tracing (required for Phase 2)
-- `testarch-atdd` or `testarch-automate` - Generate tests before tracing coverage
-
-**Complements:**
-- `testarch-nfr-assess` - Non-functional requirements validation (recommended for release gates)
-- `testarch-test-review` - Review test quality issues flagged in traceability
-
-**Next Steps:**
-- If gate decision is PASS/CONCERNS β Deploy and monitor
-- If gate decision is FAIL β Add missing tests, re-run trace workflow
-- If gate decision is WAIVED β Deploy with mitigation, create follow-up stories
-
----
-
-
-```
diff --git a/src/bmm/workflows/testarch/trace/trace-template.md b/src/bmm/workflows/testarch/trace/trace-template.md
deleted file mode 100644
index ddc74019..00000000
--- a/src/bmm/workflows/testarch/trace/trace-template.md
+++ /dev/null
@@ -1,675 +0,0 @@
-# Traceability Matrix & Gate Decision - Story {STORY_ID}
-
-**Story:** {STORY_TITLE}
-**Date:** {DATE}
-**Evaluator:** {user_name or TEA Agent}
-
----
-
-Note: This workflow does not generate tests. If gaps exist, run `*atdd` or `*automate` to create coverage.
-
-## PHASE 1: REQUIREMENTS TRACEABILITY
-
-### Coverage Summary
-
-| Priority | Total Criteria | FULL Coverage | Coverage % | Status |
-| --------- | -------------- | ------------- | ---------- | ------------ |
-| P0 | {P0_TOTAL} | {P0_FULL} | {P0_PCT}% | {P0_STATUS} |
-| P1 | {P1_TOTAL} | {P1_FULL} | {P1_PCT}% | {P1_STATUS} |
-| P2 | {P2_TOTAL} | {P2_FULL} | {P2_PCT}% | {P2_STATUS} |
-| P3 | {P3_TOTAL} | {P3_FULL} | {P3_PCT}% | {P3_STATUS} |
-| **Total** | **{TOTAL}** | **{FULL}** | **{PCT}%** | **{STATUS}** |
-
-**Legend:**
-
-- β PASS - Coverage meets quality gate threshold
-- β οΈ WARN - Coverage below threshold but not critical
-- β FAIL - Coverage below minimum threshold (blocker)
-
----
-
-### Detailed Mapping
-
-#### {CRITERION_ID}: {CRITERION_DESCRIPTION} ({PRIORITY})
-
-- **Coverage:** {COVERAGE_STATUS} {STATUS_ICON}
-- **Tests:**
- - `{TEST_ID}` - {TEST_FILE}:{LINE}
- - **Given:** {GIVEN}
- - **When:** {WHEN}
- - **Then:** {THEN}
- - `{TEST_ID_2}` - {TEST_FILE_2}:{LINE}
- - **Given:** {GIVEN_2}
- - **When:** {WHEN_2}
- - **Then:** {THEN_2}
-
-- **Gaps:** (if PARTIAL or UNIT-ONLY or INTEGRATION-ONLY)
- - Missing: {MISSING_SCENARIO_1}
- - Missing: {MISSING_SCENARIO_2}
-
-- **Recommendation:** {RECOMMENDATION_TEXT}
-
----
-
-#### Example: AC-1: User can login with email and password (P0)
-
-- **Coverage:** FULL β
-- **Tests:**
- - `1.3-E2E-001` - tests/e2e/auth.spec.ts:12
- - **Given:** User has valid credentials
- - **When:** User submits login form
- - **Then:** User is redirected to dashboard
- - `1.3-UNIT-001` - tests/unit/auth-service.spec.ts:8
- - **Given:** Valid email and password hash
- - **When:** validateCredentials is called
- - **Then:** Returns user object
-
----
-
-#### Example: AC-3: User can reset password via email (P1)
-
-- **Coverage:** PARTIAL β οΈ
-- **Tests:**
- - `1.3-E2E-003` - tests/e2e/auth.spec.ts:44
- - **Given:** User requests password reset
- - **When:** User clicks reset link in email
- - **Then:** User can set new password
-
-- **Gaps:**
- - Missing: Email delivery validation
- - Missing: Expired token handling (error path)
- - Missing: Invalid token handling (security test)
- - Missing: Unit test for token generation logic
-
-- **Recommendation:** Add `1.3-API-001` for email service integration testing and `1.3-UNIT-003` for token generation logic. Add `1.3-E2E-004` for error path validation (expired/invalid tokens).
-
----
-
-### Gap Analysis
-
-#### Critical Gaps (BLOCKER) β
-
-{CRITICAL_GAP_COUNT} gaps found. **Do not release until resolved.**
-
-1. **{CRITERION_ID}: {CRITERION_DESCRIPTION}** (P0)
- - Current Coverage: {COVERAGE_STATUS}
- - Missing Tests: {MISSING_TEST_DESCRIPTION}
- - Recommend: {RECOMMENDED_TEST_ID} ({RECOMMENDED_TEST_LEVEL})
- - Impact: {IMPACT_DESCRIPTION}
-
----
-
-#### High Priority Gaps (PR BLOCKER) β οΈ
-
-{HIGH_GAP_COUNT} gaps found. **Address before PR merge.**
-
-1. **{CRITERION_ID}: {CRITERION_DESCRIPTION}** (P1)
- - Current Coverage: {COVERAGE_STATUS}
- - Missing Tests: {MISSING_TEST_DESCRIPTION}
- - Recommend: {RECOMMENDED_TEST_ID} ({RECOMMENDED_TEST_LEVEL})
- - Impact: {IMPACT_DESCRIPTION}
-
----
-
-#### Medium Priority Gaps (Nightly) β οΈ
-
-{MEDIUM_GAP_COUNT} gaps found. **Address in nightly test improvements.**
-
-1. **{CRITERION_ID}: {CRITERION_DESCRIPTION}** (P2)
- - Current Coverage: {COVERAGE_STATUS}
- - Recommend: {RECOMMENDED_TEST_ID} ({RECOMMENDED_TEST_LEVEL})
-
----
-
-#### Low Priority Gaps (Optional) βΉοΈ
-
-{LOW_GAP_COUNT} gaps found. **Optional - add if time permits.**
-
-1. **{CRITERION_ID}: {CRITERION_DESCRIPTION}** (P3)
- - Current Coverage: {COVERAGE_STATUS}
-
----
-
-### Quality Assessment
-
-#### Tests with Issues
-
-**BLOCKER Issues** β
-
-- `{TEST_ID}` - {ISSUE_DESCRIPTION} - {REMEDIATION}
-
-**WARNING Issues** β οΈ
-
-- `{TEST_ID}` - {ISSUE_DESCRIPTION} - {REMEDIATION}
-
-**INFO Issues** βΉοΈ
-
-- `{TEST_ID}` - {ISSUE_DESCRIPTION} - {REMEDIATION}
-
----
-
-#### Example Quality Issues
-
-**WARNING Issues** β οΈ
-
-- `1.3-E2E-001` - 145 seconds (exceeds 90s target) - Optimize fixture setup to reduce test duration
-- `1.3-UNIT-005` - 320 lines (exceeds 300 line limit) - Split into multiple focused test files
-
-**INFO Issues** βΉοΈ
-
-- `1.3-E2E-002` - Missing Given-When-Then structure - Refactor describe block to use BDD format
-
----
-
-#### Tests Passing Quality Gates
-
-**{PASSING_TEST_COUNT}/{TOTAL_TEST_COUNT} tests ({PASSING_PCT}%) meet all quality criteria** β
-
----
-
-### Duplicate Coverage Analysis
-
-#### Acceptable Overlap (Defense in Depth)
-
-- {CRITERION_ID}: Tested at unit (business logic) and E2E (user journey) β
-
-#### Unacceptable Duplication β οΈ
-
-- {CRITERION_ID}: Same validation at E2E and Component level
- - Recommendation: Remove {TEST_ID} or consolidate with {OTHER_TEST_ID}
-
----
-
-### Coverage by Test Level
-
-| Test Level | Tests | Criteria Covered | Coverage % |
-| ---------- | ----------------- | -------------------- | ---------------- |
-| E2E | {E2E_COUNT} | {E2E_CRITERIA} | {E2E_PCT}% |
-| API | {API_COUNT} | {API_CRITERIA} | {API_PCT}% |
-| Component | {COMP_COUNT} | {COMP_CRITERIA} | {COMP_PCT}% |
-| Unit | {UNIT_COUNT} | {UNIT_CRITERIA} | {UNIT_PCT}% |
-| **Total** | **{TOTAL_TESTS}** | **{TOTAL_CRITERIA}** | **{TOTAL_PCT}%** |
-
----
-
-### Traceability Recommendations
-
-#### Immediate Actions (Before PR Merge)
-
-1. **{ACTION_1}** - {DESCRIPTION}
-2. **{ACTION_2}** - {DESCRIPTION}
-
-#### Short-term Actions (This Sprint)
-
-1. **{ACTION_1}** - {DESCRIPTION}
-2. **{ACTION_2}** - {DESCRIPTION}
-
-#### Long-term Actions (Backlog)
-
-1. **{ACTION_1}** - {DESCRIPTION}
-
----
-
-#### Example Recommendations
-
-**Immediate Actions (Before PR Merge)**
-
-1. **Add P1 Password Reset Tests** - Implement `1.3-API-001` for email service integration and `1.3-E2E-004` for error path validation. P1 coverage currently at 80%, target is 90%.
-2. **Optimize Slow E2E Test** - Refactor `1.3-E2E-001` to use faster fixture setup. Currently 145s, target is <90s.
-
-**Short-term Actions (This Sprint)**
-
-1. **Enhance P2 Coverage** - Add E2E validation for session timeout (`1.3-E2E-005`). Currently UNIT-ONLY coverage.
-2. **Split Large Test File** - Break `1.3-UNIT-005` (320 lines) into multiple focused test files (<300 lines each).
-
-**Long-term Actions (Backlog)**
-
-1. **Enrich P3 Coverage** - Add tests for edge cases in P3 criteria if time permits.
-
----
-
-## PHASE 2: QUALITY GATE DECISION
-
-**Gate Type:** {story | epic | release | hotfix}
-**Decision Mode:** {deterministic | manual}
-
----
-
-### Evidence Summary
-
-#### Test Execution Results
-
-- **Total Tests**: {total_count}
-- **Passed**: {passed_count} ({pass_percentage}%)
-- **Failed**: {failed_count} ({fail_percentage}%)
-- **Skipped**: {skipped_count} ({skip_percentage}%)
-- **Duration**: {total_duration}
-
-**Priority Breakdown:**
-
-- **P0 Tests**: {p0_passed}/{p0_total} passed ({p0_pass_rate}%) {β | β}
-- **P1 Tests**: {p1_passed}/{p1_total} passed ({p1_pass_rate}%) {β | β οΈ | β}
-- **P2 Tests**: {p2_passed}/{p2_total} passed ({p2_pass_rate}%) {informational}
-- **P3 Tests**: {p3_passed}/{p3_total} passed ({p3_pass_rate}%) {informational}
-
-**Overall Pass Rate**: {overall_pass_rate}% {β | β οΈ | β}
-
-**Test Results Source**: {CI_run_id | test_report_url | local_run}
-
----
-
-#### Coverage Summary (from Phase 1)
-
-**Requirements Coverage:**
-
-- **P0 Acceptance Criteria**: {p0_covered}/{p0_total} covered ({p0_coverage}%) {β | β}
-- **P1 Acceptance Criteria**: {p1_covered}/{p1_total} covered ({p1_coverage}%) {β | β οΈ | β}
-- **P2 Acceptance Criteria**: {p2_covered}/{p2_total} covered ({p2_coverage}%) {informational}
-- **Overall Coverage**: {overall_coverage}%
-
-**Code Coverage** (if available):
-
-- **Line Coverage**: {line_coverage}% {β | β οΈ | β}
-- **Branch Coverage**: {branch_coverage}% {β | β οΈ | β}
-- **Function Coverage**: {function_coverage}% {β | β οΈ | β}
-
-**Coverage Source**: {coverage_report_url | coverage_file_path}
-
----
-
-#### Non-Functional Requirements (NFRs)
-
-**Security**: {PASS | CONCERNS | FAIL | NOT_ASSESSED} {β | β οΈ | β}
-
-- Security Issues: {security_issue_count}
-- {details_if_issues}
-
-**Performance**: {PASS | CONCERNS | FAIL | NOT_ASSESSED} {β | β οΈ | β}
-
-- {performance_metrics_summary}
-
-**Reliability**: {PASS | CONCERNS | FAIL | NOT_ASSESSED} {β | β οΈ | β}
-
-- {reliability_metrics_summary}
-
-**Maintainability**: {PASS | CONCERNS | FAIL | NOT_ASSESSED} {β | β οΈ | β}
-
-- {maintainability_metrics_summary}
-
-**NFR Source**: {nfr_assessment_file_path | not_assessed}
-
----
-
-#### Flakiness Validation
-
-**Burn-in Results** (if available):
-
-- **Burn-in Iterations**: {iteration_count} (e.g., 10)
-- **Flaky Tests Detected**: {flaky_test_count} {β if 0 | β if >0}
-- **Stability Score**: {stability_percentage}%
-
-**Flaky Tests List** (if any):
-
-- {flaky_test_1_name} - {failure_rate}
-- {flaky_test_2_name} - {failure_rate}
-
-**Burn-in Source**: {CI_burn_in_run_id | not_available}
-
----
-
-### Decision Criteria Evaluation
-
-#### P0 Criteria (Must ALL Pass)
-
-| Criterion | Threshold | Actual | Status |
-| --------------------- | --------- | ------------------------- | -------- | -------- |
-| P0 Coverage | 100% | {p0_coverage}% | {β PASS | β FAIL} |
-| P0 Test Pass Rate | 100% | {p0_pass_rate}% | {β PASS | β FAIL} |
-| Security Issues | 0 | {security_issue_count} | {β PASS | β FAIL} |
-| Critical NFR Failures | 0 | {critical_nfr_fail_count} | {β PASS | β FAIL} |
-| Flaky Tests | 0 | {flaky_test_count} | {β PASS | β FAIL} |
-
-**P0 Evaluation**: {β ALL PASS | β ONE OR MORE FAILED}
-
----
-
-#### P1 Criteria (Required for PASS, May Accept for CONCERNS)
-
-| Criterion | Threshold | Actual | Status |
-| ---------------------- | ------------------------- | -------------------- | -------- | ----------- | -------- |
-| P1 Coverage | β₯{min_p1_coverage}% | {p1_coverage}% | {β PASS | β οΈ CONCERNS | β FAIL} |
-| P1 Test Pass Rate | β₯{min_p1_pass_rate}% | {p1_pass_rate}% | {β PASS | β οΈ CONCERNS | β FAIL} |
-| Overall Test Pass Rate | β₯{min_overall_pass_rate}% | {overall_pass_rate}% | {β PASS | β οΈ CONCERNS | β FAIL} |
-| Overall Coverage | β₯{min_coverage}% | {overall_coverage}% | {β PASS | β οΈ CONCERNS | β FAIL} |
-
-**P1 Evaluation**: {β ALL PASS | β οΈ SOME CONCERNS | β FAILED}
-
----
-
-#### P2/P3 Criteria (Informational, Don't Block)
-
-| Criterion | Actual | Notes |
-| ----------------- | --------------- | ------------------------------------------------------------ |
-| P2 Test Pass Rate | {p2_pass_rate}% | {allow_p2_failures ? "Tracked, doesn't block" : "Evaluated"} |
-| P3 Test Pass Rate | {p3_pass_rate}% | {allow_p3_failures ? "Tracked, doesn't block" : "Evaluated"} |
-
----
-
-### GATE DECISION: {PASS | CONCERNS | FAIL | WAIVED}
-
----
-
-### Rationale
-
-{Explain decision based on criteria evaluation}
-
-{Highlight key evidence that drove decision}
-
-{Note any assumptions or caveats}
-
-**Example (PASS):**
-
-> All P0 criteria met with 100% coverage and pass rates across critical tests. All P1 criteria exceeded thresholds with 98% overall pass rate and 92% coverage. No security issues detected. No flaky tests in validation. Feature is ready for production deployment with standard monitoring.
-
-**Example (CONCERNS):**
-
-> All P0 criteria met, ensuring critical user journeys are protected. However, P1 coverage (88%) falls below threshold (90%) due to missing E2E test for AC-5 edge case. Overall pass rate (96%) is excellent. Issues are non-critical and have acceptable workarounds. Risk is low enough to deploy with enhanced monitoring.
-
-**Example (FAIL):**
-
-> CRITICAL BLOCKERS DETECTED:
->
-> 1. P0 coverage incomplete (80%) - AC-2 security validation missing
-> 2. P0 test failures (75% pass rate) in core search functionality
-> 3. Unresolved SQL injection vulnerability in search filter (CRITICAL)
->
-> Release MUST BE BLOCKED until P0 issues are resolved. Security vulnerability cannot be waived.
-
-**Example (WAIVED):**
-
-> Original decision was FAIL due to P0 test failure in legacy Excel 2007 export module (affects <1% of users). However, release contains critical GDPR compliance features required by regulatory deadline (Oct 15). Business has approved waiver given:
->
-> - Regulatory priority overrides legacy module risk
-> - Workaround available (use Excel 2010+)
-> - Issue will be fixed in v2.4.1 hotfix (due Oct 20)
-> - Enhanced monitoring in place
-
----
-
-### {Section: Delete if not applicable}
-
-#### Residual Risks (For CONCERNS or WAIVED)
-
-List unresolved P1/P2 issues that don't block release but should be tracked:
-
-1. **{Risk Description}**
- - **Priority**: P1 | P2
- - **Probability**: Low | Medium | High
- - **Impact**: Low | Medium | High
- - **Risk Score**: {probability Γ impact}
- - **Mitigation**: {workaround or monitoring plan}
- - **Remediation**: {fix in next sprint/release}
-
-**Overall Residual Risk**: {LOW | MEDIUM | HIGH}
-
----
-
-#### Waiver Details (For WAIVED only)
-
-**Original Decision**: β FAIL
-
-**Reason for Failure**:
-
-- {list_of_blocking_issues}
-
-**Waiver Information**:
-
-- **Waiver Reason**: {business_justification}
-- **Waiver Approver**: {name}, {role} (e.g., Jane Doe, VP Engineering)
-- **Approval Date**: {YYYY-MM-DD}
-- **Waiver Expiry**: {YYYY-MM-DD} (**NOTE**: Does NOT apply to next release)
-
-**Monitoring Plan**:
-
-- {enhanced_monitoring_1}
-- {enhanced_monitoring_2}
-- {escalation_criteria}
-
-**Remediation Plan**:
-
-- **Fix Target**: {next_release_version} (e.g., v2.4.1 hotfix)
-- **Due Date**: {YYYY-MM-DD}
-- **Owner**: {team_or_person}
-- **Verification**: {how_fix_will_be_verified}
-
-**Business Justification**:
-{detailed_explanation_of_why_waiver_is_acceptable}
-
----
-
-#### Critical Issues (For FAIL or CONCERNS)
-
-Top blockers requiring immediate attention:
-
-| Priority | Issue | Description | Owner | Due Date | Status |
-| -------- | ------------- | ------------------- | ------------ | ------------ | ------------------ |
-| P0 | {issue_title} | {brief_description} | {owner_name} | {YYYY-MM-DD} | {OPEN/IN_PROGRESS} |
-| P0 | {issue_title} | {brief_description} | {owner_name} | {YYYY-MM-DD} | {OPEN/IN_PROGRESS} |
-| P1 | {issue_title} | {brief_description} | {owner_name} | {YYYY-MM-DD} | {OPEN/IN_PROGRESS} |
-
-**Blocking Issues Count**: {p0_blocker_count} P0 blockers, {p1_blocker_count} P1 issues
-
----
-
-### Gate Recommendations
-
-#### For PASS Decision β
-
-1. **Proceed to deployment**
- - Deploy to staging environment
- - Validate with smoke tests
- - Monitor key metrics for 24-48 hours
- - Deploy to production with standard monitoring
-
-2. **Post-Deployment Monitoring**
- - {metric_1_to_monitor}
- - {metric_2_to_monitor}
- - {alert_thresholds}
-
-3. **Success Criteria**
- - {success_criterion_1}
- - {success_criterion_2}
-
----
-
-#### For CONCERNS Decision β οΈ
-
-1. **Deploy with Enhanced Monitoring**
- - Deploy to staging with extended validation period
- - Enable enhanced logging/monitoring for known risk areas:
- - {risk_area_1}
- - {risk_area_2}
- - Set aggressive alerts for potential issues
- - Deploy to production with caution
-
-2. **Create Remediation Backlog**
- - Create story: "{fix_title_1}" (Priority: {priority})
- - Create story: "{fix_title_2}" (Priority: {priority})
- - Target sprint: {next_sprint}
-
-3. **Post-Deployment Actions**
- - Monitor {specific_areas} closely for {time_period}
- - Weekly status updates on remediation progress
- - Re-assess after fixes deployed
-
----
-
-#### For FAIL Decision β
-
-1. **Block Deployment Immediately**
- - Do NOT deploy to any environment
- - Notify stakeholders of blocking issues
- - Escalate to tech lead and PM
-
-2. **Fix Critical Issues**
- - Address P0 blockers listed in Critical Issues section
- - Owner assignments confirmed
- - Due dates agreed upon
- - Daily standup on blocker resolution
-
-3. **Re-Run Gate After Fixes**
- - Re-run full test suite after fixes
- - Re-run `bmad tea *trace` workflow
- - Verify decision is PASS before deploying
-
----
-
-#### For WAIVED Decision π
-
-1. **Deploy with Business Approval**
- - Confirm waiver approver has signed off
- - Document waiver in release notes
- - Notify all stakeholders of waived risks
-
-2. **Aggressive Monitoring**
- - {enhanced_monitoring_plan}
- - {escalation_procedures}
- - Daily checks on waived risk areas
-
-3. **Mandatory Remediation**
- - Fix MUST be completed by {due_date}
- - Issue CANNOT be waived in next release
- - Track remediation progress weekly
- - Verify fix in next gate
-
----
-
-### Next Steps
-
-**Immediate Actions** (next 24-48 hours):
-
-1. {action_1}
-2. {action_2}
-3. {action_3}
-
-**Follow-up Actions** (next sprint/release):
-
-1. {action_1}
-2. {action_2}
-3. {action_3}
-
-**Stakeholder Communication**:
-
-- Notify PM: {decision_summary}
-- Notify SM: {decision_summary}
-- Notify DEV lead: {decision_summary}
-
----
-
-## Integrated YAML Snippet (CI/CD)
-
-```yaml
-traceability_and_gate:
- # Phase 1: Traceability
- traceability:
- story_id: "{STORY_ID}"
- date: "{DATE}"
- coverage:
- overall: {OVERALL_PCT}%
- p0: {P0_PCT}%
- p1: {P1_PCT}%
- p2: {P2_PCT}%
- p3: {P3_PCT}%
- gaps:
- critical: {CRITICAL_COUNT}
- high: {HIGH_COUNT}
- medium: {MEDIUM_COUNT}
- low: {LOW_COUNT}
- quality:
- passing_tests: {PASSING_COUNT}
- total_tests: {TOTAL_TESTS}
- blocker_issues: {BLOCKER_COUNT}
- warning_issues: {WARNING_COUNT}
- recommendations:
- - "{RECOMMENDATION_1}"
- - "{RECOMMENDATION_2}"
-
- # Phase 2: Gate Decision
- gate_decision:
- decision: "{PASS | CONCERNS | FAIL | WAIVED}"
- gate_type: "{story | epic | release | hotfix}"
- decision_mode: "{deterministic | manual}"
- criteria:
- p0_coverage: {p0_coverage}%
- p0_pass_rate: {p0_pass_rate}%
- p1_coverage: {p1_coverage}%
- p1_pass_rate: {p1_pass_rate}%
- overall_pass_rate: {overall_pass_rate}%
- overall_coverage: {overall_coverage}%
- security_issues: {security_issue_count}
- critical_nfrs_fail: {critical_nfr_fail_count}
- flaky_tests: {flaky_test_count}
- thresholds:
- min_p0_coverage: 100
- min_p0_pass_rate: 100
- min_p1_coverage: {min_p1_coverage}
- min_p1_pass_rate: {min_p1_pass_rate}
- min_overall_pass_rate: {min_overall_pass_rate}
- min_coverage: {min_coverage}
- evidence:
- test_results: "{CI_run_id | test_report_url}"
- traceability: "{trace_file_path}"
- nfr_assessment: "{nfr_file_path}"
- code_coverage: "{coverage_report_url}"
- next_steps: "{brief_summary_of_recommendations}"
- waiver: # Only if WAIVED
- reason: "{business_justification}"
- approver: "{name}, {role}"
- expiry: "{YYYY-MM-DD}"
- remediation_due: "{YYYY-MM-DD}"
-```
-
----
-
-## Related Artifacts
-
-- **Story File:** {STORY_FILE_PATH}
-- **Test Design:** {TEST_DESIGN_PATH} (if available)
-- **Tech Spec:** {TECH_SPEC_PATH} (if available)
-- **Test Results:** {TEST_RESULTS_PATH}
-- **NFR Assessment:** {NFR_FILE_PATH} (if available)
-- **Test Files:** {TEST_DIR_PATH}
-
----
-
-## Sign-Off
-
-**Phase 1 - Traceability Assessment:**
-
-- Overall Coverage: {OVERALL_PCT}%
-- P0 Coverage: {P0_PCT}% {P0_STATUS}
-- P1 Coverage: {P1_PCT}% {P1_STATUS}
-- Critical Gaps: {CRITICAL_COUNT}
-- High Priority Gaps: {HIGH_COUNT}
-
-**Phase 2 - Gate Decision:**
-
-- **Decision**: {PASS | CONCERNS | FAIL | WAIVED} {STATUS_ICON}
-- **P0 Evaluation**: {β ALL PASS | β ONE OR MORE FAILED}
-- **P1 Evaluation**: {β ALL PASS | β οΈ SOME CONCERNS | β FAILED}
-
-**Overall Status:** {STATUS} {STATUS_ICON}
-
-**Next Steps:**
-
-- If PASS β : Proceed to deployment
-- If CONCERNS β οΈ: Deploy with monitoring, create remediation backlog
-- If FAIL β: Block deployment, fix critical issues, re-run workflow
-- If WAIVED π: Deploy with business approval and aggressive monitoring
-
-**Generated:** {DATE}
-**Workflow:** testarch-trace v4.0 (Enhanced with Gate Decision)
-
----
-
-
diff --git a/src/bmm/workflows/testarch/trace/workflow.yaml b/src/bmm/workflows/testarch/trace/workflow.yaml
deleted file mode 100644
index fc5193ef..00000000
--- a/src/bmm/workflows/testarch/trace/workflow.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-# Test Architect workflow: trace (enhanced with gate decision)
-name: testarch-trace
-description: "Generate requirements-to-tests traceability matrix, analyze coverage, and make quality gate decision (PASS/CONCERNS/FAIL/WAIVED)"
-author: "BMad"
-
-# Critical variables from config
-config_source: "{project-root}/_bmad/bmm/config.yaml"
-output_folder: "{config_source}:output_folder"
-user_name: "{config_source}:user_name"
-communication_language: "{config_source}:communication_language"
-document_output_language: "{config_source}:document_output_language"
-date: system-generated
-
-# Workflow components
-installed_path: "{project-root}/_bmad/bmm/workflows/testarch/trace"
-instructions: "{installed_path}/instructions.md"
-validation: "{installed_path}/checklist.md"
-template: "{installed_path}/trace-template.md"
-
-# Variables and inputs
-variables:
- # Directory paths
- test_dir: "{project-root}/tests" # Root test directory
- source_dir: "{project-root}/src" # Source code directory
-
- # Workflow behavior
- coverage_levels: "e2e,api,component,unit" # Which test levels to trace
- gate_type: "story" # story | epic | release | hotfix - determines gate scope
- decision_mode: "deterministic" # deterministic (rule-based) | manual (team decision)
-
-# Output configuration
-default_output_file: "{output_folder}/traceability-matrix.md"
-
-# Required tools
-required_tools:
- - read_file # Read story, test files, BMad artifacts
- - write_file # Create traceability matrix, gate YAML
- - list_files # Discover test files
- - search_repo # Find tests by test ID, describe blocks
- - glob # Find test files matching patterns
-
-tags:
- - qa
- - traceability
- - test-architect
- - coverage
- - requirements
- - gate
- - decision
- - release
-
-execution_hints:
- interactive: false # Minimize prompts
- autonomous: true # Proceed without user input unless blocked
- iterative: true
-
-web_bundle: false