diff --git a/src/modules/bmm/workflows/4-implementation/multi-agent-review/instructions.md b/src/modules/bmm/workflows/4-implementation/multi-agent-review/instructions.md
new file mode 100644
index 00000000..a109b3c7
--- /dev/null
+++ b/src/modules/bmm/workflows/4-implementation/multi-agent-review/instructions.md
@@ -0,0 +1,140 @@
+# Multi-Agent Code Review
+
+**Purpose:** Perform comprehensive code review using multiple specialized AI agents, each focusing on different quality aspects.
+
+## Overview
+
+Unlike traditional single-reviewer code review, multi-agent review leverages multiple specialized agents:
+- **Architecture Agent**: Reviews system design, patterns, and structure
+- **Security Agent**: Identifies vulnerabilities and security risks
+- **Performance Agent**: Analyzes efficiency and optimization opportunities
+- **Testing Agent**: Evaluates test coverage and quality
+- **Code Quality Agent**: Reviews style, maintainability, and best practices
+
+## Workflow
+
+### Step 1: Load Story Context
+
+```bash
+# Read story file
+story_file="{story_file}"
+test -f "$story_file" || { echo "❌ Story file not found: $story_file" >&2; exit 1; }
+```
+
+Read the story file to understand:
+- What was supposed to be implemented
+- Acceptance criteria
+- Tasks and subtasks
+- File list
+
+### Step 2: Invoke Multi-Agent Review Skill
+
+```xml
+<!-- Invoke the multi-agent-review skill; the tag names below are illustrative placeholders -->
+<invoke-skill name="multi-agent-review">
+  <story_id>{story_id}</story_id>
+  <base_branch>{base_branch}</base_branch>
+</invoke-skill>
+```
+
+The skill will:
+1. Analyze changed files in the story
+2. Select appropriate agents based on code changes
+3. Run parallel reviews from multiple perspectives
+4. Aggregate findings with severity ratings
+5. Return comprehensive review report
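+
+As a rough illustration, the agent selection in step 2 could key off the file paths changed since the base branch. This bash sketch is illustrative only - the pattern-to-agent mapping is an assumption, not the skill's actual logic:
+
+```bash
+# Sketch: choose review agents from files changed relative to the base branch.
+changed_files=$(git diff --name-only "$base_branch"...HEAD)
+
+agents="code_quality"                                            # always included
+echo "$changed_files" | grep -qiE 'auth|crypto|token|\.sql$'     && agents="$agents security"
+echo "$changed_files" | grep -qE '\.(test|spec)\.'               && agents="$agents testing"
+echo "$changed_files" | grep -qE '(service|controller|module)'   && agents="$agents architecture"
+
+echo "Selected agents: $agents"
+```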
+
+### Step 3: Save Review Report
+
+```bash
+# Save the review report returned by the skill to {review_report}.
+# $skill_review_output is an assumed variable holding the skill's returned markdown.
+printf '%s\n' "$skill_review_output" > "{review_report}"
+```
+
+Display summary:
+```
+🤖 MULTI-AGENT CODE REVIEW COMPLETE
+
+Agents Used: {agent_count}
+- Architecture Agent
+- Security Agent
+- Performance Agent
+- Testing Agent
+- Code Quality Agent
+
+Findings:
+- 🔴 CRITICAL: {critical_count}
+- 🟠 HIGH: {high_count}
+- 🟡 MEDIUM: {medium_count}
+- 🔵 LOW: {low_count}
+- ℹ️ INFO: {info_count}
+
+Report saved to: {review_report}
+```
+
+### Step 4: Present Findings
+
+For each finding, display:
+```
+[{severity}] {title}
+Agent: {agent_name}
+Location: {file}:{line}
+
+{description}
+
+Recommendation:
+{recommendation}
+
+---
+```
+
+### Step 5: Next Steps
+
+Suggest actions based on findings:
+```
+📋 RECOMMENDED NEXT STEPS:
+
+If CRITICAL findings exist:
+  ⚠️ MUST FIX before proceeding
+ - Address all critical security/correctness issues
+ - Re-run review after fixes
+
+If only HIGH/MEDIUM findings:
+  ✅ Story may proceed
+ - Consider addressing high-priority items
+ - Create follow-up tasks for medium items
+ - Document LOW items as tech debt
+
+If only LOW/INFO findings:
+  ✅ Code quality looks good
+ - Optional: Address style/optimization suggestions
+ - Proceed to completion
+```
+
+## Integration with Super-Dev-Pipeline
+
+This workflow is designed to be called from super-dev-pipeline step 7 (code review) when the story complexity is COMPLEX or when user explicitly requests multi-agent review.
+
+**When to Use:**
+- Complex stories (≥16 tasks or high-risk keywords)
+- Stories involving security-sensitive code
+- Stories with significant architectural changes
+- When single-agent review has been inconclusive
+- User explicitly requests comprehensive review
+
+**When NOT to Use:**
+- Micro stories (≤3 tasks)
+- Standard stories with simple changes
+- Stories that passed adversarial review cleanly
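+
+A minimal sketch of that routing decision, assuming tasks appear as markdown checkboxes in the story file and using the thresholds above (the risk-keyword list is illustrative):
+
+```bash
+# Route review mode by task count and risk keywords.
+task_count=$(grep -cE '^[[:space:]]*- \[[ x]\]' "$story_file")
+if [ "$task_count" -ge 16 ] || grep -qiE 'auth|security|migration|payment' "$story_file"; then
+  echo "multi-agent"        # complex story
+elif [ "$task_count" -le 3 ]; then
+  echo "skip"               # micro story
+else
+  echo "adversarial"        # standard story
+fi
+```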
+
+## Output Files
+
+- `{review_report}`: Full review findings in markdown
+- Integrated into story completion summary
+- Referenced in audit trail
+
+## Error Handling
+
+If multi-agent-review skill fails:
+- Fall back to adversarial code review
+- Log the failure reason
+- Continue pipeline with warning
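+
+A hedged sketch of that fallback; `run_multi_agent_review` and `run_adversarial_review` are hypothetical placeholders for the respective workflow invocations:
+
+```bash
+# Fall back to adversarial review if the multi-agent skill fails.
+if ! run_multi_agent_review "$story_id" 2>"{sprint_artifacts}/review-error.log"; then
+  echo "⚠️ multi-agent-review failed - falling back to adversarial review" >&2
+  run_adversarial_review "$story_id"
+fi
+```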
diff --git a/src/modules/bmm/workflows/4-implementation/multi-agent-review/workflow.yaml b/src/modules/bmm/workflows/4-implementation/multi-agent-review/workflow.yaml
new file mode 100644
index 00000000..c52c145a
--- /dev/null
+++ b/src/modules/bmm/workflows/4-implementation/multi-agent-review/workflow.yaml
@@ -0,0 +1,39 @@
+name: multi-agent-review
+description: "Smart multi-agent code review with dynamic agent selection based on changed code. Uses multiple specialized AI agents to review different aspects: architecture, security, performance, testing, and code quality."
+author: "BMad"
+version: "1.0.0"
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+output_folder: "{config_source}:output_folder"
+sprint_artifacts: "{config_source}:sprint_artifacts"
+communication_language: "{config_source}:communication_language"
+
+# Workflow components
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/multi-agent-review"
+instructions: "{installed_path}/instructions.md"
+
+# Input parameters
+story_id: "{story_id}" # Required
+story_file: "{sprint_artifacts}/story-{story_id}.md"
+base_branch: "main" # Optional: branch to compare against
+
+# Review settings
+review_settings:
+ agents_to_use: "auto" # auto | all | custom
+ focus_areas:
+ - architecture
+ - security
+ - performance
+ - testing
+ - code_quality
+ minimum_agents: 3
+ maximum_agents: 5
+ generate_report: true
+ auto_fix_suggested: false # Set to true to automatically apply suggested fixes
+
+# Output
+review_report: "{sprint_artifacts}/review-{story_id}-multi-agent.md"
+
+standalone: true
+web_bundle: false
diff --git a/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/IMPLEMENTATION-PLAN.md b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/IMPLEMENTATION-PLAN.md
new file mode 100644
index 00000000..b683ce3b
--- /dev/null
+++ b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/IMPLEMENTATION-PLAN.md
@@ -0,0 +1,391 @@
+# Super-Dev-Pipeline v2.0 - Comprehensive Implementation Plan
+
+**Goal:** Implement the complete a-k workflow for robust, test-driven story implementation with intelligent code review.
+
+## Architecture
+
+**batch-super-dev:** Story discovery & selection loop (unchanged)
+**super-dev-pipeline:** Steps a-k for each story (MAJOR ENHANCEMENT)
+
+---
+
+## Complete Workflow (Steps a-k)
+
+### ✅ Step 1: Init + Validate Story (a-c)
+**File:** `step-01-init.md` (COMPLETED)
+- [x] a. Validate story file exists and is robust
+- [x] b. If no story file, run /create-story-with-gap-analysis (auto-invoke)
+- [x] c. Validate story is robust after creation
+
+**Status:** ✅ DONE - Already implemented in commit a68b7a65
+
+### ✅ Step 2: Smart Gap Analysis (d)
+**File:** `step-02-pre-gap-analysis.md` (NEEDS ENHANCEMENT)
+- [ ] d. Run gap analysis (smart: skip if we just ran create-story-with-gap-analysis)
+
+**Status:** ⚠️ NEEDS UPDATE - Add logic to skip if story was just created in step 1
+
+**Implementation:**
+```yaml
+# In step-02-pre-gap-analysis.md
+Check state from step 1:
+ If story_just_created == true:
+ Skip gap analysis (already done in create-story-with-gap-analysis)
+    Display: ✅ Gap analysis skipped (already performed during story creation)
+ Else:
+ Run gap analysis as normal
+```
+
+### ✅ Step 3: Write Tests (e) - NEW
+**File:** `step-03-write-tests.md` (COMPLETED)
+- [x] e. Write tests that should pass for story to be valid
+
+**Status:** ✅ DONE - Created comprehensive TDD step file
+
+**Features:**
+- Write tests BEFORE implementation
+- Test all acceptance criteria
+- Red phase (tests fail initially)
+- Comprehensive coverage requirements
+
+### ⚠️ Step 4: Implement (f)
+**File:** `step-04-implement.md` (NEEDS RENAME)
+- [ ] f. Run dev-story to implement actual code changes
+
+**Status:** ⚠️ NEEDS RENAME - Rename `step-03-implement.md` → `step-04-implement.md`
+
+**Implementation:**
+```bash
+# Rename file
+mv step-03-implement.md step-04-implement.md
+
+# Update references
+# Update workflow.yaml step 4 definition
+# Update next step references in step-03-write-tests.md
+```
+
+### ⚠️ Step 5: Post-Validation (g)
+**File:** `step-05-post-validation.md` (NEEDS RENAME)
+- [ ] g. Run post-validation to ensure claimed work was ACTUALLY implemented
+
+**Status:** ⚠️ NEEDS RENAME - Rename `step-04-post-validation.md` → `step-05-post-validation.md`
+
+### ✅ Step 6: Run Quality Checks (h) - NEW
+**File:** `step-06-run-quality-checks.md` (COMPLETED)
+- [x] h. Run tests, type checks, linter - fix all problems
+
+**Status:** ✅ DONE - Created comprehensive quality gate step
+
+**Features:**
+- Run test suite (must pass 100%)
+- Check test coverage (âĨ80%)
+- Run type checker (zero errors)
+- Run linter (zero errors/warnings)
+- Auto-fix what's possible
+- Manual fix remaining issues
+- BLOCKING step - cannot proceed until ALL pass
+
+### ⚠️ Step 7: Intelligent Code Review (i)
+**File:** `step-07-code-review.md` (NEEDS RENAME + ENHANCEMENT)
+- [ ] i. Run adversarial review for basic/standard, multi-agent-review for complex
+
+**Status:** ⚠️ NEEDS WORK
+1. Rename `step-05-code-review.md` → `step-07-code-review.md`
+2. Enhance to actually invoke multi-agent-review workflow
+3. Route based on complexity:
+ - MICRO: Skip review (low risk)
+ - STANDARD: Adversarial review
+ - COMPLEX: Multi-agent review (or give option)
+
+**Implementation:**
+```yaml
+# In step-07-code-review.md
+
+Complexity-based routing:
+
+If complexity_level == "micro":
+  Display: ✅ Code review skipped (micro story, low risk)
+ Skip to step 8
+
+Else if complexity_level == "standard":
+  Display: 🔍 Running adversarial code review...
+ Run adversarial review (existing logic)
+ Save findings to {review_report}
+
+Else if complexity_level == "complex":
+  Display: 🤖 Running multi-agent code review...
+  Invoke the multi-agent-review workflow with {story_id}
+  Save findings to {review_report}
+```
+
+### ✅ Step 8: Review Analysis (j) - NEW
+**File:** `step-08-review-analysis.md` (COMPLETED)
+- [x] j. Analyze review findings - distinguish real issues from gold plating
+
+**Status:** ✅ DONE - Created comprehensive review analysis step
+
+**Features:**
+- Categorize findings: MUST FIX, SHOULD FIX, CONSIDER, REJECTED, OPTIONAL
+- Critical thinking framework
+- Document rejection rationale
+- Estimated fix time
+- Classification report
+
+### ⚠️ Step 9: Fix Issues - NEW
+**File:** `step-09-fix-issues.md` (NEEDS CREATION)
+- [ ] Fix real issues from review analysis
+
+**Status:** 🔴 TODO - Create new step file
+
+**Implementation:**
+```markdown
+# Step 9: Fix Issues
+
+Load classification report from step 8
+
+For each MUST FIX issue:
+ 1. Read file at location
+ 2. Understand the issue
+ 3. Implement fix
+ 4. Verify fix works (run tests)
+ 5. Commit fix
+
+For each SHOULD FIX issue:
+ 1. Read file at location
+ 2. Understand the issue
+ 3. Implement fix
+ 4. Verify fix works (run tests)
+ 5. Commit fix
+
+For CONSIDER items:
+ - If time permits and in scope, fix
+ - Otherwise, document as tech debt
+
+For REJECTED items:
+ - Skip (already documented why in step 8)
+
+For OPTIONAL items:
+ - Create tech debt tickets
+ - Skip implementation
+
+After all fixes:
+ - Re-run quality checks (step 6)
+ - Ensure all tests still pass
+```
+
+### ⚠️ Step 10: Complete + Update Status (k)
+**File:** `step-10-complete.md` (NEEDS RENAME + ENHANCEMENT)
+- [ ] k. Update story to "done", update sprint-status.yaml (MANDATORY)
+
+**Status:** ⚠️ NEEDS WORK
+1. Rename `step-06-complete.md` → `step-10-complete.md`
+2. Add MANDATORY sprint-status.yaml update
+3. Update story status to "done"
+4. Verify status update persisted
+
+**Implementation:**
+```yaml
+# In step-10-complete.md
+
+CRITICAL ENFORCEMENT:
+
+1. Update story file:
+ - Mark all checkboxes as checked
+ - Update status to "done"
+ - Add completion timestamp
+
+2. Update sprint-status.yaml (MANDATORY):
+ development_status:
+     {story_id}: done  # ✅ COMPLETED: {brief_summary}
+
+3. Verify update persisted:
+ - Re-read sprint-status.yaml
+ - Confirm status == "done"
+ - HALT if verification fails
+
+NO EXCEPTIONS - Story MUST be marked done in both files
+```
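+
+The persistence check in point 3 can be as small as this bash sketch (flat `{story_id}: done` entry shape assumed from the snippet above):
+
+```bash
+# Re-read sprint-status.yaml and confirm the story is marked done.
+status=$(awk -v id="{story_id}:" '$1 == id {print $2}' "{sprint_artifacts}/sprint-status.yaml")
+if [ "$status" != "done" ]; then
+  echo "❌ sprint-status.yaml not updated for {story_id} - HALT" >&2
+  exit 1
+fi
+```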
+
+### ⚠️ Step 11: Summary
+**File:** `step-11-summary.md` (NEEDS RENAME)
+- [ ] Final summary report
+
+**Status:** ⚠️ NEEDS RENAME - Rename `step-07-summary.md` → `step-11-summary.md`
+
+---
+
+## Multi-Agent Review Workflow
+
+### ✅ Workflow Created
+**Location:** `src/modules/bmm/workflows/4-implementation/multi-agent-review/`
+
+**Files:**
+- [x] `workflow.yaml` (COMPLETED)
+- [x] `instructions.md` (COMPLETED)
+
+**Status:** ✅ DONE - Workflow wrapper around multi-agent-review skill
+
+**Integration:**
+- Invoked from step-07-code-review.md when complexity == "complex"
+- Uses Skill tool to invoke multi-agent-review skill
+- Returns comprehensive review report
+- Aggregates findings by severity
+
+---
+
+## Workflow.yaml Updates Needed
+
+**File:** `src/modules/bmm/workflows/4-implementation/super-dev-pipeline/workflow.yaml`
+
+**Changes Required:**
+1. Update version to `1.5.0`
+2. Update description to mention test-first approach
+3. Redefine steps array (11 steps instead of 7)
+4. Add multi-agent-review workflow path
+5. Update complexity routing for new steps
+6. Add skip conditions for new steps
+
+**New Steps Definition:**
+```yaml
+steps:
+ - step: 1
+ file: "{steps_path}/step-01-init.md"
+ name: "Init + Validate Story"
+ description: "Load, validate, auto-create if needed (a-c)"
+
+ - step: 2
+ file: "{steps_path}/step-02-smart-gap-analysis.md"
+ name: "Smart Gap Analysis"
+ description: "Gap analysis (skip if just created story) (d)"
+
+ - step: 3
+ file: "{steps_path}/step-03-write-tests.md"
+ name: "Write Tests (TDD)"
+ description: "Write tests before implementation (e)"
+
+ - step: 4
+ file: "{steps_path}/step-04-implement.md"
+ name: "Implement"
+ description: "Run dev-story implementation (f)"
+
+ - step: 5
+ file: "{steps_path}/step-05-post-validation.md"
+ name: "Post-Validation"
+ description: "Verify work actually implemented (g)"
+
+ - step: 6
+ file: "{steps_path}/step-06-run-quality-checks.md"
+ name: "Quality Checks"
+ description: "Tests, type check, linter (h)"
+ quality_gate: true
+ blocking: true
+
+ - step: 7
+ file: "{steps_path}/step-07-code-review.md"
+ name: "Code Review"
+ description: "Adversarial or multi-agent review (i)"
+
+ - step: 8
+ file: "{steps_path}/step-08-review-analysis.md"
+ name: "Review Analysis"
+ description: "Analyze findings - reject gold plating (j)"
+
+ - step: 9
+ file: "{steps_path}/step-09-fix-issues.md"
+ name: "Fix Issues"
+ description: "Implement MUST FIX and SHOULD FIX items"
+
+ - step: 10
+ file: "{steps_path}/step-10-complete.md"
+ name: "Complete + Update Status"
+ description: "Mark done, update sprint-status.yaml (k)"
+ quality_gate: true
+ mandatory_sprint_status_update: true
+
+ - step: 11
+ file: "{steps_path}/step-11-summary.md"
+ name: "Summary"
+ description: "Final report"
+```
+
+---
+
+## File Rename Operations
+
+Execute these renames:
+```bash
+cd src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/
+
+# Rename existing files to new step numbers
+mv step-03-implement.md step-04-implement.md
+mv step-04-post-validation.md step-05-post-validation.md
+mv step-05-code-review.md step-07-code-review.md
+mv step-06-complete.md step-10-complete.md
+mv step-06a-queue-commit.md step-10a-queue-commit.md
+mv step-07-summary.md step-11-summary.md
+
+# Update step-02 to step-02-smart-gap-analysis.md (add "smart" logic)
+# No rename needed, just update content
+```
+
+---
+
+## Implementation Checklist
+
+### Phase 1: File Structure ✅ (Partially Done)
+- [x] Create multi-agent-review workflow
+- [x] Create step-03-write-tests.md
+- [x] Create step-06-run-quality-checks.md
+- [x] Create step-08-review-analysis.md
+- [ ] Create step-09-fix-issues.md
+- [ ] Rename existing step files
+- [ ] Update workflow.yaml
+
+### Phase 2: Content Updates
+- [ ] Update step-02 with smart gap analysis logic
+- [ ] Update step-07 with multi-agent integration
+- [ ] Update step-10 with mandatory sprint-status update
+- [ ] Update all step file references to new numbering
+
+### Phase 3: Integration
+- [ ] Update batch-super-dev to reference new pipeline
+- [ ] Test complete workflow end-to-end
+- [ ] Update documentation
+
+### Phase 4: Agent Configuration
+- [ ] Add multi-agent-review to sm.agent.yaml
+- [ ] Add multi-agent-review to dev.agent.yaml (optional)
+- [ ] Update agent menu descriptions
+
+---
+
+## Testing Plan
+
+1. **Test micro story:** Should skip steps 3, 7, 8, 9 (write tests, code review, analysis, fix)
+2. **Test standard story:** Should run all steps with adversarial review
+3. **Test complex story:** Should run all steps with multi-agent review
+4. **Test story creation:** Verify auto-create in step 1 works
+5. **Test smart gap analysis:** Verify step 2 skips if story just created
+6. **Test quality gate:** Verify step 6 blocks on failing tests
+7. **Test review analysis:** Verify step 8 correctly categorizes findings
+8. **Test sprint-status update:** Verify step 10 updates sprint-status.yaml
+
+---
+
+## Version History
+
+**v1.4.0** (Current - Committed): Auto-create story via /create-story-with-gap-analysis
+**v1.5.0** (In Progress): Complete a-k workflow with TDD, quality gates, intelligent review
+
+---
+
+## Next Steps
+
+1. Create `step-09-fix-issues.md`
+2. Perform all file renames
+3. Update `workflow.yaml` with new 11-step structure
+4. Test each step individually
+5. Test complete workflow end-to-end
+6. Commit and document
diff --git a/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/step-03-write-tests.md b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/step-03-write-tests.md
new file mode 100644
index 00000000..534c5a85
--- /dev/null
+++ b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/step-03-write-tests.md
@@ -0,0 +1,248 @@
+---
+name: 'step-03-write-tests'
+description: 'Write comprehensive tests BEFORE implementation (TDD approach)'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/super-dev-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-03-write-tests.md'
+stateFile: '{state_file}'
+storyFile: '{story_file}'
+
+# Next step
+nextStep: '{workflow_path}/steps/step-04-implement.md'
+---
+
+# Step 3: Write Tests (TDD Approach)
+
+**Goal:** Write comprehensive tests that validate story acceptance criteria BEFORE writing implementation code.
+
+## Why Test-First?
+
+1. **Clear requirements**: Writing tests forces clarity about what "done" means
+2. **Better design**: TDD leads to more testable, modular code
+3. **Confidence**: Know immediately when implementation is complete
+4. **Regression safety**: Tests catch future breakage
+
+## Principles
+
+- **Test acceptance criteria**: Each AC should have corresponding tests
+- **Test behavior, not implementation**: Focus on what, not how
+- **Red-Green-Refactor**: Tests should fail initially (red), then pass when implemented (green)
+- **Comprehensive coverage**: Unit tests, integration tests, and E2E tests as needed
+
+---
+
+## Process
+
+### 1. Analyze Story Requirements
+
+```
+Read {storyFile} completely.
+
+Extract:
+- All Acceptance Criteria
+- All Tasks and Subtasks
+- All Files in File List
+- Definition of Done requirements
+```
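+
+One way to pull a section out for quick reference while writing tests - a sketch that assumes the story file uses `## `-level headings:
+
+```bash
+# Print the Acceptance Criteria section of the story file.
+awk '/^## Acceptance Criteria/{found=1; next} /^## /{found=0} found' "$story_file"
+```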
+
+### 2. Determine Test Strategy
+
+For each acceptance criterion, determine:
+```
+Testing Level:
+- Unit tests: For individual functions/components
+- Integration tests: For component interactions
+- E2E tests: For full user workflows
+
+Test Framework:
+- Jest (JavaScript/TypeScript)
+- PyTest (Python)
+- xUnit (C#/.NET)
+- JUnit (Java)
+- Etc. based on project stack
+```
+
+### 3. Write Test Stubs
+
+Create test files FIRST (before implementation):
+
+```bash
+# Example for a React component:
+#   __tests__/components/UserDashboard.test.tsx
+
+# Example for an API endpoint:
+#   __tests__/api/users.test.ts
+
+# Example for a service:
+#   __tests__/services/auth.test.ts
+```
+
+### 4. Write Test Cases
+
+For each acceptance criterion:
+
+```typescript
+// Example: React component test (imports and fixture added; component path illustrative)
+import { render, screen, fireEvent, waitFor } from '@testing-library/react';
+import { describe, it, expect, vi } from 'vitest';
+import '@testing-library/jest-dom';
+import { UserDashboard } from '@/components/UserDashboard';
+
+// Realistic fixture matching the shape the component expects
+const mockUser = { name: 'John Doe', email: 'john@example.com', avatarUrl: '/avatar.png' };
+
+describe('UserDashboard', () => {
+  describe('AC1: Display user profile information', () => {
+    it('should render user name', () => {
+      render(<UserDashboard user={mockUser} />);
+      expect(screen.getByText('John Doe')).toBeInTheDocument();
+    });
+
+    it('should render user email', () => {
+      render(<UserDashboard user={mockUser} />);
+      expect(screen.getByText('john@example.com')).toBeInTheDocument();
+    });
+
+    it('should render user avatar', () => {
+      render(<UserDashboard user={mockUser} />);
+      expect(screen.getByAltText('User avatar')).toBeInTheDocument();
+    });
+  });
+
+  describe('AC2: Allow user to edit profile', () => {
+    it('should show edit button when not in edit mode', () => {
+      render(<UserDashboard user={mockUser} />);
+      expect(screen.getByRole('button', { name: /edit/i })).toBeInTheDocument();
+    });
+
+    it('should enable edit mode when edit button clicked', () => {
+      render(<UserDashboard user={mockUser} />);
+      fireEvent.click(screen.getByRole('button', { name: /edit/i }));
+      expect(screen.getByRole('textbox', { name: /name/i })).toBeInTheDocument();
+    });
+
+    it('should save changes when save button clicked', async () => {
+      const onSave = vi.fn();
+      render(<UserDashboard user={mockUser} onSave={onSave} />);
+
+      fireEvent.click(screen.getByRole('button', { name: /edit/i }));
+      fireEvent.change(screen.getByRole('textbox', { name: /name/i }), {
+        target: { value: 'Jane Doe' }
+      });
+      fireEvent.click(screen.getByRole('button', { name: /save/i }));
+
+      await waitFor(() => {
+        expect(onSave).toHaveBeenCalledWith({ ...mockUser, name: 'Jane Doe' });
+      });
+    });
+  });
+});
+```
+
+### 5. Verify Tests Fail (Red Phase)
+
+```bash
+# Run tests - they SHOULD fail because implementation doesn't exist yet
+npm test
+
+# Expected output:
+# ❌ FAIL __tests__/components/UserDashboard.test.tsx
+#   UserDashboard
+#     AC1: Display user profile information
+#       ✕ should render user name (5ms)
+#       ✕ should render user email (3ms)
+#       ✕ should render user avatar (2ms)
+#
+# This is GOOD! Tests failing = requirements are clear
+```
+
+**If tests pass unexpectedly:**
+```
+⚠️ WARNING: Some tests are passing before implementation!
+
+This means either:
+1. Functionality already exists (brownfield - verify and document)
+2. Tests are not actually testing the new requirements
+3. Tests have mocking issues (testing mocks instead of real code)
+
+Review and fix before proceeding.
+```
+
+### 6. Document Test Coverage
+
+Create test coverage report:
+```yaml
+Test Coverage Summary:
+ Acceptance Criteria: {total_ac_count}
+ Acceptance Criteria with Tests: {tested_ac_count}
+ Coverage: {coverage_percentage}%
+
+ Tasks: {total_task_count}
+ Tasks with Tests: {tested_task_count}
+ Coverage: {task_coverage_percentage}%
+
+Test Files Created:
+ - {test_file_1}
+ - {test_file_2}
+ - {test_file_3}
+
+Total Test Cases: {test_case_count}
+```
+
+### 7. Commit Tests
+
+```bash
+git add {test_files}
+git commit -m "test(story-{story_id}): add tests for {story_title}
+
+Write comprehensive tests for all acceptance criteria:
+{list_of_acs}
+
+Test coverage:
+- {tested_ac_count}/{total_ac_count} ACs covered
+- {test_case_count} test cases
+- Unit tests: {unit_test_count}
+- Integration tests: {integration_test_count}
+- E2E tests: {e2e_test_count}
+
+Tests currently failing (red phase) - expected behavior.
+Will implement functionality in next step."
+```
+
+### 8. Update State
+
+```yaml
+# Update {stateFile}
+current_step: 3
+tests_written: true
+test_files: [{test_file_list}]
+test_coverage: {coverage_percentage}%
+tests_status: "failing (red phase - expected)"
+ready_for_implementation: true
+```
+
+---
+
+## Quality Checks
+
+Before proceeding to implementation:
+
+✅ **All acceptance criteria have corresponding tests**
+✅ **Tests are comprehensive (happy path + edge cases + error cases)**
+✅ **Tests follow project testing conventions**
+✅ **Tests are isolated and don't depend on each other**
+✅ **Tests have clear, descriptive names**
+✅ **Mock data is realistic and well-organized**
+✅ **Tests are failing for the right reasons (not implemented yet)**
+
+---
+
+## Skip Conditions
+
+This step can be skipped if:
+- Complexity level = "micro" AND tasks ≤ 2
+- Story is documentation-only (no code changes)
+- Story is pure refactoring with existing comprehensive tests
+
+---
+
+## Next Step
+
+Proceed to **Step 4: Implement** ({nextStep})
+
+Now that tests are written and failing (red phase), implement the functionality to make them pass (green phase).
diff --git a/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/step-06-run-quality-checks.md b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/step-06-run-quality-checks.md
new file mode 100644
index 00000000..6fb23699
--- /dev/null
+++ b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/step-06-run-quality-checks.md
@@ -0,0 +1,368 @@
+---
+name: 'step-06-run-quality-checks'
+description: 'Run tests, type checks, and linter - fix all problems before code review'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/super-dev-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-06-run-quality-checks.md'
+stateFile: '{state_file}'
+storyFile: '{story_file}'
+
+# Next step
+nextStep: '{workflow_path}/steps/step-07-code-review.md'
+---
+
+# Step 6: Run Quality Checks
+
+**Goal:** Verify implementation quality through automated checks: tests, type checking, and linting. Fix ALL problems before proceeding to human/AI code review.
+
+## Why Automate First?
+
+1. **Fast feedback**: Automated checks run in seconds
+2. **Catch obvious issues**: Type errors, lint violations, failing tests
+3. **Save review time**: Don't waste code review time on mechanical issues
+4. **Enforce standards**: Consistent code style and quality
+
+## Principles
+
+- **Zero tolerance**: ALL checks must pass
+- **Fix, don't skip**: If a check fails, fix it - don't disable the check
+- **Iterate quickly**: Run-fix-run loop until all green
+- **Document workarounds**: If you must suppress a check, document why
+
+---
+
+## Process
+
+### 1. Run Test Suite
+
+```bash
+echo "🧪 Running test suite..."
+
+# Run all tests
+npm test
+
+# Or for other stacks:
+# pytest
+# dotnet test
+# mvn test
+# cargo test
+```
+
+**Expected output:**
+```
+✅ PASS __tests__/components/UserDashboard.test.tsx
+  UserDashboard
+    AC1: Display user profile information
+      ✓ should render user name (12ms)
+      ✓ should render user email (8ms)
+      ✓ should render user avatar (6ms)
+    AC2: Allow user to edit profile
+      ✓ should show edit button when not in edit mode (10ms)
+      ✓ should enable edit mode when edit button clicked (15ms)
+      ✓ should save changes when save button clicked (22ms)
+
+Test Suites: 1 passed, 1 total
+Tests: 6 passed, 6 total
+Time: 2.134s
+```
+
+**If tests fail:**
+```
+❌ Test failures detected!
+
+Failed tests:
+  - UserDashboard › AC2 › should save changes when save button clicked
+ Expected: { name: 'Jane Doe', email: 'john@example.com' }
+ Received: undefined
+
+Action required:
+1. Analyze the failure
+2. Fix the implementation
+3. Re-run tests
+4. Repeat until all tests pass
+
+DO NOT PROCEED until all tests pass.
+```
+
+### 2. Check Test Coverage
+
+```bash
+echo "📊 Checking test coverage..."
+
+# Generate coverage report
+npm run test:coverage
+
+# Or for other stacks:
+# pytest --cov
+# dotnet test /p:CollectCoverage=true
+# cargo tarpaulin
+```
+
+**Minimum coverage thresholds:**
+```yaml
+Line Coverage: ≥80%
+Branch Coverage: ≥75%
+Function Coverage: ≥80%
+Statement Coverage: ≥80%
+```
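+
+If the project happens to use Jest, these thresholds can be enforced straight from the command line; adapt the idea for other runners:
+
+```bash
+# Fail the run when coverage drops below the thresholds above (Jest example).
+npx jest --coverage \
+  --coverageThreshold='{"global":{"lines":80,"branches":75,"functions":80,"statements":80}}'
+```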
+
+**If coverage is low:**
+```
+⚠️ Test coverage below threshold!
+
+Current coverage:
+ Lines: 72% (threshold: 80%)
+ Branches: 68% (threshold: 75%)
+ Functions: 85% (threshold: 80%)
+
+Uncovered areas:
+ - src/components/UserDashboard.tsx: lines 45-52 (error handling)
+ - src/services/userService.ts: lines 23-28 (edge case)
+
+Action required:
+1. Add tests for uncovered code paths
+2. Re-run coverage check
+3. Achieve ≥80% coverage before proceeding
+```
+
+### 3. Run Type Checker
+
+```bash
+echo "🔍 Running type checker..."
+
+# For TypeScript
+npx tsc --noEmit
+
+# For Python
+# mypy src/
+
+# For C#
+# dotnet build
+
+# For Java
+# mvn compile
+```
+
+**Expected output:**
+```
+✅ No type errors found
+```
+
+**If type errors found:**
+```
+❌ Type errors detected!
+
+src/components/UserDashboard.tsx:45:12 - error TS2345: Argument of type 'string | undefined' is not assignable to parameter of type 'string'.
+
+45 onSave(user.name);
+ ~~~~~~~~~
+
+src/services/userService.ts:23:18 - error TS2339: Property 'id' does not exist on type 'User'.
+
+23 return user.id;
+ ~~
+
+Found 2 errors in 2 files.
+
+Action required:
+1. Fix type errors
+2. Re-run type checker
+3. Repeat until zero errors
+
+DO NOT PROCEED with type errors.
+```
+
+### 4. Run Linter
+
+```bash
+echo "✨ Running linter..."
+
+# For JavaScript/TypeScript
+npm run lint
+
+# For Python
+# pylint src/
+
+# For C#
+# dotnet format --verify-no-changes
+
+# For Java
+# mvn checkstyle:check
+```
+
+**Expected output:**
+```
+✅ No linting errors found
+```
+
+**If lint errors found:**
+```
+❌ Lint errors detected!
+
+src/components/UserDashboard.tsx
+ 45:1 error 'useState' is not defined no-undef
+ 52:12 error Unexpected console statement no-console
+ 67:5 warning Unexpected var, use let or const instead no-var
+
+src/services/userService.ts
+ 23:1 error Missing return type on function @typescript-eslint/explicit-function-return-type
+
+✖ 4 problems (3 errors, 1 warning)
+
+Action required:
+1. Run auto-fix if available: npm run lint:fix
+2. Manually fix remaining errors
+3. Re-run linter
+4. Repeat until zero errors and zero warnings
+
+DO NOT PROCEED with lint errors.
+```
+
+### 5. Auto-Fix What's Possible
+
+```bash
+echo "🔧 Attempting auto-fixes..."
+
+# Run formatters and auto-fixable linters
+npm run lint:fix
+npm run format
+
+# Stage the auto-fixes
+git add .
+```
+
+### 6. Manual Fixes
+
+For issues that can't be auto-fixed:
+
+```typescript
+// Example: Fix type error
+// Before:
+const userName = user.name; // Type error if name is optional
+onSave(userName);
+
+// After:
+const userName = user.name ?? ''; // Handle undefined case
+onSave(userName);
+```
+
+```typescript
+// Example: Fix lint error
+// Before:
+var count = 0; // ESLint: no-var
+
+// After:
+let count = 0; // Use let instead of var
+```
+
+### 7. Verify All Checks Pass
+
+Run everything again to confirm:
+
+```bash
+echo "â
Final verification..."
+
+# Run all checks
+npm test && \
+ npx tsc --noEmit && \
+ npm run lint
+
+echo "â
ALL QUALITY CHECKS PASSED!"
+```
+
+### 8. Commit Quality Fixes
+
+```bash
+# Only if fixes were needed
+if git diff --cached --quiet; then
+ echo "No fixes needed - all checks passed first time!"
+else
+ git commit -m "fix(story-{story_id}): address quality check issues
+
+- Fix type errors
+- Resolve lint violations
+- Improve test coverage to {coverage}%
+
+All automated checks now passing:
+✅ Tests: {test_count} passed
+✅ Type check: No errors
+✅ Linter: No violations
+✅ Coverage: {coverage}%"
+fi
+```
+
+### 9. Update State
+
+```yaml
+# Update {stateFile}
+current_step: 6
+quality_checks:
+ tests_passed: true
+ test_count: {test_count}
+ coverage: {coverage}%
+ type_check_passed: true
+ lint_passed: true
+ all_checks_passed: true
+ready_for_code_review: true
+```
+
+---
+
+## Quality Gate
+
+**CRITICAL:** This is a **BLOCKING STEP**. You **MUST NOT** proceed to code review until ALL of the following pass:
+
+✅ **All tests passing** (0 failures)
+✅ **Test coverage ≥80%** (or project threshold)
+✅ **Zero type errors**
+✅ **Zero lint errors**
+✅ **Zero lint warnings** (or all warnings justified and documented)
+
+If ANY check fails:
+1. Fix the issue
+2. Re-run all checks
+3. Repeat until ALL PASS
+4. THEN proceed to next step
+
+---
+
+## Troubleshooting
+
+**Tests fail sporadically:**
+- Check for test interdependencies
+- Look for timing issues (use `waitFor` in async tests)
+- Check for environment-specific issues
+
+**Type errors in third-party libraries:**
+- Install `@types` packages
+- Use type assertions carefully (document why)
+- Consider updating library versions
+
+**Lint rules conflict with team standards:**
+- Discuss with team before changing config
+- Document exceptions in comments
+- Update lint config if truly inappropriate
+
+**Coverage can't reach 80%:**
+- Focus on critical paths first
+- Test error cases and edge cases
+- Consider if untested code is actually needed
+
+---
+
+## Skip Conditions
+
+This step CANNOT be skipped. All stories must pass quality checks.
+
+The only exception: Documentation-only stories with zero code changes.
+
+---
+
+## Next Step
+
+Proceed to **Step 7: Code Review** ({nextStep})
+
+Now that all automated checks pass, the code is ready for human/AI review.
diff --git a/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/step-08-review-analysis.md b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/step-08-review-analysis.md
new file mode 100644
index 00000000..020c6597
--- /dev/null
+++ b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/steps/step-08-review-analysis.md
@@ -0,0 +1,327 @@
+---
+name: 'step-08-review-analysis'
+description: 'Intelligently analyze code review findings - distinguish real issues from gold plating'
+
+# Path Definitions
+workflow_path: '{project-root}/_bmad/bmm/workflows/4-implementation/super-dev-pipeline'
+
+# File References
+thisStepFile: '{workflow_path}/steps/step-08-review-analysis.md'
+stateFile: '{state_file}'
+storyFile: '{story_file}'
+reviewReport: '{sprint_artifacts}/review-{story_id}.md'
+
+# Next step
+nextStep: '{workflow_path}/steps/step-09-fix-issues.md'
+---
+
+# Step 8: Review Analysis
+
+**Goal:** Critically analyze code review findings to distinguish **real problems** from **gold plating**, **false positives**, and **overzealous suggestions**.
+
+## The Problem
+
+AI code reviewers (and human reviewers) sometimes:
+- đ¨ **Gold plate**: Suggest unnecessary perfectionism
+- đ **Overreact**: Flag non-issues to appear thorough
+- đ **Over-engineer**: Suggest abstractions for simple cases
+- âī¸ **Misjudge context**: Apply rules without understanding tradeoffs
+
+## The Solution
+
+**Critical thinking filter**: Evaluate each finding objectively.
+
+---
+
+## Process
+
+### 1. Load Review Report
+
+```bash
+# Read the code review report
+review_report="{reviewReport}"
+test -f "$review_report" || { echo "⚠️ No review report found" >&2; exit 0; }
+```
+
+Parse findings by severity:
+- 🔴 CRITICAL
+- 🟠 HIGH
+- 🟡 MEDIUM
+- 🔵 LOW
+- ℹ️ INFO
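+
+A quick tally per severity straight from the report, assuming the emoji markers above appear verbatim in the markdown:
+
+```bash
+# Count findings per severity marker in the review report.
+for marker in 🔴 🟠 🟡 🔵 ℹ️; do
+  echo "$marker $(grep -o "$marker" "$review_report" | wc -l)"
+done
+```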
+
+### 2. Categorize Each Finding
+
+For EACH finding, ask these questions:
+
+#### Question 1: Is this a REAL problem?
+
+```
+Real Problem Indicators:
+✅ Would cause bugs or incorrect behavior
+✅ Would cause security vulnerabilities
+✅ Would cause performance issues in production
+✅ Would make future maintenance significantly harder
+✅ Violates team/project standards documented in codebase
+
+NOT Real Problems:
+â "Could be more elegant" (subjective style preference)
+â "Consider adding abstraction" (YAGNI - you aren't gonna need it)
+â "This pattern is not ideal" (works fine, alternative is marginal)
+â "Add comprehensive error handling" (for impossible error cases)
+â "Add logging everywhere" (log signal, not noise)
+```
+
+#### Question 2: Does this finding understand CONTEXT?
+
+```
+Context Considerations:
+📏 Story scope: Does fixing this exceed story requirements?
+🎯 Project maturity: Is this MVP, beta, or production-hardened?
+⚡ Performance criticality: Is this a hot path or cold path?
+👥 Team standards: Does team actually follow this pattern?
+📊 Data scale: Does this handle actual expected volume?
+
+Example of MISSING context:
+Finding: "Add database indexing for better performance"
+Reality: Table has 100 rows total, query runs once per day
+Verdict: ❌ REJECT - Premature optimization
+```
+
+#### Question 3: Is this ACTIONABLE?
+
+```
+Actionable Findings:
+✅ Specific file, line number, exact issue
+✅ Clear explanation of problem
+✅ Concrete recommendation for fix
+✅ Can be fixed in reasonable time
+
+NOT Actionable:
+❌ Vague: "Code quality could be improved"
+❌ No location: "Some error handling is missing"
+❌ No recommendation: "This might cause issues"
+❌ Massive scope: "Refactor entire architecture"
+```
+
+### 3. Classification Decision Tree
+
+For each finding, classify as:
+
+```
+┌─────────────────────────────────────────┐
+│ Finding Classification Decision Tree    │
+└─────────────────────────────────────────┘
+
+Is it a CRITICAL security/correctness issue?
+├─ YES → 🔴 MUST FIX
+└─ NO ↓
+
+Does it violate documented project standards?
+├─ YES → 🟠 SHOULD FIX
+└─ NO ↓
+
+Would it prevent future maintenance?
+├─ YES → 🟡 CONSIDER FIX (if in scope)
+└─ NO ↓
+
+Is it gold plating / over-engineering?
+├─ YES → ⚪ REJECT (document why)
+└─ NO ↓
+
+Is it a style/opinion without real impact?
+├─ YES → ⚪ REJECT (document why)
+└─ NO → 🔵 OPTIONAL (tech debt backlog)
+```
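+
+The same tree as a bash sketch; each predicate is a placeholder for the judgment call described above, not an existing helper:
+
+```bash
+classify_finding() {  # echoes MUST_FIX | SHOULD_FIX | CONSIDER | REJECT | OPTIONAL
+  local f="$1"
+  if   is_critical "$f";         then echo MUST_FIX
+  elif violates_standards "$f";  then echo SHOULD_FIX
+  elif hurts_maintenance "$f";   then echo CONSIDER
+  elif is_gold_plating "$f";     then echo REJECT
+  elif is_style_opinion "$f";    then echo REJECT
+  else                                echo OPTIONAL
+  fi
+}
+```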
+
+### 4. Create Classification Report
+
+```markdown
+# Code Review Analysis: Story {story_id}
+
+## Review Metadata
+- Reviewer: {reviewer_type} (Adversarial / Multi-Agent)
+- Total Findings: {total_findings}
+- Review Date: {date}
+
+## Classification Results
+
+### 🔴 MUST FIX (Critical - Blocking)
+Total: {must_fix_count}
+
+1. **[SECURITY] Unvalidated user input in API endpoint**
+ - File: `src/api/users.ts:45`
+ - Issue: POST /api/users accepts unvalidated input, SQL injection risk
+ - Why this is real: Security vulnerability, could lead to data breach
+ - Action: Add input validation with Zod schema
+ - Estimated effort: 30 min
+
+2. **[CORRECTNESS] Race condition in state update**
+ - File: `src/components/UserForm.tsx:67`
+ - Issue: Multiple async setState calls without proper sequencing
+ - Why this is real: Causes intermittent bugs in production
+ - Action: Use functional setState or useReducer
+ - Estimated effort: 20 min
+
+### 🟠 SHOULD FIX (High Priority)
+Total: {should_fix_count}
+
+3. **[STANDARDS] Missing error handling per team convention**
+ - File: `src/services/userService.ts:34`
+ - Issue: API calls lack try-catch per documented standards
+ - Why this matters: Team standard in CONTRIBUTING.md section 3.2
+ - Action: Wrap in try-catch, log errors
+ - Estimated effort: 15 min
+
+### 🟡 CONSIDER FIX (Medium - If in scope)
+Total: {consider_count}
+
+4. **[MAINTAINABILITY] Complex nested conditional**
+ - File: `src/utils/validation.ts:23`
+ - Issue: 4-level nested if-else hard to read
+ - Why this matters: Could confuse future maintainers
+ - Action: Extract to guard clauses or lookup table
+ - Estimated effort: 45 min
+ - **Scope consideration**: Nice to have, but not blocking
+
+### ⚪ REJECTED (Gold Plating / False Positives)
+Total: {rejected_count}
+
+5. **[REJECTED] "Add comprehensive logging to all functions"**
+ - Reason: Gold plating - logging should be signal, not noise
+ - Context: These are simple utility functions, no debugging issues
+ - Verdict: REJECT - Would create log spam
+
+6. **[REJECTED] "Extract component for reusability"**
+ - Reason: YAGNI - component used only once, no reuse planned
+ - Context: Story scope is single-use dashboard widget
+ - Verdict: REJECT - Premature abstraction
+
+7. **[REJECTED] "Add database connection pooling"**
+ - Reason: Premature optimization - current load is minimal
+ - Context: App has 10 concurrent users max, no performance issues
+ - Verdict: REJECT - Optimize when needed, not speculatively
+
+8. **[REJECTED] "Consider microservices architecture"**
+ - Reason: Out of scope - architectural decision beyond story
+ - Context: Story is adding a single API endpoint
+ - Verdict: REJECT - Massive overreach
+
+### 🔵 OPTIONAL (Tech Debt Backlog)
+Total: {optional_count}
+
+9. **[STYLE] Inconsistent naming convention**
+ - File: `src/utils/helpers.ts:12`
+ - Issue: camelCase vs snake_case mixing
+ - Why low priority: Works fine, linter doesn't flag it
+ - Action: Standardize to camelCase when touching this file later
+ - Create tech debt ticket: TD-{number}
+
+## Summary
+
+**Action Plan:**
+- 🔴 MUST FIX: {must_fix_count} issues (blocking)
+- 🟠 SHOULD FIX: {should_fix_count} issues (high priority)
+- 🟡 CONSIDER: {consider_count} issues (if time permits)
+- ⚪ REJECTED: {rejected_count} findings (documented why)
+- 🔵 OPTIONAL: {optional_count} items (tech debt backlog)
+
+**Estimated fix time:** {total_fix_time_hours} hours
+
+**Proceed to:** Step 9 - Fix Issues (implement MUST FIX + SHOULD FIX items)
+```
+
+### 5. Document Rejections
+
+**CRITICAL:** When rejecting findings, ALWAYS document WHY:
+
+```markdown
+## Rejected Findings - Rationale
+
+### Finding: "Add caching layer for all API calls"
+**Rejected because:**
+- ⥠Premature optimization - no performance issues detected
+- đ Traffic analysis shows <100 requests/day
+- đ¯ Story scope is feature addition, not optimization
+- đ° Cost: 2 days implementation, 0 proven benefit
+- đ Decision: Monitor first, optimize if needed
+
+### Finding: "Refactor to use dependency injection"
+**Rejected because:**
+- 🏗️ Over-engineering - current approach works fine
+- 📏 Codebase size doesn't justify DI complexity
+- 👥 Team unfamiliar with DI patterns
+- 🎯 Story scope: simple feature, not architecture overhaul
+- 📋 Decision: Keep it simple, revisit if codebase grows
+
+### Finding: "Add comprehensive JSDoc to all functions"
+**Rejected because:**
+- 📝 Gold plating - TypeScript types provide documentation
+- ⏱️ Time sink - 4+ hours for marginal benefit
+- 🎯 Team standard: JSDoc only for public APIs
+- 📋 Decision: Follow team convention, not reviewer preference
+```
+
+### 6. Update State
+
+```yaml
+# Update {stateFile}
+current_step: 8
+review_analysis:
+ must_fix: {must_fix_count}
+ should_fix: {should_fix_count}
+ consider: {consider_count}
+ rejected: {rejected_count}
+ optional: {optional_count}
+ estimated_fix_time: "{total_fix_time_hours}h"
+ rejections_documented: true
+ analysis_complete: true
+```
+
+---
+
+## Critical Thinking Framework
+
+Use this framework to evaluate EVERY finding:
+
+### The "So What?" Test
+- **Ask:** "So what if we don't fix this?"
+- **If answer is:** "Nothing bad happens" → REJECT
+- **If answer is:** "Production breaks" → MUST FIX
+
+### The "YAGNI" Test (You Aren't Gonna Need It)
+- **Ask:** "Do we need this NOW for current requirements?"
+- **If answer is:** "Maybe someday" → REJECT
+- **If answer is:** "Yes, breaks without it" → FIX
+
+### The "Scope" Test
+- **Ask:** "Is this within the story's scope?"
+- **If answer is:** "No, requires new story" → REJECT (or create new story)
+- **If answer is:** "Yes, part of ACs" → FIX
+
+### The "Team Standard" Test
+- **Ask:** "Does our team actually do this?"
+- **If answer is:** "No, reviewer's opinion" → REJECT
+- **If answer is:** "Yes, in CONTRIBUTING.md" → FIX
+
+---
+
+## Common Rejection Patterns
+
+Learn to recognize these patterns:
+
+1. **"Consider adding..."** - Usually gold plating unless critical
+2. **"It would be better if..."** - Subjective opinion, often rejectable
+3. **"For maximum performance..."** - Premature optimization
+4. **"To follow best practices..."** - Check if team actually follows it
+5. **"This could be refactored..."** - Does it need refactoring NOW?
+6. **"Add comprehensive..."** - Comprehensive = overkill most of the time
+7. **"Future-proof by..."** - Can't predict future, solve current problems
+
+---
+
+## Next Step
+
+Proceed to **Step 9: Fix Issues** ({nextStep})
+
+Implement MUST FIX and SHOULD FIX items. Skip rejected items (already documented why).
diff --git a/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/workflow.yaml.backup b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/workflow.yaml.backup
new file mode 100644
index 00000000..fd417a36
--- /dev/null
+++ b/src/modules/bmm/workflows/4-implementation/super-dev-pipeline/workflow.yaml.backup
@@ -0,0 +1,218 @@
+name: super-dev-pipeline
+description: "Step-file architecture with complexity-based routing, smart batching, and auto-story-creation. Micro stories get lightweight path, standard/complex get full quality gates."
+author: "BMad"
+version: "1.4.0" # Added auto-create story via /create-story-with-gap-analysis when story missing or incomplete
+
+# Critical variables from config
+config_source: "{project-root}/_bmad/bmm/config.yaml"
+output_folder: "{config_source}:output_folder"
+sprint_artifacts: "{config_source}:sprint_artifacts"
+communication_language: "{config_source}:communication_language"
+date: system-generated
+
+# Workflow paths
+installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/super-dev-pipeline"
+steps_path: "{installed_path}/steps"
+templates_path: "{installed_path}/templates"
+checklists_path: "{installed_path}/checklists"
+
+# State management
+state_file: "{sprint_artifacts}/super-dev-state-{{story_id}}.yaml"
+audit_trail: "{sprint_artifacts}/audit-super-dev-{{story_id}}-{{date}}.yaml"
+
+# Auto-create story settings (NEW v1.4.0)
+# When story is missing or lacks proper context, auto-invoke /create-story-with-gap-analysis
+auto_create_story:
+ enabled: true # Set to false to revert to old HALT behavior
+ create_story_workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/create-story-with-gap-analysis"
+ triggers:
+ - story_not_found # Story file doesn't exist
+ - no_tasks # Story exists but has no tasks
+ - missing_sections # Story missing required sections (Tasks, Acceptance Criteria)
+
+# Complexity level (passed from batch-super-dev or set manually)
+# Controls which pipeline steps to execute
+complexity_level: "standard" # micro | standard | complex
+
+# Complexity-based step skipping (NEW v1.2.0)
+complexity_routing:
+ micro:
+ skip_steps: [2, 5] # Skip pre-gap analysis and code review
+    description: "Lightweight path for simple stories (≤3 tasks, low risk)"
+ standard:
+ skip_steps: [] # Full pipeline
+ description: "Normal path with all quality gates"
+ complex:
+ skip_steps: [] # Full pipeline + warnings
+ description: "Enhanced path for high-risk stories"
+ warn_before_start: true
+ suggest_split: true
+
+# Workflow modes
+modes:
+ interactive:
+ description: "Human-in-the-loop with menu navigation between steps"
+ checkpoint_on_failure: true
+ requires_approval: true
+ smart_batching: true # User can approve batching plan
+ batch:
+ description: "Unattended execution for batch-super-dev"
+ checkpoint_on_failure: true
+ requires_approval: false
+ fail_fast: true
+ smart_batching: true # Auto-enabled for efficiency
+
+# Smart batching configuration
+smart_batching:
+ enabled: true
+ detect_patterns: true
+ default_to_safe: true # When uncertain, execute individually
+ min_batch_size: 3 # Minimum tasks to form a batch
+ fallback_on_failure: true # Revert to individual if batch fails
+
+ # Batchable pattern definitions
+ batchable_patterns:
+ - pattern: "package_installation"
+ keywords: ["Add", "package.json", "npm install", "dependency"]
+ risk_level: "low"
+ validation: "npm install && npm run build"
+
+ - pattern: "module_registration"
+ keywords: ["Import", "Module", "app.module", "register"]
+ risk_level: "low"
+ validation: "tsc --noEmit"
+
+ - pattern: "code_deletion"
+ keywords: ["Delete", "Remove", "rm ", "unlink"]
+ risk_level: "low"
+ validation: "npm test && npm run build"
+
+ - pattern: "import_update"
+ keywords: ["Update import", "Change import", "import from"]
+ risk_level: "low"
+ validation: "npm run build"
+
+ # Non-batchable pattern definitions (always execute individually)
+ individual_patterns:
+ - pattern: "business_logic"
+ keywords: ["circuit breaker", "fallback", "caching for", "strategy"]
+ risk_level: "medium"
+
+ - pattern: "security"
+ keywords: ["auth", "permission", "security", "encrypt"]
+ risk_level: "high"
+
+ - pattern: "data_migration"
+ keywords: ["migration", "schema", "ALTER TABLE", "database"]
+ risk_level: "high"
+
+# Agent role definitions (loaded once, switched as needed)
+agents:
+ dev:
+ name: "Developer"
+ persona: "{project-root}/_bmad/bmm/agents/dev.md"
+ description: "Pre-gap, implementation, post-validation, code review"
+ used_in_steps: [2, 3, 4, 5]
+ sm:
+ name: "Scrum Master"
+ persona: "{project-root}/_bmad/bmm/agents/sm.md"
+ description: "Story completion and status"
+ used_in_steps: [6]
+
+# Step file definitions
+steps:
+ - step: 1
+ file: "{steps_path}/step-01-init.md"
+ name: "Initialize"
+ description: "Load story context and detect development mode"
+ agent: null
+ quality_gate: false
+
+ - step: 2
+ file: "{steps_path}/step-02-pre-gap-analysis.md"
+ name: "Pre-Gap Analysis"
+ description: "Validate tasks against codebase (critical for brownfield)"
+ agent: dev
+ quality_gate: true
+
+ - step: 3
+ file: "{steps_path}/step-03-implement.md"
+ name: "Implement"
+ description: "Adaptive implementation (TDD for new, refactor for existing)"
+ agent: dev
+ quality_gate: true
+
+ - step: 4
+ file: "{steps_path}/step-04-post-validation.md"
+ name: "Post-Validation"
+ description: "Verify completed tasks against codebase reality"
+ agent: dev
+ quality_gate: true
+ iterative: true # May re-invoke step 3 if gaps found
+
+ - step: 5
+ file: "{steps_path}/step-05-code-review.md"
+ name: "Code Review"
+ description: "Adversarial code review finding 3-10 issues"
+ agent: dev
+ quality_gate: true
+
+ - step: 6
+ file: "{steps_path}/step-06-complete.md"
+ name: "Complete"
+ description: "Commit and push changes"
+ agent: sm
+ quality_gate: false
+
+ - step: 7
+ file: "{steps_path}/step-07-summary.md"
+ name: "Summary"
+ description: "Generate audit trail"
+ agent: null
+ quality_gate: false
+
+# Quality gates
+quality_gates:
+ pre_gap_analysis:
+ step: 2
+ criteria:
+ - "All tasks validated or refined"
+ - "No missing context"
+ - "Implementation path clear"
+
+ implementation:
+ step: 3
+ criteria:
+ - "All tasks completed"
+ - "Tests pass"
+ - "Code follows project patterns"
+
+ post_validation:
+ step: 4
+ criteria:
+ - "All completed tasks verified against codebase"
+ - "Zero false positives remaining"
+ - "Files/functions/tests actually exist"
+
+ code_review:
+ step: 5
+ criteria:
+ - "3-10 specific issues identified"
+ - "All issues resolved or documented"
+ - "Security review complete"
+
+# Document loading strategies
+input_file_patterns:
+ story:
+ description: "Story file being developed"
+ pattern: "{sprint_artifacts}/story-*.md"
+ load_strategy: "FULL_LOAD"
+ cache: true
+
+ project_context:
+ description: "Critical rules and patterns"
+ pattern: "**/project-context.md"
+ load_strategy: "FULL_LOAD"
+ cache: true
+
+standalone: true