Add batch-super-dev workflow with auto-installation and quality gates

Features:
- Interactive batch selector for processing multiple ready-for-dev stories
- 5-layer quality gate system (pre-gap, post-validation, reconciliation, code review, sprint-status sync)
- Smart story reconciliation (auto-updates checkboxes based on Dev Agent Record)
- Anti-vibe-coding enforcement (verifies files exist, tests pass, implementations aren't stubs)
- Story validation & auto-creation (validates 12 BMAD sections, creates missing stories)
- Auto-generates /batch-super-dev and /super-dev-pipeline slash commands on installation

Platform Support:
- Claude Code: Auto-creates .claude-commands/batch-super-dev.md
- Windsurf: Auto-creates slash commands for cross-compatibility

Workflow Location:
- Source: src/modules/bmm/workflows/4-implementation/batch-super-dev/
- Installed: _bmad/bmm/workflows/4-implementation/batch-super-dev/

Quality Gates:
1. Pre-gap analysis validates story completeness
2. Post-implementation validation verifies every task (anti-vibe-coding)
3. Smart reconciliation updates checkboxes based on evidence
4. Multi-agent code review finds issues before commit
5. Python script validates sprint-status.yaml accuracy
Author: Jonah Schulte
Date: 2026-01-07 12:10:23 -05:00
Parent: 1ddf2afcea
Commit: b517a8b1d0
8 changed files with 1726 additions and 19 deletions

View File

@@ -1,13 +1,15 @@
 ---
-description: 'Autonomous epic processing using story-pipeline - creates and develops all stories in an epic with minimal human intervention (65% more token efficient!)'
+name: batch-super-dev
+description: "Interactive batch selector for super-dev-pipeline - select and process multiple ready-for-dev stories with full quality gates"
+group: implementation
 ---
 IT IS CRITICAL THAT YOU FOLLOW THESE STEPS - while staying in character as the current agent persona you may have loaded:
 <steps CRITICAL="TRUE">
 1. Always LOAD the FULL @_bmad/core/tasks/workflow.xml
-2. READ its entire contents - this is the CORE OS for EXECUTING the specific workflow-config @_bmad/bmm/workflows/4-implementation/autonomous-epic/workflow.yaml
-3. Pass the yaml path _bmad/bmm/workflows/4-implementation/autonomous-epic/workflow.yaml as 'workflow-config' parameter to the workflow.xml instructions
+2. READ its entire contents - this is the CORE OS for EXECUTING the specific workflow-config @_bmad/bmm/workflows/4-implementation/batch-super-dev/workflow.yaml
+3. Pass the yaml path _bmad/bmm/workflows/4-implementation/batch-super-dev/workflow.yaml as 'workflow-config' parameter to the workflow.xml instructions
 4. Follow workflow.xml instructions EXACTLY as written to process and follow the specific workflow config and its instructions
 5. Save outputs after EACH section when generating any documents from templates
 </steps>

View File

@@ -1,3 +1,5 @@
+const fs = require('fs-extra');
+const path = require('node:path');
 const chalk = require('chalk');
 /**
@@ -11,19 +13,57 @@ const chalk = require('chalk');
  * @returns {Promise<boolean>} - Success status
  */
 async function install(options) {
-  const { logger, platformInfo } = options;
+  // projectRoot and config available for future use
+  const { projectRoot, logger, platformInfo } = options;
   try {
     const platformName = platformInfo ? platformInfo.name : 'Claude Code';
     logger.log(chalk.cyan(` BMM-${platformName} Specifics installed`));
-    // Add Claude Code specific BMM configurations here
-    // For example:
-    // - Custom command configurations
-    // - Agent party configurations
-    // - Workflow integrations
-    // - Template mappings
+    // Create .claude-commands directory
+    const commandsDir = path.join(projectRoot, '.claude-commands');
+    await fs.ensureDir(commandsDir);
+    // Install batch-super-dev slash command
+    const batchSuperDevCommand = `---
+name: batch-super-dev
+description: "Interactive batch selector for super-dev-pipeline - select and process multiple ready-for-dev stories with full quality gates"
+group: implementation
+---
+IT IS CRITICAL THAT YOU FOLLOW THESE STEPS - while staying in character as the current agent persona you may have loaded:
+<steps CRITICAL="TRUE">
+1. Always LOAD the FULL @_bmad/core/tasks/workflow.xml
+2. READ its entire contents - this is the CORE OS for EXECUTING the specific workflow-config @_bmad/bmm/workflows/4-implementation/batch-super-dev/workflow.yaml
+3. Pass the yaml path _bmad/bmm/workflows/4-implementation/batch-super-dev/workflow.yaml as 'workflow-config' parameter to the workflow.xml instructions
+4. Follow workflow.xml instructions EXACTLY as written to process and follow the specific workflow config and its instructions
+5. Save outputs after EACH section when generating any documents from templates
+</steps>
+`;
+    const batchCommandPath = path.join(commandsDir, 'batch-super-dev.md');
+    await fs.writeFile(batchCommandPath, batchSuperDevCommand);
+    logger.log(chalk.green(` ✓ Created /batch-super-dev command`));
+    // Install super-dev-pipeline slash command
+    const superDevPipelineCommand = `---
+description: 'Step-file workflow with anti-vibe-coding enforcement - works for greenfield AND brownfield development'
+---
+IT IS CRITICAL THAT YOU FOLLOW THESE STEPS - while staying in character as the current agent persona you may have loaded:
+<steps CRITICAL="TRUE">
+1. Always LOAD the FULL @_bmad/core/tasks/workflow.xml
+2. READ its entire contents - this is the CORE OS for EXECUTING the specific workflow-config @_bmad/bmm/workflows/4-implementation/super-dev-pipeline/workflow.yaml
+3. Pass the yaml path _bmad/bmm/workflows/4-implementation/super-dev-pipeline/workflow.yaml as 'workflow-config' parameter to the workflow.xml instructions
+4. Follow workflow.xml instructions EXACTLY as written to process and follow the specific workflow config and its instructions
+5. Save outputs after EACH section when generating any documents from templates
+</steps>
+`;
+    const superDevCommandPath = path.join(commandsDir, 'super-dev-pipeline.md');
+    await fs.writeFile(superDevCommandPath, superDevPipelineCommand);
+    logger.log(chalk.green(` ✓ Created /super-dev-pipeline command`));
     return true;
   } catch (error) {

View File

@@ -1,3 +1,5 @@
+const fs = require('fs-extra');
+const path = require('node:path');
 const chalk = require('chalk');
 /**
@@ -10,17 +12,58 @@ const chalk = require('chalk');
  * @returns {Promise<boolean>} - Success status
  */
 async function install(options) {
-  const { logger } = options;
+  // projectRoot and config available for future use
+  const { projectRoot, logger } = options;
   try {
     logger.log(chalk.cyan(' BMM-Windsurf Specifics installed'));
-    // Add Windsurf specific BMM configurations here
-    // For example:
-    // - Custom cascades
-    // - Workflow adaptations
-    // - Template configurations
+    // Create .windsurfrules/commands directory if Windsurf uses slash commands
+    // Note: Windsurf's command system may differ from Claude Code
+    // This creates commands in .claude-commands as a fallback for cross-compatibility
+    const commandsDir = path.join(projectRoot, '.claude-commands');
+    await fs.ensureDir(commandsDir);
+    // Install batch-super-dev slash command
+    const batchSuperDevCommand = `---
+name: batch-super-dev
+description: "Interactive batch selector for super-dev-pipeline - select and process multiple ready-for-dev stories with full quality gates"
+group: implementation
+---
+IT IS CRITICAL THAT YOU FOLLOW THESE STEPS - while staying in character as the current agent persona you may have loaded:
+<steps CRITICAL="TRUE">
+1. Always LOAD the FULL @_bmad/core/tasks/workflow.xml
+2. READ its entire contents - this is the CORE OS for EXECUTING the specific workflow-config @_bmad/bmm/workflows/4-implementation/batch-super-dev/workflow.yaml
+3. Pass the yaml path _bmad/bmm/workflows/4-implementation/batch-super-dev/workflow.yaml as 'workflow-config' parameter to the workflow.xml instructions
+4. Follow workflow.xml instructions EXACTLY as written to process and follow the specific workflow config and its instructions
+5. Save outputs after EACH section when generating any documents from templates
+</steps>
+`;
+    const batchCommandPath = path.join(commandsDir, 'batch-super-dev.md');
+    await fs.writeFile(batchCommandPath, batchSuperDevCommand);
+    logger.log(chalk.green(` ✓ Created /batch-super-dev command`));
+    // Install super-dev-pipeline slash command
+    const superDevPipelineCommand = `---
+description: 'Step-file workflow with anti-vibe-coding enforcement - works for greenfield AND brownfield development'
+---
+IT IS CRITICAL THAT YOU FOLLOW THESE STEPS - while staying in character as the current agent persona you may have loaded:
+<steps CRITICAL="TRUE">
+1. Always LOAD the FULL @_bmad/core/tasks/workflow.xml
+2. READ its entire contents - this is the CORE OS for EXECUTING the specific workflow-config @_bmad/bmm/workflows/4-implementation/super-dev-pipeline/workflow.yaml
+3. Pass the yaml path _bmad/bmm/workflows/4-implementation/super-dev-pipeline/workflow.yaml as 'workflow-config' parameter to the workflow.xml instructions
+4. Follow workflow.xml instructions EXACTLY as written to process and follow the specific workflow config and its instructions
+5. Save outputs after EACH section when generating any documents from templates
+</steps>
+`;
+    const superDevCommandPath = path.join(commandsDir, 'super-dev-pipeline.md');
+    await fs.writeFile(superDevCommandPath, superDevPipelineCommand);
+    logger.log(chalk.green(` ✓ Created /super-dev-pipeline command`));
     return true;
   } catch (error) {

View File

@@ -0,0 +1,634 @@
# Batch Super-Dev Workflow
**Version:** 1.2.0 (Added Story Validation & Auto-Creation)
**Created:** 2026-01-06
**Author:** BMad
---
## Overview
Interactive batch workflow for processing multiple `ready-for-dev` stories sequentially or in parallel using the super-dev-pipeline with full quality gates.
**New in v1.2.0:** Smart Story Validation & Auto-Creation - validates story files, creates missing stories, regenerates invalid ones automatically.
**New in v1.1.0:** Smart Story Reconciliation - automatically verifies story accuracy after each implementation.
---
## Features
### Core Capabilities
1. **🆕 Smart Story Validation & Auto-Creation** (NEW v1.2.0)
- Validates all selected stories before processing
- Checks for 12 required BMAD sections
- Validates content quality (Current State ≥100 words, gap analysis present)
- **Auto-creates missing story files** with codebase gap analysis
- **Auto-regenerates invalid stories** (incomplete or stub files)
- Interactive prompts (or fully automated with settings)
- Backs up existing files before regeneration
2. **Interactive Story Selection**
- Lists all `ready-for-dev` stories from sprint-status.yaml
- Shows story status icons (✅ file exists, ❌ missing, 🔄 needs status update)
- Supports flexible selection syntax: single, ranges, comma-separated, "all"
- Optional epic filtering (process only Epic 3 stories, etc.)
3. **Execution Modes**
- **Sequential:** Process stories one-by-one in current session (easier monitoring)
- **Parallel:** Spawn Task agents to process stories concurrently (faster, autonomous)
- Configurable parallelism: 2, 4, or all stories at once
4. **Full Quality Gates** (from super-dev-pipeline)
- Pre-gap analysis (validate story completeness)
- Test-driven implementation
- Post-validation (verify requirements met)
- Multi-agent code review (4 specialized agents)
- Targeted git commits
- Definition of done verification
5. **Smart Story Reconciliation** (v1.1.0)
- Automatically checks story accuracy after implementation
- Verifies Acceptance Criteria checkboxes match Dev Agent Record
- Verifies Tasks/Subtasks checkboxes match implementation
- Verifies Definition of Done completion
- Updates story status (done/review/in-progress) based on actual completion
- Synchronizes sprint-status.yaml with story file status
- **Prevents "done" stories with unchecked items**
---
## Smart Story Validation & Auto-Creation (NEW v1.2.0)
### What It Does
Before processing any selected stories, the workflow automatically validates each story file:
1. **File Existence Check** - Verifies story file exists (tries multiple naming patterns)
2. **Section Validation** - Ensures all 12 BMAD sections are present
3. **Content Quality Check** - Validates sufficient content (not stubs):
- Current State: ≥100 words
- Gap analysis markers: ✅/❌ present
- Acceptance Criteria: ≥3 items
- Tasks: ≥5 items
4. **Auto-Creation** - Creates missing stories with codebase gap analysis
5. **Auto-Regeneration** - Regenerates invalid/incomplete story files
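For illustration, a minimal Node sketch of these file-level checks is shown below. The helper name, heading patterns, and example path are assumptions, the AC/Task count checks are omitted, and the real validation is performed by the workflow agent rather than a script:
```js
// Sketch of the Step 2.5 file checks (illustrative only).
const fs = require('node:fs');

const REQUIRED_SECTIONS = 12;        // validation.min_sections
const MIN_CURRENT_STATE_WORDS = 100; // validation.min_current_state_words

function validateStoryFile(filePath) {
  if (!fs.existsSync(filePath)) return { valid: false, problems: ['file missing'] };
  const text = fs.readFileSync(filePath, 'utf8');

  // Count top-level "## " headings (assumes one heading per BMAD section).
  const sectionCount = (text.match(/^## /gm) || []).length;

  // Extract the Current State section body and count its words.
  const currentState = (text.split(/^## Current State\s*$/m)[1] || '').split(/^## /m)[0];
  const wordCount = currentState.trim().split(/\s+/).filter(Boolean).length;

  // Gap-analysis markers (✅/❌) must be present in Current State.
  const hasGapMarkers = /[✅❌]/.test(currentState);

  const problems = [];
  if (sectionCount < REQUIRED_SECTIONS) problems.push(`sections ${sectionCount}/${REQUIRED_SECTIONS}`);
  if (wordCount < MIN_CURRENT_STATE_WORDS) problems.push(`Current State stub (${wordCount} words)`);
  if (!hasGapMarkers) problems.push('gap analysis missing');

  return { valid: problems.length === 0, problems };
}

console.log(validateStoryFile('docs/sprint-artifacts/3-4-vehicle-details.md'));
```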
### Why This Matters
**Problem this solves:**
Before v1.2.0:
```
User: "Process stories 3.1, 3.2, 3.3, 3.4"
Workflow: "Story 3.3 file missing - please create it first"
User: Ctrl+C → /create-story → /batch-super-dev again
```
After v1.2.0:
```
User: "Process stories 3.1, 3.2, 3.3, 3.4"
Workflow: "Story 3.3 missing - create it? (yes)"
User: "yes"
Workflow: Creates story 3.3 with gap analysis → Processes all 4 stories
```
**Prevents:**
- Incomplete story files being processed
- Missing gap analysis
- Stub files (< 100 words)
- Manual back-and-forth workflow interruptions
### Validation Process
```
Load Sprint Status
Display Available Stories
🆕 VALIDATE EACH STORY ← NEW STEP 2.5
For each story:
┌─ File missing? → Prompt: "Create story with gap analysis?"
│ └─ yes → /create-story-with-gap-analysis → ✅ Created
│ └─ no → ⏭️ Skip story
┌─ File exists but invalid?
│ (< 12 sections OR < 100 words OR no gap analysis)
│ → Prompt: "Regenerate story with codebase scan?"
│ └─ yes → Backup original → /create-story-with-gap-analysis → ✅ Regenerated
│ └─ no → ⏭️ Skip story
└─ File valid? → ✅ Ready to process
Remove skipped stories
Display Validated Stories
User Selection (only validated stories)
Process Stories
```
### Configuration Options
**In workflow.yaml:**
```yaml
# Story validation settings (NEW in v1.2.0)
validation:
enabled: true # Enable/disable validation
auto_create_missing: false # Auto-create without prompting (use cautiously)
auto_regenerate_invalid: false # Auto-regenerate without prompting (use cautiously)
min_sections: 12 # BMAD format requires all 12
min_current_state_words: 100 # Minimum content length
require_gap_analysis: true # Must have ✅/❌ markers
backup_before_regenerate: true # Create .backup before regenerating
```
**Interactive Mode (default):**
- Prompts before creating/regenerating each story
- Safe, user retains control
- Recommended for most workflows
**Fully Automated Mode:**
```yaml
validation:
auto_create_missing: true
auto_regenerate_invalid: true
```
- Creates/regenerates without prompting
- Faster for large batches
- Use with caution (may overwrite valid stories)
### Example Session (v1.2.0)
```
🤖 /batch-super-dev
📊 Ready-for-Dev Stories (5)
1. **3-1-vehicle-card**
→ Story file exists
2. **3-2-vehicle-search**
→ Story file exists
3. **3-3-vehicle-compare**
→ Story file missing
4. **3-4-vehicle-details** ⚠️
→ File exists (7/12 sections, stub content)
5. **3-5-vehicle-history**
→ Story file exists
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🔍 VALIDATING STORY FILES
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Story 3-1-vehicle-card: ✅ Valid (12/12 sections, gap analysis present)
Story 3-2-vehicle-search: ✅ Valid (12/12 sections, gap analysis present)
📝 Story 3-3-vehicle-compare: File missing
Create story file with gap analysis? (yes/no): yes
Creating story 3-3-vehicle-compare with codebase gap analysis...
→ Scanning apps/frontend/web for existing components...
→ Scanning packages/widgets for related widgets...
→ Analyzing gap: 3 files exist, 5 need creation
✅ Story 3-3-vehicle-compare created successfully (12/12 sections)
⚠️ Story 3-4-vehicle-details: File incomplete or invalid
- Sections: 7/12
- Current State: stub (32 words, expected ≥100)
- Gap analysis: missing
Regenerate story with codebase scan? (yes/no): yes
Regenerating story 3-4-vehicle-details with gap analysis...
→ Backing up to docs/sprint-artifacts/3-4-vehicle-details.md.backup
→ Scanning codebase for VehicleDetails implementation...
→ Found: packages/widgets/vehicle-details-v2 (partial)
→ Analyzing gap: 8 files exist, 3 need creation
✅ Story 3-4-vehicle-details regenerated successfully (12/12 sections)
Story 3-5-vehicle-history: ✅ Valid (12/12 sections, gap analysis present)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✅ Story Validation Complete
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
**Validated:** 5 stories ready to process
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Select stories to process: all
[Proceeds to process all 5 validated stories...]
```
---
## Smart Story Reconciliation (v1.1.0)
### What It Does
After each story completes, the workflow automatically:
1. **Loads Dev Agent Record** - Reads implementation summary, file list, test results
2. **Analyzes Acceptance Criteria** - Checks which ACs have evidence of completion
3. **Analyzes Tasks** - Verifies which tasks have been implemented
4. **Analyzes Definition of Done** - Confirms quality gates passed
5. **Calculates Completion Percentages** - AC%, Tasks%, DoD%
6. **Determines Correct Status:**
- `done`: AC≥95% AND Tasks≥95% AND DoD≥95%
- `review`: AC≥80% AND Tasks≥80% AND DoD≥80%
- `in-progress`: Below 80% on any category
7. **Updates Story File** - Checks/unchecks boxes to match reality
8. **Updates sprint-status.yaml** - Synchronizes status entry
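The status decision in step 6 reduces to a small rule. A sketch using only the thresholds stated above (the incomplete-implementation override comes from `step-4.5-reconcile-story-status.md`); the function name and example figures are illustrative:
```js
// Status rule: done at >=95% across AC/Tasks/DoD, review at >=80%, else in-progress.
function determineStoryStatus({ acPct, tasksPct, dodPct, implementationComplete }) {
  if (!implementationComplete) return 'in-progress'; // Dev Agent Record indicates incomplete work
  if (acPct >= 95 && tasksPct >= 95 && dodPct >= 95) return 'done';
  if (acPct >= 80 && tasksPct >= 80 && dodPct >= 80) return 'review';
  return 'in-progress';
}

// Hypothetical figures:
console.log(determineStoryStatus({ acPct: 100, tasksPct: 100, dodPct: 96, implementationComplete: true })); // "done"
console.log(determineStoryStatus({ acPct: 85, tasksPct: 90, dodPct: 82, implementationComplete: true }));   // "review"
```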
### Why This Matters
**Problem this solves:**
Story 20.8 (before reconciliation):
- Dev Agent Record: "COMPLETE - 10 files created, 37 tests passing"
- Acceptance Criteria: All unchecked ❌
- Tasks: All unchecked ❌
- Definition of Done: All unchecked ❌
- sprint-status.yaml: `ready-for-dev`
- **Reality:** Story was 100% complete but looked 0% complete!
**After reconciliation:**
- Acceptance Criteria: 17/18 checked ✅
- Tasks: 24/24 checked ✅
- Definition of Done: 24/25 checked ✅
- sprint-status.yaml: `done`
- **Accurate representation of actual completion**
### Reconciliation Process
```
Implementation Complete
Load Dev Agent Record
Parse: Implementation Summary, File List, Test Results, Completion Notes
For each checkbox in ACs/Tasks/DoD:
- Search Dev Agent Record for evidence
- Determine expected status (checked/unchecked/partial)
- Compare actual vs expected
- Record discrepancies
Calculate completion percentages:
- AC: X/Y checked (Z%)
- Tasks: X/Y checked (Z%)
- DoD: X/Y checked (Z%)
Determine correct story status (done/review/in-progress)
Apply changes (with user confirmation):
- Update checkboxes in story file
- Update story status header
- Update sprint-status.yaml entry
Report final completion statistics
```
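As a rough illustration of the per-checkbox pass, the sketch below matches a checkbox against the Dev Agent Record text and rewrites the markdown line. The keyword heuristic and helper name are assumptions; the actual step is carried out by the agent with far more context:
```js
// Decide whether a "- [ ] ..." line should be checked, based on evidence in the Dev Agent Record.
function reconcileCheckboxLine(line, devAgentRecordText) {
  const match = line.match(/^(\s*-\s*)\[( |x|~)\]\s*(.+)$/);
  if (!match) return { line, changed: false };
  const [, prefix, currentMark, text] = match;

  // Crude evidence check: does a significant word from the checkbox (e.g. a
  // component or file name) appear anywhere in the Dev Agent Record?
  const keywords = text.match(/[A-Za-z][\w.-]{4,}/g) || [];
  const hasEvidence = keywords.some((kw) => devAgentRecordText.toLowerCase().includes(kw.toLowerCase()));

  const expectedMark = hasEvidence ? 'x' : ' ';
  if (expectedMark === currentMark) return { line, changed: false };
  return { line: `${prefix}[${expectedMark}] ${text}`, changed: true };
}

// Example using the AC from the reconciliation output below:
const record = 'File List: FlexibleGridSection.tsx, screenshot-pages.ts. Test Results: 37 tests passing.';
console.log(reconcileCheckboxLine('- [ ] FlexibleGridSection component (renders dynamic grid layouts)', record));
// -> { line: '- [x] FlexibleGridSection component (renders dynamic grid layouts)', changed: true }
```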
### Reconciliation Output
```
🔧 Story 20.8: Reconciling 42 issues
Changes to apply:
1. AC1: FlexibleGridSection component - CHECK (File created: FlexibleGridSection.tsx)
2. AC2: Screenshot automation - CHECK (File created: screenshot-pages.ts)
3. Task 1.3: Create page corpus generator - CHECK (File created: generate-page-corpus.ts)
... (39 more)
Apply these reconciliation changes? (yes/no): yes
✅ Story 20.8: Reconciliation complete (42 changes applied)
📊 Story 20.8 - Final Status
Acceptance Criteria: 17/18 (94%)
Tasks/Subtasks: 24/24 (100%)
Definition of Done: 24/25 (96%)
Story Status: done
sprint-status.yaml: done
✅ Story is COMPLETE and accurately reflects implementation
```
---
## Usage
### Basic Usage
```bash
# Process all ready-for-dev stories
/batch-super-dev
# Follow prompts:
# 1. See list of ready stories
# 2. Select stories to process (1,3-5,8 or "all")
# 3. Choose execution mode (sequential/parallel)
# 4. Confirm execution plan
# 5. Stories process automatically with reconciliation
# 6. Review batch summary
```
### Epic Filtering
```bash
# Only process Epic 3 stories
/batch-super-dev filter_by_epic=3
```
### Selection Syntax
```
Single: 1
Multiple: 1,3,5
Range: 1-5 (processes 1,2,3,4,5)
Mixed: 1,3-5,8 (processes 1,3,4,5,8)
All: all (processes all ready-for-dev stories)
```
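A minimal sketch of how this syntax could be parsed (1-based indexes into the displayed list; the helper name and the behavior on malformed tokens are assumptions):
```js
function parseSelection(input, storyKeys) {
  const trimmed = input.trim().toLowerCase();
  if (trimmed === 'all') return [...storyKeys];
  if (trimmed === 'cancel') return [];

  const indexes = new Set();
  for (const part of trimmed.split(',')) {
    const m = part.trim().match(/^(\d+)(?:-(\d+))?$/);
    if (!m) continue; // ignore malformed tokens
    const start = Number(m[1]);
    const end = Number(m[2] || m[1]);
    for (let i = start; i <= end; i++) indexes.add(i);
  }
  return [...indexes].sort((a, b) => a - b).map((i) => storyKeys[i - 1]).filter(Boolean);
}

// "1,3-5,8" -> 1st, 3rd, 4th, 5th and 8th stories from the displayed list
```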
### Execution Modes
**Sequential (Recommended for ≤5 stories):**
- Processes one story at a time in current session
- Easier to monitor progress
- Lower resource usage
- Can pause/cancel between stories
**Parallel (Recommended for >5 stories):**
- Spawns autonomous Task agents
- Much faster (2-4x speedup)
- Choose parallelism: 2 (conservative), 4 (moderate), all (aggressive)
- Requires more system resources
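In parallel mode the selected stories are first split into batches of `parallel_count` (per the example in the workflow instructions: 10 stories with 4 agents become batches [1-4], [5-8], [9-10]). A sketch of that batching step only; spawning and awaiting the Task agents is handled by the workflow engine, not by this helper:
```js
function splitIntoBatches(stories, parallelCount) {
  const batches = [];
  for (let i = 0; i < stories.length; i += parallelCount) {
    batches.push(stories.slice(i, i + parallelCount));
  }
  return batches;
}

const tenStories = Array.from({ length: 10 }, (_, i) => `story-${i + 1}`);
console.log(splitIntoBatches(tenStories, 4).map((b) => b.length)); // [4, 4, 2]
```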
---
## Workflow Configuration
**File:** `_bmad/bmm/workflows/4-implementation/batch-super-dev/workflow.yaml`
### Key Settings
```yaml
# Safety limits
max_stories: 20 # Won't process more than 20 in one batch
# Pacing
pause_between_stories: 5 # Seconds between stories (sequential mode)
# Error handling
continue_on_failure: true # Keep processing if one story fails
# Reconciliation (NEW v1.1.0)
reconciliation:
enabled: true # Auto-reconcile after each story
require_confirmation: true # Ask before applying changes
update_sprint_status: true # Sync sprint-status.yaml
```
---
## Workflow Steps
### 1. Load Sprint Status
- Parses sprint-status.yaml
- Filters stories with status="ready-for-dev"
- Excludes epics and retrospectives
- Optionally filters by epic number
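A sketch of this step, assuming sprint-status.yaml stores stories as a flat `key: status # comment` map (as the examples in this document suggest), so a line-based parse is used instead of a YAML library; the path and helper name are illustrative:
```js
const fs = require('node:fs');

function loadReadyForDevStories(sprintStatusPath, filterByEpic = '') {
  const stories = [];
  for (const line of fs.readFileSync(sprintStatusPath, 'utf8').split('\n')) {
    const m = line.match(/^\s*([\w.-]+):\s*ready-for-dev\b\s*(?:#\s*(.*))?$/);
    if (!m) continue;
    const [, key, comment] = m;
    if (key.startsWith('epic-') || key.endsWith('-retrospective')) continue; // not stories
    if (filterByEpic && !key.startsWith(`${filterByEpic}-`)) continue;       // optional epic filter
    stories.push({ key, comment: comment || '' });
  }
  // Sort by epic number, then story number (1-1, 1-2, 2-1, ...).
  return stories.sort((a, b) => {
    const [aEpic, aStory] = a.key.split('-').map(Number);
    const [bEpic, bStory] = b.key.split('-').map(Number);
    return aEpic - bEpic || aStory - bStory;
  });
}

console.log(loadReadyForDevStories('docs/sprint-artifacts/sprint-status.yaml', '3'));
```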
### 2. Display Available Stories
- Shows all ready-for-dev stories
- Verifies story files exist
- Displays status icons and comments
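Story files are located by trying the naming patterns listed in the workflow instructions. A sketch (the exact dotted-name variant is an assumption):
```js
const fs = require('node:fs');
const path = require('node:path');

function resolveStoryFile(sprintArtifactsDir, storyKey) {
  const candidates = [
    `${storyKey}.md`,                    // 3-3-vehicle-compare.md
    `story-${storyKey}.md`,              // story-3-3-vehicle-compare.md
    `${storyKey.replace(/-/g, '.')}.md`, // dotted variant (exact form assumed)
  ];
  for (const name of candidates) {
    const candidate = path.join(sprintArtifactsDir, name);
    if (fs.existsSync(candidate)) return candidate; // ✅ file exists
  }
  return null; // ❌ file missing -> Step 2.5 offers to create it
}
```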
### 2.5. 🆕 Validate and Create/Regenerate Stories (NEW v1.2.0)
**For each story:**
- Check file existence (multiple naming patterns)
- Validate 12 BMAD sections present
- Check content quality (Current State ≥100 words, gap analysis)
- **If missing:** Prompt to create with gap analysis
- **If invalid:** Prompt to regenerate with codebase scan
- **If valid:** Mark ready to process
- Remove skipped stories from selection
### 3. Get User Selection
- Interactive story picker
- Supports flexible selection syntax
- Validates selection and confirms
### 3.5. Choose Execution Strategy
- Sequential vs Parallel
- If parallel: choose concurrency level
- Confirm execution plan
### 4. Process Stories
**Sequential Mode:**
- For each selected story:
- Invoke super-dev-pipeline
- Execute reconciliation (Step 4.5)
- Report results
- Pause between stories
**Parallel Mode:**
- Split stories into batches
- Spawn Task agents for each batch
- Wait for batch completion
- Execute reconciliation for each
- Report batch results
### 4.5. Smart Story Reconciliation (NEW)
**Executed after each story completes:**
- Load Dev Agent Record
- Analyze ACs/Tasks/DoD vs implementation
- Calculate completion percentages
- Determine correct story status
- Update checkboxes and status
- Sync sprint-status.yaml
See: `step-4.5-reconcile-story-status.md` for detailed algorithm
### 5. Display Batch Summary
- Shows completion statistics
- Lists failed stories (if any)
- Lists reconciliation warnings (if any)
- Provides next steps
- Saves batch log
---
## Output Files
### Batch Log
**Location:** `docs/sprint-artifacts/batch-super-dev-{date}.log`
**Contains:**
- Start/end timestamps
- Selected stories
- Completed stories
- Failed stories
- Reconciliation warnings
- Success rate
- Total duration
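The exact log layout isn't prescribed by the workflow; below is a sketch that writes the fields above in a simple key/value format (field names and formatting are assumptions):
```js
const fs = require('node:fs');
const path = require('node:path');

function saveBatchLog(sprintArtifactsDir, summary) {
  const date = new Date().toISOString().slice(0, 10);
  const logPath = path.join(sprintArtifactsDir, `batch-super-dev-${date}.log`);
  const lines = [
    `start_time: ${summary.startTime}`,
    `end_time: ${summary.endTime}`,
    `total_duration: ${summary.totalDuration}`,
    `selected_stories: ${summary.selectedStories.join(', ')}`,
    `completed_stories: ${summary.completedStories.join(', ')}`,
    `failed_stories: ${summary.failedStories.join(', ') || 'none'}`,
    `reconciliation_warnings: ${summary.reconciliationWarnings.join(', ') || 'none'}`,
    `success_rate: ${summary.successRate}%`,
  ];
  fs.writeFileSync(logPath, lines.join('\n') + '\n');
  return logPath;
}
```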
### Reconciliation Results (per story)
**Embedded in Dev Agent Record:**
- Reconciliation summary
- Changes applied
- Final completion percentages
- Status determination reasoning
---
## Error Handling
### Story Implementation Fails
- Increments failed counter
- Adds to failed_stories list
- If `continue_on_failure=true`, continues with remaining stories
- If `continue_on_failure=false`, stops batch
### Reconciliation Fails
- Story still marked as completed (implementation succeeded)
- Adds to reconciliation_warnings list
- User warned to manually verify story accuracy
- Does NOT fail the batch
### Task Agent Fails (Parallel Mode)
- Collects error from TaskOutput
- Marks story as failed
- Continues with remaining stories in batch
---
## Best Practices
### Story Selection
- ✅ Start small: Process 2-3 stories first to verify workflow
- ✅ Group by epic: Related stories often share context
- ✅ Check file status: ✅ stories are ready, ❌ need creation first
- ❌ Don't process 20 stories at once on first run
### Execution Mode
- Sequential for ≤5 stories (easier monitoring)
- Parallel for >5 stories (faster completion)
- Use parallelism=2 first, then increase if stable
### During Execution
- Monitor progress output
- Check reconciliation reports
- Verify changes look correct
- Spot-check 1-2 completed stories
### After Completion
1. Review batch summary
2. Check reconciliation warnings
3. Verify sprint-status.yaml updated
4. Run tests: `pnpm test`
5. Check coverage: `pnpm test --coverage`
6. Review commits: `git log -<count>`
7. Spot-check 2-3 stories for quality
---
## Troubleshooting
### Reconciliation Reports Many Warnings
**Cause:** Dev Agent Record may be incomplete or stories weren't fully implemented
**Fix:**
1. Review listed stories manually
2. Check Dev Agent Record has all required sections
3. Re-run super-dev-pipeline for problematic stories
4. Manually reconcile checkboxes if needed
### Parallel Mode Hangs
**Cause:** Too many agents running concurrently, system resources exhausted
**Fix:**
1. Kill hung agents: `/tasks` then `kill <task-id>`
2. Reduce parallelism: Use 2 instead of 4
3. Process remaining stories sequentially
### Story Marked "done" but has Unchecked Items
**Cause:** Reconciliation may have missed some checkboxes
**Fix:**
1. Review Dev Agent Record
2. Check which checkboxes should be checked
3. Manually check them or re-run reconciliation:
- Load story file
- Compare ACs/Tasks/DoD to Dev Agent Record
- Update checkboxes to match reality
---
## Version History
### v1.2.0 (2026-01-06)
- **NEW:** Smart Story Validation & Auto-Creation (Step 2.5)
- Validates story files before processing
- Auto-creates missing stories with gap analysis
- Auto-regenerates invalid/incomplete stories
- Checks 12 BMAD sections, content quality
- Interactive or fully automated modes
- Backups before regeneration
- **Removes friction:** No more "story file missing" interruptions
- **Ensures quality:** Only valid stories with gap analysis proceed
- **Configuration:** New `validation` settings in workflow.yaml
### v1.1.0 (2026-01-06)
- **NEW:** Smart Story Reconciliation (Step 4.5)
- Auto-verifies story accuracy after implementation
- Updates checkboxes based on Dev Agent Record
- Synchronizes sprint-status.yaml
- Prevents "done" stories with unchecked items
- Added reconciliation warnings to batch summary
- Added reconciliation statistics to output
### v1.0.0 (2026-01-05)
- Initial release
- Interactive story selector
- Sequential and parallel execution modes
- Integration with super-dev-pipeline
- Batch summary and logging
---
## Related Workflows
- **super-dev-pipeline:** Individual story implementation (invoked by batch-super-dev)
- **create-story-with-gap-analysis:** Create new stories with codebase scan
- **sprint-status:** View/update sprint status
- **multi-agent-review:** Standalone code review (part of super-dev-pipeline)
---
## Support
**Questions or Issues:**
- Check workflow logs: `docs/sprint-artifacts/batch-super-dev-*.log`
- Review reconciliation step: `step-4.5-reconcile-story-status.md`
- Check story file format: Ensure 12-section BMAD format
- Verify Dev Agent Record populated: Required for reconciliation
---
**Last Updated:** 2026-01-06
**Status:** Active - Production-ready with reconciliation
**Maintained By:** BMad

View File

@@ -0,0 +1,572 @@
# Batch Super-Dev - Interactive Story Selector
<critical>The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml</critical>
<critical>You MUST have already loaded and processed: {project-root}/_bmad/bmm/workflows/4-implementation/batch-super-dev/workflow.yaml</critical>
<workflow>
<step n="1" goal="Load and parse sprint-status.yaml">
<action>Read {sprint_status} file</action>
<action>Parse metadata: project, project_key, tracking_system</action>
<action>Parse development_status map</action>
<action>Filter stories with status = "ready-for-dev"</action>
<action>Exclude entries that are epics (keys starting with "epic-") or retrospectives (keys ending with "-retrospective")</action>
<check if="filter_by_epic is not empty">
<action>Further filter stories to only include those starting with "{filter_by_epic}-"</action>
<example>If filter_by_epic = "3", only include stories like "3-1-...", "3-2-...", etc.</example>
</check>
<action>Sort filtered stories by epic number, then story number (e.g., 1-1, 1-2, 2-1, 3-1)</action>
<action>Store as: ready_for_dev_stories (list of story keys)</action>
<check if="ready_for_dev_stories is empty">
<output>✅ No ready-for-dev stories found.
All stories are either in-progress, review, or done!
Run `/bmad:bmm:workflows:sprint-status` to see current status.</output>
<action>Exit workflow</action>
</check>
</step>
<step n="2" goal="Display available stories with details">
<action>Read comment field for each story from sprint-status.yaml (text after # on the same line)</action>
<action>For each story, verify story file exists using multiple naming patterns:</action>
<action>Try in order: 1) {sprint_artifacts}/{story_key}.md, 2) {sprint_artifacts}/story-{story_key}.md, 3) {sprint_artifacts}/{story_key_with_dots}.md</action>
<action>Mark stories as: ✅ (file exists), ❌ (file missing), 🔄 (already implemented but not marked done)</action>
<output>
## 📦 Ready-for-Dev Stories ({{count}})
{{#if filter_by_epic}}
**Filtered by Epic {{filter_by_epic}}**
{{/if}}
{{#each ready_for_dev_stories}}
{{@index}}. **{{key}}** {{file_status_icon}}
{{#if comment}}→ {{comment}}{{/if}}
{{#if file_path}} File: {{file_path}}{{/if}}
{{/each}}
---
**Legend:**
- ✅ Story file exists, ready to implement
- 🔄 Already implemented, just needs status update
- ❌ Story file missing, needs creation first
**Total:** {{count}} stories available
**Max batch size:** {{max_stories}} stories
</output>
</step>
<step n="2.5" goal="Validate and create/regenerate stories as needed">
<output>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🔍 VALIDATING STORY FILES
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
</output>
<iterate>For each story in ready_for_dev_stories:</iterate>
<substep n="2.5a" title="Check story file existence and validity">
<action>Check if story file exists (already done in Step 2)</action>
<check if="file_status_icon == '❌' (file missing)">
<output>
📝 Story {{story_key}}: File missing
</output>
<ask>Create story file with gap analysis? (yes/no):</ask>
<check if="response == 'yes'">
<output>Creating story {{story_key}} with codebase gap analysis...</output>
<action>Invoke workflow: /bmad:bmm:workflows:create-story-with-gap-analysis</action>
<action>Parameters: story_key={{story_key}}</action>
<check if="story creation succeeded">
<output>✅ Story {{story_key}} created successfully (12/12 sections)</output>
<action>Update file_status_icon to ✅</action>
<action>Mark story as validated</action>
</check>
<check if="story creation failed">
<output>❌ Story creation failed: {{story_key}}</output>
<action>Mark story for removal from selection</action>
<action>Add to skipped_stories list with reason: "Creation failed"</action>
</check>
</check>
<check if="response == 'no'">
<output>⏭️ Skipping story {{story_key}} (file missing)</output>
<action>Mark story for removal from selection</action>
<action>Add to skipped_stories list with reason: "User declined creation"</action>
</check>
</check>
<check if="file_status_icon == '✅' (file exists)">
<action>Read story file: {{file_path}}</action>
<action>Parse sections and validate BMAD format</action>
<action>Check for all 12 required sections:
1. Business Context
2. Current State
3. Acceptance Criteria
4. Tasks and Subtasks
5. Technical Requirements
6. Architecture Compliance
7. Testing Requirements
8. Dev Agent Guardrails
9. Definition of Done
10. References
11. Dev Agent Record
12. Change Log
</action>
<action>Count sections present: sections_found</action>
<action>Check Current State content length (word count)</action>
<action>Check Acceptance Criteria item count</action>
<action>Check Tasks item count</action>
<action>Look for gap analysis markers (✅/❌) in Current State</action>
<check if="sections_found < 12 OR Current State < 100 words OR no gap analysis markers">
<output>
⚠️ Story {{story_key}}: File incomplete or invalid
- Sections: {{sections_found}}/12
{{#if Current State < 100 words}}- Current State: stub ({{word_count}} words, expected ≥100){{/if}}
{{#if no gap analysis}}- Gap analysis: missing{{/if}}
</output>
<ask>Regenerate story with codebase scan? (yes/no):</ask>
<check if="response == 'yes'">
<output>Regenerating story {{story_key}} with gap analysis...</output>
<action>Backup existing file to {{file_path}}.backup</action>
<action>Invoke workflow: /bmad:bmm:workflows:create-story-with-gap-analysis</action>
<action>Parameters: story_key={{story_key}}</action>
<check if="regeneration succeeded">
<output>✅ Story {{story_key}} regenerated successfully (12/12 sections)</output>
<action>Mark story as validated</action>
</check>
<check if="regeneration failed">
<output>❌ Regeneration failed, using backup: {{story_key}}</output>
<action>Restore from backup</action>
<action>Mark story for removal with warning</action>
<action>Add to skipped_stories list with reason: "Regeneration failed"</action>
</check>
</check>
<check if="response == 'no'">
<output>⏭️ Skipping story {{story_key}} (file incomplete)</output>
<action>Mark story for removal from selection</action>
<action>Add to skipped_stories list with reason: "User declined regeneration"</action>
</check>
</check>
<check if="sections_found == 12 AND sufficient content">
<output>✅ Story {{story_key}}: Valid (12/12 sections, gap analysis present)</output>
<action>Mark story as validated</action>
</check>
</check>
<check if="file_status_icon == '🔄' (already implemented)">
<output>✅ Story {{story_key}}: Already implemented (will skip or reconcile only)</output>
<action>Mark story as validated (already done)</action>
</check>
</substep>
<action>Remove skipped stories from ready_for_dev_stories</action>
<action>Update count of available stories</action>
<check if="skipped_stories is not empty">
<output>
⏭️ Skipped Stories ({{skipped_count}}):
{{#each skipped_stories}}
- {{story_key}}: {{reason}}
{{/each}}
</output>
</check>
<check if="ready_for_dev_stories is empty after validation">
<output>
❌ No valid stories remaining after validation.
All stories were either:
- Missing files (user declined creation)
- Invalid/incomplete (user declined regeneration)
- Already implemented
Run `/bmad:bmm:workflows:sprint-status` to see status.
</output>
<action>Exit workflow</action>
</check>
<output>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✅ Story Validation Complete
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
**Validated:** {{validated_count}} stories ready to process
{{#if skipped_count > 0}}**Skipped:** {{skipped_count}} stories{{/if}}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
</output>
</step>
<step n="3" goal="Get user selection">
<ask>
**Select stories to process:**
Enter story numbers to process (examples):
- Single: `1`
- Multiple: `1,3,5`
- Range: `1-5` (processes 1,2,3,4,5)
- Mixed: `1,3-5,8` (processes 1,3,4,5,8)
- All: `all` (processes all {{count}} stories)
Or:
- `cancel` - Exit without processing
**Your selection:**
</ask>
<action>Parse user input</action>
<check if="input == 'cancel'">
<output>❌ Batch processing cancelled.</output>
<action>Exit workflow</action>
</check>
<check if="input == 'all'">
<action>Set selected_stories = all ready_for_dev_stories</action>
</check>
<check if="input is numeric selection">
<action>Parse selection (handle commas, ranges)</action>
<example>Input "1,3-5,8" → indexes [1,3,4,5,8] → map to story keys</example>
<action>Map selected indexes to story keys from ready_for_dev_stories</action>
<action>Store as: selected_stories</action>
</check>
<check if="selected_stories count > max_stories">
<output>⚠️ You selected {{count}} stories, but max_stories is {{max_stories}}.
Only the first {{max_stories}} will be processed.</output>
<action>Truncate selected_stories to first max_stories entries</action>
</check>
<action>Display confirmation</action>
<output>
## 📋 Selected Stories ({{count}})
{{#each selected_stories}}
{{@index}}. {{key}}
{{/each}}
**Estimated time:** {{count}} stories × 30-60 min/story = {{estimated_hours}} hours
</output>
</step>
<step n="3.5" goal="Choose execution strategy">
<action>Use AskUserQuestion to determine execution mode and parallelization</action>
<ask>
**How should these stories be processed?**
**Execution Mode:**
- Sequential: Run stories one-by-one in this session (slower, easier to monitor)
- Parallel: Spawn Task agents to process stories concurrently (faster, autonomous)
**If Parallel, how many agents in parallel?**
- Conservative: 2 agents (low resource usage, easier debugging)
- Moderate: 4 agents (balanced performance)
- Aggressive: All stories at once (fastest, high resource usage)
</ask>
<action>Capture responses: execution_mode, parallel_count</action>
<check if="execution_mode == 'sequential'">
<action>Set parallel_count = 1</action>
<action>Set use_task_agents = false</action>
</check>
<check if="execution_mode == 'parallel'">
<action>Set use_task_agents = true</action>
<action>If parallel_count == 'all': set parallel_count = count of selected_stories</action>
</check>
<output>
## ⚙️ Execution Plan
**Mode:** {{execution_mode}}
{{#if use_task_agents}}
**Task Agents:** {{parallel_count}} running concurrently
**Agent Type:** general-purpose (autonomous)
{{else}}
**Sequential processing** in current session
{{/if}}
**Stories to process:** {{count}}
**Estimated total time:**
{{#if use_task_agents}}
- With {{parallel_count}} agents: {{estimated_hours / parallel_count}} hours
{{else}}
- Sequential: {{estimated_hours}} hours
{{/if}}
</output>
<ask>Confirm execution plan? (yes/no):</ask>
<check if="response != 'yes'">
<output>❌ Batch processing cancelled.</output>
<action>Exit workflow</action>
</check>
</step>
<step n="4" goal="Process stories with super-dev-pipeline">
<action>Initialize counters: completed=0, failed=0, failed_stories=[], reconciliation_warnings=[], reconciliation_warnings_count=0</action>
<action>Set start_time = current timestamp</action>
<check if="use_task_agents == true">
<action>Jump to Step 4-Parallel (Task Agent execution)</action>
</check>
<check if="use_task_agents == false">
<action>Continue to Step 4-Sequential (In-session execution)</action>
</check>
</step>
<step n="4-Sequential" goal="Sequential processing in current session">
<output>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🚀 SEQUENTIAL BATCH PROCESSING STARTED
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
**Stories:** {{count}}
**Mode:** super-dev-pipeline (batch, sequential)
**Continue on failure:** {{continue_on_failure}}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
</output>
<iterate>For each story in selected_stories:</iterate>
<substep n="4s-a" title="Process individual story">
<output>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
📦 Story {{current_index}}/{{total_count}}: {{story_key}}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
</output>
<action>Invoke workflow: /bmad:bmm:workflows:super-dev-pipeline</action>
<action>Parameters: mode=batch, story_key={{story_key}}</action>
<check if="super-dev-pipeline succeeded">
<output>✅ Implementation complete: {{story_key}}</output>
<action>Execute Step 4.5: Smart Story Reconciliation</action>
<action>Load reconciliation instructions: {installed_path}/step-4.5-reconcile-story-status.md</action>
<action>Execute reconciliation with story_key={{story_key}}</action>
<check if="reconciliation succeeded">
<output>✅ COMPLETED: {{story_key}} (reconciled)</output>
<action>Increment completed counter</action>
</check>
<check if="reconciliation failed">
<output>⚠️ WARNING: {{story_key}} completed but reconciliation failed</output>
<action>Increment completed counter (implementation was successful)</action>
<action>Add to reconciliation_warnings: {story_key: {{story_key}}, warning_message: "Reconciliation failed - manual verification needed"}</action>
<action>Increment reconciliation_warnings_count</action>
</check>
</check>
<check if="super-dev-pipeline failed">
<output>❌ FAILED: {{story_key}}</output>
<action>Increment failed counter</action>
<action>Add story_key to failed_stories list</action>
<check if="continue_on_failure == false">
<output>⚠️ Stopping batch due to failure (continue_on_failure=false)</output>
<action>Jump to Step 5 (Summary)</action>
</check>
</check>
<check if="display_progress == true">
<output>
**Progress:** {{completed}} completed, {{failed}} failed, {{remaining}} remaining
</output>
</check>
<check if="not last story AND pause_between_stories > 0">
<output>⏸️ Pausing {{pause_between_stories}} seconds before next story...</output>
<action>Wait {{pause_between_stories}} seconds</action>
</check>
</substep>
<action>After all stories processed, jump to Step 5 (Summary)</action>
</step>
<step n="4-Parallel" goal="Parallel processing with Task agents">
<output>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🚀 PARALLEL BATCH PROCESSING STARTED
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
**Stories:** {{count}}
**Mode:** Task agents (autonomous, parallel)
**Agents in parallel:** {{parallel_count}}
**Continue on failure:** {{continue_on_failure}}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
</output>
<action>Split selected_stories into batches of size parallel_count</action>
<action>Example: If 10 stories and parallel_count=4, create batches: [1-4], [5-8], [9-10]</action>
<iterate>For each batch of stories:</iterate>
<substep n="4p-a" title="Spawn Task agents for batch">
<output>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
📦 Batch {{batch_index}}/{{total_batches}}: Spawning {{stories_in_batch}} agents
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Stories in this batch:
{{#each stories_in_batch}}
{{@index}}. {{story_key}}
{{/each}}
Spawning Task agents in parallel...
</output>
<action>For each story in current batch, spawn Task agent with these parameters:</action>
<action>
Task tool parameters:
- subagent_type: "general-purpose"
- description: "Implement story {{story_key}}"
- prompt: "Execute super-dev-pipeline workflow for story {{story_key}}.
CRITICAL INSTRUCTIONS:
1. Load workflow.xml: _bmad/core/tasks/workflow.xml
2. Load workflow config: _bmad/bmm/workflows/4-implementation/super-dev-pipeline/workflow.yaml
3. Execute in BATCH mode with story_key={{story_key}}
4. Follow all 7 pipeline steps (init, pre-gap, implement, post-validate, code-review, complete, summary)
5. Commit changes when complete
6. Report final status (done/failed) with file list
Story file will be auto-resolved from multiple naming conventions."
- run_in_background: false (wait for completion to track results)
</action>
<action>Store task IDs for this batch: task_ids[]</action>
<output>
✅ Spawned {{stories_in_batch}} Task agents
Agents will process stories autonomously with full quality gates:
- Pre-gap analysis (validate tasks)
- Implementation (TDD/refactor)
- Post-validation (verify completion)
- Code review (find 3-10 issues)
- Git commit (targeted files only)
{{#if not last_batch}}
Waiting for this batch to complete before spawning next batch...
{{/if}}
</output>
<action>Wait for all agents in batch to complete</action>
<action>Collect results from each agent via TaskOutput</action>
<iterate>For each completed agent:</iterate>
<check if="agent succeeded">
<output>✅ Implementation complete: {{story_key}}</output>
<action>Execute Step 4.5: Smart Story Reconciliation</action>
<action>Load reconciliation instructions: {installed_path}/step-4.5-reconcile-story-status.md</action>
<action>Execute reconciliation with story_key={{story_key}}</action>
<check if="reconciliation succeeded">
<output>✅ COMPLETED: {{story_key}} (reconciled)</output>
<action>Increment completed counter</action>
</check>
<check if="reconciliation failed">
<output>⚠️ WARNING: {{story_key}} completed but reconciliation failed</output>
<action>Increment completed counter (implementation was successful)</action>
<action>Add to reconciliation_warnings: {story_key: {{story_key}}, warning_message: "Reconciliation failed - manual verification needed"}</action>
<action>Increment reconciliation_warnings_count</action>
</check>
</check>
<check if="agent failed">
<output>❌ FAILED: {{story_key}}</output>
<action>Increment failed counter</action>
<action>Add story_key to failed_stories list</action>
</check>
<output>
**Batch {{batch_index}} Complete:** {{batch_completed}} succeeded, {{batch_failed}} failed
**Overall Progress:** {{completed}}/{{total_count}} completed
</output>
</substep>
<action>After all batches processed, jump to Step 5 (Summary)</action>
</step>
<step n="5" goal="Display batch summary">
<action>Calculate end_time and total_duration</action>
<action>Calculate success_rate = (completed / total_count) * 100</action>
<output>
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
📊 BATCH SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
**Total stories:** {{total_count}}
**✅ Completed:** {{completed}}
**❌ Failed:** {{failed}}
**⚠️ Reconciliation warnings:** {{reconciliation_warnings_count}}
**Success rate:** {{success_rate}}%
**Duration:** {{total_duration}}
{{#if failed > 0}}
**Failed stories:**
{{#each failed_stories}}
- {{this}}
{{/each}}
**Retry failed stories:**
```bash
{{#each failed_stories}}
/bmad:bmm:workflows:super-dev-pipeline mode=batch story_key={{this}}
{{/each}}
```
{{/if}}
{{#if reconciliation_warnings_count > 0}}
**⚠️ Reconciliation warnings (stories completed but status may be inaccurate):**
{{#each reconciliation_warnings}}
- {{story_key}}: {{warning_message}}
{{/each}}
**Manual reconciliation needed:**
Review these stories to ensure checkboxes and status are accurate.
Check Dev Agent Record vs Acceptance Criteria/Tasks/DoD sections.
{{/if}}
**Next steps:**
1. Check sprint-status.yaml - stories should be marked "done" or "review"
2. Run tests: `pnpm test`
3. Check coverage: `pnpm test --coverage`
4. Review commits: `git log -{{completed}}`
5. Spot-check 2-3 stories for quality
**Run another batch?**
`/bmad:bmm:workflows:batch-super-dev`
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
</output>
<action>Save batch log to {batch_log}</action>
<action>Log contents: start_time, end_time, total_duration, selected_stories, completed_stories, failed_stories, success_rate</action>
</step>
</workflow>

View File

@@ -0,0 +1,370 @@
# Step 4.5: Smart Story Reconciliation
<critical>Execute AFTER super-dev-pipeline completes but BEFORE marking story as "completed"</critical>
<critical>This ensures story checkboxes and status accurately reflect actual implementation</critical>
## Goal
Verify story file accuracy by reconciling:
1. **Acceptance Criteria checkboxes** vs Dev Agent Record
2. **Tasks/Subtasks checkboxes** vs Dev Agent Record
3. **Definition of Done checkboxes** vs Dev Agent Record
4. **Story status** (should be "done" if implementation complete)
5. **sprint-status.yaml entry** (should match story file status)
---
## Execution
### 1. Load Story File
<action>Read story file: {story_file_path}</action>
<action>Extract sections:
- Acceptance Criteria (## Acceptance Criteria)
- Tasks / Subtasks (## Tasks / Subtasks)
- Definition of Done (## Definition of Done)
- Dev Agent Record (## Dev Agent Record)
- Story status header (**Status:** ...)
</action>
### 2. Analyze Dev Agent Record
<action>Read "Dev Agent Record" section</action>
<check if="Dev Agent Record is empty or says '(To be filled by dev agent)'">
<output>⚠️ Story {{story_key}}: Dev Agent Record is empty - cannot reconcile</output>
<output>This suggests super-dev-pipeline did not complete successfully.</output>
<action>Mark story as FAILED reconciliation</action>
<action>Return early (skip remaining checks)</action>
</check>
<action>Parse Dev Agent Record fields:
- **Agent Model Used** (should have model name, not empty)
- **Implementation Summary** (should describe what was built)
- **File List** (should list new/modified files)
- **Test Results** (should show test counts)
- **Completion Notes** (should document what works)
</action>
<check if="Implementation Summary contains 'COMPLETE' or lists specific deliverables">
<action>Set implementation_status = COMPLETE</action>
</check>
<check if="Implementation Summary is vague or says 'pending'">
<action>Set implementation_status = INCOMPLETE</action>
<output>⚠️ Story {{story_key}}: Implementation appears incomplete based on Dev Agent Record</output>
</check>
### 3. Reconcile Acceptance Criteria
<action>For each AC subsection (AC1, AC2, AC3, AC4, etc.):</action>
<iterate>For each checkbox in AC section:</iterate>
<substep n="3a" title="Identify expected status from Dev Agent Record">
<action>Search Implementation Summary and File List for keywords from checkbox text</action>
<example>
Checkbox: "[ ] FlexibleGridSection component (renders dynamic grid layouts)"
Implementation Summary mentions: "FlexibleGridSection component created"
File List includes: "FlexibleGridSection.tsx"
→ Expected status: CHECKED
</example>
<action>Determine expected_checkbox_status:
- CHECKED if Implementation Summary confirms it OR File List shows created files OR Test Results mention it
- UNCHECKED if no evidence in Dev Agent Record
- PARTIAL if mentioned as "pending" or "infrastructure ready"
</action>
</substep>
<substep n="3b" title="Compare actual vs expected">
<action>Read actual checkbox state from story file ([x] vs [ ] vs [~])</action>
<check if="actual != expected">
<output>🔧 Reconciling AC: "{{checkbox_text}}"
Actual: {{actual_status}}
Expected: {{expected_status}}
Reason: {{evidence_from_dev_record}}
</output>
<action>Add to reconciliation_changes list</action>
</check>
</substep>
<action>After checking all ACs:
- Count total AC items
- Count checked AC items (after reconciliation)
- Calculate AC completion percentage
</action>
### 4. Reconcile Tasks / Subtasks
<action>For each Task (Task 1, Task 2, etc.):</action>
<iterate>For each checkbox in Tasks section:</iterate>
<substep n="4a" title="Identify expected status from Dev Agent Record">
<action>Search Implementation Summary and File List for task keywords</action>
<example>
Task checkbox: "[ ] **2.2:** Create FlexibleGridSection component"
File List includes: "apps/frontend/web/src/components/FlexibleGridSection.tsx"
→ Expected status: CHECKED
</example>
<action>Determine expected_checkbox_status using same logic as AC section</action>
</substep>
<substep n="4b" title="Compare and reconcile">
<action>Read actual checkbox state</action>
<check if="actual != expected">
<output>🔧 Reconciling Task: "{{task_text}}"
Actual: {{actual_status}}
Expected: {{expected_status}}
Reason: {{evidence_from_dev_record}}
</output>
<action>Add to reconciliation_changes list</action>
</check>
</substep>
<action>After checking all Tasks:
- Count total task items
- Count checked task items (after reconciliation)
- Calculate task completion percentage
</action>
### 5. Reconcile Definition of Done
<action>For each DoD category (Code Quality, Testing, Security, etc.):</action>
<iterate>For each checkbox in DoD section:</iterate>
<substep n="5a" title="Determine expected status">
<action>Check Test Results, Completion Notes for evidence</action>
<example>
DoD checkbox: "[ ] Type check passes: `pnpm type-check` (zero errors)"
Completion Notes say: "Type check passes ✅"
→ Expected status: CHECKED
</example>
<example>
DoD checkbox: "[ ] Unit tests: 90%+ coverage"
Test Results say: "37 tests passing"
Completion Notes say: "100% coverage on FlexibleGridSection"
→ Expected status: CHECKED
</example>
<action>Determine expected_checkbox_status</action>
</substep>
<substep n="5b" title="Compare and reconcile">
<action>Read actual checkbox state</action>
<check if="actual != expected">
<output>🔧 Reconciling DoD: "{{dod_text}}"
Actual: {{actual_status}}
Expected: {{expected_status}}
Reason: {{evidence_from_dev_record}}
</output>
<action>Add to reconciliation_changes list</action>
</check>
</substep>
<action>After checking all DoD items:
- Count total DoD items
- Count checked DoD items (after reconciliation)
- Calculate DoD completion percentage
</action>
### 6. Determine Correct Story Status
<action>Based on completion percentages, determine correct story status:</action>
<check if="AC >= 95% AND Tasks >= 95% AND DoD >= 95%">
<action>Set correct_story_status = "done"</action>
</check>
<check if="AC >= 80% AND Tasks >= 80% AND DoD >= 80%">
<action>Set correct_story_status = "review"</action>
</check>
<check if="AC < 80% OR Tasks < 80% OR DoD < 80%">
<action>Set correct_story_status = "in-progress"</action>
</check>
<check if="implementation_status == INCOMPLETE">
<action>Override: Set correct_story_status = "in-progress"</action>
<output>⚠️ Overriding status to "in-progress" due to incomplete implementation</output>
</check>
<action>Read current story status from story file (**Status:** ...)</action>
<check if="current_story_status != correct_story_status">
<output>🔧 Story status mismatch:
Current: {{current_story_status}}
Expected: {{correct_story_status}}
Reason: AC={{ac_pct}}% Tasks={{tasks_pct}}% DoD={{dod_pct}}%
</output>
<action>Add to reconciliation_changes list</action>
</check>
### 7. Verify sprint-status.yaml Entry
<action>Read {sprint_status} file</action>
<action>Find entry for {{story_key}}</action>
<action>Extract current status from sprint-status.yaml</action>
<check if="sprint_status_yaml_status != correct_story_status">
<output>🔧 sprint-status.yaml mismatch:
Current: {{sprint_status_yaml_status}}
Expected: {{correct_story_status}}
</output>
<action>Add to reconciliation_changes list</action>
</check>
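For illustration only, the line-level change flagged here (and applied later in substep 8e) amounts to rewriting the story's `key: status # comment` entry. A minimal Node sketch; the agent performs the actual edit with the Edit tool, and the helper name and regex are assumptions:
```js
const fs = require('node:fs');

function updateSprintStatusEntry(sprintStatusPath, storyKey, newStatus, comment) {
  const text = fs.readFileSync(sprintStatusPath, 'utf8');
  const entryPattern = new RegExp(`^(\\s*)${storyKey}:.*$`, 'm'); // the story's line in sprint-status.yaml
  const suffix = comment ? ` # ${comment}` : '';
  fs.writeFileSync(sprintStatusPath, text.replace(entryPattern, `$1${storyKey}: ${newStatus}${suffix}`));
}

// Mirrors the substep 8e example: "20-8-...: ready-for-dev # Story description"
// becomes "20-8-...: done # ✅ COMPLETED: Component + tests + docs"
```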
### 8. Apply Reconciliation Changes
<check if="reconciliation_changes is empty">
<output>✅ Story {{story_key}}: Already accurate (0 changes needed)</output>
<action>Return SUCCESS (no updates needed)</action>
</check>
<check if="reconciliation_changes is NOT empty">
<output>
🔧 Story {{story_key}}: Reconciling {{count}} issues
**Changes to apply:**
{{#each reconciliation_changes}}
{{@index}}. {{change_description}}
{{/each}}
</output>
<ask>Apply these reconciliation changes? (yes/no):</ask>
<check if="response != 'yes'">
<output>⏭️ Skipping reconciliation for {{story_key}}</output>
<action>Return SUCCESS (user declined changes)</action>
</check>
<substep n="8a" title="Update Acceptance Criteria">
<action>For each AC checkbox that needs updating:</action>
<action>Use Edit tool to update checkbox from [ ] to [x] or [~]</action>
<action>Add note explaining why: "- [x] Item - COMPLETE: {{evidence}}"</action>
</substep>
<substep n="8b" title="Update Tasks / Subtasks">
<action>For each Task checkbox that needs updating:</action>
<action>Use Edit tool to update checkbox</action>
<action>Update task header if all subtasks complete: "### Task 1: ... ✅ COMPLETE"</action>
</substep>
<substep n="8c" title="Update Definition of Done">
<action>For each DoD checkbox that needs updating:</action>
<action>Use Edit tool to update checkbox</action>
<action>Add verification note: "- [x] Item ✅ (verified in Dev Agent Record)"</action>
</substep>
<substep n="8d" title="Update Story Status">
<check if="story status needs updating">
<action>Use Edit tool to update status line</action>
<action>Change from: **Status:** {{old_status}}</action>
<action>Change to: **Status:** {{correct_story_status}}</action>
</check>
</substep>
<substep n="8e" title="Update sprint-status.yaml">
<check if="sprint-status.yaml needs updating">
<action>Use Edit tool to update status entry</action>
<action>Update comment if needed to reflect completion</action>
<example>
Before: 20-8-...: ready-for-dev # Story description
After: 20-8-...: done # ✅ COMPLETED: Component + tests + docs
</example>
</check>
</substep>
<output>✅ Story {{story_key}}: Reconciliation complete ({{count}} changes applied)</output>
</check>
### 9. Final Verification
<action>Re-read story file to verify changes applied correctly</action>
<action>Calculate final completion percentages</action>
<output>
📊 Story {{story_key}} - Final Status
**Acceptance Criteria:** {{ac_checked}}/{{ac_total}} ({{ac_pct}}%)
**Tasks/Subtasks:** {{tasks_checked}}/{{tasks_total}} ({{tasks_pct}}%)
**Definition of Done:** {{dod_checked}}/{{dod_total}} ({{dod_pct}}%)
**Story Status:** {{correct_story_status}}
**sprint-status.yaml:** {{correct_story_status}}
{{#if correct_story_status == "done"}}
✅ Story is COMPLETE and accurately reflects implementation
{{/if}}
{{#if correct_story_status == "review"}}
⚠️ Story needs review (some items incomplete)
{{/if}}
{{#if correct_story_status == "in-progress"}}
⚠️ Story has significant gaps (implementation incomplete)
{{/if}}
</output>
<action>Return SUCCESS with reconciliation summary</action>
---
## Success Criteria
Story reconciliation succeeds when:
1. ✅ All checkboxes match Dev Agent Record evidence
2. ✅ Story status accurately reflects completion (done/review/in-progress)
3. ✅ sprint-status.yaml entry matches story file status
4. ✅ Completion percentages calculated and reported
5. ✅ Changes documented in reconciliation summary
---
## Error Handling
<check if="story file not found">
<output>❌ Story {{story_key}}: File not found at {{story_file_path}}</output>
<action>Return FAILED reconciliation</action>
</check>
<check if="Dev Agent Record missing or empty">
<output>⚠️ Story {{story_key}}: Cannot reconcile - Dev Agent Record not populated</output>
<action>Mark as INCOMPLETE (not implemented yet)</action>
<action>Return WARNING reconciliation</action>
</check>
<check if="Edit tool fails">
<output>❌ Story {{story_key}}: Failed to apply changes (Edit tool error)</output>
<action>Log error details</action>
<action>Return FAILED reconciliation</action>
</check>
---
## Integration with batch-super-dev
**Insert this step:**
- **Sequential mode:** After Step 4s-a (Process individual story), before marking completed
- **Parallel mode:** After Step 4p-a (Spawn Task agents), after agent completes but before marking completed
**Flow:**
```
super-dev-pipeline completes → Step 4.5 (Reconcile) → Mark as completed/failed
```
**Benefits:**
- Ensures all batch-processed stories have accurate status
- Catches mismatches automatically
- Prevents "done" stories with unchecked items
- Maintains sprint-status.yaml accuracy

View File

@@ -0,0 +1,47 @@
name: batch-super-dev
description: "Interactive batch selector for super-dev-pipeline - select multiple ready-for-dev stories and process them sequentially with full quality gates. Includes smart story validation and automatic creation/regeneration."
author: "BMad"
version: "1.2.0"
# Critical variables from config
config_source: "{project-root}/_bmad/bmm/config.yaml"
output_folder: "{config_source}:output_folder"
sprint_artifacts: "{config_source}:sprint_artifacts"
communication_language: "{config_source}:communication_language"
date: system-generated
# Workflow paths
installed_path: "{project-root}/_bmad/bmm/workflows/4-implementation/batch-super-dev"
instructions: "{installed_path}/instructions.md"
# State management
sprint_status: "{sprint_artifacts}/sprint-status.yaml"
batch_log: "{sprint_artifacts}/batch-super-dev-{date}.log"
# Variables
filter_by_epic: "" # Optional: Filter stories by epic number (e.g., "3" for only Epic 3 stories)
max_stories: 20 # Safety limit - won't process more than this in one batch
pause_between_stories: 5 # Seconds to pause between stories (allows monitoring, prevents rate limits)
# Super-dev-pipeline invocation settings
super_dev_settings:
mode: "batch" # Always use batch mode for autonomous execution
workflow_path: "{project-root}/_bmad/bmm/workflows/4-implementation/super-dev-pipeline"
# Story validation settings (NEW in v1.2.0)
validation:
enabled: true # Validate story files before processing
auto_create_missing: false # If true, auto-create without prompting (use with caution)
auto_regenerate_invalid: false # If true, auto-regenerate without prompting (use with caution)
min_sections: 12 # BMAD format requires all 12 sections
min_current_state_words: 100 # Current State must have substantial content
require_gap_analysis: true # Current State must have ✅/❌ markers
backup_before_regenerate: true # Create .backup file before regenerating
# Execution settings
execution:
continue_on_failure: true # Keep processing remaining stories if one fails
display_progress: true # Show running summary after each story
save_state: true # Save progress to resume if interrupted
standalone: true

View File

@@ -4,7 +4,6 @@ header: "Creative Innovation Suite (CIS) Module"
subheader: "No custom configuration required - uses Core settings only"
default_selected: false # This module will not be selected by default for new installations
# Variables from Core Config inserted:
## user_name
## communication_language