Commit 3359b2a0c2 by Alex Verkhovsky, 2025-12-18 21:14:49 +00:00, committed by GitHub
12 changed files with 939 additions and 287 deletions

View File: _bmad/core/tasks/review-adversarial-general.xml

@@ -0,0 +1,82 @@
<task id="_bmad/core/tasks/review-adversarial-general.xml" name="Adversarial Review (General)">
<objective>Cynically review content and produce numbered findings with severity and classification</objective>
<inputs>
<input name="content" desc="Content to review - diff, spec, story, doc, or any artifact" />
</inputs>
<llm critical="true">
<i>You are a cynical, jaded reviewer with zero patience for sloppy work</i>
<i>The content was submitted by a clueless weasel and you expect to find problems</i>
<i>Find at least five issues to fix or improve - be skeptical of everything</i>
<i>Zero findings is suspicious - if you find nothing, halt and question your analysis</i>
</llm>
<flow>
<step n="1" title="Receive Content">
<action>Load the content to review from provided input or context</action>
<action>Identify content type (diff, spec, story, doc, etc.) to calibrate review approach</action>
</step>
<step n="2" title="Adversarial Analysis" critical="true">
<mandate>Review with extreme skepticism - assume problems exist</mandate>
<analysis-areas>
<area>Correctness - Is it actually right? Look for logic errors, bugs, gaps</area>
<area>Completeness - What's missing? Edge cases, error handling, validation</area>
<area>Consistency - Does it match patterns, conventions, existing code?</area>
<area>Clarity - Is it understandable? Naming, structure, documentation</area>
<area>Quality - Is it good enough? Performance, security, maintainability</area>
</analysis-areas>
<action>Find at least 5 issues - dig deep, don't accept surface-level "looks good"</action>
</step>
<step n="3" title="Classify Findings">
<action>For each finding, assign:</action>
<finding-id>F1, F2, F3... (sequential)</finding-id>
<severity>
<level name="critical">Must fix - blocks ship, causes failures</level>
<level name="high">Should fix - significant issue, notable risk</level>
<level name="medium">Consider fixing - minor issue, small improvement</level>
<level name="low">Nitpick - optional, stylistic, nice-to-have</level>
</severity>
<classification>
<type name="real">Confirmed issue - should address</type>
<type name="noise">False positive - no action needed</type>
<type name="uncertain">Needs discussion - could go either way</type>
</classification>
</step>
<step n="4" title="Present Findings">
<action>Output findings in structured format</action>
</step>
</flow>
<findings-format>
**Adversarial Review Findings**
| ID | Severity | Classification | Finding |
|----|----------|----------------|---------|
| F1 | {severity} | {classification} | {description} |
| F2 | {severity} | {classification} | {description} |
| ... | | | |
**Summary:** {count} findings - {critical_count} critical, {high_count} high, {medium_count} medium, {low_count} low
</findings-format>
<halt-conditions>
<condition>HALT if zero findings - this is suspicious, re-analyze or ask for guidance</condition>
<condition>HALT if content is empty or unreadable</condition>
</halt-conditions>
<critical-rules>
<rule>NEVER accept "looks good" without deep analysis</rule>
<rule>ALWAYS find at least 5 issues - if you can't, you're not looking hard enough</rule>
<rule>ALWAYS assign ID, severity, and classification to each finding</rule>
<rule>Be cynical but fair - classify noise as noise, real as real</rule>
</critical-rules>
</task>
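
For illustration, output following the findings-format above might look like this (the findings themselves are hypothetical, not taken from any real review):

```
**Adversarial Review Findings**
| ID | Severity | Classification | Finding |
|----|----------|----------------|---------|
| F1 | critical | real | Unvalidated user input is passed straight into the SQL query in the login handler |
| F2 | medium | uncertain | No test covers the empty-cart checkout path |
| F3 | low | noise | Log messages mix sentence case and lowercase |
**Summary:** 3 findings - 1 critical, 0 high, 1 medium, 1 low
```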

View File

@@ -104,52 +104,104 @@
</action>
<action>Find at least 3 more specific, actionable issues</action>
</check>
+<!-- Store context-aware findings for later consolidation -->
+<action>Set {{context_aware_findings}} = all issues found in this step (numbered list with file:line locations)</action>
</step>
-<step n="4" title="Present findings and fix them">
-<action>Categorize findings: HIGH (must fix), MEDIUM (should fix), LOW (nice to fix)</action>
+<step n="4" goal="Run information-asymmetric adversarial review">
+<critical>Reviewer has FULL repo access but NO knowledge of WHY changes were made</critical>
+<critical>DO NOT include story file in prompt - asymmetry is about intent, not visibility</critical>
+<critical>Reviewer can explore codebase to understand impact, but judges changes on merit alone</critical>
+<!-- Construct diff of story-related changes -->
+<action>Construct the diff of story-related changes:
+- Uncommitted changes: `git diff` + `git diff --cached`
+- Committed changes (if story spans commits): `git log --oneline` to find relevant commits, then `git diff base..HEAD`
+- Exclude story file from diff: `git diff -- . ':!{{story_path}}'`
+</action>
+<action>Set {{asymmetric_target}} = the diff output (reviewer can explore repo but is prompted to review this diff)</action>
+<!-- Execution hierarchy: cleanest context first -->
+<check if="Task tool available (can spawn subagent)">
+<action>Launch general-purpose subagent with adversarial prompt:
+"You are a cynical, jaded code reviewer with zero patience for sloppy work.
+A clueless weasel submitted the following changes and you expect to find problems.
+Find at least ten findings to fix or improve. Look for what's missing, not just what's wrong.
+Number each finding (1., 2., 3., ...). Be skeptical of everything.
+Changes to review:
+{{asymmetric_target}}"
+</action>
+<action>Collect numbered findings into {{asymmetric_findings}}</action>
+</check>
+<check if="no Task tool BUT can use Bash to invoke CLI for fresh context">
+<action>Execute adversarial review via CLI (e.g., claude --print) in fresh context with same prompt</action>
+<action>Collect numbered findings into {{asymmetric_findings}}</action>
+</check>
+<check if="cannot create clean slate agent by any means (fallback)">
+<action>Execute adversarial prompt inline in main context</action>
+<action>Note: Has context pollution but cynical reviewer persona still adds significant value</action>
+<action>Collect numbered findings into {{asymmetric_findings}}</action>
+</check>
+</step>
+<step n="5" goal="Consolidate findings and present to user">
+<critical>Merge findings from BOTH context-aware review (step 3) AND asymmetric review (step 4)</critical>
+<action>Combine {{context_aware_findings}} from step 3 with {{asymmetric_findings}} from step 4</action>
+<action>Deduplicate findings:
+- Identify findings that describe the same underlying issue
+- Keep the more detailed/actionable version
+- Note when both reviews caught the same issue (validates severity)
+</action>
+<action>Assess each finding:
+- Is this a real issue or noise/false positive?
+- Assign severity: 🔴 CRITICAL, 🟠 HIGH, 🟡 MEDIUM, 🟢 LOW
+</action>
+<action>Filter out non-issues:
+- Remove false positives
+- Remove nitpicks that do not warrant action
+- Keep anything that could cause problems in production
+</action>
+<action>Sort by severity (CRITICAL → HIGH → MEDIUM → LOW)</action>
<action>Set {{fixed_count}} = 0</action>
<action>Set {{action_count}} = 0</action>
<output>**🔥 CODE REVIEW FINDINGS, {user_name}!**
-**Story:** {{story_file}}
+**Story:** {{story_path}}
**Git vs Story Discrepancies:** {{git_discrepancy_count}} found
-**Issues Found:** {{high_count}} High, {{medium_count}} Medium, {{low_count}} Low
-## 🔴 CRITICAL ISSUES
-- Tasks marked [x] but not actually implemented
-- Acceptance Criteria not implemented
-- Story claims files changed but no git evidence
-- Security vulnerabilities
-## 🟡 MEDIUM ISSUES
-- Files changed but not documented in story File List
-- Uncommitted changes not tracked
-- Performance problems
-- Poor test coverage/quality
-- Code maintainability issues
-## 🟢 LOW ISSUES
-- Code style improvements
-- Documentation gaps
-- Git commit message quality
+**Issues Found:** {{critical_count}} Critical, {{high_count}} High, {{medium_count}} Medium, {{low_count}} Low
+| # | Severity | Summary | Location |
+|---|----------|---------|----------|
+{{findings_table}}
+**{{total_count}} issues found** ({{critical_count}} critical, {{high_count}} high, {{medium_count}} medium, {{low_count}} low)
</output>
<ask>What should I do with these issues?
-1. **Fix them automatically** - I'll update the code and tests
+1. **Fix them automatically** - I'll fix all HIGH and CRITICAL, you approve each
2. **Create action items** - Add to story Tasks/Subtasks for later
-3. **Show me details** - Deep dive into specific issues
+3. **Details on #N** - Explain specific issue
Choose [1], [2], or specify which issue to examine:</ask>
<check if="user chooses 1">
-<action>Fix all HIGH and MEDIUM issues in the code</action>
+<action>Fix all CRITICAL and HIGH issues in the code</action>
<action>Add/update tests as needed</action>
<action>Update File List in story if files changed</action>
<action>Update story Dev Agent Record with fixes applied</action>
-<action>Set {{fixed_count}} = number of HIGH and MEDIUM issues fixed</action>
+<action>Set {{fixed_count}} = number of CRITICAL and HIGH issues fixed</action>
<action>Set {{action_count}} = 0</action>
</check>
@@ -166,13 +218,13 @@
</check>
</step>
-<step n="5" goal="Update story status and sync sprint tracking">
+<step n="6" goal="Update story status and sync sprint tracking">
<!-- Determine new status based on review outcome -->
-<check if="all HIGH and MEDIUM issues fixed AND all ACs implemented">
+<check if="all CRITICAL and HIGH issues fixed AND all ACs implemented">
<action>Set {{new_status}} = "done"</action>
<action>Update story Status field to "done"</action>
</check>
-<check if="HIGH or MEDIUM issues remain OR ACs not fully implemented">
+<check if="CRITICAL or HIGH issues remain OR ACs not fully implemented">
<action>Set {{new_status}} = "in-progress"</action>
<action>Update story Status field to "in-progress"</action>
</check>
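
A minimal shell sketch of the diff construction and CLI fallback in the new step 4 above, assuming the `claude` CLI is installed and accepts piped input, and that the story path shown is a hypothetical placeholder:

```bash
# Build the story-excluding diff: uncommitted + staged changes, story file filtered out
STORY_PATH="docs/stories/story-042.md"   # hypothetical story path
{
  git diff -- . ":!$STORY_PATH"
  git diff --cached -- . ":!$STORY_PATH"
} > /tmp/asymmetric_target.diff

# Fresh-context fallback: pipe the diff to a separate CLI invocation with the adversarial prompt
PROMPT="You are a cynical, jaded code reviewer with zero patience for sloppy work. A clueless weasel submitted the changes on stdin and you expect to find problems. Find at least ten issues. Number each finding."
claude --print "$PROMPT" < /tmp/asymmetric_target.diff
```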

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/checklist.md

@@ -1,25 +0,0 @@
# Quick-Dev Checklist
## Before Implementation
- [ ] Context loaded (tech-spec or user guidance)
- [ ] Files to modify identified
- [ ] Patterns understood
## Implementation
- [ ] All tasks completed
- [ ] Code follows existing patterns
- [ ] Error handling appropriate
## Testing
- [ ] Tests written (where appropriate)
- [ ] All tests passing
- [ ] No regressions
## Completion
- [ ] Acceptance criteria satisfied
- [ ] Tech-spec updated (if applicable)
- [ ] Summary provided to user

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/instructions.md

@@ -1,202 +0,0 @@
# Quick-Dev - Flexible Development Workflow
<workflow>
<critical>Communicate in {communication_language}, tailored to {user_skill_level}</critical>
<critical>Execute continuously until COMPLETE - do not stop for milestones</critical>
<critical>Flexible - handles tech-specs OR direct instructions</critical>
<critical>ALWAYS respect {project_context} if it exists - it defines project standards</critical>
<checkpoint-handlers>
<on-select key="a">Load and execute {advanced_elicitation}, then return</on-select>
<on-select key="p">Load and execute {party_mode_workflow}, then return</on-select>
<on-select key="t">Load and execute {create_tech_spec_workflow}</on-select>
</checkpoint-handlers>
<step n="1" goal="Load project context and determine execution mode">
<action>Check if {project_context} exists. If yes, load it - this is your foundational reference for ALL implementation decisions (patterns, conventions, architecture).</action>
<action>Parse user input:
**Mode A: Tech-Spec** - e.g., `quick-dev tech-spec-auth.md`
→ Load spec, extract tasks/context/AC, goto step 3
**Mode B: Direct Instructions** - e.g., `refactor src/foo.ts...`
→ Offer planning choice
</action>
<check if="Mode A">
<action>Load tech-spec, extract tasks/context/AC</action>
<goto>step_3</goto>
</check>
<check if="Mode B">
<!-- Escalation Threshold: Lightweight check - should we invoke scale-adaptive? -->
<action>Evaluate escalation threshold against user input (minimal tokens, no file loading):
**Triggers escalation** (if 2+ signals present):
- Multiple components mentioned (e.g., dashboard + api + database)
- System-level language (e.g., platform, integration, architecture)
- Uncertainty about approach (e.g., "how should I", "best way to")
- Multi-layer scope (e.g., UI + backend + data together)
- Extended timeframe (e.g., "this week", "over the next few days")
**Reduces signal:**
- Simplicity markers (e.g., "just", "quickly", "fix", "bug", "typo", "simple", "basic", "minor")
- Single file/component focus
- Confident, specific request
Use holistic judgment, not mechanical keyword matching.</action>
<!-- No Escalation: Simple request, offer existing choice -->
<check if="escalation threshold NOT triggered">
<ask>**[t] Plan first** - Create tech-spec then implement
**[e] Execute directly** - Start now</ask>
<check if="t">
<action>Load and execute {create_tech_spec_workflow}</action>
<action>Continue to implementation after spec complete</action>
</check>
<check if="e">
<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
<goto>step_2</goto>
</check>
</check>
<!-- Escalation Triggered: Load scale-adaptive and evaluate level -->
<check if="escalation threshold triggered">
<action>Load {project_levels} and evaluate user input against detection_hints.keywords</action>
<action>Determine level (0-4) using scale-adaptive definitions</action>
<!-- Level 0: Scale-adaptive confirms simple, fall back to standard choice -->
<check if="level 0">
<ask>**[t] Plan first** - Create tech-spec then implement
**[e] Execute directly** - Start now</ask>
<check if="t">
<action>Load and execute {create_tech_spec_workflow}</action>
<action>Continue to implementation after spec complete</action>
</check>
<check if="e">
<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
<goto>step_2</goto>
</check>
</check>
<check if="level 1 or 2 or couldn't determine level">
<ask>This looks like a focused feature with multiple components.
**[t] Create tech-spec first** (recommended)
**[w] Seems bigger than quick-dev** — see what BMad Method recommends (workflow-init)
**[e] Execute directly**</ask>
<check if="t">
<action>Load and execute {create_tech_spec_workflow}</action>
<action>Continue to implementation after spec complete</action>
</check>
<check if="w">
<action>Load and execute {workflow_init}</action>
<action>EXIT quick-dev - user has been routed to BMad Method</action>
</check>
<check if="e">
<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
<goto>step_2</goto>
</check>
</check>
<!-- Level 3+: BMad Method territory, recommend workflow-init -->
<check if="level 3 or higher">
<ask>This sounds like platform/system work.
**[w] Start BMad Method** (recommended) (workflow-init)
**[t] Create tech-spec** (lighter planning)
**[e] Execute directly** - feeling lucky</ask>
<check if="w">
<action>Load and execute {workflow_init}</action>
<action>EXIT quick-dev - user has been routed to BMad Method</action>
</check>
<check if="t">
<action>Load and execute {create_tech_spec_workflow}</action>
<action>Continue to implementation after spec complete</action>
</check>
<check if="e">
<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
<goto>step_2</goto>
</check>
</check>
</check>
</check>
</step>
<step n="2" goal="Quick context gathering (direct mode)">
<action>Identify files to modify, find relevant patterns, note dependencies</action>
<action>Create mental plan: tasks, acceptance criteria, files to touch</action>
</step>
<step n="3" goal="Execute implementation" id="step_3">
<action>For each task:
1. **Load Context** - read files from spec or relevant to change
2. **Implement** - follow patterns, handle errors, follow conventions
3. **Test** - write tests, run existing tests, verify AC
4. **Mark Complete** - check off task [x], continue
</action>
<action if="3 failures">HALT and request guidance</action>
<action if="tests fail">Fix before continuing</action>
<critical>Continue through ALL tasks without stopping</critical>
</step>
<step n="4" goal="Verify and complete">
<action>Verify: all tasks [x], tests passing, AC satisfied, patterns followed</action>
<check if="using tech-spec">
<action>Update tech-spec status to "Completed", mark all tasks [x]</action>
</check>
<output>**Implementation Complete!**
**Summary:** {{implementation_summary}}
**Files Modified:** {{files_list}}
**Tests:** {{test_summary}}
**AC Status:** {{ac_status}}
---
**Before committing (Recommended): Copy this code review prompt to a different LLM**
```
You are a cynical, jaded code reviewer with zero patience for sloppy work. These uncommitted changes were submitted by a clueless weasel and you expect to find problems. Find at least five issues to fix or improve in it. Number them. Be skeptical of everything.
```
</output>
<action>You must explain what was implemented based on {user_skill_level}</action>
</step>
</workflow>

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-01-mode-detection.md

@@ -0,0 +1,148 @@
---
name: 'step-01-mode-detection'
description: 'Determine execution mode (tech-spec vs direct), handle escalation, set state variables'
workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
thisStepFile: '{workflow_path}/steps/step-01-mode-detection.md'
nextStepFile_modeA: '{workflow_path}/steps/step-03-execute.md'
nextStepFile_modeB: '{workflow_path}/steps/step-02-context-gathering.md'
---
# Step 1: Mode Detection
**Goal:** Determine execution mode, capture baseline, handle escalation if needed.
---
## STATE VARIABLES (capture now, persist throughout)
These variables MUST be set in this step and available to all subsequent steps:
- `{baseline_commit}` - Git HEAD at workflow start
- `{execution_mode}` - "tech-spec" or "direct"
- `{tech_spec_path}` - Path to tech-spec file (if Mode A)
---
## EXECUTION SEQUENCE
### 1. Capture Baseline
Run `git rev-parse HEAD` and store result as `{baseline_commit}`.
### 2. Load Project Context
Check if `{project_context}` exists (`**/project-context.md`). If found, load it - it is the foundational reference for ALL implementation decisions.
### 3. Parse User Input
Analyze the user's input to determine mode:
**Mode A: Tech-Spec**
- User provided a path to a tech-spec file (e.g., `quick-dev tech-spec-auth.md`)
- Load the spec, extract tasks/context/AC
- Set `{execution_mode}` = "tech-spec"
- Set `{tech_spec_path}` = provided path
- **NEXT:** Load `step-03-execute.md`
**Mode B: Direct Instructions**
- User provided task description directly (e.g., `refactor src/foo.ts...`)
- Set `{execution_mode}` = "direct"
- **NEXT:** Evaluate escalation threshold, then proceed
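
A rough shell sketch of the baseline capture and mode split above (variable names are illustrative; the actual decision is made by the agent's judgment, not a script):

```bash
# Capture the repo HEAD once so later steps can diff against a stable baseline
BASELINE_COMMIT="$(git rev-parse HEAD)"

# Crude mode split: an existing .md argument is treated as a tech-spec, anything else as direct instructions
INPUT="$1"
if [[ "$INPUT" == *.md && -f "$INPUT" ]]; then
  EXECUTION_MODE="tech-spec"; TECH_SPEC_PATH="$INPUT"   # Mode A -> step-03-execute.md
else
  EXECUTION_MODE="direct"                               # Mode B -> evaluate escalation, then step-02
fi
echo "baseline=$BASELINE_COMMIT mode=$EXECUTION_MODE"
```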
---
## ESCALATION THRESHOLD (Mode B only)
Evaluate user input with minimal token usage (no file loading):
**Triggers escalation (if 2+ signals present):**
- Multiple components mentioned (dashboard + api + database)
- System-level language (platform, integration, architecture)
- Uncertainty about approach ("how should I", "best way to")
- Multi-layer scope (UI + backend + data together)
- Extended timeframe ("this week", "over the next few days")
**Reduces signal:**
- Simplicity markers ("just", "quickly", "fix", "bug", "typo", "simple")
- Single file/component focus
- Confident, specific request
Use holistic judgment, not mechanical keyword matching.
---
## ESCALATION HANDLING
### No Escalation (simple request)
Present choice:
```
**[t] Plan first** - Create tech-spec then implement
**[e] Execute directly** - Start now
```
- **[t]:** Direct user to `{create_tech_spec_workflow}`. **EXIT Quick Dev.**
- **[e]:** Ask for any additional guidance, then **NEXT:** Load `step-02-context-gathering.md`
### Escalation Triggered - Level 0-2
```
This looks like a focused feature with multiple components.
**[t] Create tech-spec first** (recommended)
**[w] Seems bigger than quick-dev** - see what BMad Method recommends
**[e] Execute directly**
```
- **[t]:** Direct to `{create_tech_spec_workflow}`. **EXIT Quick Dev.**
- **[w]:** Direct to `{workflow_init}`. **EXIT Quick Dev.**
- **[e]:** Ask for guidance, then **NEXT:** Load `step-02-context-gathering.md`
### Escalation Triggered - Level 3+
```
This sounds like platform/system work.
**[w] Start BMad Method** (recommended)
**[t] Create tech-spec** (lighter planning)
**[e] Execute directly** - feeling lucky
```
- **[w]:** Direct to `{workflow_init}`. **EXIT Quick Dev.**
- **[t]:** Direct to `{create_tech_spec_workflow}`. **EXIT Quick Dev.**
- **[e]:** Ask for guidance, then **NEXT:** Load `step-02-context-gathering.md`
---
## NEXT STEP DIRECTIVE
**CRITICAL:** When this step completes, explicitly state which step to load:
- Mode A (tech-spec): "**NEXT:** Loading `step-03-execute.md`"
- Mode B (direct, [e] selected): "**NEXT:** Loading `step-02-context-gathering.md`"
- Escalation ([t] or [w]): "**EXITING Quick Dev.** Follow the directed workflow."
---
## SUCCESS METRICS
- `{baseline_commit}` captured and stored
- `{execution_mode}` determined ("tech-spec" or "direct")
- `{tech_spec_path}` set if Mode A
- Project context loaded if exists
- Escalation evaluated appropriately (Mode B)
- Explicit NEXT directive provided
## FAILURE MODES
- Proceeding without capturing baseline commit
- Not setting execution_mode variable
- Loading step-02 when Mode A (tech-spec provided)
- Attempting to "return" after escalation instead of EXIT
- No explicit NEXT directive at step completion

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-02-context-gathering.md

@@ -0,0 +1,117 @@
---
name: 'step-02-context-gathering'
description: 'Quick context gathering for direct mode - identify files, patterns, dependencies'
workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
thisStepFile: '{workflow_path}/steps/step-02-context-gathering.md'
nextStepFile: '{workflow_path}/steps/step-03-execute.md'
---
# Step 2: Context Gathering (Direct Mode)
**Goal:** Quickly gather context for direct instructions - files, patterns, dependencies.
**Note:** This step only runs for Mode B (direct instructions). If `{execution_mode}` is "tech-spec", this step was skipped.
---
## AVAILABLE STATE
From step-01:
- `{baseline_commit}` - Git HEAD at workflow start
- `{execution_mode}` - Should be "direct"
- `{project_context}` - Loaded if exists
---
## EXECUTION SEQUENCE
### 1. Identify Files to Modify
Based on user's direct instructions:
- Search for relevant files using glob/grep
- Identify the specific files that need changes
- Note file locations and purposes
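
A hedged sketch of this search, assuming grep/git are available and that `checkout` is a hypothetical keyword drawn from the user's request:

```bash
FEATURE="checkout"                                          # hypothetical keyword from the user's instructions
git ls-files | grep -i "$FEATURE"                           # files whose paths mention the feature
grep -ril "$FEATURE" --include='*.ts' --include='*.py' .    # files whose contents mention it
```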
### 2. Find Relevant Patterns
Examine the identified files and their surroundings:
- Code style and conventions used
- Existing patterns for similar functionality
- Import/export patterns
- Error handling approaches
- Test patterns (if tests exist nearby)
### 3. Note Dependencies
Identify:
- External libraries used
- Internal module dependencies
- Configuration files that may need updates
- Related files that might be affected
### 4. Create Mental Plan
Synthesize gathered context into:
- List of tasks to complete
- Acceptance criteria (inferred from user request)
- Order of operations
- Files to touch
---
## PRESENT PLAN
Display to user:
```
**Context Gathered:**
**Files to modify:**
- {list files}
**Patterns identified:**
- {key patterns}
**Plan:**
1. {task 1}
2. {task 2}
...
**Inferred AC:**
- {acceptance criteria}
Ready to execute? (y/n/adjust)
```
- **y:** Proceed to execution
- **n:** Gather more context or clarify
- **adjust:** Modify the plan based on feedback
---
## NEXT STEP
When user confirms ready, load `step-03-execute.md`.
---
## SUCCESS METRICS
- Files to modify identified
- Relevant patterns documented
- Dependencies noted
- Mental plan created with tasks and AC
- User confirmed readiness to proceed
## FAILURE MODES
- Executing this step when Mode A (tech-spec)
- Proceeding without identifying files to modify
- Not presenting plan for user confirmation
- Missing obvious patterns in existing code

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-03-execute.md

@@ -0,0 +1,113 @@
---
name: 'step-03-execute'
description: 'Execute implementation - iterate through tasks, write code, run tests'
workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
thisStepFile: '{workflow_path}/steps/step-03-execute.md'
nextStepFile: '{workflow_path}/steps/step-04-self-check.md'
---
# Step 3: Execute Implementation
**Goal:** Implement all tasks, write tests, follow patterns, handle errors.
**Critical:** Continue through ALL tasks without stopping for milestones.
---
## AVAILABLE STATE
From previous steps:
- `{baseline_commit}` - Git HEAD at workflow start
- `{execution_mode}` - "tech-spec" or "direct"
- `{tech_spec_path}` - Tech-spec file (if Mode A)
- `{project_context}` - Project patterns (if exists)
From context:
- Mode A: Tasks and AC extracted from tech-spec
- Mode B: Tasks and AC from step-02 mental plan
---
## EXECUTION LOOP
For each task:
### 1. Load Context
- Read files relevant to this task
- Review patterns from project-context or observed code
- Understand dependencies
### 2. Implement
- Write code following existing patterns
- Handle errors appropriately
- Follow conventions observed in codebase
- Add appropriate comments where non-obvious
### 3. Test
- Write tests if appropriate for the change
- Run existing tests to catch regressions
- Verify the specific AC for this task
### 4. Mark Complete
- Check off task: `- [x] Task N`
- Continue to next task immediately
---
## HALT CONDITIONS
**HALT and request guidance if:**
- 3 consecutive failures on same task
- Tests fail and fix is not obvious
- Blocking dependency discovered
- Ambiguity that requires user decision
**Do NOT halt for:**
- Minor issues that can be noted and continued
- Warnings that don't block functionality
- Style preferences (follow existing patterns)
---
## CONTINUOUS EXECUTION
**Critical:** Do not stop between tasks for approval.
- Execute all tasks in sequence
- Only halt for blocking issues
- Tests failing = fix before continuing
- Track all completed work for self-check
---
## NEXT STEP
When ALL tasks are complete (or halted on blocker), load `step-04-self-check.md`.
---
## SUCCESS METRICS
- All tasks attempted
- Code follows existing patterns
- Error handling appropriate
- Tests written where appropriate
- Tests passing
- No unnecessary halts
## FAILURE MODES
- Stopping for approval between tasks
- Ignoring existing patterns
- Not running tests after changes
- Giving up after first failure
- Not following project-context rules (if exists)

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-04-self-check.md

@@ -0,0 +1,113 @@
---
name: 'step-04-self-check'
description: 'Self-audit implementation against tasks, tests, AC, and patterns'
workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
thisStepFile: '{workflow_path}/steps/step-04-self-check.md'
nextStepFile: '{workflow_path}/steps/step-05-adversarial-review.md'
---
# Step 4: Self-Check
**Goal:** Audit completed work against tasks, tests, AC, and patterns before external review.
---
## AVAILABLE STATE
From previous steps:
- `{baseline_commit}` - Git HEAD at workflow start
- `{execution_mode}` - "tech-spec" or "direct"
- `{tech_spec_path}` - Tech-spec file (if Mode A)
- `{project_context}` - Project patterns (if exists)
---
## SELF-CHECK AUDIT
### 1. Tasks Complete
Verify all tasks are marked complete:
- [ ] All tasks from tech-spec or mental plan marked `[x]`
- [ ] No tasks skipped without documented reason
- [ ] Any blocked tasks have clear explanation
### 2. Tests Passing
Verify test status:
- [ ] All existing tests still pass
- [ ] New tests written for new functionality
- [ ] No test warnings or skipped tests without reason
### 3. Acceptance Criteria Satisfied
For each AC:
- [ ] AC is demonstrably met
- [ ] Can explain how implementation satisfies AC
- [ ] Edge cases considered
### 4. Patterns Followed
Verify code quality:
- [ ] Follows existing code patterns in codebase
- [ ] Follows project-context rules (if exists)
- [ ] Error handling consistent with codebase
- [ ] No obvious code smells introduced
---
## UPDATE TECH-SPEC (Mode A only)
If `{execution_mode}` is "tech-spec":
1. Load `{tech_spec_path}`
2. Mark all tasks as `[x]` complete
3. Update status to "Implementation Complete"
4. Save changes
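
A rough shell sketch of items 2-3, assuming GNU sed and a spec that uses `- [ ]` task checkboxes and a `Status:` line (all assumptions; in practice the agent edits the file directly):

```bash
# Mark every unchecked task complete and update the status line (GNU sed; adjust -i for BSD/macOS)
sed -i 's/^- \[ \]/- [x]/' "$TECH_SPEC_PATH"
sed -i 's/^Status:.*/Status: Implementation Complete/' "$TECH_SPEC_PATH"
```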
---
## IMPLEMENTATION SUMMARY
Present summary to transition to review:
```
**Implementation Complete!**
**Summary:** {what was implemented}
**Files Modified:** {list of files}
**Tests:** {test summary - passed/added/etc}
**AC Status:** {all satisfied / issues noted}
Proceeding to adversarial code review...
```
---
## NEXT STEP
Proceed immediately to `step-05-adversarial-review.md`.
---
## SUCCESS METRICS
- All tasks verified complete
- All tests passing
- All AC satisfied
- Patterns followed
- Tech-spec updated (if Mode A)
- Summary presented
## FAILURE MODES
- Claiming tasks complete when they're not
- Not running tests before proceeding
- Missing AC verification
- Ignoring pattern violations
- Not updating tech-spec status (Mode A)

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-05-adversarial-review.md

@@ -0,0 +1,96 @@
---
name: 'step-05-adversarial-review'
description: 'Construct diff and invoke adversarial review task'
workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
thisStepFile: '{workflow_path}/steps/step-05-adversarial-review.md'
nextStepFile: '{workflow_path}/steps/step-06-resolve-findings.md'
---
# Step 5: Adversarial Code Review
**Goal:** Construct diff of all changes, invoke adversarial review task, present findings.
---
## AVAILABLE STATE
From previous steps:
- `{baseline_commit}` - Git HEAD at workflow start (CRITICAL for diff)
- `{execution_mode}` - "tech-spec" or "direct"
- `{tech_spec_path}` - Tech-spec file (if Mode A)
---
## STEP 1: CONSTRUCT DIFF
Build complete diff of all changes since workflow started.
### Tracked File Changes
```bash
git diff {baseline_commit}
```
### New Untracked Files
Only include untracked files that YOU created during this workflow (steps 2-4).
Do not include pre-existing untracked files.
For each new file created, include its full content as a "new file" addition.
### Capture as {diff_output}
Merge tracked changes and new files into `{diff_output}`.
**Note:** Do NOT `git add` anything - this is read-only inspection.
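
A shell sketch of the full capture, assuming `$BASELINE_COMMIT` is set and that `NEW_FILES` lists only the files created during this workflow (the agent tracks that list itself; the example file name is hypothetical):

```bash
# Tracked changes since the workflow started
git diff "$BASELINE_COMMIT" > /tmp/diff_output.patch

# Append each newly created file as a "new file" diff without staging anything
NEW_FILES=("src/new-module.ts")                                              # hypothetical list from steps 2-4
for f in "${NEW_FILES[@]}"; do
  git diff --no-index /dev/null "$f" >> /tmp/diff_output.patch || true       # exits 1 when files differ, so ignore status
done
```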
---
## STEP 2: INVOKE ADVERSARIAL REVIEW
With `{diff_output}` constructed, invoke the review task:
```xml
<invoke-task input="{diff_output}">{project-root}/_bmad/core/tasks/review-adversarial-general.xml</invoke-task>
```
**Platform fallback:** If task invocation not available, load the task file and execute its instructions inline, passing `{diff_output}` as the content input.
The task will:
- Review with cynical skepticism
- Find at least 5 issues
- Assign IDs (F1, F2...), severity (critical/high/medium/low), classification (real/noise/uncertain)
- Return structured findings table
---
## STEP 3: RECEIVE FINDINGS
Capture the findings from the task output.
**If zero findings:** HALT - this is suspicious. Re-analyze or request user guidance.
---
## NEXT STEP
With findings in hand, load `step-06-resolve-findings.md` for user to choose resolution approach.
---
## SUCCESS METRICS
- Diff constructed from baseline_commit
- New files included in diff
- Task invoked with diff as input
- Findings received with IDs, severity, classification
- Zero-findings case handled appropriately
## FAILURE MODES
- Missing baseline_commit (can't construct accurate diff)
- Not including new untracked files in diff
- Invoking task without providing diff input
- Accepting zero findings without questioning

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/steps/step-06-resolve-findings.md

@@ -0,0 +1,140 @@
---
name: 'step-06-resolve-findings'
description: 'Handle review findings interactively, apply fixes, update tech-spec with final status'
workflow_path: '{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev'
thisStepFile: '{workflow_path}/steps/step-06-resolve-findings.md'
---
# Step 6: Resolve Findings
**Goal:** Handle adversarial review findings interactively, apply fixes, finalize tech-spec.
---
## AVAILABLE STATE
From previous steps:
- `{baseline_commit}` - Git HEAD at workflow start
- `{execution_mode}` - "tech-spec" or "direct"
- `{tech_spec_path}` - Tech-spec file (if Mode A)
- Findings table from step-05
---
## RESOLUTION OPTIONS
Present choice to user:
```
How would you like to handle these findings?
**[1] Walk through** - Discuss each finding individually
**[2] Auto-fix** - Automatically fix issues classified as "real"
**[3] Skip** - Acknowledge and proceed to commit
```
---
## OPTION 1: WALK THROUGH
For each finding in order:
1. Present the finding with context
2. Ask: **fix now / skip / discuss**
3. If fix: Apply the fix immediately
4. If skip: Note as acknowledged, continue
5. If discuss: Provide more context, re-ask
6. Move to next finding
After all findings processed, summarize what was fixed/skipped.
---
## OPTION 2: AUTO-FIX
1. Filter findings to only those classified as "real"
2. Apply fixes for each real finding
3. Report what was fixed:
```
**Auto-fix Applied:**
- F1: {description of fix}
- F3: {description of fix}
...
Skipped (noise/uncertain): F2, F4
```
---
## OPTION 3: SKIP
1. Acknowledge all findings were reviewed
2. Note that user chose to proceed without fixes
3. Continue to completion
---
## UPDATE TECH-SPEC (Mode A only)
If `{execution_mode}` is "tech-spec":
1. Load `{tech_spec_path}`
2. Update status to "Completed"
3. Add review notes:
```
## Review Notes
- Adversarial review completed
- Findings: {count} total, {fixed} fixed, {skipped} skipped
- Resolution approach: {walk-through/auto-fix/skip}
```
4. Save changes
---
## COMPLETION OUTPUT
```
**Review complete. Ready to commit.**
**Implementation Summary:**
- {what was implemented}
- Files modified: {count}
- Tests: {status}
- Review findings: {X} addressed, {Y} skipped
{Explain what was implemented based on user_skill_level}
```
---
## WORKFLOW COMPLETE
This is the final step. The Quick Dev workflow is now complete.
User can:
- Commit changes
- Run additional tests
- Start new Quick Dev session
---
## SUCCESS METRICS
- User presented with resolution options
- Chosen approach executed correctly
- Fixes applied cleanly (if applicable)
- Tech-spec updated with final status (Mode A)
- Completion summary provided
- User understands what was implemented
## FAILURE MODES
- Not presenting resolution options
- Auto-fixing "noise" or "uncertain" findings
- Not updating tech-spec after resolution (Mode A)
- No completion summary
- Leaving user unclear on next steps

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md

@@ -0,0 +1,51 @@
---
name: quick-dev
description: 'Flexible development - execute tech-specs OR direct instructions with optional planning.'
---
# Quick Dev Workflow
**Goal:** Execute implementation tasks efficiently, either from a tech-spec or direct user instructions.
**Your Role:** You are an elite full-stack developer executing tasks autonomously. Follow patterns, ship code, run tests. Every response moves the project forward.
---
## WORKFLOW ARCHITECTURE
This uses **step-file architecture** for focused execution:
- Each step loads fresh to combat "lost in the middle"
- State persists via variables: `{baseline_commit}`, `{execution_mode}`, `{tech_spec_path}`
- Sequential progression through implementation phases
---
## INITIALIZATION
### Configuration Loading
Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
- `user_name`, `communication_language`, `user_skill_level`
- `output_folder`, `sprint_artifacts`
- `date` as system-generated current datetime
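
For orientation, a `config.yaml` carrying these fields might look like the following sketch (values are placeholders, not shipped defaults):

```yaml
# Hypothetical example of the fields this workflow reads
user_name: Alex
communication_language: English
user_skill_level: intermediate
output_folder: '{project-root}/_bmad-output'
sprint_artifacts: '{project-root}/_bmad-output/sprints'
```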
### Paths
- `installed_path` = `{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev`
- `project_context` = `**/project-context.md` (load if exists)
- `project_levels` = `{project-root}/_bmad/bmm/workflows/workflow-status/project-levels.yaml`
### Related Workflows
- `create_tech_spec_workflow` = `{project-root}/_bmad/bmm/workflows/bmad-quick-flow/create-tech-spec/workflow.yaml`
- `workflow_init` = `{project-root}/_bmad/bmm/workflows/workflow-status/init/workflow.yaml`
- `party_mode_exec` = `{project-root}/_bmad/core/workflows/party-mode/workflow.md`
- `advanced_elicitation` = `{project-root}/_bmad/core/tasks/advanced-elicitation.xml`
---
## EXECUTION
Load and execute `steps/step-01-mode-detection.md` to begin the workflow.

View File: _bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.yaml

@@ -1,33 +0,0 @@
# Quick-Flow: Quick-Dev
name: quick-dev
description: "Flexible development - execute tech-specs OR direct instructions with optional planning."
author: "BMad"
# Config
config_source: "{project-root}/_bmad/bmm/config.yaml"
output_folder: "{config_source}:output_folder"
sprint_artifacts: "{config_source}:sprint_artifacts"
user_name: "{config_source}:user_name"
communication_language: "{config_source}:communication_language"
user_skill_level: "{config_source}:user_skill_level"
date: system-generated
# Project context
project_context: "**/project-context.md"
# Workflow components
installed_path: "{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev"
instructions: "{installed_path}/instructions.md"
checklist: "{installed_path}/checklist.md"
# Related workflows
create_tech_spec_workflow: "{project-root}/_bmad/bmm/workflows/bmad-quick-flow/create-tech-spec/workflow.yaml"
party_mode_exec: "{project-root}/_bmad/core/workflows/party-mode/workflow.md"
advanced_elicitation: "{project-root}/_bmad/core/tasks/advanced-elicitation.xml"
# Routing resources (lazy-loaded)
project_levels: "{project-root}/_bmad/bmm/workflows/workflow-status/project-levels.yaml"
workflow_init: "{project-root}/_bmad/bmm/workflows/workflow-status/init/workflow.yaml"
standalone: true
web_bundle: false