Merge branch 'main' into fix/workflow-manifest-schema
commit 3c03f3f070

@@ -4,9 +4,10 @@ language: "en-US"
 early_access: true
 reviews:
   profile: chill
-  high_level_summary: true
+  high_level_summary: false # don't post summary until explicitly invoked
   request_changes_workflow: false
   review_status: false
+  commit_status: false # don't set commit status until explicitly invoked
   collapse_walkthrough: false
   poem: false
   auto_review:

@@ -33,4 +34,7 @@ reviews:
       Flag any process.exit() without error message.

 chat:
   auto_reply: true # Respond to mentions in comments, a la @coderabbit review
+issue_enrichment:
+  auto_enrich:
+    enabled: false # don't auto-comment on issues

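Taken together, these settings put CodeRabbit into an on-demand posture: the high-level summary, commit status, and issue auto-enrichment are all suppressed, while `auto_reply: true` keeps the bot listening for mentions. Assuming CodeRabbit's standard mention commands (as the `auto_reply` comment above suggests), a reviewer would then trigger a run explicitly with a PR comment such as `@coderabbit review`.
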
@@ -1,12 +1,12 @@
 {
   "name": "bmad-method",
-  "version": "6.0.0-alpha.15",
+  "version": "6.0.0-alpha.16",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "bmad-method",
-      "version": "6.0.0-alpha.15",
+      "version": "6.0.0-alpha.16",
       "license": "MIT",
       "dependencies": {
         "@kayvan/markdown-tree-parser": "^1.6.1",

@@ -330,7 +330,7 @@ Review was saved to story file, but sprint-status.yaml may be out of sync.
 <action>All action items are included in the standalone review report</action>
 <ask if="action items exist">Would you like me to create tracking items for these action items? (backlog/tasks)</ask>
 <action if="user confirms">
-If {{backlog_file}} does not exist, copy {installed_path}/backlog_template.md to {{backlog_file}} location.
+If {{backlog_file}} does not exist, copy {installed_path}/backlog-template.md to {{backlog_file}} location.
 Append a row per action item with Date={{date}}, Story="Ad-Hoc Review", Epic="N/A", Type, Severity, Owner (or "TBD"), Status="Open", Notes with file refs and context.
 </action>
 </check>

@@ -342,7 +342,7 @@ Review was saved to story file, but sprint-status.yaml may be out of sync.
 Append under the story's "Tasks / Subtasks" a new subsection titled "Review Follow-ups (AI)", adding each item as an unchecked checkbox in imperative form, prefixed with "[AI-Review]" and severity. Example: "- [ ] [AI-Review][High] Add input validation on server route /api/x (AC #2)".
 </action>
 <action>
-If {{backlog_file}} does not exist, copy {installed_path}/backlog_template.md to {{backlog_file}} location.
+If {{backlog_file}} does not exist, copy {installed_path}/backlog-template.md to {{backlog_file}} location.
 Append a row per action item with Date={{date}}, Story={{epic_num}}.{{story_num}}, Epic={{epic_num}}, Type, Severity, Owner (or "TBD"), Status="Open", Notes with short context and file refs.
 </action>
 <action>

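For illustration, assuming the backlog template lays these fields out as a markdown table (the template itself is not shown in this diff), an appended row might look like the following, with all values hypothetical:

```
| 2025-01-15 | 3.2 | 3 | Bug | High | TBD | Open | Add input validation on /api/x (AC #2) |
```
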
@@ -24,7 +24,7 @@ agent:

 critical_actions:
   - "READ the entire story file BEFORE any implementation - tasks/subtasks sequence is your authoritative implementation guide"
-  - "Load project_context.md if available for coding standards only - never let it override story requirements"
+  - "Load project-context.md if available for coding standards only - never let it override story requirements"
   - "Execute tasks/subtasks IN ORDER as written in story file - no skipping, no reordering, no doing what you want"
   - "For each task/subtask: follow red-green-refactor cycle - write failing test first, then implementation"
   - "Mark task/subtask [x] ONLY when both implementation AND tests are complete and passing"

@@ -26,14 +26,17 @@ graph TB
     subgraph Phase3["<b>Phase 3: SOLUTIONING</b>"]
         Architecture["<b>Architect: *architecture</b>"]
         EpicsStories["<b>PM/Architect: *create-epics-and-stories</b>"]
+        TestDesignSys["<b>TEA: *test-design (system-level)</b>"]
         Framework["<b>TEA: *framework</b>"]
         CI["<b>TEA: *ci</b>"]
         GateCheck["<b>Architect: *implementation-readiness</b>"]
         Architecture --> EpicsStories
+        Architecture --> TestDesignSys
+        TestDesignSys --> Framework
         EpicsStories --> Framework
         Framework --> CI
         CI --> GateCheck
-        Phase3Note["<b>Epics created AFTER architecture,</b><br/><b>then test infrastructure setup</b>"]
+        Phase3Note["<b>Epics created AFTER architecture,</b><br/><b>then system-level test design and test infrastructure setup</b>"]
         EpicsStories -.-> Phase3Note
     end

@@ -93,12 +96,17 @@ graph TB
 - **Documentation** (Optional for brownfield): Prerequisite using `*document-project`
 - **Phase 1** (Optional): Discovery/Analysis (`*brainstorm`, `*research`, `*product-brief`)
 - **Phase 2** (Required): Planning (`*prd` creates PRD with FRs/NFRs)
-- **Phase 3** (Track-dependent): Solutioning (`*architecture` → `*create-epics-and-stories` → TEA: `*framework`, `*ci` → `*implementation-readiness`)
+- **Phase 3** (Track-dependent): Solutioning (`*architecture` → `*test-design` (system-level) → `*create-epics-and-stories` → TEA: `*framework`, `*ci` → `*implementation-readiness`)
 - **Phase 4** (Required): Implementation (`*sprint-planning` → per-epic: `*test-design` → per-story: dev workflows)

-**TEA workflows:** `*framework` and `*ci` run once in Phase 3 after architecture. `*test-design` runs per-epic in Phase 4. Output: `test-design-epic-N.md`.
+**TEA workflows:** `*framework` and `*ci` run once in Phase 3 after architecture. `*test-design` is **dual-mode**:
+
+- **System-level (Phase 3):** Run immediately after architecture/ADR drafting to produce `test-design-system.md` (testability review, ADR → test mapping, Architecturally Significant Requirements (ASRs), environment needs). Feeds the implementation-readiness gate.
+- **Epic-level (Phase 4):** Run per-epic to produce `test-design-epic-N.md` (risk, priorities, coverage plan).

-Quick Flow track skips Phase 1 and 3. BMad Method and Enterprise use all phases based on project needs.
+Quick Flow track skips Phases 1 and 3.
+BMad Method and Enterprise use all phases based on project needs.
+When an ADR or architecture draft is produced, run `*test-design` in **system-level** mode before the implementation-readiness gate. This ensures the ADR has an attached testability review and ADR → test mapping. Keep the test-design updated if ADRs change.

 ### Why TEA is Different from Other BMM Agents

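A minimal sketch of the routing this dual-mode split implies, assuming mode is inferred from whether an epic number is in scope (the workflow engine's actual detection may differ):

```js
// Hypothetical helper illustrating the dual-mode outputs described above.
// epicNum present -> epic-level mode (Phase 4)
// epicNum absent  -> system-level mode (Phase 3)
function testDesignOutput(outputFolder, epicNum) {
  return epicNum == null
    ? `${outputFolder}/test-design-system.md`
    : `${outputFolder}/test-design-epic-${epicNum}.md`;
}
```
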
@@ -1,6 +1,6 @@
 ---
 name: create-prd
-description: Creates a comprehensive PRDs through collaborative step-by-step discovery between two product managers working as peers.
+description: Creates a comprehensive PRD through collaborative step-by-step discovery between two product managers working as peers.
 main_config: '{project-root}/.bmad/bmm/config.yaml'
 web_bundle: true
 ---

@@ -94,7 +94,7 @@ Discover and load context documents using smart discovery:

 **Project Context Rules (Critical for AI Agents):**

-1. Check for project context file: `**/project_context.md`
+1. Check for project context file: `**/project-context.md`
 2. If exists: Load COMPLETE file contents - this contains critical rules for AI agents
 3. Add to frontmatter `hasProjectContext: true` and track file path
 4. Report to user: "Found existing project context with {number_of_rules} agent rules"

@@ -280,7 +280,7 @@ Your architecture will ensure consistent, high-quality implementation across all

 **💡 Optional Enhancement: Project Context File**

-Would you like to create a `project_context.md` file? This is a concise, optimized guide for AI agents that captures:
+Would you like to create a `project-context.md` file? This is a concise, optimized guide for AI agents that captures:

 - Critical language and framework rules they might miss
 - Specific patterns and conventions for your project

@@ -310,7 +310,7 @@ This will help ensure consistent implementation by capturing:
 - Testing and quality standards
 - Anti-patterns to avoid

-The workflow will collaborate with you to create an optimized `project_context.md` file that AI agents will read before implementing any code."
+The workflow will collaborate with you to create an optimized `project-context.md` file that AI agents will read before implementing any code."

 **Execute the Generate Project Context workflow:**

@@ -217,7 +217,7 @@
 **Issues Fixed:** {{fixed_count}}
 **Action Items Created:** {{action_count}}

-{{#if new_status == "done"}}Story is ready for next work!{{else}}Address the action items and continue development.{{/if}}
+{{#if new_status == "done"}}Code review complete!{{else}}Address the action items and continue development.{{/if}}
 </output>
 </step>

@@ -35,7 +35,7 @@ validation-rules:
 - [ ] **Acceptance Criteria Satisfaction:** Implementation satisfies EVERY Acceptance Criterion in the story
 - [ ] **No Ambiguous Implementation:** Clear, unambiguous implementation that meets story requirements
 - [ ] **Edge Cases Handled:** Error conditions and edge cases appropriately addressed
-- [ ] **Dependencies Within Scope:** Only uses dependencies specified in story or project_context.md
+- [ ] **Dependencies Within Scope:** Only uses dependencies specified in story or project-context.md

 ## 🧪 Testing & Quality Assurance

@@ -33,7 +33,7 @@ Discover the project's technology stack, existing patterns, and critical implementation

 First, check if project context already exists:

-- Look for file at `{output_folder}/project_context.md`
+- Look for file at `{output_folder}/project-context.md`
 - If exists: Read complete file to understand existing rules
 - Present to user: "Found existing project context with {number_of_sections} sections. Would you like to update this or create a new one?"

@@ -122,7 +122,7 @@ Based on discovery, create or update the context document:

 #### A. Fresh Document Setup (if no existing context)

-Copy template from `{installed_path}/project-context-template.md` to `{output_folder}/project_context.md`
+Copy template from `{installed_path}/project-context-template.md` to `{output_folder}/project-context.md`
 Initialize frontmatter with:

 ```yaml

@@ -288,7 +288,7 @@ After each category, show the generated rules and present choices:

 ## APPEND TO PROJECT CONTEXT:

-When user selects 'C' for a category, append the content directly to `{output_folder}/project_context.md` using the structure from step 8.
+When user selects 'C' for a category, append the content directly to `{output_folder}/project-context.md` using the structure from step 8.

 ## SUCCESS METRICS:

@@ -134,7 +134,7 @@ Based on user skill level, present the completion:
 **Expert Mode:**
 "Project context complete. Optimized for LLM consumption with {{rule_count}} critical rules across {{section_count}} sections.

-File saved to: `{output_folder}/project_context.md`
+File saved to: `{output_folder}/project-context.md`

 Ready for AI agent integration."

@@ -227,7 +227,7 @@ Present final completion to user:
 "✅ **Project Context Generation Complete!**

 Your optimized project context file is ready at:
-`{output_folder}/project_context.md`
+`{output_folder}/project-context.md`

 **📊 Context Summary:**

@@ -1,11 +1,11 @@
 ---
 name: generate-project-context
-description: Creates a concise project_context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
+description: Creates a concise project-context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
 ---

 # Generate Project Context Workflow

-**Goal:** Create a concise, optimized `project_context.md` file containing critical rules, patterns, and guidelines that AI agents must follow when implementing code. This file focuses on unobvious details that LLMs need to be reminded of.
+**Goal:** Create a concise, optimized `project-context.md` file containing critical rules, patterns, and guidelines that AI agents must follow when implementing code. This file focuses on unobvious details that LLMs need to be reminded of.

 **Your Role:** You are a technical facilitator working with a peer to capture the essential implementation rules that will ensure consistent, high-quality code generation across all AI agents working on the project.

@@ -37,7 +37,7 @@ Load config from `{project-root}/.bmad/bmm/config.yaml` and resolve:

 - `installed_path` = `{project-root}/.bmad/bmm/workflows/generate-project-context`
 - `template_path` = `{installed_path}/project-context-template.md`
-- `output_file` = `{output_folder}/project_context.md`
+- `output_file` = `{output_folder}/project-context.md`

 ---

@@ -24,8 +24,14 @@ variables:

 # Output configuration
 # Note: Actual output file determined dynamically based on mode detection
-# - System-Level (Phase 3): {output_folder}/test-design-system.md
-# - Epic-Level (Phase 4): {output_folder}/test-design-epic-{epic_num}.md
+# Declared outputs for new workflow format
+outputs:
+  - id: system-level
+    description: "System-level testability review (Phase 3)"
+    path: "{output_folder}/test-design-system.md"
+  - id: epic-level
+    description: "Epic-level test plan (Phase 4)"
+    path: "{output_folder}/test-design-epic-{epic_num}.md"
 default_output_file: "{output_folder}/test-design-epic-{epic_num}.md"

 # Required tools

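Given this declaration, an epic-level run with `epic_num: 3` resolves its output to `{output_folder}/test-design-epic-3.md`, while a system-level run writes `{output_folder}/test-design-system.md`; `default_output_file` presumably remains as the fallback for tooling that predates the declared-outputs format.
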
@@ -248,14 +248,21 @@ class ConfigCollector {
     const configKeys = Object.keys(moduleConfig).filter((key) => key !== 'prompt');
     const existingKeys = this.existingConfig && this.existingConfig[moduleName] ? Object.keys(this.existingConfig[moduleName]) : [];

     // Find new interactive fields (with prompt)
     const newKeys = configKeys.filter((key) => {
       const item = moduleConfig[key];
       // Check if it's a config item and doesn't exist in existing config
       return item && typeof item === 'object' && item.prompt && !existingKeys.includes(key);
     });

-    // If in silent mode and no new keys, use existing config and skip prompts
-    if (silentMode && newKeys.length === 0) {
+    // Find new static fields (without prompt, just result)
+    const newStaticKeys = configKeys.filter((key) => {
+      const item = moduleConfig[key];
+      return item && typeof item === 'object' && !item.prompt && item.result && !existingKeys.includes(key);
+    });
+
+    // If in silent mode and no new keys (neither interactive nor static), use existing config and skip prompts
+    if (silentMode && newKeys.length === 0 && newStaticKeys.length === 0) {
       if (this.existingConfig && this.existingConfig[moduleName]) {
         if (!this.collectedConfig[moduleName]) {
           this.collectedConfig[moduleName] = {};

@@ -294,9 +301,12 @@ class ConfigCollector {
       return false; // No new fields
     }

-    // If we have new fields, build questions first
-    if (newKeys.length > 0) {
+    // If we have new fields (interactive or static), process them
+    if (newKeys.length > 0 || newStaticKeys.length > 0) {
       const questions = [];
+      const staticAnswers = {};

+      // Build questions for interactive fields
       for (const key of newKeys) {
         const item = moduleConfig[key];
         const question = await this.buildQuestion(moduleName, key, item, moduleConfig);

@@ -305,20 +315,35 @@ class ConfigCollector {
         }
       }

+      // Prepare static answers (no prompt, just result)
+      for (const key of newStaticKeys) {
+        staticAnswers[`${moduleName}_${key}`] = undefined;
+      }
+
+      // Collect all answers (static + prompted)
+      let allAnswers = { ...staticAnswers };
+
       if (questions.length > 0) {
         // Only show header if we actually have questions
         CLIUtils.displayModuleConfigHeader(moduleName, moduleConfig.header, moduleConfig.subheader);
         console.log(); // Line break before questions
-        const answers = await inquirer.prompt(questions);
+        const promptedAnswers = await inquirer.prompt(questions);

-        // Store answers for cross-referencing
-        Object.assign(this.allAnswers, answers);
+        // Merge prompted answers with static answers
+        Object.assign(allAnswers, promptedAnswers);
+      } else if (newStaticKeys.length > 0) {
+        // Only static fields, no questions - show no config message
+        CLIUtils.displayModuleNoConfig(moduleName, moduleConfig.header, moduleConfig.subheader);
+      }

-        // Process answers and build result values
-        for (const key of Object.keys(answers)) {
+      // Store all answers for cross-referencing
+      Object.assign(this.allAnswers, allAnswers);
+
+      // Process all answers (both static and prompted)
+      for (const key of Object.keys(allAnswers)) {
         const originalKey = key.replace(`${moduleName}_`, '');
         const item = moduleConfig[originalKey];
-        const value = answers[key];
+        const value = allAnswers[key];

         let result;
         if (Array.isArray(value)) {

@@ -334,10 +359,6 @@ class ConfigCollector {
         }
         this.collectedConfig[moduleName][originalKey] = result;
       }
-      } else {
-        // New keys exist but no questions generated - show no config message
-        CLIUtils.displayModuleNoConfig(moduleName, moduleConfig.header, moduleConfig.subheader);
-      }
     }

     // Copy over existing values for fields that weren't prompted

@@ -353,7 +374,7 @@ class ConfigCollector {
       }
     }

-    return newKeys.length > 0; // Return true if we prompted for new fields
+    return newKeys.length > 0 || newStaticKeys.length > 0; // Return true if we had any new fields (interactive or static)
   }

   /**

@@ -501,30 +522,52 @@ class ConfigCollector {

     // Process each config item
     const questions = [];
+    const staticAnswers = {};
     const configKeys = Object.keys(moduleConfig).filter((key) => key !== 'prompt');

     for (const key of configKeys) {
       const item = moduleConfig[key];

       // Skip if not a config object
-      if (!item || typeof item !== 'object' || !item.prompt) {
+      if (!item || typeof item !== 'object') {
         continue;
       }

+      // Handle static values (no prompt, just result)
+      if (!item.prompt && item.result) {
+        // Add to static answers with a marker value
+        staticAnswers[`${moduleName}_${key}`] = undefined;
+        continue;
+      }
+
+      // Handle interactive values (with prompt)
+      if (item.prompt) {
         const question = await this.buildQuestion(moduleName, key, item, moduleConfig);
         if (question) {
           questions.push(question);
         }
+      }
     }

+    // Collect all answers (static + prompted)
+    let allAnswers = { ...staticAnswers };
+
+    // Display appropriate header based on whether there are questions
     if (questions.length > 0) {
       CLIUtils.displayModuleConfigHeader(moduleName, moduleConfig.header, moduleConfig.subheader);
       console.log(); // Line break before questions
-      const answers = await inquirer.prompt(questions);
+      const promptedAnswers = await inquirer.prompt(questions);

-      // Store answers for cross-referencing
-      Object.assign(this.allAnswers, answers);
+      // Merge prompted answers with static answers
+      Object.assign(allAnswers, promptedAnswers);
     }

+    // Store all answers for cross-referencing
+    Object.assign(this.allAnswers, allAnswers);
+
+    // Process all answers (both static and prompted)
+    if (Object.keys(allAnswers).length > 0) {
+      const answers = allAnswers;
+
       // Process answers and build result values
       for (const key of Object.keys(answers)) {

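To make the new static-field path concrete, here is a minimal sketch of the two kinds of entries the collector now distinguishes (hypothetical module config; the field names are invented for illustration):

```js
// Hypothetical module config illustrating both field kinds handled above.
const moduleConfig = {
  // Interactive field: has a `prompt`, so it becomes an inquirer question
  // and the user's answer feeds the result processing shown in the diff.
  output_folder: {
    prompt: 'Where should generated documents be saved?',
  },
  // Static field: no `prompt`, only a `result` - the collector now seeds its
  // answer as `undefined` and records the field without prompting the user.
  default_language: {
    result: 'en-US',
  },
};
```

In silent mode, a module whose only new key is a static field like `default_language` no longer short-circuits to the existing config: the field is picked up and written, and `CLIUtils.displayModuleNoConfig` is shown instead of a question header.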