Compare commits

...

17 Commits

Author SHA1 Message Date
Armel BOBDA 7b7f6ee97f
Merge 951b66b037 into ed0defbe08 2025-12-12 07:24:05 +08:00
Dicky Moore ed0defbe08
fix: normalize workflow manifest schema (#1071)
* fix: normalize workflow manifest schema

* fix: escape workflow manifest values safely

---------

Co-authored-by: Brian <bmadcode@gmail.com>
2025-12-12 07:20:43 +08:00
Kevin Heidt 3bc485d0ed
Enhance config collector to support static fields (#1086)
Refactor config collection to handle both interactive and static fields. Update logic to process new static fields and merge answers accordingly.

Co-authored-by: Brian <bmadcode@gmail.com>
2025-12-12 06:56:31 +08:00
Alex Verkhovsky 0f5a9cf0dd
fix: correct grammar in PRD workflow description (#1087) 2025-12-12 06:43:40 +08:00
Alex Verkhovsky e2d9d35ce9
fix(bmm): improve code review completion message (#1095)
Change "Story is ready for next work!" to "Code review complete!"

The original phrasing was misleading - when a code review finishes
with status "done", it means the review itself is complete and the
story is marked done in tracking. However, the user may choose to
do additional reviews or the story may genuinely be finished.
"Code review complete" more accurately describes what actually
happened without implying next steps.
2025-12-12 06:42:52 +08:00
Alex Verkhovsky 82e6433b69
refactor: standardize file naming to use dashes instead of underscores (#1094)
Rename output/template files and update all references to use kebab-case
(dashes) instead of snake_case (underscores) for consistency:

- project_context.md -> project-context.md (13 references)
- backlog_template.md -> backlog-template.md
- agent_commands.md -> agent-commands.md
- agent_persona.md -> agent-persona.md
- agent_purpose_and_type.md -> agent-purpose-and-type.md
2025-12-12 06:42:24 +08:00
Alex Verkhovsky be7e07cc1a
fix: fully silence CodeRabbit unless explicitly invoked (#1096)
- Disable high_level_summary to stop PR description modifications
- Disable commit_status to stop GitHub status checks
- Disable issue_enrichment.auto_enrich to stop auto-commenting on issues

These settings complement the existing review_status: false and
auto_review.enabled: false to ensure CodeRabbit only responds
when explicitly tagged with @coderabbitai review.
2025-12-12 06:32:24 +08:00
Alex Verkhovsky 079f79aba5
Merge pull request #1103 from bmad-code-org/docs/test-architect-ADR-usage-update-2
docs: test arch ADR usage update
2025-12-11 12:35:12 -07:00
murat b4d7e1adef docs: addressed further PR comments 2025-12-11 13:13:44 -06:00
murat 6e9fe6c9a2 fix: addressed review comment 2025-12-11 11:36:33 -06:00
Murat K Ozcan d2d9010a8e
Update src/modules/bmm/docs/test-architecture.md
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-11 10:15:23 -06:00
murat 6d5a1084eb docs: test arch ADR usage update 2 2025-12-11 09:43:25 -06:00
murat 978a93ed33 docs: test arch ADR usage update 2025-12-11 09:34:22 -06:00
Alex Verkhovsky ec90699016
Merge pull request #1090 from alexeyv/fix/issue-1088-remove-stale-workflow-refs
docs: remove stale references to deleted Phase 4 workflows
2025-12-11 04:38:31 -07:00
Alex Verkhovsky 0f06ef724b
Merge branch 'main' into fix/issue-1088-remove-stale-workflow-refs 2025-12-10 16:00:11 -07:00
Alex Verkhovsky b9ba98d3f8 docs: remove stale references to deleted Phase 4 workflows
Removes references to epic-tech-context, story-context, story-done,
and story-ready workflows that were deleted in the Phase 4 transformation.

Also renames mislabeled excalidraw element IDs from proc-story-done
to proc-code-review to match the actual displayed text.

Fixes #1088
2025-12-09 21:50:39 -07:00
BOBDA DJIMO ARMEL HYACINTHE 951b66b037 Enable optional DeepWiki integration for advanced repository analysis and UI library research. 2025-12-09 01:43:14 +04:00
30 changed files with 878 additions and 148 deletions

View File

@ -4,9 +4,10 @@ language: "en-US"
early_access: true
reviews:
profile: chill
high_level_summary: true
high_level_summary: false # don't post summary until explicitly invoked
request_changes_workflow: false
review_status: false
commit_status: false # don't set commit status until explicitly invoked
collapse_walkthrough: false
poem: false
auto_review:
@ -33,4 +34,7 @@ reviews:
Flag any process.exit() without error message.
chat:
auto_reply: true # Respond to mentions in comments, a la @coderabbitai review
issue_enrichment:
auto_enrich:
enabled: false # don't auto-comment on issues

4
package-lock.json generated
View File

@ -1,12 +1,12 @@
{
"name": "bmad-method",
"version": "6.0.0-alpha.15",
"version": "6.0.0-alpha.16",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "bmad-method",
"version": "6.0.0-alpha.15",
"version": "6.0.0-alpha.16",
"license": "MIT",
"dependencies": {
"@kayvan/markdown-tree-parser": "^1.6.1",

View File

@ -330,7 +330,7 @@ Review was saved to story file, but sprint-status.yaml may be out of sync.
<action>All action items are included in the standalone review report</action>
<ask if="action items exist">Would you like me to create tracking items for these action items? (backlog/tasks)</ask>
<action if="user confirms">
If {{backlog_file}} does not exist, copy {installed_path}/backlog_template.md to {{backlog_file}} location.
If {{backlog_file}} does not exist, copy {installed_path}/backlog-template.md to {{backlog_file}} location.
Append a row per action item with Date={{date}}, Story="Ad-Hoc Review", Epic="N/A", Type, Severity, Owner (or "TBD"), Status="Open", Notes with file refs and context.
</action>
</check>
@ -342,7 +342,7 @@ Review was saved to story file, but sprint-status.yaml may be out of sync.
Append under the story's "Tasks / Subtasks" a new subsection titled "Review Follow-ups (AI)", adding each item as an unchecked checkbox in imperative form, prefixed with "[AI-Review]" and severity. Example: "- [ ] [AI-Review][High] Add input validation on server route /api/x (AC #2)".
</action>
<action>
If {{backlog_file}} does not exist, copy {installed_path}/backlog_template.md to {{backlog_file}} location.
If {{backlog_file}} does not exist, copy {installed_path}/backlog-template.md to {{backlog_file}} location.
Append a row per action item with Date={{date}}, Story={{epic_num}}.{{story_num}}, Epic={{epic_num}}, Type, Severity, Owner (or "TBD"), Status="Open", Notes with short context and file refs.
</action>
<action>

View File

@ -24,7 +24,7 @@ agent:
critical_actions:
- "READ the entire story file BEFORE any implementation - tasks/subtasks sequence is your authoritative implementation guide"
- "Load project_context.md if available for coding standards only - never let it override story requirements"
- "Load project-context.md if available for coding standards only - never let it override story requirements"
- "Execute tasks/subtasks IN ORDER as written in story file - no skipping, no reordering, no doing what you want"
- "For each task/subtask: follow red-green-refactor cycle - write failing test first, then implementation"
- "Mark task/subtask [x] ONLY when both implementation AND tests are complete and passing"

View File

@ -199,24 +199,11 @@ PRDs are for Level 2-4 projects with multiple features requiring product-level c
### Q: How do I mark a story as done?
**A:** You have two options:
**A:** After dev-story completes and code-review passes:
**Option 1: Use story-done workflow (Recommended)**
1. Load SM agent
2. Run `story-done` workflow
3. Workflow automatically updates `sprint-status.yaml` (created by sprint-planning at Phase 4 start)
4. Moves story from current status → `DONE`
5. Advances the story queue
**Option 2: Manual update**
1. After dev-story completes and code-review passes
2. Open `sprint-status.yaml` (created by sprint-planning)
3. Change the story status from `review` to `done`
4. Save the file
The story-done workflow is faster and ensures proper status file updates.
1. Open `sprint-status.yaml` (created by sprint-planning)
2. Change the story status from `review` to `done`
3. Save the file
### Q: Can I work on multiple stories at once?

View File

@ -2934,7 +2934,7 @@
"gap": 1
},
"endBinding": {
"elementId": "proc-story-done",
"elementId": "proc-code-review",
"focus": 0.04241833499478815,
"gap": 1.3466869862454587
},
@ -3189,7 +3189,7 @@
"lineHeight": 1.25
},
{
"id": "proc-story-done",
"id": "proc-code-review",
"type": "rectangle",
"x": 1169.3991588878014,
"y": 947.2529662369525,
@ -3207,12 +3207,12 @@
"value": 8
},
"groupIds": [
"proc-story-done-group"
"proc-code-review-group"
],
"boundElements": [
{
"type": "text",
"id": "proc-story-done-text"
"id": "proc-code-review-text"
},
{
"type": "arrow",
@ -3235,7 +3235,7 @@
"link": null
},
{
"id": "proc-story-done-text",
"id": "proc-code-review-text",
"type": "text",
"x": 1187.9272045420983,
"y": 972.2529662369525,
@ -3249,14 +3249,14 @@
"roughness": 0,
"opacity": 100,
"groupIds": [
"proc-story-done-group"
"proc-code-review-group"
],
"fontSize": 16,
"fontFamily": 1,
"text": "Code Review\n<<use different\nLLM>>",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "proc-story-done",
"containerId": "proc-code-review",
"locked": false,
"version": 502,
"versionNonce": 1242095014,
@ -3289,7 +3289,7 @@
"opacity": 100,
"groupIds": [],
"startBinding": {
"elementId": "proc-story-done",
"elementId": "proc-code-review",
"focus": 0.014488632877232727,
"gap": 8.284295421831303
},

View File

@ -377,12 +377,6 @@ Checks:
Quick Spec Flow works seamlessly with all Phase 4 implementation workflows:
### story-context (SM Agent)
- ✅ Recognizes tech-spec.md as authoritative source
- ✅ Extracts context from tech-spec (replaces PRD)
- ✅ Generates XML context for complex scenarios
### create-story (SM Agent)
- ✅ Can work with tech-spec.md instead of PRD
@ -529,10 +523,6 @@ Quick Spec Flow is **fully standalone**:
**A:** No problem! You can always transition to BMad Method by running workflow-init and create-prd. Your tech-spec becomes input for the PRD.
### Q: Do I need story-context for every story?
**A:** Usually no! Tech-spec is comprehensive enough for most Quick Flow projects. Only use story-context for complex edge cases.
### Q: Can I skip validation?
**A:** No, validation always runs automatically. But it's fast and catches issues early!
@ -564,15 +554,11 @@ Starter templates save hours of setup time. Let Quick Spec Flow find the best on
When validation runs, read the scores. They tell you if your spec is production-ready.
### 5. **Story Context is Optional**
For single changes, try going directly to dev-story first. Only add story-context if you hit complexity.
### 6. **Keep Single Changes Truly Atomic**
### 5. **Keep Single Changes Truly Atomic**
If your "single change" needs 3+ files, it might be a multi-story feature. Let the workflow guide you.
### 7. **Validate Story Sequence for Multi-Story Features**
### 6. **Validate Story Sequence for Multi-Story Features**
When you get multiple stories, check the dependency validation output. Proper sequence matters!

View File

@ -26,14 +26,17 @@ graph TB
subgraph Phase3["<b>Phase 3: SOLUTIONING</b>"]
Architecture["<b>Architect: *architecture</b>"]
EpicsStories["<b>PM/Architect: *create-epics-and-stories</b>"]
TestDesignSys["<b>TEA: *test-design (system-level)</b>"]
Framework["<b>TEA: *framework</b>"]
CI["<b>TEA: *ci</b>"]
GateCheck["<b>Architect: *implementation-readiness</b>"]
Architecture --> EpicsStories
Architecture --> TestDesignSys
TestDesignSys --> Framework
EpicsStories --> Framework
Framework --> CI
CI --> GateCheck
Phase3Note["<b>Epics created AFTER architecture,</b><br/><b>then test infrastructure setup</b>"]
Phase3Note["<b>Epics created AFTER architecture,</b><br/><b>then system-level test design and test infrastructure setup</b>"]
EpicsStories -.-> Phase3Note
end
@ -93,12 +96,17 @@ graph TB
- **Documentation** (Optional for brownfield): Prerequisite using `*document-project`
- **Phase 1** (Optional): Discovery/Analysis (`*brainstorm`, `*research`, `*product-brief`)
- **Phase 2** (Required): Planning (`*prd` creates PRD with FRs/NFRs)
- **Phase 3** (Track-dependent): Solutioning (`*architecture` → `*create-epics-and-stories` → TEA: `*framework`, `*ci``*implementation-readiness`)
- **Phase 3** (Track-dependent): Solutioning (`*architecture` → `*test-design` (system-level) → `*create-epics-and-stories` → TEA: `*framework`, `*ci` → `*implementation-readiness`)
- **Phase 4** (Required): Implementation (`*sprint-planning` → per-epic: `*test-design` → per-story: dev workflows)
**TEA workflows:** `*framework` and `*ci` run once in Phase 3 after architecture. `*test-design` runs per-epic in Phase 4. Output: `test-design-epic-N.md`.
**TEA workflows:** `*framework` and `*ci` run once in Phase 3 after architecture. `*test-design` is **dual-mode**:
Quick Flow track skips Phase 1 and 3. BMad Method and Enterprise use all phases based on project needs.
- **System-level (Phase 3):** Run immediately after architecture/ADR drafting to produce `test-design-system.md` (testability review, ADR → test mapping, Architecturally Significant Requirements (ASRs), environment needs). Feeds the implementation-readiness gate.
- **Epic-level (Phase 4):** Run per-epic to produce `test-design-epic-N.md` (risk, priorities, coverage plan).
Quick Flow track skips Phases 1 and 3.
BMad Method and Enterprise use all phases based on project needs.
When an ADR or architecture draft is produced, run `*test-design` in **system-level** mode before the implementation-readiness gate. This ensures the ADR has an attached testability review and ADR → test mapping. Keep the test-design updated if ADRs change.
### Why TEA is Different from Other BMM Agents

View File

@ -196,7 +196,7 @@ workflow-init asks: "Is this work in progress or previous effort?"
2. Verify agent has workflow:
- PM agent: prd, tech-spec
- Architect agent: create-architecture, validate-architecture
- SM agent: sprint-planning, create-story, story-context
- SM agent: sprint-planning, create-story
3. Try menu number instead of name
4. Check you're using correct agent for workflow
@ -219,23 +219,6 @@ workflow-init asks: "Is this work in progress or previous effort?"
3. **Run in Phase 4 only** - Ensure Phase 2/3 complete first
4. **Check file paths** - Epic files should be in correct output folder
### Problem: story-context generates empty or wrong context
**Symptoms:**
- Context file created but has no useful content
- Context doesn't reference existing code
- Missing technical guidance
**Solution:**
1. **Run epic-tech-context first** - story-context builds on epic context
2. **Check story file exists** - Verify story was created by create-story
3. **For brownfield**:
- Ensure document-project was run
- Verify docs/index.md exists with codebase context
4. **Try regenerating** - Sometimes needs fresh attempt with more specific story details
---
## Context and Documentation Issues
@ -362,7 +345,7 @@ For most brownfield projects, **Deep scan is sufficient**.
1. **For brownfield**:
- Ensure document-project captured existing architecture
- Review architecture docs before implementing
2. **Check story-context** - Should document integration points
2. **Check story file** - Should document integration points
3. **In tech-spec/architecture** - Explicitly document:
- Which existing modules to modify
- What APIs/services to integrate with
@ -384,7 +367,7 @@ For most brownfield projects, **Deep scan is sufficient**.
- Should detect existing patterns
- Asks for confirmation before proceeding
2. **Review documentation** - Ensure document-project captured patterns
3. **Use story-context** - Injects pattern guidance per story
3. **Use comprehensive story files** - Include pattern guidance in story
4. **Add to code-review checklist**:
- Pattern adherence
- Convention consistency
@ -459,9 +442,7 @@ To change locations, edit config.yaml then re-run workflows.
```
2. **Some workflows auto-update**:
- sprint-planning creates file
- epic-tech-context changes epic to "contexted"
- create-story changes story to "drafted"
- story-context changes to "ready-for-dev"
- create-story changes story to "ready-for-dev"
- dev-story may auto-update (check workflow)
3. **Re-run sprint-planning** to resync if needed
@ -657,8 +638,8 @@ If your issue isn't covered here:
### "Context generation failed"
**Cause:** Missing prerequisites (epic context, story file, or docs)
**Fix:** Verify epic-tech-context run, story file exists, docs present
**Cause:** Missing prerequisites (story file or docs)
**Fix:** Verify story file exists, docs present
---

View File

@ -152,10 +152,9 @@ Dependencies: Story 1.2 (DONE) ✅
**Recommendation:** Run `create-story` to generate Story 1.3
After create-story:
1. Run story-context
2. Run dev-story
3. Run code-review
4. Run story-done
1. Run dev-story
2. Run code-review
3. Update sprint-status.yaml to mark story done
```
See: [workflow-status instructions](../workflows/workflow-status/instructions.md)

View File

@ -26,6 +26,7 @@
- **Research goals = "{{research_goals}}"** - captured from initial discussion
- Focus on technical architecture and implementation research
- Web search is required to verify and supplement your knowledge with current facts
- **DeepWiki MCP** may be available for authoritative repository documentation (optional enhancement)
## YOUR TASK:
@ -69,7 +70,101 @@ For **{{research_topic}}**, I will research:
**All claims verified against current public sources.**
**Does this technical research scope and approach align with your goals?**
[C] Continue - Begin technical research with this scope
### 2.5 Framework Integration Mode (Optional DeepWiki Enhancement)
After confirming scope, ask:
"**Does this research involve integrating multiple frameworks or libraries?**
If you're researching how frameworks work together (e.g., Tauri + Next.js, or React + a backend framework), I can query authoritative repository documentation via DeepWiki for verified API signatures and integration patterns.
[Y] Yes - Enable DeepWiki mode for authoritative framework documentation
[N] No - Standard web research is sufficient for this topic"
#### If Y (Enable DeepWiki):
"**Which GitHub repositories should I query?** (Maximum 3 for optimal results)
Format: `owner/repo` (e.g., `tauri-apps/tauri`, `vercel/next.js`)
Enter repositories (comma-separated):"
#### Repository Type Classification (MANDATORY if enabled):
For each provided repository, classify its type based on name patterns:
| Pattern Match | Type | Query Focus |
| ---------------------------------------- | -------------- | -------------------------------------- |
| `shadcn/*`, `shadcn-ui/*`, `radix-ui/*` | `ui-primitive` | Primitives, composition, accessibility |
| `chakra-ui/*`, `mui/*`, `ant-design/*` | `ui-library` | Components, theming, variants |
| `*-ui/*`, `*ui/*` (general) | `ui-library` | Components, props, customization |
| `tauri-apps/*`, `vercel/*`, `electron/*` | `framework` | APIs, protocols, extensions |
| `*auth*`, `*-auth/*` | `auth-library` | Sessions, providers, security |
| Other | `general` | Standard technical queries |
Display classification to user:
"**Repository Classification:**
| Repository | Type | Query Focus |
|------------|------|-------------|
| {repo_1} | {type} | {focus_description} |
| {repo_2} | {type} | {focus_description} |
**Note:** UI libraries will receive additional component-focused queries for UX Designer downstream use."
#### DeepWiki Validation (MANDATORY if enabled):
For each provided repository:
1. Call `read_wiki_structure(repo)` to verify indexing
2. If successful: Store repo with `indexed: true`, extract version if available, store `type` classification
3. If fails: Warn user - "⚠️ {repo} is not indexed in DeepWiki. Continue without it? [Y/N]"
Display validation results:
"**DeepWiki Repository Status:**
| Repository | Indexed | Version |
|------------|---------|---------|
| {repo_1} | ✅/❌ | {version} |
| {repo_2} | ✅/❌ | {version} |
**Query Budget:** This session will use approximately {estimated} queries. Maximum recommended: 15 queries."
#### DeepWiki Frontmatter Variables:
When DeepWiki is enabled, set:
```yaml
deepwiki_enabled: true
deepwiki_repos:
- repo: 'owner/repo'
indexed: true
version: 'detected or unknown'
type: 'framework|ui-library|ui-primitive|auth-library|general'
deepwiki_query_budget: 15
deepwiki_queries_used: 0
deepwiki_has_ui_repos: true|false # Set true if any repo type is ui-library or ui-primitive
```
#### If N (Standard Research):
Proceed with web-only research:
```yaml
deepwiki_enabled: false
```
### 2.6 Final Scope Confirmation
Present final confirmation with DeepWiki status:
"**Technical Research Configuration:**
✅ Research Topic: {{research_topic}}
✅ Research Goals: {{research_goals}}
✅ Data Sources: Web Search + DeepWiki (if enabled)
✅ DeepWiki Repos: {list or 'None - web only'}
[C] Continue - Begin technical research with this configuration"
### 3. Handle Continue Selection
@ -104,6 +199,12 @@ When user selects 'C', append scope confirmation:
- Confidence level framework for uncertain information
- Comprehensive technical coverage with architecture-specific insights
**DeepWiki Configuration:**
- Enabled: {{deepwiki_enabled}}
- Repositories: {{deepwiki_repos or 'None - web research only'}}
- Query Budget: {{deepwiki_query_budget or 'N/A'}}
**Scope Confirmed:** {{date}}
```
@ -115,6 +216,10 @@ When user selects 'C', append scope confirmation:
✅ [C] continue option presented and handled correctly
✅ Scope confirmation documented when user proceeds
✅ Proper routing to next technical research step
✅ DeepWiki mode offered for framework integration research
✅ DeepWiki repos validated via read_wiki_structure() before proceeding
✅ Query budget communicated to user
✅ DeepWiki configuration saved to frontmatter
## FAILURE MODES:
@ -124,6 +229,10 @@ When user selects 'C', append scope confirmation:
❌ Not presenting [C] continue option
❌ Proceeding without user scope confirmation
❌ Not routing to next technical research step
❌ Enabling DeepWiki without validating repos are indexed
❌ Exceeding 3 repo limit without warning
❌ Not communicating query budget to user
❌ Not saving DeepWiki configuration to frontmatter
**CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
**CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file

View File

@ -28,6 +28,9 @@
- **Research goals = "{{research_goals}}"** - established from initial discussion
- Focus on APIs, protocols, and system interoperability
- Web search capabilities with source verification are enabled
- **DeepWiki MCP** - Check `deepwiki_enabled` from frontmatter for enhanced repository analysis
- **DeepWiki repos** - If enabled, `deepwiki_repos` contains validated repositories to query
- **Query budget** - Track `deepwiki_queries_used` against `deepwiki_query_budget`
## YOUR TASK:
@ -165,6 +168,342 @@ _Data Encryption: [Secure data transmission and storage]_
_Source: [URL]_
```
### 3.5 DeepWiki-Enhanced Cross-Repository Analysis (CONDITIONAL)
**⚠️ ONLY EXECUTE THIS SECTION IF `deepwiki_enabled: true` IN FRONTMATTER**
If DeepWiki is not enabled, skip directly to Section 5.
---
#### DeepWiki Query Protocol
**Confidence Label System:**
| Label | Meaning | Source |
|-------|---------|--------|
| 🟢 `[REPO-VERIFIED]` | Direct from repo docs via DeepWiki | Single repo query |
| 🟡 `[CROSS-REPO-DOCUMENTED]` | Explicit integration docs found | Rare - high value |
| 🟠 `[LLM-SYNTHESIZED]` | Combined from per-repo facts | Requires POC |
| 🔴 `[HYPOTHESIS-ONLY]` | Speculative, no supporting docs | Do not use without validation |
---
#### Phase 1: Cross-Reference Detection
For each pair of repositories (A, B) in `deepwiki_repos`:
```
ask_question(repo_A, "Does {repo_A} document integration patterns with {repo_B} or similar frameworks in its category?")
```
**If cross-reference found:**
- Mark as `[CROSS-REPO-DOCUMENTED]` - this is rare and high value
- Extract the documented integration pattern
- Note the source section in DeepWiki
**If not found:**
- Continue to Phase 2 for this pair
- Mark any synthesis as `[LLM-SYNTHESIZED]`
**Update query count:** `deepwiki_queries_used += 1` per query
---
#### Phase 2: Structured Per-Repository Extraction
For each repository in `deepwiki_repos`, run these queries:
**Q1 - Integration APIs:**
```
ask_question(repo, "What APIs does {repo} expose for external integration? Include function signatures and parameters.")
```
**Q2 - Communication Protocols:**
```
ask_question(repo, "What communication protocols and data formats does {repo} support for inter-process or inter-service communication?")
```
**Q3 - Extension Points:**
```
ask_question(repo, "What are the documented extension points, plugin architecture, or hooks in {repo}?")
```
**Q4 - Third-Party Ecosystem:**
```
ask_question(repo, "What third-party libraries, plugins, or bridges exist for {repo} integration with other frameworks?")
```
**Q5 - Ecosystem Discovery (Enhanced):**
```
ask_question(repo, "What official adapters, integrations, or framework-specific packages does {repo} provide? Include any Next.js, React, Vue, Svelte, or mobile adapters.")
```
**Store results per repository with:**
- API signatures extracted
- Protocol support identified
- Extension mechanisms documented
- Ecosystem libraries discovered
- Official adapters/integrations listed
**Update query count:** `deepwiki_queries_used += 5` per repository
---
#### Phase 3: LLM Cross-Repository Synthesis
**⚠️ CRITICAL: This section produces `[LLM-SYNTHESIZED]` content. Label explicitly.**
Using Phase 1 and Phase 2 data, synthesize cross-repo integration patterns:
**For each repository pair (A, B):**
1. **Compare APIs:** Identify compatible integration points
- Does A expose an API that B can consume?
- Does B expose an API that A can consume?
2. **Compare Protocols:** Identify communication compatibility
- Do they share common protocols (HTTP, WebSocket, IPC)?
- Are data formats compatible (JSON, Protobuf)?
3. **Identify Bridge Patterns:**
- Can A's extension points connect to B?
- Are there ecosystem libraries that bridge A and B?
4. **Hypothesize Integration Architecture:**
- How would data flow between A and B?
- What is the recommended communication pattern?
**MANDATORY: Label all synthesis output as `[LLM-SYNTHESIZED]`**
---
#### Phase 3.5: UI Library Component Analysis (CONDITIONAL)
**⚠️ ONLY EXECUTE THIS SECTION IF ANY REPO IN `deepwiki_repos` HAS `type: ui-library` OR `type: ui-primitive`**
For each UI library/primitive repository, run these specialized queries:
**Q1 - Component Inventory:**
```
ask_question(repo, "What UI components does {repo} provide? List all available components with their primary purpose.")
```
**Q2 - Theming & Customization:**
```
ask_question(repo, "How does {repo} handle theming and customization? What are the theming APIs, CSS variables, or design tokens available?")
```
**Q3 - Composition Patterns:**
```
ask_question(repo, "What composition patterns does {repo} support? How do components compose together, and what are the slot/children patterns?")
```
**Q4 - Accessibility Features:**
```
ask_question(repo, "What accessibility features are built into {repo}? List ARIA support, keyboard navigation, and screen reader considerations.")
```
**Q5 - Variant & State System:**
```
ask_question(repo, "How does {repo} handle component variants and states? What props control visual variations (size, color, disabled, etc.)?")
```
**Update query count:** `deepwiki_queries_used += 5` per UI library repo
---
##### UI Component Capability Matrix Output
For each UI library, generate this matrix:
```markdown
### {Repo} Component Capability Matrix [REPO-VERIFIED]
**Library Type:** {ui-library|ui-primitive}
**Source:** DeepWiki query on {repo}
#### Available Components
| Component | Category | Variants | Accessibility | Composable |
| --------- | -------- | -------------------- | -------------------- | ---------- |
| Button | Action | size, color, variant | ✅ ARIA | ✅ |
| Input | Form | size, state | ✅ Label association | ✅ |
| ... | ... | ... | ... | ... |
#### Theming System
| Aspect | Support | Method |
| --------------- | ------- | ------------- |
| CSS Variables | ✅/❌ | {description} |
| Design Tokens | ✅/❌ | {description} |
| Runtime Theming | ✅/❌ | {description} |
| Dark Mode | ✅/❌ | {description} |
#### Composition Patterns
- **Slot Pattern:** {supported/not supported} - {description}
- **Compound Components:** {supported/not supported} - {description}
- **Render Props:** {supported/not supported} - {description}
- **Polymorphic `as` Prop:** {supported/not supported} - {description}
#### Accessibility Summary
| Feature | Coverage |
| ------------------- | --------------------- |
| ARIA Attributes | {auto/manual/partial} |
| Keyboard Navigation | {full/partial/none} |
| Focus Management | {automatic/manual} |
| Screen Reader | {tested/untested} |
```
---
#### Phase 4: POC Validation Checklist Generation
For each synthesized integration pattern, generate a validation checklist:
```markdown
### POC Validation Checklist: {Repo A} ↔ {Repo B}
**Pattern:** {synthesized pattern description}
**Confidence:** [LLM-SYNTHESIZED]
**Validation Steps:**
- [ ] {Repo A} API successfully called from integration code
- [ ] {Repo B} receives data in expected format
- [ ] Bidirectional communication works (if applicable)
- [ ] Error handling propagates correctly across boundary
- [ ] Performance acceptable for use case
- [ ] No memory leaks or resource issues at boundary
**Unknown/Unverified:**
- [ ] Thread safety across framework boundaries
- [ ] Lifecycle coordination between frameworks
- [ ] Version compatibility for untested combinations
```
---
#### DeepWiki Output Structure
Append to document after web search content:
```markdown
## Cross-Repository Integration Analysis [DEEPWIKI-ENHANCED]
**Data Sources:** DeepWiki MCP queries on {{deepwiki_repos}}
**Queries Used:** {{deepwiki_queries_used}} / {{deepwiki_query_budget}}
**Data Freshness:** DeepWiki indexes updated periodically - verify critical APIs against current release notes
---
### Per-Repository Integration Surfaces
#### {Repo A} Integration Surface [REPO-VERIFIED]
**Source:** DeepWiki query on {repo_A}
**Public APIs:**
{extracted from Q1}
**Communication Protocols:**
{extracted from Q2}
**Extension Points:**
{extracted from Q3}
**Ecosystem Libraries:**
{extracted from Q4}
---
#### {Repo B} Integration Surface [REPO-VERIFIED]
**Source:** DeepWiki query on {repo_B}
{same structure as above}
---
### Cross-Repository Integration Patterns
#### Direct Integration Documentation [CROSS-REPO-DOCUMENTED]
{Only if Phase 1 found actual cross-repo docs - often empty}
_No direct cross-repository documentation found._ (if empty)
---
#### Synthesized Integration Patterns [LLM-SYNTHESIZED]
**⚠️ WARNING: The following patterns are LLM synthesis based on per-repo facts.**
**They require POC validation before use in architecture decisions.**
##### Pattern: {Repo A} ↔ {Repo B} Bridge
**Hypothesis:** {description of how they might integrate}
**Based on:**
- {Repo A}'s {api} [REPO-VERIFIED]
- {Repo B}'s {api} [REPO-VERIFIED]
**Confidence:** 🟠 [LLM-SYNTHESIZED] - No direct documentation found
**Recommended Data Flow:**
```
{Repo A} → {mechanism} → {Repo B}
{Repo B} → {mechanism} → {Repo A} (if bidirectional)
```
**Code Example (Conceptual):**
```{language}
// Integration pattern - REQUIRES VALIDATION
{conceptual code based on extracted APIs}
```
---
### POC Validation Requirements
**⚠️ This research is INCOMPLETE until the following POC validations pass:**
{aggregated checklists from Phase 4}
---
### DeepWiki Query Log
| Query | Repository | Purpose | Result |
| ----- | ---------- | ------------------------- | --------------- |
| 1 | {repo} | Cross-reference detection | Found/Not found |
| 2 | {repo} | Integration APIs | {summary} |
| ... | ... | ... | ... |
**Total Queries:** {{deepwiki_queries_used}} / {{deepwiki_query_budget}}
```
---
### 5. Present Analysis and Continue Option
**Show analysis and present continue option:**
@ -179,6 +518,12 @@ _Source: [URL]_
- Microservices integration patterns mapped
- Event-driven integration strategies identified
**DeepWiki Enhancement (if enabled):**
- Per-repository integration surfaces documented [REPO-VERIFIED]
- Cross-repository patterns synthesized [LLM-SYNTHESIZED]
- POC validation checklists generated
- Query budget: {{deepwiki_queries_used}} / {{deepwiki_query_budget}}
**Ready to proceed to architectural patterns analysis?**
[C] Continue - Save this to document and proceed to architectural patterns
@ -206,6 +551,18 @@ Content is already written to document when generated in step 4. No additional a
✅ Proper routing to next step (architectural patterns)
✅ Research goals alignment maintained
### DeepWiki Success Metrics (if enabled):
✅ Phase 1 cross-reference detection executed for all repo pairs
✅ Phase 2 structured extraction completed for all repos (5 queries each)
✅ Phase 3 synthesis clearly labeled as `[LLM-SYNTHESIZED]`
✅ Phase 3.5 UI Component Analysis executed for ui-library/ui-primitive repos (if any)
✅ Phase 4 POC validation checklists generated for all synthesized patterns
✅ Per-repo facts labeled as `[REPO-VERIFIED]`
✅ Query budget tracked and displayed
✅ DeepWiki output structure appended to document
✅ UI Component Capability Matrix generated for each UI library (if applicable)
## FAILURE MODES:
❌ Relying solely on training data without web verification for current facts
@ -217,6 +574,17 @@ Content is already written to document when generated in step 4. No additional a
❌ Not presenting [C] continue option after content generation
❌ Not routing to architectural patterns step
### DeepWiki Failure Modes (if enabled):
❌ Not checking `deepwiki_enabled` before executing DeepWiki section
❌ Labeling synthesized content as `[REPO-VERIFIED]` (CRITICAL - trust violation)
❌ Not generating POC validation checklists for synthesized patterns
❌ Exceeding query budget without warning
❌ Not tracking `deepwiki_queries_used` in frontmatter
❌ Presenting synthesized patterns without confidence labels
❌ Not executing Phase 3.5 UI queries when ui-library/ui-primitive repos are present
❌ Missing UI Component Capability Matrix for UI library repos
**CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
**CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
**CRITICAL**: Making decisions without complete understanding of step requirements and protocols
@ -245,3 +613,4 @@ Content is already written to document when generated in step 4. No additional a
After user selects 'C', load `./step-04-architectural-patterns.md` to analyze architectural patterns, design decisions, and system structures for {{research_topic}}.
Remember: Always write research content to document immediately and emphasize current integration data with rigorous source verification!
```

View File

@ -29,6 +29,9 @@
- All technical research sections have been completed (overview, architecture, implementation)
- Web search capabilities with source verification are enabled
- This is the final synthesis step producing the complete technical research document
- **DeepWiki MCP** - Check `deepwiki_enabled` from frontmatter for Integration Research Findings section
- **DeepWiki data** - If enabled, cross-repo analysis from step-03 should be synthesized here
- **Query usage** - `deepwiki_queries_used` / `deepwiki_query_budget` for final reporting
## YOUR TASK:
@ -59,6 +62,7 @@ Produce a comprehensive, authoritative technical research document on **{{resear
- Strategic Technical Recommendations
- Implementation Roadmap and Risk Assessment
- Future Technical Outlook and Innovation Opportunities
- **Integration Research Findings** (if DeepWiki enabled)
- Technical Research Methodology and Source Documentation
- Technical Appendices and Reference Materials
```
@ -411,6 +415,190 @@ _This comprehensive technical research document serves as an authoritative techn
**Ready to complete this comprehensive technical research document?**
[C] Complete Research - Save final comprehensive technical document
### 5.5 Integration Research Findings (CONDITIONAL)
**⚠️ ONLY INCLUDE THIS SECTION IF `deepwiki_enabled: true` IN FRONTMATTER**
If DeepWiki was not enabled, skip this section entirely.
---
#### Integration Research Findings Section
Append to the final document before the Methodology section:
```markdown
## Integration Research Findings [DEEPWIKI-ENHANCED]
**⚠️ READ THIS FIRST - For Downstream Agents (Architect, DEV)**
This section contains two types of findings:
- **[REPO-VERIFIED]**: Facts extracted directly from repository documentation via DeepWiki
- **[LLM-SYNTHESIZED]**: Cross-repo patterns inferred by combining per-repo facts
**DO NOT** treat synthesized patterns as validated until POC checklist passes.
---
### Data Source Summary
| Repository | DeepWiki Indexed | Version | Queries Used |
| ---------- | ---------------- | ----------- | ------------ |
| {{repo_1}} | ✅ | {{version}} | {{queries}} |
| {{repo_2}} | ✅ | {{version}} | {{queries}} |
| {{repo_3}} | ✅/❌ | {{version}} | {{queries}} |
**Total DeepWiki Queries:** {{deepwiki_queries_used}} / {{deepwiki_query_budget}}
**Data Freshness Notice:** DeepWiki indexes repositories periodically. For production implementations, verify critical API signatures against current official documentation and release notes.
---
### Confidence Level Legend
| Label | Icon | Meaning | Action Required |
| ------------------------- | ---- | -------------------------------------- | --------------------------------- |
| `[REPO-VERIFIED]` | 🟢 | Direct from repo docs via DeepWiki | Normal confidence |
| `[CROSS-REPO-DOCUMENTED]` | 🟡 | Explicit integration docs found (rare) | High confidence |
| `[LLM-SYNTHESIZED]` | 🟠 | Combined from per-repo facts | **POC required before use** |
| `[HYPOTHESIS-ONLY]` | 🔴 | Speculative, no supporting docs | **Do not use without validation** |
---
### Framework Compatibility Matrix
| Framework A | Framework B | Integration Pattern | Confidence | POC Status |
| ------------------ | ------------------ | ------------------- | -------------------- | ---------------- |
| {{repo_A}}@{{ver}} | {{repo_B}}@{{ver}} | {{pattern_summary}} | {{confidence_label}} | ⬜ Not validated |
---
### For Downstream Agents
#### For Architect Agent
When using this research for architecture decisions:
1. **[REPO-VERIFIED] facts**: Use with normal confidence for design decisions
2. **[LLM-SYNTHESIZED] patterns**: Treat as hypotheses, not validated approaches
3. **Before committing to an integration pattern**: Ensure DEV has validated via POC checklist
4. **Version awareness**: Note the documented versions; your project may use different versions
#### For DEV Agent
When implementing based on this research:
1. **Start with POC validation**: Before full implementation, validate synthesized patterns
2. **Use the POC checklists**: Each synthesized pattern has a validation checklist
3. **Report discrepancies**: If actual behavior differs from research, flag for Analyst
4. **Version check**: Verify your project's framework versions match documented versions
#### For UX Designer (CONDITIONAL - only if UI library repos were queried)
**⚠️ ONLY INCLUDE THIS SECTION IF `deepwiki_has_ui_repos: true` IN FRONTMATTER**
When using this research for design decisions:
1. **Component Capability Matrix**: Reference the matrix for available components and their variants
2. **Theming System**: Check documented theming approach before designing custom themes
3. **Composition Patterns**: Understand how components compose before designing complex UIs
4. **Accessibility**: Note built-in accessibility features and gaps that need manual handling
##### UI Library Summary for UX Designers
| Library | Components | Theming | Dark Mode | Accessibility |
| ---------- | ---------- | ---------- | ----------- | ------------- |
| {{repo_1}} | {{count}} | {{method}} | {{support}} | {{level}} |
| {{repo_2}} | {{count}} | {{method}} | {{support}} | {{level}} |
##### Component Availability Quick Reference
| Component Type | {{repo_1}} | {{repo_2}} | Notes |
| --------------- | ---------- | ---------- | --------- |
| Buttons | ✅/❌ | ✅/❌ | {{notes}} |
| Forms/Inputs | ✅/❌ | ✅/❌ | {{notes}} |
| Navigation | ✅/❌ | ✅/❌ | {{notes}} |
| Data Display | ✅/❌ | ✅/❌ | {{notes}} |
| Feedback/Alerts | ✅/❌ | ✅/❌ | {{notes}} |
| Layout | ✅/❌ | ✅/❌ | {{notes}} |
| Overlay/Modal | ✅/❌ | ✅/❌ | {{notes}} |
##### Theming Compatibility
| Aspect | {{repo_1}} | {{repo_2}} | Integration Notes |
| ------------- | ----------- | ----------- | ----------------- |
| CSS Variables | {{support}} | {{support}} | {{notes}} |
| Design Tokens | {{support}} | {{support}} | {{notes}} |
| Custom Themes | {{support}} | {{support}} | {{notes}} |
| Color Schemes | {{support}} | {{support}} | {{notes}} |
##### Design System Recommendations [LLM-SYNTHESIZED]
Based on the component capability analysis:
1. **Primary UI Library**: {{recommendation}} - {{rationale}}
2. **Supplement with**: {{recommendation}} - {{rationale}}
3. **Custom Components Needed**: {{list of gaps}}
4. **Accessibility Gaps to Address**: {{list}}
**⚠️ Note:** Design system recommendations are synthesized and should be validated with a UI prototype.
---
### POC Validation Summary
**⚠️ This research is INCOMPLETE until the following POC validations pass:**
#### {Repo A} ↔ {Repo B} Integration
**Pattern:** {{pattern_description}}
**Confidence:** 🟠 [LLM-SYNTHESIZED]
**Validation Checklist:**
- [ ] {{Repo A}} API successfully called from integration code
- [ ] {{Repo B}} receives data in expected format
- [ ] Bidirectional communication works (if applicable)
- [ ] Error handling propagates correctly across boundary
- [ ] Performance acceptable for use case
- [ ] No memory leaks or resource issues at boundary
**Unknown/Unverified:**
- [ ] Thread safety across framework boundaries
- [ ] Lifecycle coordination between frameworks
- [ ] Version compatibility for untested combinations
---
### Known Limitations and Gaps
Based on DeepWiki research, the following areas require additional investigation:
- [ ] {{Gap 1}}: {{description}}
- [ ] {{Gap 2}}: {{description}}
- [ ] {{Gap 3}}: {{description}}
---
### DeepWiki Query Audit Trail
For transparency and reproducibility:
| # | Repository | Query Purpose | Result Summary |
| --- | ---------- | ------------------------- | ------------------- |
| 1 | {{repo}} | Cross-reference detection | {{found/not found}} |
| 2 | {{repo}} | Integration APIs | {{summary}} |
| 3 | {{repo}} | Communication protocols | {{summary}} |
| ... | ... | ... | ... |
**Research Date:** {{date}}
**DeepWiki MCP Endpoint:** https://mcp.deepwiki.com/sse
```
---
### 6. Handle Final Technical Completion
#### If 'C' (Complete Research):
@ -436,6 +624,26 @@ When user selects 'C', append the complete comprehensive technical research docu
✅ [C] complete option presented and handled correctly
✅ Technical research workflow completed with comprehensive document
### DeepWiki Success Metrics (if enabled):
✅ Integration Research Findings section included in final document
✅ Data Source Summary with repo versions and query counts
✅ Confidence Level Legend clearly displayed
✅ Framework Compatibility Matrix populated
✅ Downstream Agent Instructions for Architect and DEV
✅ POC Validation Summary with all checklists aggregated
✅ Known Limitations and Gaps documented
✅ DeepWiki Query Audit Trail for transparency
### UI Library Success Metrics (if deepwiki_has_ui_repos: true):
✅ "For UX Designer" section included with guidance
✅ UI Library Summary table populated
✅ Component Availability Quick Reference matrix generated
✅ Theming Compatibility matrix documented
✅ Design System Recommendations with [LLM-SYNTHESIZED] label
✅ Component Capability Matrix from step-03 referenced
## FAILURE MODES:
❌ Not producing compelling technical introduction
@ -447,6 +655,24 @@ When user selects 'C', append the complete comprehensive technical research docu
❌ Producing technical document without professional structure
❌ Not presenting completion option for final technical document
### DeepWiki Failure Modes (if enabled):
❌ Not checking `deepwiki_enabled` before including Integration Research Findings
❌ Missing Downstream Agent Instructions (Architect/DEV guidance)
❌ Not including POC Validation Summary
❌ Omitting confidence labels from final output
❌ Not reporting total query usage
❌ Missing Data Freshness Notice
### UI Library Failure Modes (if deepwiki_has_ui_repos: true):
❌ Not checking `deepwiki_has_ui_repos` before including UX Designer section
❌ Missing UI Library Summary table
❌ Not including Component Availability Quick Reference
❌ Missing Theming Compatibility analysis
❌ Design System Recommendations without [LLM-SYNTHESIZED] label
❌ Not referencing Component Capability Matrix from step-03
**CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
**CRITICAL**: Proceeding with 'C' without fully reading and understanding the next step file
**CRITICAL**: Making decisions without complete understanding of step requirements and protocols

View File

@ -1,6 +1,6 @@
---
name: create-prd
description: Creates a comprehensive PRDs through collaborative step-by-step discovery between two product managers working as peers.
description: Creates a comprehensive PRD through collaborative step-by-step discovery between two product managers working as peers.
main_config: '{project-root}/.bmad/bmm/config.yaml'
web_bundle: true
---

View File

@ -94,7 +94,7 @@ Discover and load context documents using smart discovery:
**Project Context Rules (Critical for AI Agents):**
1. Check for project context file: `**/project_context.md`
1. Check for project context file: `**/project-context.md`
2. If exists: Load COMPLETE file contents - this contains critical rules for AI agents
3. Add to frontmatter `hasProjectContext: true` and track file path
4. Report to user: "Found existing project context with {number_of_rules} agent rules"

View File

@ -280,7 +280,7 @@ Your architecture will ensure consistent, high-quality implementation across all
**💡 Optional Enhancement: Project Context File**
Would you like to create a `project_context.md` file? This is a concise, optimized guide for AI agents that captures:
Would you like to create a `project-context.md` file? This is a concise, optimized guide for AI agents that captures:
- Critical language and framework rules they might miss
- Specific patterns and conventions for your project
@ -310,7 +310,7 @@ This will help ensure consistent implementation by capturing:
- Testing and quality standards
- Anti-patterns to avoid
The workflow will collaborate with you to create an optimized `project_context.md` file that AI agents will read before implementing any code."
The workflow will collaborate with you to create an optimized `project-context.md` file that AI agents will read before implementing any code."
**Execute the Generate Project Context workflow:**

View File

@ -217,7 +217,7 @@
**Issues Fixed:** {{fixed_count}}
**Action Items Created:** {{action_count}}
{{#if new_status == "done"}}Story is ready for next work!{{else}}Address the action items and continue development.{{/if}}
{{#if new_status == "done"}}Code review complete!{{else}}Address the action items and continue development.{{/if}}
</output>
</step>

View File

@ -35,7 +35,7 @@ validation-rules:
- [ ] **Acceptance Criteria Satisfaction:** Implementation satisfies EVERY Acceptance Criterion in the story
- [ ] **No Ambiguous Implementation:** Clear, unambiguous implementation that meets story requirements
- [ ] **Edge Cases Handled:** Error conditions and edge cases appropriately addressed
- [ ] **Dependencies Within Scope:** Only uses dependencies specified in story or project_context.md
- [ ] **Dependencies Within Scope:** Only uses dependencies specified in story or project-context.md
## 🧪 Testing & Quality Assurance

View File

@ -53,11 +53,9 @@ Run `/bmad:bmm:workflows:sprint-planning` to generate it, then rerun sprint-stat
1. If any story status == in-progress → recommend `dev-story` for the first in-progress story
2. Else if any story status == review → recommend `code-review` for the first review story
3. Else if any story status == ready-for-dev → recommend `dev-story`
4. Else if any story status == drafted → recommend `story-ready`
5. Else if any story status == backlog → recommend `create-story`
6. Else if any epic status == backlog → recommend `epic-tech-context`
7. Else if retrospectives are optional → recommend `retrospective`
8. Else → All implementation items done; suggest `workflow-status` to plan next phase
4. Else if any story status == backlog → recommend `create-story`
5. Else if retrospectives are optional → recommend `retrospective`
6. Else → All implementation items done; suggest `workflow-status` to plan next phase
<action>Store selected recommendation as: next_story_id, next_workflow_id, next_agent (SM/DEV as appropriate)</action>
</step>

View File

@ -33,7 +33,7 @@ Discover the project's technology stack, existing patterns, and critical impleme
First, check if project context already exists:
- Look for file at `{output_folder}/project_context.md`
- Look for file at `{output_folder}/project-context.md`
- If exists: Read complete file to understand existing rules
- Present to user: "Found existing project context with {number_of_sections} sections. Would you like to update this or create a new one?"
@ -122,7 +122,7 @@ Based on discovery, create or update the context document:
#### A. Fresh Document Setup (if no existing context)
Copy template from `{installed_path}/project-context-template.md` to `{output_folder}/project_context.md`
Copy template from `{installed_path}/project-context-template.md` to `{output_folder}/project-context.md`
Initialize frontmatter with:
```yaml

View File

@ -288,7 +288,7 @@ After each category, show the generated rules and present choices:
## APPEND TO PROJECT CONTEXT:
When user selects 'C' for a category, append the content directly to `{output_folder}/project_context.md` using the structure from step 8.
When user selects 'C' for a category, append the content directly to `{output_folder}/project-context.md` using the structure from step 8.
## SUCCESS METRICS:

View File

@ -134,7 +134,7 @@ Based on user skill level, present the completion:
**Expert Mode:**
"Project context complete. Optimized for LLM consumption with {{rule_count}} critical rules across {{section_count}} sections.
File saved to: `{output_folder}/project_context.md`
File saved to: `{output_folder}/project-context.md`
Ready for AI agent integration."
@ -227,7 +227,7 @@ Present final completion to user:
"✅ **Project Context Generation Complete!**
Your optimized project context file is ready at:
`{output_folder}/project_context.md`
`{output_folder}/project-context.md`
**📊 Context Summary:**

View File

@ -1,11 +1,11 @@
---
name: generate-project-context
description: Creates a concise project_context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
description: Creates a concise project-context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
---
# Generate Project Context Workflow
**Goal:** Create a concise, optimized `project_context.md` file containing critical rules, patterns, and guidelines that AI agents must follow when implementing code. This file focuses on unobvious details that LLMs need to be reminded of.
**Goal:** Create a concise, optimized `project-context.md` file containing critical rules, patterns, and guidelines that AI agents must follow when implementing code. This file focuses on unobvious details that LLMs need to be reminded of.
**Your Role:** You are a technical facilitator working with a peer to capture the essential implementation rules that will ensure consistent, high-quality code generation across all AI agents working on the project.
@ -37,7 +37,7 @@ Load config from `{project-root}/.bmad/bmm/config.yaml` and resolve:
- `installed_path` = `{project-root}/.bmad/bmm/workflows/generate-project-context`
- `template_path` = `{installed_path}/project-context-template.md`
- `output_file` = `{output_folder}/project_context.md`
- `output_file` = `{output_folder}/project-context.md`
---

View File

@ -24,8 +24,14 @@ variables:
# Output configuration
# Note: Actual output file determined dynamically based on mode detection
# - System-Level (Phase 3): {output_folder}/test-design-system.md
# - Epic-Level (Phase 4): {output_folder}/test-design-epic-{epic_num}.md
# Declared outputs for new workflow format
outputs:
- id: system-level
description: "System-level testability review (Phase 3)"
path: "{output_folder}/test-design-system.md"
- id: epic-level
description: "Epic-level test plan (Phase 4)"
path: "{output_folder}/test-design-epic-{epic_num}.md"
default_output_file: "{output_folder}/test-design-epic-{epic_num}.md"
# Required tools

View File

@ -248,14 +248,21 @@ class ConfigCollector {
const configKeys = Object.keys(moduleConfig).filter((key) => key !== 'prompt');
const existingKeys = this.existingConfig && this.existingConfig[moduleName] ? Object.keys(this.existingConfig[moduleName]) : [];
// Find new interactive fields (with prompt)
const newKeys = configKeys.filter((key) => {
const item = moduleConfig[key];
// Check if it's a config item and doesn't exist in existing config
return item && typeof item === 'object' && item.prompt && !existingKeys.includes(key);
});
// If in silent mode and no new keys, use existing config and skip prompts
if (silentMode && newKeys.length === 0) {
// Find new static fields (without prompt, just result)
const newStaticKeys = configKeys.filter((key) => {
const item = moduleConfig[key];
return item && typeof item === 'object' && !item.prompt && item.result && !existingKeys.includes(key);
});
// If in silent mode and no new keys (neither interactive nor static), use existing config and skip prompts
if (silentMode && newKeys.length === 0 && newStaticKeys.length === 0) {
if (this.existingConfig && this.existingConfig[moduleName]) {
if (!this.collectedConfig[moduleName]) {
this.collectedConfig[moduleName] = {};
@ -294,9 +301,12 @@ class ConfigCollector {
return false; // No new fields
}
// If we have new fields, build questions first
if (newKeys.length > 0) {
// If we have new fields (interactive or static), process them
if (newKeys.length > 0 || newStaticKeys.length > 0) {
const questions = [];
const staticAnswers = {};
// Build questions for interactive fields
for (const key of newKeys) {
const item = moduleConfig[key];
const question = await this.buildQuestion(moduleName, key, item, moduleConfig);
@ -305,39 +315,50 @@ class ConfigCollector {
}
}
// Prepare static answers (no prompt, just result)
for (const key of newStaticKeys) {
staticAnswers[`${moduleName}_${key}`] = undefined;
}
// Collect all answers (static + prompted)
let allAnswers = { ...staticAnswers };
if (questions.length > 0) {
// Only show header if we actually have questions
CLIUtils.displayModuleConfigHeader(moduleName, moduleConfig.header, moduleConfig.subheader);
console.log(); // Line break before questions
const answers = await inquirer.prompt(questions);
const promptedAnswers = await inquirer.prompt(questions);
// Store answers for cross-referencing
Object.assign(this.allAnswers, answers);
// Process answers and build result values
for (const key of Object.keys(answers)) {
const originalKey = key.replace(`${moduleName}_`, '');
const item = moduleConfig[originalKey];
const value = answers[key];
let result;
if (Array.isArray(value)) {
result = value;
} else if (item.result) {
result = this.processResultTemplate(item.result, value);
} else {
result = value;
}
if (!this.collectedConfig[moduleName]) {
this.collectedConfig[moduleName] = {};
}
this.collectedConfig[moduleName][originalKey] = result;
}
} else {
// New keys exist but no questions generated - show no config message
// Merge prompted answers with static answers
Object.assign(allAnswers, promptedAnswers);
} else if (newStaticKeys.length > 0) {
// Only static fields, no questions - show no config message
CLIUtils.displayModuleNoConfig(moduleName, moduleConfig.header, moduleConfig.subheader);
}
// Store all answers for cross-referencing
Object.assign(this.allAnswers, allAnswers);
// Process all answers (both static and prompted)
for (const key of Object.keys(allAnswers)) {
const originalKey = key.replace(`${moduleName}_`, '');
const item = moduleConfig[originalKey];
const value = allAnswers[key];
let result;
if (Array.isArray(value)) {
result = value;
} else if (item.result) {
result = this.processResultTemplate(item.result, value);
} else {
result = value;
}
if (!this.collectedConfig[moduleName]) {
this.collectedConfig[moduleName] = {};
}
this.collectedConfig[moduleName][originalKey] = result;
}
}
// Copy over existing values for fields that weren't prompted
@ -353,7 +374,7 @@ class ConfigCollector {
}
}
return newKeys.length > 0; // Return true if we prompted for new fields
return newKeys.length > 0 || newStaticKeys.length > 0; // Return true if we had any new fields (interactive or static)
}
/**
@ -501,30 +522,52 @@ class ConfigCollector {
// Process each config item
const questions = [];
const staticAnswers = {};
const configKeys = Object.keys(moduleConfig).filter((key) => key !== 'prompt');
for (const key of configKeys) {
const item = moduleConfig[key];
// Skip if not a config object
if (!item || typeof item !== 'object' || !item.prompt) {
if (!item || typeof item !== 'object') {
continue;
}
const question = await this.buildQuestion(moduleName, key, item, moduleConfig);
if (question) {
questions.push(question);
// Handle static values (no prompt, just result)
if (!item.prompt && item.result) {
// Add to static answers with a marker value
staticAnswers[`${moduleName}_${key}`] = undefined;
continue;
}
// Handle interactive values (with prompt)
if (item.prompt) {
const question = await this.buildQuestion(moduleName, key, item, moduleConfig);
if (question) {
questions.push(question);
}
}
}
// Collect all answers (static + prompted)
let allAnswers = { ...staticAnswers };
// Display appropriate header based on whether there are questions
if (questions.length > 0) {
CLIUtils.displayModuleConfigHeader(moduleName, moduleConfig.header, moduleConfig.subheader);
console.log(); // Line break before questions
const answers = await inquirer.prompt(questions);
const promptedAnswers = await inquirer.prompt(questions);
// Store answers for cross-referencing
Object.assign(this.allAnswers, answers);
// Merge prompted answers with static answers
Object.assign(allAnswers, promptedAnswers);
}
// Store all answers for cross-referencing
Object.assign(this.allAnswers, allAnswers);
// Process all answers (both static and prompted)
if (Object.keys(allAnswers).length > 0) {
const answers = allAnswers;
// Process answers and build result values
for (const key of Object.keys(answers)) {

View File

@ -581,6 +581,11 @@ class ManifestGenerator {
*/
async writeWorkflowManifest(cfgDir) {
const csvPath = path.join(cfgDir, 'workflow-manifest.csv');
const escapeCsv = (value) => `"${String(value ?? '').replaceAll('"', '""')}"`;
const parseCsvLine = (line) => {
const columns = line.match(/(".*?"|[^",\s]+)(?=\s*,|\s*$)/g) || [];
return columns.map((c) => c.replaceAll(/^"|"$/g, ''));
};
// Read existing manifest to preserve entries
const existingEntries = new Map();
@ -592,18 +597,21 @@ class ManifestGenerator {
for (let i = 1; i < lines.length; i++) {
const line = lines[i];
if (line) {
// Parse CSV (simple parsing assuming no commas in quoted fields)
const parts = line.split('","');
const parts = parseCsvLine(line);
if (parts.length >= 4) {
const name = parts[0].replace(/^"/, '');
const module = parts[2];
existingEntries.set(`${module}:${name}`, line);
const [name, description, module, workflowPath] = parts;
existingEntries.set(`${module}:${name}`, {
name,
description,
module,
path: workflowPath,
});
}
}
}
}
// Create CSV header - removed standalone column as ALL workflows now generate commands
// Create CSV header - standalone column removed, everything is canonicalized to 4 columns
let csv = 'name,description,module,path\n';
// Combine existing and new workflows
@ -617,12 +625,18 @@ class ManifestGenerator {
// Add/update new workflows
for (const workflow of this.workflows) {
const key = `${workflow.module}:${workflow.name}`;
allWorkflows.set(key, `"${workflow.name}","${workflow.description}","${workflow.module}","${workflow.path}"`);
allWorkflows.set(key, {
name: workflow.name,
description: workflow.description,
module: workflow.module,
path: workflow.path,
});
}
// Write all workflows
for (const [, value] of allWorkflows) {
csv += value + '\n';
const row = [escapeCsv(value.name), escapeCsv(value.description), escapeCsv(value.module), escapeCsv(value.path)].join(',');
csv += row + '\n';
}
await fs.writeFile(csvPath, csv);