Merge branch 'main' into feat/external-agent-code-review

Brian 2025-12-11 17:02:46 -06:00 committed by GitHub
commit 78646069ef
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
26 changed files with 151 additions and 139 deletions


@@ -4,9 +4,10 @@ language: "en-US"
early_access: true
reviews:
profile: chill
high_level_summary: true
high_level_summary: false # don't post summary until explicitly invoked
request_changes_workflow: false
review_status: false
commit_status: false # don't set commit status until explicitly invoked
collapse_walkthrough: false
poem: false
auto_review:
@@ -33,4 +34,7 @@ reviews:
Flag any process.exit() without an error message.
chat:
auto_reply: true # Respond to mentions in comments, a la @coderabbit review
issue_enrichment:
auto_enrich:
enabled: false # don't auto-comment on issues
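To illustrate the `process.exit()` instruction above, here is a hypothetical snippet the reviewer should flag, next to one that passes (not from this repository):

```js
// Flagged: exits without reporting why
if (!config) {
  process.exit(1);
}

// Passes: an error message precedes the exit
if (!config) {
  console.error('Fatal: missing config file');
  process.exit(1);
}
```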

package-lock.json (generated)

@@ -1,12 +1,12 @@
{
"name": "bmad-method",
"version": "6.0.0-alpha.15",
"version": "6.0.0-alpha.16",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "bmad-method",
"version": "6.0.0-alpha.15",
"version": "6.0.0-alpha.16",
"license": "MIT",
"dependencies": {
"@kayvan/markdown-tree-parser": "^1.6.1",


@@ -330,7 +330,7 @@ Review was saved to story file, but sprint-status.yaml may be out of sync.
<action>All action items are included in the standalone review report</action>
<ask if="action items exist">Would you like me to create tracking items for these action items? (backlog/tasks)</ask>
<action if="user confirms">
If {{backlog_file}} does not exist, copy {installed_path}/backlog_template.md to {{backlog_file}} location.
If {{backlog_file}} does not exist, copy {installed_path}/backlog-template.md to {{backlog_file}} location.
Append a row per action item with Date={{date}}, Story="Ad-Hoc Review", Epic="N/A", Type, Severity, Owner (or "TBD"), Status="Open", Notes with file refs and context.
</action>
</check>
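For illustration, an appended backlog row built from the field list above might look like this; the column order is assumed from the prose and the values are hypothetical:

```markdown
| Date | Story | Epic | Type | Severity | Owner | Status | Notes |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 2025-12-11 | Ad-Hoc Review | N/A | Bug | High | TBD | Open | Missing null check in src/api/users.js |
```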
@@ -342,7 +342,7 @@ Review was saved to story file, but sprint-status.yaml may be out of sync.
Append under the story's "Tasks / Subtasks" a new subsection titled "Review Follow-ups (AI)", adding each item as an unchecked checkbox in imperative form, prefixed with "[AI-Review]" and severity. Example: "- [ ] [AI-Review][High] Add input validation on server route /api/x (AC #2)".
</action>
<action>
If {{backlog_file}} does not exist, copy {installed_path}/backlog_template.md to {{backlog_file}} location.
If {{backlog_file}} does not exist, copy {installed_path}/backlog-template.md to {{backlog_file}} location.
Append a row per action item with Date={{date}}, Story={{epic_num}}.{{story_num}}, Epic={{epic_num}}, Type, Severity, Owner (or "TBD"), Status="Open", Notes with short context and file refs.
</action>
<action>


@@ -24,7 +24,7 @@ agent:
critical_actions:
- "READ the entire story file BEFORE any implementation - tasks/subtasks sequence is your authoritative implementation guide"
- "Load project_context.md if available for coding standards only - never let it override story requirements"
- "Load project-context.md if available for coding standards only - never let it override story requirements"
- "Execute tasks/subtasks IN ORDER as written in story file - no skipping, no reordering, no doing what you want"
- "For each task/subtask: follow red-green-refactor cycle - write failing test first, then implementation"
- "Mark task/subtask [x] ONLY when both implementation AND tests are complete and passing"


@@ -199,24 +199,11 @@ PRDs are for Level 2-4 projects with multiple features requiring product-level c
### Q: How do I mark a story as done?
**A:** You have two options:
**A:** After dev-story completes and code-review passes:
**Option 1: Use story-done workflow (Recommended)**
1. Load SM agent
2. Run `story-done` workflow
3. Workflow automatically updates `sprint-status.yaml` (created by sprint-planning at Phase 4 start)
4. Moves story from current status → `DONE`
5. Advances the story queue
**Option 2: Manual update**
1. After dev-story completes and code-review passes
2. Open `sprint-status.yaml` (created by sprint-planning)
3. Change the story status from `review` to `done`
4. Save the file
The story-done workflow is faster and ensures proper status file updates.
1. Open `sprint-status.yaml` (created by sprint-planning)
2. Change the story status from `review` to `done`
3. Save the file
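A minimal sketch of that manual edit, assuming sprint-status.yaml maps story ids to statuses (the actual layout is whatever sprint-planning generated):

```yaml
# sprint-status.yaml (before)
stories:
  story-1-3: review   # code review passed

# sprint-status.yaml (after)
stories:
  story-1-3: done
```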
### Q: Can I work on multiple stories at once?


@@ -2934,7 +2934,7 @@
"gap": 1
},
"endBinding": {
"elementId": "proc-story-done",
"elementId": "proc-code-review",
"focus": 0.04241833499478815,
"gap": 1.3466869862454587
},
@@ -3189,7 +3189,7 @@
"lineHeight": 1.25
},
{
"id": "proc-story-done",
"id": "proc-code-review",
"type": "rectangle",
"x": 1169.3991588878014,
"y": 947.2529662369525,
@@ -3207,12 +3207,12 @@
"value": 8
},
"groupIds": [
"proc-story-done-group"
"proc-code-review-group"
],
"boundElements": [
{
"type": "text",
"id": "proc-story-done-text"
"id": "proc-code-review-text"
},
{
"type": "arrow",
@@ -3235,7 +3235,7 @@
"link": null
},
{
"id": "proc-story-done-text",
"id": "proc-code-review-text",
"type": "text",
"x": 1187.9272045420983,
"y": 972.2529662369525,
@@ -3249,14 +3249,14 @@
"roughness": 0,
"opacity": 100,
"groupIds": [
"proc-story-done-group"
"proc-code-review-group"
],
"fontSize": 16,
"fontFamily": 1,
"text": "Code Review\n<<use different\nLLM>>",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "proc-story-done",
"containerId": "proc-code-review",
"locked": false,
"version": 502,
"versionNonce": 1242095014,
@@ -3289,7 +3289,7 @@
"opacity": 100,
"groupIds": [],
"startBinding": {
"elementId": "proc-story-done",
"elementId": "proc-code-review",
"focus": 0.014488632877232727,
"gap": 8.284295421831303
},


@@ -377,12 +377,6 @@ Checks:
Quick Spec Flow works seamlessly with all Phase 4 implementation workflows:
### story-context (SM Agent)
- ✅ Recognizes tech-spec.md as authoritative source
- ✅ Extracts context from tech-spec (replaces PRD)
- ✅ Generates XML context for complex scenarios
### create-story (SM Agent)
- ✅ Can work with tech-spec.md instead of PRD
@@ -529,10 +523,6 @@ Quick Spec Flow is **fully standalone**:
**A:** No problem! You can always transition to BMad Method by running workflow-init and create-prd. Your tech-spec becomes input for the PRD.
### Q: Do I need story-context for every story?
**A:** Usually no! Tech-spec is comprehensive enough for most Quick Flow projects. Only use story-context for complex edge cases.
### Q: Can I skip validation?
**A:** No, validation always runs automatically. But it's fast and catches issues early!
@@ -564,15 +554,11 @@ Starter templates save hours of setup time. Let Quick Spec Flow find the best on
When validation runs, read the scores. They tell you if your spec is production-ready.
### 5. **Story Context is Optional**
For single changes, try going directly to dev-story first. Only add story-context if you hit complexity.
### 6. **Keep Single Changes Truly Atomic**
### 5. **Keep Single Changes Truly Atomic**
If your "single change" needs 3+ files, it might be a multi-story feature. Let the workflow guide you.
### 7. **Validate Story Sequence for Multi-Story Features**
### 6. **Validate Story Sequence for Multi-Story Features**
When you get multiple stories, check the dependency validation output. Proper sequence matters!


@@ -26,14 +26,17 @@ graph TB
subgraph Phase3["<b>Phase 3: SOLUTIONING</b>"]
Architecture["<b>Architect: *architecture</b>"]
EpicsStories["<b>PM/Architect: *create-epics-and-stories</b>"]
TestDesignSys["<b>TEA: *test-design (system-level)</b>"]
Framework["<b>TEA: *framework</b>"]
CI["<b>TEA: *ci</b>"]
GateCheck["<b>Architect: *implementation-readiness</b>"]
Architecture --> EpicsStories
Architecture --> TestDesignSys
TestDesignSys --> Framework
EpicsStories --> Framework
Framework --> CI
CI --> GateCheck
Phase3Note["<b>Epics created AFTER architecture,</b><br/><b>then test infrastructure setup</b>"]
Phase3Note["<b>Epics created AFTER architecture,</b><br/><b>then system-level test design and test infrastructure setup</b>"]
EpicsStories -.-> Phase3Note
end
@@ -93,12 +96,17 @@ graph TB
- **Documentation** (Optional for brownfield): Prerequisite using `*document-project`
- **Phase 1** (Optional): Discovery/Analysis (`*brainstorm`, `*research`, `*product-brief`)
- **Phase 2** (Required): Planning (`*prd` creates PRD with FRs/NFRs)
- **Phase 3** (Track-dependent): Solutioning (`*architecture` → `*create-epics-and-stories` → TEA: `*framework`, `*ci` → `*implementation-readiness`)
- **Phase 3** (Track-dependent): Solutioning (`*architecture` → `*test-design` (system-level) → `*create-epics-and-stories` → TEA: `*framework`, `*ci` → `*implementation-readiness`)
- **Phase 4** (Required): Implementation (`*sprint-planning` → per-epic: `*test-design` → per-story: dev workflows)
**TEA workflows:** `*framework` and `*ci` run once in Phase 3 after architecture. `*test-design` runs per-epic in Phase 4. Output: `test-design-epic-N.md`.
**TEA workflows:** `*framework` and `*ci` run once in Phase 3 after architecture. `*test-design` is **dual-mode**:
Quick Flow track skips Phase 1 and 3. BMad Method and Enterprise use all phases based on project needs.
- **System-level (Phase 3):** Run immediately after architecture/ADR drafting to produce `test-design-system.md` (testability review, ADR → test mapping, Architecturally Significant Requirements (ASRs), environment needs). Feeds the implementation-readiness gate.
- **Epic-level (Phase 4):** Run per-epic to produce `test-design-epic-N.md` (risk, priorities, coverage plan).
Quick Flow track skips Phases 1 and 3.
BMad Method and Enterprise use all phases based on project needs.
When an ADR or architecture draft is produced, run `*test-design` in **system-level** mode before the implementation-readiness gate. This ensures the ADR has an attached testability review and ADR → test mapping. Keep the test-design updated if ADRs change.
### Why TEA is Different from Other BMM Agents


@@ -196,7 +196,7 @@ workflow-init asks: "Is this work in progress or previous effort?"
2. Verify agent has workflow:
- PM agent: prd, tech-spec
- Architect agent: create-architecture, validate-architecture
- SM agent: sprint-planning, create-story, story-context
- SM agent: sprint-planning, create-story
3. Try menu number instead of name
4. Check you're using correct agent for workflow
@@ -219,23 +219,6 @@ workflow-init asks: "Is this work in progress or previous effort?"
3. **Run in Phase 4 only** - Ensure Phase 2/3 complete first
4. **Check file paths** - Epic files should be in correct output folder
### Problem: story-context generates empty or wrong context
**Symptoms:**
- Context file created but has no useful content
- Context doesn't reference existing code
- Missing technical guidance
**Solution:**
1. **Run epic-tech-context first** - story-context builds on epic context
2. **Check story file exists** - Verify story was created by create-story
3. **For brownfield**:
- Ensure document-project was run
- Verify docs/index.md exists with codebase context
4. **Try regenerating** - Sometimes needs fresh attempt with more specific story details
---
## Context and Documentation Issues
@@ -362,7 +345,7 @@ For most brownfield projects, **Deep scan is sufficient**.
1. **For brownfield**:
- Ensure document-project captured existing architecture
- Review architecture docs before implementing
2. **Check story-context** - Should document integration points
2. **Check story file** - Should document integration points
3. **In tech-spec/architecture** - Explicitly document:
- Which existing modules to modify
- What APIs/services to integrate with
@@ -384,7 +367,7 @@ For most brownfield projects, **Deep scan is sufficient**.
- Should detect existing patterns
- Asks for confirmation before proceeding
2. **Review documentation** - Ensure document-project captured patterns
3. **Use story-context** - Injects pattern guidance per story
3. **Use comprehensive story files** - Include pattern guidance in story
4. **Add to code-review checklist**:
- Pattern adherence
- Convention consistency
@@ -459,9 +442,7 @@ To change locations, edit config.yaml then re-run workflows.
```
2. **Some workflows auto-update**:
- sprint-planning creates file
- epic-tech-context changes epic to "contexted"
- create-story changes story to "drafted"
- story-context changes to "ready-for-dev"
- create-story changes story to "ready-for-dev"
- dev-story may auto-update (check workflow)
3. **Re-run sprint-planning** to resync if needed
@@ -657,8 +638,8 @@ If your issue isn't covered here:
### "Context generation failed"
**Cause:** Missing prerequisites (epic context, story file, or docs)
**Fix:** Verify epic-tech-context run, story file exists, docs present
**Cause:** Missing prerequisites (story file or docs)
**Fix:** Verify story file exists, docs present
---


@@ -152,10 +152,9 @@ Dependencies: Story 1.2 (DONE) ✅
**Recommendation:** Run `create-story` to generate Story 1.3
After create-story:
1. Run story-context
2. Run dev-story
3. Run code-review
4. Run story-done
1. Run dev-story
2. Run code-review
3. Update sprint-status.yaml to mark story done
```
See: [workflow-status instructions](../workflows/workflow-status/instructions.md)


@@ -1,6 +1,6 @@
---
name: create-prd
description: Creates a comprehensive PRDs through collaborative step-by-step discovery between two product managers working as peers.
description: Creates a comprehensive PRD through collaborative step-by-step discovery between two product managers working as peers.
main_config: '{project-root}/.bmad/bmm/config.yaml'
web_bundle: true
---


@@ -94,7 +94,7 @@ Discover and load context documents using smart discovery:
**Project Context Rules (Critical for AI Agents):**
1. Check for project context file: `**/project_context.md`
1. Check for project context file: `**/project-context.md`
2. If exists: Load COMPLETE file contents - this contains critical rules for AI agents
3. Add to frontmatter `hasProjectContext: true` and track file path
4. Report to user: "Found existing project context with {number_of_rules} agent rules"
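As a sketch, the frontmatter update from steps 2-3 might look like this; `hasProjectContext` comes from the text above, while the path field name is hypothetical:

```yaml
---
hasProjectContext: true
projectContextPath: docs/project-context.md # hypothetical field for tracking the file path
---
```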


@@ -280,7 +280,7 @@ Your architecture will ensure consistent, high-quality implementation across all
**💡 Optional Enhancement: Project Context File**
Would you like to create a `project_context.md` file? This is a concise, optimized guide for AI agents that captures:
Would you like to create a `project-context.md` file? This is a concise, optimized guide for AI agents that captures:
- Critical language and framework rules they might miss
- Specific patterns and conventions for your project
@@ -310,7 +310,7 @@ This will help ensure consistent implementation by capturing:
- Testing and quality standards
- Anti-patterns to avoid
The workflow will collaborate with you to create an optimized `project_context.md` file that AI agents will read before implementing any code."
The workflow will collaborate with you to create an optimized `project-context.md` file that AI agents will read before implementing any code."
**Execute the Generate Project Context workflow:**


@@ -402,7 +402,7 @@
**Issues Fixed:** {{fixed_count}}
**Action Items Created:** {{action_count}}
{{#if new_status == "done"}}Story is ready for next work!{{else}}Address the action items and continue development.{{/if}}
{{#if new_status == "done"}}Code review complete!{{else}}Address the action items and continue development.{{/if}}
</output>
</step>


@@ -35,7 +35,7 @@ validation-rules:
- [ ] **Acceptance Criteria Satisfaction:** Implementation satisfies EVERY Acceptance Criterion in the story
- [ ] **No Ambiguous Implementation:** Clear, unambiguous implementation that meets story requirements
- [ ] **Edge Cases Handled:** Error conditions and edge cases appropriately addressed
- [ ] **Dependencies Within Scope:** Only uses dependencies specified in story or project_context.md
- [ ] **Dependencies Within Scope:** Only uses dependencies specified in story or project-context.md
## 🧪 Testing & Quality Assurance


@@ -53,11 +53,9 @@ Run `/bmad:bmm:workflows:sprint-planning` to generate it, then rerun sprint-stat
1. If any story status == in-progress → recommend `dev-story` for the first in-progress story
2. Else if any story status == review → recommend `code-review` for the first review story
3. Else if any story status == ready-for-dev → recommend `dev-story`
4. Else if any story status == drafted → recommend `story-ready`
5. Else if any story status == backlog → recommend `create-story`
6. Else if any epic status == backlog → recommend `epic-tech-context`
7. Else if retrospectives are optional → recommend `retrospective`
8. Else → All implementation items done; suggest `workflow-status` to plan next phase
4. Else if any story status == backlog → recommend `create-story`
5. Else if retrospectives are optional → recommend `retrospective`
6. Else → All implementation items done; suggest `workflow-status` to plan next phase
<action>Store selected recommendation as: next_story_id, next_workflow_id, next_agent (SM/DEV as appropriate)</action>
</step>
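The priority cascade above could be mirrored in code roughly as follows; the function and field names are illustrative, not part of the workflow:

```js
// Hypothetical sketch of the recommendation cascade (highest priority first)
function recommendNext(stories, retrospectivesOptional) {
  const first = (status) => stories.find((s) => s.status === status);
  const inProgress = first('in-progress');
  if (inProgress) return { workflow: 'dev-story', story: inProgress.id };
  const inReview = first('review');
  if (inReview) return { workflow: 'code-review', story: inReview.id };
  if (first('ready-for-dev')) return { workflow: 'dev-story' };
  if (first('backlog')) return { workflow: 'create-story' };
  if (retrospectivesOptional) return { workflow: 'retrospective' };
  return { workflow: 'workflow-status' }; // all implementation items done
}
```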


@@ -33,7 +33,7 @@ Discover the project's technology stack, existing patterns, and critical impleme
First, check if project context already exists:
- Look for file at `{output_folder}/project_context.md`
- Look for file at `{output_folder}/project-context.md`
- If exists: Read complete file to understand existing rules
- Present to user: "Found existing project context with {number_of_sections} sections. Would you like to update this or create a new one?"
@@ -122,7 +122,7 @@ Based on discovery, create or update the context document:
#### A. Fresh Document Setup (if no existing context)
Copy template from `{installed_path}/project-context-template.md` to `{output_folder}/project_context.md`
Copy template from `{installed_path}/project-context-template.md` to `{output_folder}/project-context.md`
Initialize frontmatter with:
```yaml


@@ -288,7 +288,7 @@ After each category, show the generated rules and present choices:
## APPEND TO PROJECT CONTEXT:
When user selects 'C' for a category, append the content directly to `{output_folder}/project_context.md` using the structure from step 8.
When user selects 'C' for a category, append the content directly to `{output_folder}/project-context.md` using the structure from step 8.
## SUCCESS METRICS:


@@ -134,7 +134,7 @@ Based on user skill level, present the completion:
**Expert Mode:**
"Project context complete. Optimized for LLM consumption with {{rule_count}} critical rules across {{section_count}} sections.
File saved to: `{output_folder}/project_context.md`
File saved to: `{output_folder}/project-context.md`
Ready for AI agent integration."
@@ -227,7 +227,7 @@ Present final completion to user:
"✅ **Project Context Generation Complete!**
Your optimized project context file is ready at:
`{output_folder}/project_context.md`
`{output_folder}/project-context.md`
**📊 Context Summary:**


@@ -1,11 +1,11 @@
---
name: generate-project-context
description: Creates a concise project_context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
description: Creates a concise project-context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
---
# Generate Project Context Workflow
**Goal:** Create a concise, optimized `project_context.md` file containing critical rules, patterns, and guidelines that AI agents must follow when implementing code. This file focuses on unobvious details that LLMs need to be reminded of.
**Goal:** Create a concise, optimized `project-context.md` file containing critical rules, patterns, and guidelines that AI agents must follow when implementing code. This file focuses on unobvious details that LLMs need to be reminded of.
**Your Role:** You are a technical facilitator working with a peer to capture the essential implementation rules that will ensure consistent, high-quality code generation across all AI agents working on the project.
@@ -37,7 +37,7 @@ Load config from `{project-root}/.bmad/bmm/config.yaml` and resolve:
- `installed_path` = `{project-root}/.bmad/bmm/workflows/generate-project-context`
- `template_path` = `{installed_path}/project-context-template.md`
- `output_file` = `{output_folder}/project_context.md`
- `output_file` = `{output_folder}/project-context.md`
---


@@ -24,8 +24,14 @@ variables:
# Output configuration
# Note: Actual output file determined dynamically based on mode detection
# - System-Level (Phase 3): {output_folder}/test-design-system.md
# - Epic-Level (Phase 4): {output_folder}/test-design-epic-{epic_num}.md
# Declared outputs for new workflow format
outputs:
- id: system-level
description: "System-level testability review (Phase 3)"
path: "{output_folder}/test-design-system.md"
- id: epic-level
description: "Epic-level test plan (Phase 4)"
path: "{output_folder}/test-design-epic-{epic_num}.md"
default_output_file: "{output_folder}/test-design-epic-{epic_num}.md"
# Required tools


@@ -248,14 +248,21 @@ class ConfigCollector {
const configKeys = Object.keys(moduleConfig).filter((key) => key !== 'prompt');
const existingKeys = this.existingConfig && this.existingConfig[moduleName] ? Object.keys(this.existingConfig[moduleName]) : [];
// Find new interactive fields (with prompt)
const newKeys = configKeys.filter((key) => {
const item = moduleConfig[key];
// Check if it's a config item and doesn't exist in existing config
return item && typeof item === 'object' && item.prompt && !existingKeys.includes(key);
});
// If in silent mode and no new keys, use existing config and skip prompts
if (silentMode && newKeys.length === 0) {
// Find new static fields (without prompt, just result)
const newStaticKeys = configKeys.filter((key) => {
const item = moduleConfig[key];
return item && typeof item === 'object' && !item.prompt && item.result && !existingKeys.includes(key);
});
// If in silent mode and no new keys (neither interactive nor static), use existing config and skip prompts
if (silentMode && newKeys.length === 0 && newStaticKeys.length === 0) {
if (this.existingConfig && this.existingConfig[moduleName]) {
if (!this.collectedConfig[moduleName]) {
this.collectedConfig[moduleName] = {};
@@ -294,9 +301,12 @@ class ConfigCollector {
return false; // No new fields
}
// If we have new fields, build questions first
if (newKeys.length > 0) {
// If we have new fields (interactive or static), process them
if (newKeys.length > 0 || newStaticKeys.length > 0) {
const questions = [];
const staticAnswers = {};
// Build questions for interactive fields
for (const key of newKeys) {
const item = moduleConfig[key];
const question = await this.buildQuestion(moduleName, key, item, moduleConfig);
@@ -305,20 +315,35 @@
}
}
// Prepare static answers (no prompt, just result)
for (const key of newStaticKeys) {
staticAnswers[`${moduleName}_${key}`] = undefined;
}
// Collect all answers (static + prompted)
let allAnswers = { ...staticAnswers };
if (questions.length > 0) {
// Only show header if we actually have questions
CLIUtils.displayModuleConfigHeader(moduleName, moduleConfig.header, moduleConfig.subheader);
console.log(); // Line break before questions
const answers = await inquirer.prompt(questions);
const promptedAnswers = await inquirer.prompt(questions);
// Store answers for cross-referencing
Object.assign(this.allAnswers, answers);
// Merge prompted answers with static answers
Object.assign(allAnswers, promptedAnswers);
} else if (newStaticKeys.length > 0) {
// Only static fields, no questions - show no config message
CLIUtils.displayModuleNoConfig(moduleName, moduleConfig.header, moduleConfig.subheader);
}
// Process answers and build result values
for (const key of Object.keys(answers)) {
// Store all answers for cross-referencing
Object.assign(this.allAnswers, allAnswers);
// Process all answers (both static and prompted)
for (const key of Object.keys(allAnswers)) {
const originalKey = key.replace(`${moduleName}_`, '');
const item = moduleConfig[originalKey];
const value = answers[key];
const value = allAnswers[key];
let result;
if (Array.isArray(value)) {
@@ -334,10 +359,6 @@
}
this.collectedConfig[moduleName][originalKey] = result;
}
} else {
// New keys exist but no questions generated - show no config message
CLIUtils.displayModuleNoConfig(moduleName, moduleConfig.header, moduleConfig.subheader);
}
}
// Copy over existing values for fields that weren't prompted
@@ -353,7 +374,7 @@
}
}
return newKeys.length > 0; // Return true if we prompted for new fields
return newKeys.length > 0 || newStaticKeys.length > 0; // Return true if we had any new fields (interactive or static)
}
/**
@@ -501,30 +522,52 @@
// Process each config item
const questions = [];
const staticAnswers = {};
const configKeys = Object.keys(moduleConfig).filter((key) => key !== 'prompt');
for (const key of configKeys) {
const item = moduleConfig[key];
// Skip if not a config object
if (!item || typeof item !== 'object' || !item.prompt) {
if (!item || typeof item !== 'object') {
continue;
}
// Handle static values (no prompt, just result)
if (!item.prompt && item.result) {
// Add to static answers with a marker value
staticAnswers[`${moduleName}_${key}`] = undefined;
continue;
}
// Handle interactive values (with prompt)
if (item.prompt) {
const question = await this.buildQuestion(moduleName, key, item, moduleConfig);
if (question) {
questions.push(question);
}
}
}
// Collect all answers (static + prompted)
let allAnswers = { ...staticAnswers };
// Display appropriate header based on whether there are questions
if (questions.length > 0) {
CLIUtils.displayModuleConfigHeader(moduleName, moduleConfig.header, moduleConfig.subheader);
console.log(); // Line break before questions
const answers = await inquirer.prompt(questions);
const promptedAnswers = await inquirer.prompt(questions);
// Store answers for cross-referencing
Object.assign(this.allAnswers, answers);
// Merge prompted answers with static answers
Object.assign(allAnswers, promptedAnswers);
}
// Store all answers for cross-referencing
Object.assign(this.allAnswers, allAnswers);
// Process all answers (both static and prompted)
if (Object.keys(allAnswers).length > 0) {
const answers = allAnswers;
// Process answers and build result values
for (const key of Object.keys(answers)) {
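To make the interactive/static distinction in this diff concrete, here is a hypothetical module config of the shape the collector consumes; the field names and values are illustrative, though `prompt` and `result` are the keys the code above actually checks:

```js
// Interactive fields carry a `prompt` and are asked via inquirer;
// static fields carry only a `result` and are recorded without prompting,
// which is why silent mode now has to consider them too.
const moduleConfig = {
  header: 'BMM Module Setup',
  output_folder: {
    prompt: 'Where should generated documents go?',
    result: '{value}', // filled from the user's answer
  },
  schema_version: {
    result: '2.0', // static: no prompt, recorded as-is
  },
};
```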