BMAD-METHOD/src/modules/bmm/_module-installer/install-config.yaml

# BMAD™ Method Core Configuration
code: bmm
name: "BMM: BMad Method Agile-AI Driven-Development"
default_selected: false # Whether this module is selected by default for new installations
header: "BMad Method™: Breakthrough Method of Agile-AI Driven-Dev"
subheader: "Agent and Workflow Configuration for this module"
# Variables from Core Config inserted:
## user_name
## communication_language
## output_folder
## bmad_folder
## install_user_docs
## kb_install
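# Illustrative note (assumption, not part of the shipped file): the variables listed above
# are resolved from the core config and can be interpolated into the defaults/results below,
# e.g. a core config of
#   output_folder: "docs"
# lets a default of "{output_folder}/sprint-artifacts" resolve to "docs/sprint-artifacts".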
project_name:
  prompt: "What is the title of the project you will be working on?"
  default: "{directory_name}"
  result: "{value}"
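# Example resolution (assumption about the installer's templating): if the repository
# directory is "acme-shop" and the user accepts the default, "{directory_name}" fills the
# suggested answer and "{value}" echoes it back, so the stored value would be:
#   project_name: "acme-shop"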
user_skill_level:
  prompt:
    - "What is your technical experience level?"
    - "This affects how agents explain concepts to you (NOT document content)."
    - "Documents are always concise for LLM efficiency."
  default: "intermediate"
  result: "{value}"
  single-select:
    - value: "beginner"
      label: "Beginner - New to development, explain concepts clearly"
    - value: "intermediate"
      label: "Intermediate - Familiar with development, balance explanation with efficiency"
    - value: "expert"
      label: "Expert - Deep technical knowledge, be direct and technical"
sprint_artifacts:
  prompt: "Where should Sprint Artifacts be stored (sprint status, stories, story context, temp context, etc.)?"
  default: "{output_folder}/sprint-artifacts"
  result: "{project-root}/{value}"
tea_use_mcp_enhancements:
  prompt: "Enable Test Architect Playwright MCP capabilities (healing, exploratory, verification)? You have to set up your MCPs yourself; refer to test-architecture.md for hints."
  default: false
  result: "{value}"
tea_use_playwright_utils:
  prompt:
    - "Are you using playwright-utils (@seontechnologies/playwright-utils) in your project?"
    - "This adds fixture-based utilities for auth, API requests, network recording, polling, intercept, recurse, logging, file download handling, and burn-in."
    - "You must install the packages yourself, or use the test architect's *framework command."
  default: false
  result: "{value}"
# External Code Review Agents Configuration
# These are auto-detected at runtime, but user can set preference here
# Useful when using a different AI as primary IDE agent (e.g., Codex/Gemini users can use Claude for reviews)
external_review_agents:
  codex_available:
    prompt: false # Auto-detected at runtime
    default: false
    result: "{value}"
  gemini_available:
    prompt: false # Auto-detected at runtime
    default: false
    result: "{value}"
  claude_available:
    prompt: false # Auto-detected at runtime
    default: false
    result: "{value}"
  preferred_agent:
    prompt: "Which external code review agent do you prefer (if multiple are available)?"
    default: "codex"
    result: "{value}"
    single-select:
      - value: "codex"
        label: "Codex (OpenAI) - Fast code review with OpenAI models"
      - value: "gemini"
        label: "Gemini (Google) - Code review with Google models"
      - value: "claude"
        label: "Claude (Anthropic) - Code review with Claude models (good for Codex/Gemini users)"
  last_checked:
    prompt: false # System-managed timestamp
    default: null
    result: "{value}"