# Compare commits

`94cf1890d1...550807f8ec` (39 commits)
Commit SHAs in this range:

`550807f8ec`, `38b2ffe53d`, `972b79852f`, `6f849e00a3`, `ea0c12ac04`, `7428054805`, `7701cbea62`, `e36f219c81`, `9debc165aa`, `65b810a11f`, `e6cdc93b79`, `e174bebc60`, `fcf20f1c7b`, `e011192525`, `91a57499e9`, `48a7ec8bff`, `3da984a491`, `815600e4ca`, `7ee5fa313b`, `3e89b30b3c`, `b4d73b7daf`, `6ff74ba662`, `1ad1f91e38`, `350688df67`, `be85e5b4a0`, `04cfde1454`, `7baa30c567`, `88b9a1c842`, `69cbeb4d07`, `1d35acfd84`, `01cc32540b`, `1197122001`, `314fe69d14`, `9ff9d6f8f3`, `c29b72ecc0`, `e7a213ed07`, `0533976753`, `3d824d4c0f`, `2395b0e2ed`
````diff
@@ -13,7 +13,7 @@
     "name": "bmad-pro-skills",
     "source": "./",
     "description": "Next level skills for power users — advanced prompting techniques, agent management, and more.",
-    "version": "6.3.0",
+    "version": "6.6.0",
     "author": {
       "name": "Brian (BMad) Madison"
     },
````
````diff
@@ -35,7 +35,7 @@
     "name": "bmad-method-lifecycle",
     "source": "./",
     "description": "Full-lifecycle AI development framework — agents and workflows for product analysis, planning, architecture, and implementation.",
-    "version": "6.3.0",
+    "version": "6.6.0",
     "author": {
       "name": "Brian (BMad) Madison"
     },
````
````diff
@@ -7,6 +7,7 @@ on:
       - "src/**"
       - "tools/installer/**"
       - "package.json"
+      - "removals.txt"
   workflow_dispatch:
     inputs:
       channel:
````
````diff
@@ -135,6 +136,22 @@ jobs:
         env:
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 
+      - name: Advance @next dist-tag to stable
+        if: github.event_name == 'workflow_dispatch' && inputs.channel == 'latest'
+        # Failure here leaves @next stale until the next push-driven prerelease
+        # republishes — annoying but not release-breaking. Don't fail the job
+        # after a successful stable publish + tag + GH release.
+        continue-on-error: true
+        run: |
+          # Without this, @latest can leapfrog @next (e.g. latest=6.5.0 while
+          # next=6.4.1-next.0) and `npx bmad-method@next install` silently
+          # downgrades users. Point @next at the just-published stable so
+          # @next >= @latest always holds; the next push-driven prerelease will
+          # bump from this base via the existing derive step above.
+          VERSION=$(node -p 'require("./package.json").version')
+          npm dist-tag add "bmad-method@${VERSION}" next
+          echo "Advanced @next dist-tag to ${VERSION}"
+
       - name: Notify Discord
         if: github.event_name == 'workflow_dispatch' && inputs.channel == 'latest'
         continue-on-error: true
````
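The comments in that step pin down an invariant: `@next` must never resolve below `@latest`. A hedged spot-check sketch, assuming `npm` and GNU `sort -V` are available (`sort -V` only approximates semver prerelease ordering, which is close enough for this comparison):

```bash
# Verify the dist-tag invariant after a stable publish.
latest=$(npm view bmad-method@latest version)
next=$(npm view bmad-method@next version)
# The higher of the two by version sort should be the @next one.
highest=$(printf '%s\n%s\n' "$latest" "$next" | sort -V | tail -n 1)
if [ "$highest" = "$next" ]; then
  echo "ok: next ($next) >= latest ($latest)"
else
  echo "leapfrog: latest ($latest) > next ($next), so @next is stale"
fi
```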
CHANGELOG.md (95 additions)
````diff
@@ -1,5 +1,100 @@
 # Changelog
 
+## v6.6.0 - 2026-04-28
+
+### 💥 Breaking Changes
+
+* `--tools none` is no longer accepted; fresh `--yes` installs now require an explicit `--tools <id>`. Existing-install flows are unchanged. Run `npx bmad-method --list-tools` to see supported IDs (#2346)
+* `project_name` has moved from `[modules.bmm]` to `[core]` in `config.toml`. Existing installs are auto-migrated on next install/update — no manual action required (#2348)
+
+### 🎁 Features
+
+* **Non-interactive config for CI/Docker** — new `--set <module>.<key>=<value>` (repeatable) and `--list-options [module]` flags allow installer configuration without prompts. Routes values to the correct config file with prototype-pollution defenses (#2354)
+* **Brownfield epic scoping** — Create Epics and Stories workflow now detects file-overlap between epics and applies an Implementation Efficiency principle plus a design completeness gate, reducing unnecessary file churn (#1826)
+
+### 🐛 Fixes
+
+* **Custom module installer** — Azure DevOps URLs now parse correctly with multi-segment paths and `_git` prefixes (#2269); HTTP (non-HTTPS) Git URLs are preserved for self-hosted servers (#2344); community installs route through `PluginResolver` so marketplace plugins with nested `module.yaml` install all skills (#2331); URL-source modules resolve from disk cache on re-install instead of warning (#2323); local `--custom-content` modules resolve correctly and `[modules.<code>]` TOML keys use the module code rather than display name (#2316); `--yes` with `--custom-source` now runs the full update path so version tags are respected (#2336)
+* **Installer safety** — `--list-tools` flag added; empty/typo'd tool IDs rejected with specific errors (#2346)
+* **Channel and dist-tag handling** — installer launched from a prerelease (e.g. `@next`) now defaults external module channels to `next` instead of silently downgrading to stable (#2321); stable publishes advance the `@next` dist-tag so prerelease users no longer leapfrog or miss update notifications (#2320)
+* **Architecture validation gate** — step-07 validation template no longer ships pre-checked; status field is now templated against actual checklist completion (#2347)
+* **bmad-help data integrity** — `bmad-help.csv` is no longer transformed at merge time and is emitted in its documented schema; 31 misaligned rows in core/bmm `module-help.csv` repaired (#2349)
+* **Config robustness** — malformed `module.yaml` (scalars, arrays) is now rejected before crash (#2348)
+* **Legacy cleanup** — pre-v6.2.0 wrapper skills (`bmad-bmm-*`, `bmad-agent-bmm-*`) are removed automatically on upgrade so they no longer error with missing-file warnings (#2315)
+
+### 📚 Docs
+
+* Complete Chinese (zh-CN) translations for `named-agents.md` and `expand-bmad-for-your-org.md`; localized BMad Ecosystem sidebar (CIS, BMB, TEA, WDS) across zh-cn, vi-vn, fr-fr, cs-cz (#2355)
+
+## v6.5.0 - 2026-04-26
+
+### 🎁 Features
+
+* Support for 18 new agent platforms: AdaL, Sourcegraph Amp, IBM Bob, Command Code, Snowflake Cortex Code, Factory Droid, Firebender, Block Goose, Kode, Mistral Vibe, Mux, Neovate, OpenClaw, OpenHands, Pochi, Replit Agent, Warp, Zencoder — bringing total supported platforms to 42 (#2313)
+* All platforms that support the cross-tool `.agents/skills/` standard now use it (#2313)
+
+## v6.4.0 - 2026-04-24
+
+### ✨ Headline
+
+**Full agent and workflow customization across the entire BMad Method.** Every agent and workflow in BMM, Core, CIS, GDS, and TEA can now be customized via TOML overrides in `_bmad/custom/`. Customize agents to apply tooling, version control, or behavior changes across whole groups of workflows. Drop in fine-grained per-workflow overrides where you need them. Built for power users who want BMad to fit their stack without forking.
+
+**Stable and bleeding-edge release channels, standardized across all modules.** Pick `stable` or `next` per module, pin specific versions, and switch channels interactively or via CLI flags (`--channel`, `--all-stable`, `--all-next`, `--next=CODE`, `--pin CODE=TAG`). Same model across BMM, Core, and every external module.
+
+### 💥 Breaking Changes
+
+* Customization is now TOML-based; the briefly introduced YAML-based customization is no longer supported (#2284, #2283)
+
+### 🎁 Features
+
+**Customization framework**
+
+* TOML-based agent and workflow customization with flat schema, structural merge rules (scalars, tables, code-keyed arrays, append arrays), and `persistent_facts` unification (#2284)
+* Central `_bmad/config.toml` surface with four-file architecture (`config.toml`, `config.user.toml`, `custom/config.toml`, `custom/config.user.toml`) for agent roster and scope-partitioned install answers (#2285)
+* `customize.toml` support extended to 17 bmm-skills workflows with flattened SKILL.md architecture and standardized `[workflow]` block (#2287)
+* `customize.toml` extended to all six developer-execution workflows: bmad-dev-story, bmad-code-review, bmad-sprint-planning, bmad-sprint-status, bmad-quick-dev, bmad-checkpoint-preview (#2308)
+* `bmad-customize` skill — guided authoring of TOML overrides in `_bmad/custom/` with stdlib-only resolver verification (#2289)
+* Wire `on_complete` hook into all 23 workflow terminal steps with full customize.toml documentation (#2290)
+
+**Release channels & installer**
+
+* Channel-based version resolution for external modules with interactive channel management (`stable` / `next` / `pinned`) and CLI flags (`--channel`, `--all-stable`, `--all-next`, `--next=CODE`, `--pin CODE=TAG`) (#2305)
+* GitHub API as primary fetch with raw CDN fallback in installer registry client to support corporate proxies (#2248)
+
+**Other**
+
+* Kimi Code CLI support for installing BMM skills in `.kimi/skills/` (#2302)
+* `bmad-create-story` now reads every UPDATE-marked file before generating dev notes so brownfield stories preserve current behavior instead of improvising at implementation time (#2274)
+* Sync `sprint-status.yaml` from quick-dev on epic-story implementation with idempotent writes tracking `in-progress` and `review` transitions (#2234)
+* Enforce model parity for all code review subagents to match orchestrator session capability for improved rare-event detection (#2236)
+* Set `team: software-development` on all six BMM agents for unified grouping in party-mode and retrospective skills (#2286)
+
+### 🐛 Bug Fixes
+
+* PRD workflow no longer silently de-scopes user requirements or invents MVP/Growth/Vision phasing; requires explicit confirmation before any scope reduction (#1927)
+* Installer shows live npm version for external modules instead of stale cached metadata (#2307)
+* Resolve external-module agents from cache during manifest write so agents land in `config.toml` (#2295)
+* Fix installer version resolution for external modules with shared resolver preferring package.json > module.yaml > marketplace.json (#2298)
+* Replace fs-extra with native `node:fs` to prevent file loss during multi-module installs from deferred retry-queue races (#2253)
+* Add `move()` and overwrite support to fs-native wrapper for directory migrations during upgrades (#2253)
+* Stop skill scanner from recursing into discovered skills to prevent spurious errors on nested template files (#2255)
+* Source built-in modules locally in installer UI to preserve core and bmm in module list when registry is unreachable (#2251)
+* Remove dead Batch-apply option from code-review patch menu and rename apply options for clarity (#2225)
+
+### ♻️ Refactoring
+
+* Remove 1,683 lines of dead code: three entirely dead files (agent-command-generator.js, bmad-artifacts.js, module-injections.js) and ~50 unused exports across installer modules (#2247)
+* Remove dead template and agent-command pipeline from installer; SKILL.md directory copying is the sole installation path (#2244)
+
+### 📚 Documentation
+
+* Sync and update Vietnamese (vi-VN) docs with missing pages and refreshed translations (#2291, #2222)
+* Sync French (fr-FR) translations with upstream, restore Amelia as dev agent, fix sidebar ordering (#2231)
+* Add Czech (cs-CZ) `analysis-phase.md` translation; normalize typographic quotes (#2240, #2241, #2242)
+* Add missing Chinese (zh-CN) translations for 3 documents (#2254)
+* Update stale Analyst agent triggers and add PRFAQ link (#2238)
+* Remove Bob from workflow map diagrams reflecting consolidation into Amelia in v6.3.0 (#2252)
+
 ## v6.3.0 - 2026-04-09
 
 ### 💥 Breaking Changes
````
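The v6.6.0 `--tools` breaking change above implies a two-step habit for scripted installs; both commands are quoted from the entries above:

```bash
# Fresh --yes installs now require an explicit tool ID.
npx bmad-method --list-tools   # list supported IDs (per the breaking-change note)
npx bmad-method install --yes --modules bmm --tools claude-code
```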
````diff
@@ -52,6 +52,15 @@ Follow the installer prompts, then open your AI IDE (Claude Code, Cursor, etc.)
 npx bmad-method install --directory /path/to/project --modules bmm --tools claude-code --yes
 ```
 
+Override any module config option with `--set <module>.<key>=<value>` (repeatable). Run `--list-options [module]` to see locally-known official keys (built-in modules plus any external officials cached on this machine):
+
+```bash
+npx bmad-method install --yes \
+  --modules bmm --tools claude-code \
+  --set bmm.project_knowledge=research \
+  --set bmm.user_skill_level=expert
+```
+
 [See all installation options](https://docs.bmad-method.org/how-to/non-interactive-installation/)
 
 > **Not sure what to do?** Ask `bmad-help` — it tells you exactly what's next and what's optional. You can also ask questions like `bmad-help I just finished the architecture, what do I do next?`
````
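A plausible discover-then-install sequence built from the flags the README now documents (the `--list-options` output format isn't shown in this diff, so treat the first command as exploratory):

```bash
# 1. See which --set keys bmm declares on this machine.
npx bmad-method install --list-options bmm
# 2. Install non-interactively with one of those keys overridden.
npx bmad-method install --yes --modules bmm --tools claude-code \
  --set bmm.project_knowledge=research
```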
````diff
@@ -60,7 +60,7 @@ Available tool IDs for the `--tools` flag:
 
 **Preferred:** `claude-code`, `cursor`
 
-Run `npx bmad-method install` interactively once to see the current list of supported tools, or check the [platform codes configuration](https://github.com/bmad-code-org/BMAD-METHOD/blob/main/tools/cli/installers/lib/ide/platform-codes.yaml).
+Run `npx bmad-method install` interactively once to see the current list of supported tools, or check the [platform codes configuration](https://github.com/bmad-code-org/BMAD-METHOD/blob/main/tools/installer/ide/platform-codes.yaml).
 
 ## Installation Modes
````
````diff
@@ -1,122 +1,266 @@
 ---
 title: 'How to Install BMad'
-description: Step-by-step guide to installing BMad in your project
+description: Install, update, and pin BMad for local development, teams, and CI
 sidebar:
   order: 1
 ---
 
-Use the `npx bmad-method install` command to set up BMad in your project with your choice of modules and AI tools.
+Use `npx bmad-method install` to set up BMad in your project. One command handles first installs, upgrades, channel switching, and scripted CI runs. This page covers all of it.
 
-If you want to use a non interactive installer and provide all install options on the command line, see [this guide](./non-interactive-installation.md).
 
 ## When to Use This
 
 - Starting a new project with BMad
-- Adding BMad to an existing codebase
-- Update the existing BMad Installation
+- Adding or removing modules on an existing install
+- Switching a module to main-HEAD or pinning to a specific release
+- Scripting installs for CI pipelines, Dockerfiles, or enterprise rollouts
 
 :::note[Prerequisites]
 
-- **Node.js** 20+ (required for the installer)
-- **Git** (recommended)
-- **AI tool** (Claude Code, Cursor, or similar)
-:::
+- **Node.js** 20+ (the installer requires it)
+- **Git** (for cloning external modules)
+- **An AI tool** such as Claude Code or Cursor (run `npx bmad-method install --list-tools` to see all supported tools)
 
-## Steps
+:::
 
-### 1. Run the Installer
+## First-time install (the fast path)
 
 ```bash
 npx bmad-method install
 ```
 
-:::tip[Want the newest prerelease build?]
-Use the `next` dist-tag:
+The interactive flow asks you five things:
+
+1. Installation directory (defaults to the current working directory)
+2. Which modules to install (checkboxes for core, bmm, bmb, cis, gds, tea)
+3. **"Ready to install (all stable)?"** — Yes accepts the latest released tag for every external module
+4. Which AI tools/IDEs to integrate with (claude-code, cursor, and others)
+5. Per-module config (name, language, output folder)
+
+Accept the defaults and you land on the latest stable release of every module, configured for your chosen tool.
+
+:::tip[Just want the newest prerelease?]
 
 ```bash
 npx bmad-method@next install
 ```
 
-This gets you newer changes earlier, with a higher chance of churn than the default install.
+Runs the prerelease installer, which ships a newer snapshot of core and bmm. More churn, fewer delays between development and release.
 :::
 
-:::tip[Bleeding edge]
-To install the latest from the main branch (may be unstable):
+## Picking a specific version
+
+Two independent axes control what ends up on disk.
+
+### Axis 1: external module channels
+
+Every external module — bmb, cis, gds, tea, and any community module — installs on one of three channels:
+
+| Channel | What gets installed | Who picks this |
+| ------------------ | ---------------------------------------------------------------------------- | --------------------------------------- |
+| `stable` (default) | Highest released semver tag. Prereleases like `v2.0.0-alpha.1` are excluded. | Most users |
+| `next` | Main branch HEAD at install time | Contributors, early adopters |
+| `pinned` | A specific tag you name | Enterprise installs, CI reproducibility |
+
+Channels are per-module. You can run bmb on `next` while leaving cis on `stable` — the flags below let you mix freely.
+
+### Axis 2: installer binary version
+
+The `bmad-method` npm package itself has two dist-tags:
+
+| Command | What you get |
+| ------------------------------------- | ----------------------------------------------------------------- |
+| `npx bmad-method install` (`@latest`) | Latest stable installer release |
+| `npx bmad-method@next install` | Latest prerelease installer, auto-published on every push to main |
+
+**The installer binary determines your core and bmm versions.** Those two modules ship bundled inside the installer package rather than being cloned from separate repos.
+
+### Why core and bmm don't have their own channel
+
+They're stapled to the installer binary you ran:
+
+- `npx bmad-method install` → latest stable core and bmm
+- `npx bmad-method@next install` → prerelease core and bmm
+- `node /path/to/local-checkout/tools/installer/bmad-cli.js install` → whatever your local checkout has
+
+`--pin bmm=v6.3.0` and `--next=bmm` are silently ineffective against bundled modules, and the installer warns you when you try. A future release extracts bmm from the installer package; once that ships, bmm gets a proper channel selector like bmb has today.
+
+## Updating an existing install
+
+Running `npx bmad-method install` in a directory that already contains `_bmad/` gives you a menu:
+
+| Choice | What it does |
+| ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **Quick Update** | Re-runs the install with your existing settings. Refreshes files, applies patches and minor stable upgrades, refuses major upgrades. Fast, non-interactive. |
+| **Modify Install** | Full interactive flow. Add or remove modules, reconfigure settings, optionally review and switch channels for existing modules. |
+
+### Upgrade prompts
+
+When Modify detects a newer stable tag for a module you've installed on `stable`, it classifies the diff and prompts accordingly:
+
+| Upgrade type | Example | Default |
+| ------------ | --------------- | ------- |
+| Patch | v1.7.0 → v1.7.1 | Y |
+| Minor | v1.7.0 → v1.8.0 | Y |
+| Major | v1.7.0 → v2.0.0 | **N** |
+
+Major defaults to N because breaking changes frequently surface as "instability" when they weren't expected. The prompt includes a GitHub release-notes URL so you can read what changed before accepting.
+
+Under `--yes`, patch and minor upgrades apply automatically. Majors stay frozen — pass `--pin <code>=<new-tag>` to accept non-interactively.
+
+### Switching a module's channel
+
+**Interactively:** choose Modify → answer **Yes** to "Review channel assignments?" → each external module offers Keep, Switch to stable, Switch to next, or Pin to a tag.
+
+**Via flags:** the recipes in the next section cover the common cases.
+
+## Headless CI installs
+
+### Flag reference
+
+| Flag | Purpose |
+| ------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- |
+| `--yes`, `-y` | Skip all prompts; accept flag values + defaults |
+| `--directory <path>` | Install into this directory (default: current working dir) |
+| `--modules <a,b,c>` | Exact module set. Core is auto-added. Not a delta — list everything you want kept. |
+| `--tools <a,b>` | IDE/tool selection. Required for fresh `--yes` installs. Run `--list-tools` for valid IDs. |
+| `--list-tools` | Print all supported tool/IDE IDs (with target directories) and exit. |
+| `--action <type>` | `install`, `update`, or `quick-update`. Defaults based on existing install state. |
+| `--custom-source <urls>` | Install custom modules from Git URLs or local paths |
+| `--channel <stable\|next>` | Apply to all externals (aliased as `--all-stable` / `--all-next`) |
+| `--all-stable` | Alias for `--channel=stable` |
+| `--all-next` | Alias for `--channel=next` |
+| `--next=<code>` | Put one module on next. Repeatable. |
+| `--pin <code>=<tag>` | Pin one module to a specific tag. Repeatable. |
+| `--set <module>.<key>=<value>` | Set any module config option non-interactively (preferred — see [Module config overrides](#module-config-overrides)). Repeatable. |
+| `--list-options [module]` | Print every `--set` key for built-in and locally-cached official modules, then exit. Pass a module code to scope to one module. |
+| `--user-name`, `--communication-language`, `--document-output-language`, `--output-folder` | Legacy shortcuts equivalent to `--set core.<key>=<value>` (still supported) |
+
+Precedence when flags overlap: `--pin` beats `--next=` beats `--channel` / `--all-*` beats the registry default (`stable`).
+
+:::note[Example resolution]
+`--all-next --pin cis=v0.2.0` puts bmb, gds, and tea on next while pinning cis to v0.2.0.
+:::
+
+### Recipes
+
+**Default install — latest stable for everything:**
 
 ```bash
-npx github:bmad-code-org/BMAD-METHOD install
+npx bmad-method install --yes --modules bmm,bmb,cis --tools claude-code
 ```
 
+**Enterprise pin — reproducible byte-for-byte:**
+
+```bash
+npx bmad-method install --yes \
+  --modules bmm,bmb,cis \
+  --pin bmb=v1.7.0 --pin cis=v0.2.0 \
+  --tools claude-code
+```
+
+**Bleeding edge — externals on main HEAD:**
+
+```bash
+npx bmad-method install --yes --modules bmm,bmb --all-next --tools claude-code
+```
+
+**Add a module to an existing install** (keep everything else):
+
+```bash
+npx bmad-method install --yes --action update \
+  --modules bmm,bmb,gds
+```
+
+`--tools` is omitted intentionally — `--action update` reuses the tools configured during the first install.
+
+**Mix channels — bmb on next, gds on stable:**
+
+```bash
+npx bmad-method install --yes --action update \
+  --modules bmm,bmb,cis,gds \
+  --next=bmb
+```
+
+### Module config overrides
+
+`--set <module>.<key>=<value>` lets you set any module config option non-interactively. It's repeatable and scales to every module — present and future. The flag is applied as a post-install patch: the installer runs its normal flow first, then `--set` upserts each value into `_bmad/config.toml` (team scope) or `_bmad/config.user.toml` (user scope), and into `_bmad/<module>/config.yaml` so declared values carry forward to the next install.
+
+**Example — install bmm with explicit project knowledge and skill level:**
+
+```bash
+npx bmad-method install --yes \
+  --modules bmm \
+  --tools claude-code \
+  --set bmm.project_knowledge=research \
+  --set bmm.user_skill_level=expert
+```
+
+**Discover available keys for a module:**
+
+```bash
+npx bmad-method install --list-options bmm
+```
+
+`--list-options` (no argument) lists every key the installer can find locally — built-in modules (`core`, `bmm`) plus any currently cached official modules. The cache is per-machine and can be cleared, so previously installed officials won't appear on a fresh checkout or an ephemeral CI worker until they're installed again. Community and custom modules aren't enumerated here; read the module's `module.yaml` directly to see what keys it declares.
+
+**How it works:**
+
+- **Routing.** The patch step looks for `[modules.<module>] <key>` (or `[core] <key>`) in `config.user.toml` first; if found there, it updates that file. Otherwise it writes to the team-scope `config.toml`. So user-scope keys (e.g. `core.user_name`, `bmm.user_skill_level`) end up in `config.user.toml` and team-scope keys end up in `config.toml`, matching the partition the installer uses.
+- **Verbatim values.** The value is written exactly as you provided it — no `result:` template rendering. To get the rendered form (e.g. `{project-root}/research`), pass it explicitly: `--set bmm.project_knowledge='{project-root}/research'`.
+- **Carry-forward, declared keys.** Values for keys declared in `module.yaml` survive subsequent installs because they're also written to `_bmad/<module>/config.yaml`, which the installer reads as the prompt default on the next run.
+- **Carry-forward, undeclared keys.** A value for a key the module's schema doesn't declare lands in `config.toml` for the current install but won't be re-emitted on the next install (the manifest writer's schema-strict partition drops unknown keys). Re-pass `--set` if you need it sticky, or edit `_bmad/config.toml` directly.
+- **No validation.** `single-select` values aren't checked against the allowed choices, and unknown keys aren't rejected — whatever you assert is written.
+- **Modules not in `--modules`.** Setting a value for a module you didn't include prints a warning and the value is dropped (no file gets created for an uninstalled module).
+
+The legacy core shortcuts (`--user-name`, `--output-folder`, etc.) still work and remain documented for backward compatibility, but `--set core.user_name=...` is equivalent.
+
+:::note[Works with quick-update]
+`--set` is a post-install patch, so it applies the same way regardless of action type. Under `bmad install --action quick-update` (or `--yes` against an existing install, where quick-update is the default), `--set` patches the central config files at the end just like a regular install.
 :::
 
-### 2. Choose Installation Location
-
-The installer will ask where to install BMad files:
-
-- Current directory (recommended for new projects if you created the directory yourself and ran from within the directory)
-- Custom path
-
-### 3. Select Your AI Tools
-
-Pick which AI tools you use:
-
-- Claude Code
-- Cursor
-- Others
-
-Each tool has its own way of integrating skills. The installer creates tiny prompt files to activate workflows and agents — it just puts them where your tool expects to find them.
-
-:::note[Enabling Skills]
-Some platforms require skills to be explicitly enabled in settings before they appear. If you install BMad and don't see the skills, check your platform's settings or ask your AI assistant how to enable skills.
+:::caution[Rate limit on shared IPs]
+Anonymous GitHub API calls are capped at 60/hour per IP. A single install hits the API once per external module to resolve the stable tag. Offices behind NAT, CI runner pools, and VPNs can collectively exhaust this.
+
+Set `GITHUB_TOKEN=<personal access token>` in the environment to raise the limit to 5000/hour per account. Any public-repo-read PAT works; no scopes are required.
 :::
 
-### 4. Choose Modules
-
-The installer shows available modules. Select whichever ones you need — most users just want **BMad Method** (the software development module).
-
-### 5. Follow the Prompts
-
-The installer guides you through the rest — settings, tool integrations, etc.
-
-## What You Get
-
-```text
-your-project/
-├── _bmad/
-│   ├── bmm/            # Your selected modules
-│   │   └── config.yaml # Module settings (if you ever need to change them)
-│   ├── core/           # Required core module
-│   └── ...
-├── _bmad-output/       # Generated artifacts
-├── .claude/            # Claude Code skills (if using Claude Code)
-│   └── skills/
-│       ├── bmad-help/
-│       ├── bmad-persona/
-│       └── ...
-└── .cursor/            # Cursor skills (if using Cursor)
-    └── skills/
-        └── ...
+## What got installed
+
+After any install, `_bmad/_config/manifest.yaml` records exactly what's on disk:
+
+```yaml
+modules:
+  - name: bmb
+    version: v1.7.0 # the tag, or "main" for next
+    channel: stable # stable | next | pinned
+    sha: 86033fc9aeae2ca6d52c7cdb675c1f4bf17fc1c1
+    source: external
+    repoUrl: https://github.com/bmad-code-org/bmad-builder
 ```
 
-## Verify Installation
-
-Run `bmad-help` to verify everything works and see what to do next.
-
-**BMad-Help is your intelligent guide** that will:
-
-- Confirm your installation is working
-- Show what's available based on your installed modules
-- Recommend your first step
-
-You can also ask it questions:
-
-```
-bmad-help I just installed, what should I do first?
-bmad-help What are my options for a SaaS project?
+The `sha` field is written for git-backed modules (external, community, and URL-based custom). Bundled modules (core, bmm) and local-path custom modules don't have one — their code travels with the installer binary or your filesystem, not a cloneable ref.
+
+For cross-machine reproducibility, don't rely on rerunning the same `--modules` command. Stable-channel installs resolve to the highest released tag **at install time**, so a later rerun lands on whatever has been released since. Convert the recorded tags from `manifest.yaml` into explicit `--pin` flags on the target machine, e.g.:
+
+```bash
+npx bmad-method install --yes --modules bmb,cis \
+  --pin bmb=v1.7.0 --pin cis=v0.4.2 --tools claude-code
 ```
 
 ## Troubleshooting
 
-**Installer throws an error** — Copy-paste the output into your AI assistant and let it figure it out.
-
-**Installer worked but something doesn't work later** — Your AI needs BMad context to help. See [How to Get Answers About BMad](./get-answers-about-bmad.md) for how to point your AI at the right sources.
+### "Could not resolve stable tag" or "API rate limit exceeded"
+
+You've hit GitHub's 60/hr anonymous limit. Set `GITHUB_TOKEN` and retry. If you already have a token set, it may be expired or rate-limited on its own budget — try a different token or wait for the hourly reset.
+
+### "Tag 'vX.Y.Z' not found"
+
+The tag you passed to `--pin` doesn't exist in the module's repo. Check the repo's releases page on GitHub for valid tags.
+
+### A pinned install keeps upgrading
+
+Pinned installs don't upgrade. Quick-update applies patches and minors on stable channel only; it won't touch `pinned` or `next`. If a pinned install changed, open `_bmad/_config/manifest.yaml` — `channel: pinned` plus a fixed `version` and `sha` should hold across runs unless you explicitly override via flags.
+
+### `--pin bmm=X` didn't do anything
+
+bmm is a bundled module — `--pin` and `--next=` don't apply. Use `npx bmad-method@next install` for a prerelease core/bmm, or check out the bmad-bmm repo and run the installer locally to get unreleased changes.
````
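The manifest-to-`--pin` conversion the guide recommends is easy to script. A hedged sketch, assuming the manifest layout shown above and mikefarah `yq` v4 on PATH; the module set is illustrative, and modules on the `next` channel record `version: main`, which isn't a pinnable tag:

```bash
# Turn the tags recorded in the manifest into explicit --pin flags
# for a reproducible install on another machine.
pins=$(yq '.modules[] | select(.source == "external") | "--pin " + .name + "=" + .version' \
  _bmad/_config/manifest.yaml | xargs)
npx bmad-method install --yes --modules bmb,cis --tools claude-code $pins
```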
````diff
@@ -68,6 +68,7 @@ Select **Yes**, then provide a source:
 | Input Type | Example |
 | --------------------- | ------------------------------------------------- |
 | HTTPS URL (any host) | `https://github.com/org/repo` |
+| HTTP URL (any host) | `http://host/org/repo` |
 | HTTPS URL with subdir | `https://github.com/org/repo/tree/main/my-module` |
 | SSH URL | `git@github.com:org/repo.git` |
 | Local path | `/Users/me/projects/my-module` |
````
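The new HTTP row pairs with `--custom-source` for headless installs from self-hosted Git servers; a sketch with a placeholder host:

```bash
# http:// sources are now preserved as-is (git.internal.example is hypothetical).
npx bmad-method install --yes \
  --custom-source http://git.internal.example/org/my-module \
  --tools claude-code
```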
````diff
@@ -1,196 +1,10 @@
 ---
 title: Non-Interactive Installation
-description: Install BMad using command-line flags for CI/CD pipelines and automated deployments
+description: Headless / CI install docs have moved
 sidebar:
   order: 2
 ---
 
-Use command-line flags to install BMad non-interactively. This is useful for:
-
-## When to Use This
-
-- Automated deployments and CI/CD pipelines
-- Scripted installations
-- Batch installations across multiple projects
-- Quick installations with known configurations
-
-:::note[Prerequisites]
-Requires [Node.js](https://nodejs.org) v20+ and `npx` (included with npm).
-:::
-
-## Available Flags
-
-### Installation Options
-
-| Flag | Description | Example |
-| --------------------------- | ----------------------------------------------------------------------------------- | ---------------------------------------------- |
-| `--directory <path>` | Installation directory | `--directory ~/projects/myapp` |
-| `--modules <modules>` | Comma-separated module IDs | `--modules bmm,bmb` |
-| `--tools <tools>` | Comma-separated tool/IDE IDs (use `none` to skip) | `--tools claude-code,cursor` or `--tools none` |
-| `--action <type>` | Action for existing installations: `install` (default), `update`, or `quick-update` | `--action quick-update` |
-| `--custom-source <sources>` | Comma-separated Git URLs or local paths for custom modules | `--custom-source /path/to/module` |
-
-### Core Configuration
-
-| Flag | Description | Default |
-| ----------------------------------- | ----------------------------------------------- | --------------- |
-| `--user-name <name>` | Name for agents to use | System username |
-| `--communication-language <lang>` | Agent communication language | English |
-| `--document-output-language <lang>` | Document output language | English |
-| `--output-folder <path>` | Output folder path (see resolution rules below) | `_bmad-output` |
-
-#### Output Folder Path Resolution
-
-The value passed to `--output-folder` (or entered interactively) is resolved according to these rules:
-
-| Input type | Example | Resolved as |
-| ---------------------------- | -------------------------- | ---------------------------------------------------------- |
-| Relative path (default) | `_bmad-output` | `<project-root>/_bmad-output` |
-| Relative path with traversal | `../../shared-outputs` | Normalized absolute path — e.g. `/Users/me/shared-outputs` |
-| Absolute path | `/Users/me/shared-outputs` | Used as-is — project root is **not** prepended |
-
-The resolved path is what agents and workflows use at runtime when writing output files. Using an absolute path or a traversal-based relative path lets you direct all generated artifacts to a directory outside your project tree — useful for shared or monorepo setups.
-
-### Other Options
-
-| Flag | Description |
-| ------------- | ------------------------------------------- |
-| `-y, --yes` | Accept all defaults and skip prompts |
-| `-d, --debug` | Enable debug output for manifest generation |
-
-## Module IDs
-
-Available module IDs for the `--modules` flag:
-
-- `bmm` — BMad Method Master
-- `bmb` — BMad Builder
-
-Check the [BMad registry](https://github.com/bmad-code-org) for available external modules.
-
-## Tool/IDE IDs
-
-Available tool IDs for the `--tools` flag:
-
-**Preferred:** `claude-code`, `cursor`
-
-Run `npx bmad-method install` interactively once to see the full current list of supported tools, or check the [platform codes configuration](https://github.com/bmad-code-org/BMAD-METHOD/blob/main/tools/installer/ide/platform-codes.yaml).
-
-## Installation Modes
-
-| Mode | Description | Example |
-| --------------------- | --------------------------------------------- | ------------------------------------------------------------------------------------------------- |
-| Fully non-interactive | Provide all flags to skip all prompts | `npx bmad-method install --directory . --modules bmm --tools claude-code --yes` |
-| Semi-interactive | Provide some flags; BMad prompts for the rest | `npx bmad-method install --directory . --modules bmm` |
-| Defaults only | Accept all defaults with `-y` | `npx bmad-method install --yes` |
-| Custom source only | Install core + custom module(s) | `npx bmad-method install --directory . --custom-source /path/to/module --tools claude-code --yes` |
-| Without tools | Skip tool/IDE configuration | `npx bmad-method install --modules bmm --tools none` |
-
-## Examples
-
-### CI/CD Pipeline Installation
-
-```bash
-#!/bin/bash
-# install-bmad.sh
-
-npx bmad-method install \
-  --directory "${GITHUB_WORKSPACE}" \
-  --modules bmm \
-  --tools claude-code \
-  --user-name "CI Bot" \
-  --communication-language English \
-  --document-output-language English \
-  --output-folder _bmad-output \
-  --yes
-```
-
-### Update Existing Installation
-
-```bash
-npx bmad-method install \
-  --directory ~/projects/myapp \
-  --action update \
-  --modules bmm,bmb,custom-module
-```
-
-### Quick Update (Preserve Settings)
-
-```bash
-npx bmad-method install \
-  --directory ~/projects/myapp \
-  --action quick-update
-```
-
-### Install from Custom Source
-
-Install a module from a local path or any Git host:
-
-```bash
-npx bmad-method install \
-  --directory . \
-  --custom-source /path/to/my-module \
-  --tools claude-code \
-  --yes
-```
-
-Combine with official modules:
-
-```bash
-npx bmad-method install \
-  --directory . \
-  --modules bmm \
-  --custom-source https://gitlab.com/myorg/my-module \
-  --tools claude-code \
-  --yes
-```
-
-:::note[Custom source behavior]
-When `--custom-source` is used without `--modules`, only core and the custom modules are installed. Add `--modules` to include official modules as well. See [Install Custom and Community Modules](./install-custom-modules.md) for details.
-:::
-
-## What You Get
-
-- A fully configured `_bmad/` directory in your project
-- Agents and workflows configured for your selected modules and tools
-- A `_bmad-output/` folder for generated artifacts
-
-## Validation and Error Handling
-
-BMad validates all provided flags:
-
-- **Directory** — Must be a valid path with write permissions
-- **Modules** — Warns about invalid module IDs (but won't fail)
-- **Tools** — Warns about invalid tool IDs (but won't fail)
-- **Action** — Must be one of: `install`, `update`, `quick-update`
-
-Invalid values will either:
-
-1. Show an error and exit (for critical options like directory)
-2. Show a warning and skip (for optional items)
-3. Fall back to interactive prompts (for missing required values)
-
-:::tip[Best Practices]
-
-- Use absolute paths for `--directory` to avoid ambiguity
-- Use an absolute path for `--output-folder` when you want artifacts written outside the project tree (e.g. a shared monorepo outputs directory)
-- Test flags locally before using in CI/CD pipelines
-- Combine with `-y` for truly unattended installations
-- Use `--debug` if you encounter issues during installation
-:::
-
-## Troubleshooting
-
-### Installation fails with "Invalid directory"
-
-- The directory path must exist (or its parent must exist)
-- You need write permissions
-- The path must be absolute or correctly relative to the current directory
-
-### Module not found
-
-- Verify the module ID is correct
-- External modules must be available in the registry
-
-:::note[Still stuck?]
-Run with `--debug` for detailed output, try interactive mode to isolate the issue, or report at <https://github.com/bmad-code-org/BMAD-METHOD/issues>.
+:::note[This page has moved]
+Headless and CI install flags, channel selection, and pinning now live in the unified [How to Install BMad](./install-bmad.md) guide. Jump to the [Headless / CI installs](./install-bmad.md#headless-ci-installs) section for the flag reference and copy-paste recipes.
 :::
````
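For reference, the `--output-folder` resolution rules from the removed table, illustrated (paths are examples):

```bash
#   _bmad-output             -> <project-root>/_bmad-output   (relative)
#   ../../shared-outputs     -> normalized absolute path      (traversal)
#   /Users/me/shared-outputs -> used as-is; project root is not prepended
npx bmad-method install --yes --modules bmm --tools claude-code \
  --output-folder /Users/me/shared-outputs
```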
````diff
@@ -68,6 +68,7 @@ Select **Yes**, then enter a source:
 | Input type | Example |
 | --------------------- | ------------------------------------------------- |
 | HTTPS URL on any host | `https://github.com/org/repo` |
+| HTTP URL on any host | `http://host/org/repo` |
 | HTTPS URL pointing to a subdirectory | `https://github.com/org/repo/tree/main/my-module` |
 | SSH URL | `git@github.com:org/repo.git` |
 | Local path | `/Users/me/projects/my-module` |
````
````diff
@@ -0,0 +1,94 @@
+---
+title: "Named Agents"
+description: Why BMad's agents have names, personas, and customization — and what that unlocks compared to menu-driven or prompt-only approaches
+sidebar:
+  order: 1
+---
+
+You say "Hey Mary, let's brainstorm," and Mary activates. She greets you in your configured language, in her distinctive persona, and reminds you that `bmad-help` is always available. Then she skips the menu and goes straight into brainstorming — because your intent was already clear enough.
+
+This page explains what happens behind the scenes, and why BMad is built this way.
+
+## The Three-Legged Stool
+
+BMad's agent model rests on three composable primitives:
+
+| Primitive | What it provides | Where it lives |
+|---|---|---|
+| **Skill** | Capability — one concrete thing an agent can do (brainstorm, write a PRD, implement a story) | `.claude/skills/{skill-name}/SKILL.md` (or the equivalent location for your IDE) |
+| **Named Agent** | Persona continuity — a recognizable identity that wraps a set of related skills in a consistent voice, principles, and visual identity | skills whose directory name starts with `bmad-agent-*` |
+| **Customization** | Makes it yours — overrides that reshape agent behavior, add MCP integrations, swap templates, and layer on org conventions | `_bmad/custom/{skill-name}.toml` (team-committed overrides) and `.user.toml` (personal, gitignored) |
+
+Pull out any leg and the experience collapses:
+
+- Skills without agents → users hunt through a capability list by name or number
+- Agents without skills → a persona with nothing it can do
+- No customization → everyone gets the same out-of-the-box defaults, and any org-specific need means forking
+
+## What Named Agents Buy You
+
+BMad ships six named agents, each covering a phase of the BMad Method:
+
+| Agent | Phase | Workflows |
+|---|---|---|
+| 📊 **Mary**, Business Analyst | Analysis | Market research, brainstorming, product briefs, PRFAQ |
+| 📚 **Paige**, Technical Writer | Analysis | Project documentation, flowcharts, document validation |
+| 📋 **John**, Product Manager | Planning | PRD creation, epic/story breakdown, implementation-readiness review |
+| 🎨 **Sally**, UX Designer | Planning | UX design specs |
+| 🏗️ **Winston**, Architect | Solutioning | Technical architecture, consistency checks |
+| 💻 **Amelia**, Senior Engineer | Implementation | Story execution, quick dev, code review, sprint planning |
+
+Each agent has a hard-coded identity (name, title, expertise) plus a customizable layer (role, principles, communication style, icon, menu). You can rewrite Mary's principles or add menu items, but you can't change her name — deliberately. Brand recognition survives customization, so "Hey Mary" always activates the analyst no matter how a team shapes her behavior.
+
+## The Activation Flow
+
+Invoking a named agent runs eight steps in order:
+
+1. **Resolve agent config** — a Python resolver (stdlib `tomllib`) merges the built-in `customize.toml` with the team and personal overrides
+2. **Run pre-steps** — any team-configured pre-greeting behavior
+3. **Adopt the persona** — the hard-coded identity plus the customized role, communication style, and principles
+4. **Load persistent facts** — org rules, compliance notes, and files loadable via the `file:` prefix (e.g. `file:{project-root}/docs/project-context.md`)
+5. **Load config** — user name, communication language, output language, artifact paths
+6. **Greet** — a personalized greeting in the configured language, prefixed with the agent's emoji so you can tell at a glance who's talking
+7. **Run post-steps** — any team-configured post-greeting setup
+8. **Dispatch or show the menu** — if your opening message matches a menu item, run it; otherwise show the menu and wait
+
+Step 8 is where intent meets capability. "Hey Mary, let's brainstorm" skips the menu render because `bmad-brainstorming` obviously maps to `BP` on Mary's menu. If you're vaguer, she asks one short question rather than running a confirmation ritual. If nothing matches, she simply carries on the conversation.
+
+## Why Not Just Menus?
+
+Menus force users to accommodate the tool. You have to remember that brainstorming sits under the analyst agent's `BP` code rather than on the PM agent, and know which persona owns which features. That's cognitive load the tool imposes on you.
+
+Named agents invert the relationship. You tell a person, in whatever phrasing feels natural, what you want to do. The agent knows who it is and what it can do. When your intent is clear enough, she just starts.
+
+Menus remain as the fallback — shown when you're exploring, skipped when you're sure.
+
+## Why Not Just a Blank Prompt?
+
+A blank prompt assumes you know the magic words. "Help me brainstorm" might work, but "help me riff on this SaaS idea of mine" might not, and the outcome depends on how you phrase it. You become a prompt engineer.
+
+Named agents add structure without giving up freedom. The persona stays consistent, capabilities stay discoverable, and `bmad-help` is always one command away. You don't have to guess what an agent can do, and you don't need a manual to use it.
+
+## Customization Is a First-Class Citizen
+
+The customization model is what lets this approach scale from a single developer to a whole organization.
+
+Every agent ships a `customize.toml` with sensible defaults. Teams commit overrides in `_bmad/custom/bmad-agent-{role}.toml`. Individuals layer personal preferences on top in a `.user.toml` (gitignored). The resolver merges the three layers at activation time under predictable, structured rules.
+
+Most users never hand-write these files. The `bmad-customize` skill walks you through picking a target, distinguishing agent from workflow scope, authoring the override, and validating the merged result — opening customization to anyone who understands their intent, not just those fluent in TOML.
+
+One example: a team commits a single file telling Amelia to always use the Context7 MCP tool for library-doc lookups, and to fall back to Linear when a story isn't in the local epics list. Every dev workflow Amelia dispatches (dev-story, quick-dev, create-story, code-review) inherits those behaviors — no source edits, no per-workflow repetition.
+
+There is also a second customization surface for **cross-cutting concerns**: the central config `_bmad/config.toml` and `_bmad/config.user.toml` (installer-maintained, rebuilt from each module's `module.yaml`) plus `_bmad/custom/config.toml` (team-committed) and `_bmad/custom/config.user.toml` (personal, gitignored) as overrides. This is where the **agent roster** lives — lightweight descriptors that roster consumers like `bmad-party-mode`, `bmad-retrospective`, and `bmad-advanced-elicitation` read to learn which agents exist and how to role-play them. Use a team override to redefine an agent org-wide; use a `.user.toml` override to add fictional characters (Kirk, Spock, a domain expert) as a personal experiment — without touching any skill directory. The per-skill config shapes how Mary behaves **when activated**; the central config shapes the Mary that other skills **see when they consult the roster**.
+
+For full customization docs and hands-on recipes, see:
+
+- [How to Customize BMad](../how-to/customize-bmad.md) — reference for what's customizable and the merge rules
+- [How to Expand BMad for Your Org](../how-to/expand-bmad-for-your-org.md) — five hands-on recipes covering agent-wide rules, workflow conventions, external publishing, template swaps, and roster management
+- The `bmad-customize` skill — a guided authoring assistant that turns your intent into correctly placed, validated override files
+
+## The Bigger Idea
+
+Most AI assistants today are either a menu or a prompt box, and both push the cognitive load onto the user. Named agents plus customizable skills let you talk to a teammate who knows the project, and let your organization shape that teammate without forking.
+
+Next time you type "Hey Mary, let's brainstorm" and she just gets to work, notice everything that **didn't** happen. No slash command, no menu to page through, no awkward feature tour. That invisibility is the design.
````
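The team/personal override split described above can be exercised in one step. A minimal sketch, assuming the `[agent]` table and `file:` prefix behave as documented; the target file follows the `bmad-agent-{role}.toml` naming:

```bash
# Layer one personal persistent fact onto the analyst (gitignored .user.toml).
cat > _bmad/custom/bmad-agent-analyst.user.toml <<'EOF'
[agent]
persistent_facts = [
  "file:{project-root}/docs/project-context.md",
]
EOF
```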
@ -0,0 +1,258 @@
|
||||||
|
---
|
||||||
|
title: "如何为组织扩展 BMad"
|
||||||
|
description: 五个自定义方案,无需 fork 即可重塑 BMad——涵盖智能体全局规则、工作流约定、外部发布、模板替换和花名册变更
|
||||||
|
sidebar:
|
||||||
|
order: 9
|
||||||
|
---
|
||||||
|
|
||||||
|
BMad 的自定义机制让组织无需编辑已安装文件或 fork 技能就能重塑行为。本指南介绍五个方案,覆盖大部分企业级需求。
|
||||||
|
|
||||||
|
:::note[前置条件]
|
||||||
|
|
||||||
|
- 已在项目中安装 BMad(参见[如何安装 BMad](./install-bmad.md))
|
||||||
|
- 熟悉自定义模型(参见[如何自定义 BMad](./customize-bmad.md))
|
||||||
|
- PATH 中有 Python 3.11+(解析器只用标准库,不需要 `pip install`)
|
||||||
|
:::
|
||||||
|
|
||||||
|
:::tip[如何应用这些方案]
|
||||||
|
下面的**逐技能方案**(方案 1–4)可以通过运行 `bmad-customize` 技能并描述意图来应用——它会选择正确的配置面、生成覆盖文件并验证合并结果。方案 5(中央配置的花名册覆盖)超出 v1 技能范围,仍需手动编写。本文档中的方案是覆盖**什么**的权威参考;`bmad-customize` 负责处理**怎么做**的部分(针对智能体/工作流层面)。
|
||||||
|
:::
|
||||||
|
|
||||||
|
## 三层心智模型
|
||||||
|
|
||||||
|
在选择方案之前,先理解你的覆盖落在哪一层:
|
||||||
|
|
||||||
|
| 层 | 覆盖文件位置 | 作用范围 |
|
||||||
|
|---|---|---|
|
||||||
|
| **智能体**(如 Amelia、Mary、John) | `_bmad/custom/bmad-agent-{role}.toml` 中的 `[agent]` 段 | 跟随人设进入**该智能体分发的每个工作流** |
|
||||||
|
| **工作流**(如 product-brief、create-prd) | `_bmad/custom/{workflow-name}.toml` 中的 `[workflow]` 段 | 仅作用于该工作流的单次运行 |
|
||||||
|
| **中央配置** | `_bmad/custom/config.toml` 中的 `[agents.*]`、`[core]`、`[modules.*]` | 花名册(party-mode、retrospective、elicitation 可用的角色)、全组织统一的安装设置 |
|
||||||
|
|
||||||
|
经验法则:如果规则应当在工程师做任何开发工作时生效,就自定义**开发智能体**。如果只在撰写产品摘要时生效,就自定义 **product-brief 工作流**。如果要改变"谁在场"(重命名智能体、添加自定义角色、统一产物路径),就编辑**中央配置**。
|
||||||
|
|
||||||
|
## 方案 1:让智能体的规则贯穿其分发的所有工作流
|
||||||
|
|
||||||
|
**场景:** 统一工具使用和外部系统集成,让智能体分发的每个工作流都继承这些行为。这是影响面最大的模式。
|
||||||
|
|
||||||
|
**示例:Amelia(开发智能体)查库文档一律用 Context7,本地 epics 列表找不到 story 时回退到 Linear。**
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# _bmad/custom/bmad-agent-dev.toml
|
||||||
|
|
||||||
|
[agent]
|
||||||
|
|
||||||
|
# 每次激活时加载。传递到 dev-story、quick-dev、
|
||||||
|
# create-story、code-review、qa-generate——Amelia 分发的每个技能。
|
||||||
|
persistent_facts = [
|
||||||
|
"For any library documentation lookup (React, TypeScript, Zod, Prisma, etc.), call the context7 MCP tool (`mcp__context7__resolve_library_id` then `mcp__context7__get_library_docs`) before relying on training-data knowledge. Up-to-date docs trump memorized APIs.",
|
||||||
|
"When a story reference isn't found in {planning_artifacts}/epics-and-stories.md, search Linear via `mcp__linear__search_issues` using the story ID or title before asking the user to clarify. If Linear returns a match, treat it as the authoritative story source.",
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
**为什么有效:** 两句话就能重塑组织内所有开发工作流,无需逐工作流重复配置、无需改源码。每个新工程师拉下仓库就自动继承这些约定。
|
||||||
|
|
||||||
|
**团队文件 vs 个人文件:**
|
||||||
|
- `bmad-agent-dev.toml`:提交到 git,对整个团队生效
|
||||||
|
- `bmad-agent-dev.user.toml`:已 gitignore,个人偏好叠加在上面
|
||||||
|
|
||||||
|
## 方案 2:在特定工作流中强制执行组织规范
|
||||||
|
|
||||||
|
**场景:** 塑造工作流输出的*内容*,使其满足合规、审计或下游消费者的要求。
|
||||||
|
|
||||||
|
**示例:每份产品摘要都必须包含合规字段,智能体知晓组织的发布规范。**
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# _bmad/custom/bmad-product-brief.toml
|
||||||
|
|
||||||
|
[workflow]
|
||||||
|
|
||||||
|
persistent_facts = [
|
||||||
|
"Every brief must include an 'Owner' field, a 'Target Release' field, and a 'Security Review Status' field.",
|
||||||
|
"Non-commercial briefs (internal tools, research projects) must still include a user-value section, but can omit market differentiation.",
|
||||||
|
"file:{project-root}/docs/enterprise/brief-publishing-conventions.md",
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
**效果:** 这些事实在工作流激活的第 3 步加载。当智能体起草摘要时,它已了解必填字段和企业规范文档。内置默认值(`file:{project-root}/**/project-context.md`)仍会加载,因为这是追加操作。
|
||||||
|
|
||||||
|
## Approach 3: Publish Finished Output to External Systems

**Scenario:** Once a workflow has produced its output, automatically publish it to the enterprise system of record (Confluence, Notion, SharePoint) and create follow-up work items (Jira, Linear, Asana).

**Example: briefs auto-publish to Confluence, with optional Jira epic creation.**

```toml
# _bmad/custom/bmad-product-brief.toml

[workflow]

# Terminal hook. A scalar override replaces the empty default wholesale.
on_complete = """
Publish and offer follow-up:

1. Read the finalized brief file path from the prior step.
2. Call `mcp__atlassian__confluence_create_page` with:
   - space: "PRODUCT"
   - parent: "Product Briefs"
   - title: the brief's title
   - body: the brief's markdown contents
   Capture the returned page URL.
3. Tell the user: "Brief published to Confluence: <url>".
4. Ask: "Want me to open a Jira epic for this brief now?"
5. If yes, call `mcp__atlassian__jira_create_issue` with:
   - type: "Epic"
   - project: "PROD"
   - summary: the brief's title
   - description: a short summary plus a link back to the Confluence page.
   Report the epic key and URL.
6. If no, exit cleanly.

If either MCP tool fails, report the failure, print the brief path,
and ask the user to publish manually.
"""
```

**Why `on_complete` rather than `activation_steps_append`:** `on_complete` runs exactly once, at the terminal stage, after the workflow's main output has been written — the right moment to publish an artifact. `activation_steps_append` runs on every activation, before the workflow begins.

**Trade-offs:**

- **Confluence publishing is non-destructive**, so it always runs on completion
- **Jira epic creation is visible to the whole team** and triggers sprint-planning signals, so it requires user confirmation
- **Graceful degradation:** if an MCP tool fails, hand the output to the user for manual publishing instead of silently dropping it (see the gating sketch after this list)
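How the terminal hook might be gated: the non-empty check comes from this repo's workflow docs ("if the resolved `workflow.on_complete` is non-empty, follow it as the final terminal instruction"). The harness below is illustrative, not BMad's actual runtime, and reading the value from stdout is an assumption about the resolver's output.

```python
# Sketch: gate the terminal hook on a non-empty resolved value.
import subprocess

def resolve_on_complete(skill_root: str) -> str | None:
    # Invoked the same way the workflow docs describe; error handling
    # (the documented manual-merge fallback) is omitted here.
    result = subprocess.run(
        ["python3", "_bmad/scripts/resolve_customization.py",
         "--skill", skill_root, "--key", "workflow.on_complete"],
        capture_output=True, text=True, check=True,
    )
    instruction = result.stdout.strip()
    # Empty string is the shipped default: no custom post-completion behavior.
    return instruction or None
```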
## Approach 4: Swap In Your Own Output Template

**Scenario:** The default output structure doesn't match the format your organization expects, or different teams in the same repo need different templates.

**Example: point the product-brief workflow at an enterprise-owned template.**

```toml
# _bmad/custom/bmad-product-brief.toml

[workflow]
brief_template = "{project-root}/docs/enterprise/brief-template.md"
```

**How it works:** The workflow ships with `brief_template = "resources/brief-template.md"` in its own `customize.toml` (a bare path, resolved from the skill root). Your override points at a file under `{project-root}`, so at step 4 the agent reads your template instead of the built-in one; the path-resolution sketch below illustrates the distinction.

**Template-authoring tips:**

- Keep templates under `{project-root}/docs/` or `{project-root}/_bmad/custom/templates/` so they are versioned alongside the override files
- Follow the built-in template's structural conventions (section headings, frontmatter); the agent adapts the actual content
- In multi-team repos, use `.user.toml` so each team can point at its own template without touching the committed team file
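A small sketch of the two path conventions this guide describes — bare paths resolve from the skill root, `{project-root}`-prefixed paths from the project working directory. The helper name is made up for illustration.

```python
# Sketch: resolving a template path per the documented conventions.
from pathlib import Path

def resolve_path(value: str, skill_root: Path, project_root: Path) -> Path:
    if value.startswith("{project-root}"):
        # Override-style path, anchored at the project working directory.
        return project_root / value.removeprefix("{project-root}/")
    # Bare path, e.g. "resources/brief-template.md": anchored at the skill root.
    return skill_root / value

# resolve_path("resources/brief-template.md", skill, proj)
#   -> <skill-root>/resources/brief-template.md        (shipped default)
# resolve_path("{project-root}/docs/enterprise/brief-template.md", skill, proj)
#   -> <project-root>/docs/enterprise/brief-template.md (your override)
```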
## Approach 5: Customize the Roster

**Scenario:** Change *who is in the room* for roster-driven skills such as `bmad-party-mode`, `bmad-retrospective`, and `bmad-advanced-elicitation` — without editing source or forking. Three common variants follow.

### 5a. Reshape a BMad Agent Org-Wide

Every real agent has a descriptor that the installer synthesizes from `module.yaml`. Overriding it changes tone and positioning across all roster consumers:

```toml
# _bmad/custom/config.toml (committed to git — applies to every developer)

[agents.bmad-agent-analyst]
description = "Mary the Regulatory-Aware Business Analyst — channels Porter and Minto, but lives and breathes FDA audit trails. Speaks like a forensic investigator presenting a case file."
```

Party-mode will spawn Mary with the new description. The analyst's own activation flow is untouched, because Mary's behavior is governed by her per-skill `customize.toml`. This override changes **how external skills perceive and introduce her**, not how she works internally.
### 5b. Add Fictional or Custom Agents

A complete descriptor is all the roster features need — no skill directory required. Useful for adding personality variety to party mode or brainstorming:

```toml
# _bmad/custom/config.user.toml (personal — gitignored)

[agents.spock]
team = "startrek"
name = "Commander Spock"
title = "Science Officer"
icon = "🖖"
description = "Logic first, emotion suppressed. Begins observations with 'Fascinating.' Never rounds up. Counterpoint to any argument that relies on gut instinct."

[agents.mccoy]
team = "startrek"
name = "Dr. Leonard McCoy"
title = "Chief Medical Officer"
icon = "⚕️"
description = "Country doctor's warmth, short fuse. 'Dammit Jim, I'm a doctor not a ___.' Ethics-driven counterweight to Spock."
```

Ask party-mode to "invite the Enterprise crew" and it filters on `team = "startrek"` and spawns Spock and McCoy. Real BMad agents (Mary, Amelia) can sit at the same table.
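A toy sketch of the team filter described above. The descriptor fields (`team`, `name`, `title`) come from the config shape shown in this guide; Mary's `team` value and how party-mode actually selects personas are assumptions of this illustration.

```python
# Sketch: pick roster entries whose team matches a requested crew.
# Descriptors stand in for the merged config.toml / config.user.toml roster.
agents = {
    "spock": {"team": "startrek", "name": "Commander Spock", "title": "Science Officer"},
    "mccoy": {"team": "startrek", "name": "Dr. Leonard McCoy", "title": "Chief Medical Officer"},
    "bmad-agent-analyst": {"team": "bmad", "name": "Mary", "title": "Business Analyst"},
}

def roster_for(team: str) -> list[dict]:
    return [desc for desc in agents.values() if desc.get("team") == team]

print([d["name"] for d in roster_for("startrek")])
# ['Commander Spock', 'Dr. Leonard McCoy']
```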
### 5c. Pin Team Install Settings

The installer prompts each developer for values like the `planning_artifacts` path. When the organization needs one canonical answer, pin it in the central config — whatever a developer answered locally is overridden at resolve time:

```toml
# _bmad/custom/config.toml

[modules.bmm]
planning_artifacts = "{project-root}/shared/planning"
implementation_artifacts = "{project-root}/shared/implementation"

[core]
document_output_language = "English"
```

Personal settings such as `user_name`, `communication_language`, or `user_skill_level` stay in each developer's own `_bmad/config.user.toml`; the team file should not touch them.

**Why central config rather than per-agent customize.toml:** a per-agent file shapes how *one* agent behaves when activated. The central config shapes what roster consumers see when they *look at the whole picture:* which agents exist, what they are called, which team they belong to, and the install settings the entire repo agrees on. Two layers, two jobs.
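A minimal sketch of the pin-wins precedence from 5c, assuming the central config is consulted after the developer's installer answers; the key names follow this guide, but the lookup function is illustrative.

```python
# Sketch: centrally pinned values override locally prompted answers.
def resolve_setting(key: str, local_answers: dict, central_pins: dict) -> str:
    if key in central_pins:        # pinned org-wide -> always wins
        return central_pins[key]
    return local_answers[key]      # otherwise, the installer-prompt answer

central = {"modules.bmm.planning_artifacts": "{project-root}/shared/planning"}
local = {"modules.bmm.planning_artifacts": "{project-root}/docs/planning"}
print(resolve_setting("modules.bmm.planning_artifacts", local, central))
# {project-root}/shared/planning
```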
## Reinforcing Global Rules in IDE Session Files

BMad customizations load when a skill activates. Many IDE tools also load a global instruction file at **the start of every session**, before any skill runs (`CLAUDE.md`, `AGENTS.md`, `.cursor/rules/`, `.github/copilot-instructions.md`, and so on). For rules that should hold even outside BMad skills, declare them in the global instructions as well.

**When "double declaration" is worth it:**

- The rule matters enough that it should hold even in plain conversation, with no skill active
- You want a second line of defense, because the model's training-data defaults may pull in the wrong direction
- The rule is compact enough that repeating it won't bloat the session file

**Example: reinforce Approach 1's dev-agent rule in the repo's `CLAUDE.md`.**

```markdown
<!-- Any file-read of library docs goes through the context7 MCP tool
(`mcp__context7__resolve_library_id` then `mcp__context7__get_library_docs`)
before relying on training-data knowledge. -->
```

One sentence, loaded every session. Together with the `bmad-agent-dev.toml` customization, the rule holds both inside Amelia's workflows and in ad-hoc conversations with the assistant. Each layer owns its own scope:

| Layer | Scope | Use for |
|---|---|---|
| IDE session file (`CLAUDE.md` / `AGENTS.md`) | Every session, before any skill activates | Short universal rules that should hold outside BMad too |
| BMad agent customization | Every workflow the agent dispatches | Behavior tied to the agent persona |
| BMad workflow customization | A single workflow run | Workflow-specific output formats, publish hooks, templates |
| BMad central config | Roster + shared install settings | Who is in the room; shared paths the team uses |

Keep IDE session files **lean**. A dozen well-chosen rules beat a wall of text: the model reads the file every turn, and noise drowns out signal.
## Combining the Approaches

The five approaches compose freely. A typical enterprise-grade `bmad-product-brief` override might set `persistent_facts` (Approach 2), `on_complete` (Approach 3), and `brief_template` (Approach 4) all at once. Agent-level rules (Approach 1) live in a separate file named after the agent, and the central config (Approach 5) pins the shared roster and team settings — all of them in effect at the same time.

```toml
# _bmad/custom/bmad-product-brief.toml (workflow level)

[workflow]
persistent_facts = ["..."]
brief_template = "{project-root}/docs/enterprise/brief-template.md"
on_complete = """ ... """
```

```toml
# _bmad/custom/bmad-agent-analyst.toml (agent level — Mary dispatches product-brief)

[agent]
persistent_facts = ["Always include a 'Regulatory Review' section when the domain involves healthcare, finance, or children's data."]
```

The effect: Mary loads the regulatory-review rule when her persona activates. When the user picks the product-brief menu item, the workflow loads its own standards, writes to the enterprise template, and publishes to Confluence on completion. Every layer contributes, and none of it requires editing BMad source.
## Troubleshooting

**Override not taking effect?** Check that the file lives under `_bmad/custom/` and uses the exact skill directory name (e.g. `bmad-agent-dev.toml`, not `bmad-dev.toml`). See [How to customize BMad](./customize-bmad.md).

**Unsure of an MCP tool name?** Use the exact name the MCP server exposes in the current session; if in doubt, ask Claude Code to list the available MCP tools. Names hard-coded in `persistent_facts` or `on_complete` do nothing while the MCP server is disconnected.

**None of these fit your case?** The approaches above are illustrative. The underlying machinery — the three-layer merge, structural rules, and agent rules spanning workflows — supports many more patterns; combine as needed. A sketch of the merge rules follows.
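A compact sketch of the structural merge the docs describe ("scalars override, tables deep-merge, arrays of tables keyed by `code` or `id` replace matching entries and append new entries, and all other arrays append"). It mirrors the documented rules only; the shipped `resolve_customization.py` is the source of truth.

```python
# Sketch: BMad-style structural merge of one override layer onto a base.
def merge(base, override):
    if isinstance(base, dict) and isinstance(override, dict):
        merged = dict(base)
        for key, value in override.items():
            merged[key] = merge(base[key], value) if key in base else value
        return merged                              # tables deep-merge
    if isinstance(base, list) and isinstance(override, list):
        keyed = all(isinstance(x, dict) and ("code" in x or "id" in x)
                    for x in base + override)
        if keyed:
            merged = list(base)                    # keyed arrays of tables:
            for item in override:                  # replace matches, append new
                key = item.get("code") or item.get("id")
                for i, existing in enumerate(merged):
                    if (existing.get("code") or existing.get("id")) == key:
                        merged[i] = item
                        break
                else:
                    merged.append(item)
            return merged
        return base + override                     # all other arrays append
    return override                                # scalars: override wins
```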
@@ -68,6 +68,7 @@ Would you like to install from a custom source (Git URL or local path)?
 | Input type | Example |
 | ---------- | ------- |
 | HTTPS URL (any host) | `https://github.com/org/repo` |
+| HTTP URL (any host) | `http://host/org/repo` |
 | HTTPS URL with subdirectory | `https://github.com/org/repo/tree/main/my-module` |
 | SSH URL | `git@github.com:org/repo.git` |
 | Local path | `/Users/me/projects/my-module` |
package-lock.json

@@ -1,12 +1,12 @@
 {
   "name": "bmad-method",
-  "version": "6.3.0",
+  "version": "6.6.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "bmad-method",
-      "version": "6.3.0",
+      "version": "6.6.0",
       "license": "MIT",
       "dependencies": {
         "@clack/core": "^1.0.0",
@@ -15,7 +15,6 @@
         "chalk": "^4.1.2",
         "commander": "^14.0.0",
         "csv-parse": "^6.1.0",
-        "fs-extra": "^11.3.0",
         "glob": "^11.0.3",
         "ignore": "^7.0.5",
         "js-yaml": "^4.1.0",
@@ -25,8 +24,8 @@
         "yaml": "^2.7.0"
       },
       "bin": {
-        "bmad": "tools/bmad-npx-wrapper.js",
-        "bmad-method": "tools/bmad-npx-wrapper.js"
+        "bmad": "tools/installer/bmad-cli.js",
+        "bmad-method": "tools/installer/bmad-cli.js"
       },
       "devDependencies": {
         "@astrojs/sitemap": "^3.6.0",
@@ -46,6 +45,7 @@
         "prettier": "^3.7.4",
         "prettier-plugin-packagejson": "^2.5.19",
         "sharp": "^0.33.5",
+        "unist-util-visit": "^5.1.0",
         "yaml-eslint-parser": "^1.2.3",
         "yaml-lint": "^1.7.0"
       },
@@ -6975,20 +6975,6 @@
         "url": "https://github.com/sponsors/isaacs"
       }
     },
-    "node_modules/fs-extra": {
-      "version": "11.3.3",
-      "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.3.tgz",
-      "integrity": "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==",
-      "license": "MIT",
-      "dependencies": {
-        "graceful-fs": "^4.2.0",
-        "jsonfile": "^6.0.1",
-        "universalify": "^2.0.0"
-      },
-      "engines": {
-        "node": ">=14.14"
-      }
-    },
     "node_modules/fs.realpath": {
       "version": "1.0.0",
       "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
@@ -7227,6 +7213,7 @@
       "version": "4.2.11",
       "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
       "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
+      "dev": true,
       "license": "ISC"
     },
     "node_modules/h3": {
@@ -9066,18 +9053,6 @@
       "dev": true,
       "license": "MIT"
     },
-    "node_modules/jsonfile": {
-      "version": "6.2.0",
-      "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
-      "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
-      "license": "MIT",
-      "dependencies": {
-        "universalify": "^2.0.0"
-      },
-      "optionalDependencies": {
-        "graceful-fs": "^4.1.6"
-      }
-    },
     "node_modules/katex": {
       "version": "0.16.28",
       "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.28.tgz",
@@ -13607,15 +13582,6 @@
         "url": "https://opencollective.com/unified"
       }
     },
-    "node_modules/universalify": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
-      "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
-      "license": "MIT",
-      "engines": {
-        "node": ">= 10.0.0"
-      }
-    },
     "node_modules/unrs-resolver": {
       "version": "1.11.1",
       "resolved": "https://registry.npmjs.org/unrs-resolver/-/unrs-resolver-1.11.1.tgz",
package.json

@@ -1,7 +1,7 @@
 {
   "$schema": "https://json.schemastore.org/package.json",
   "name": "bmad-method",
-  "version": "6.3.0",
+  "version": "6.6.0",
   "description": "Breakthrough Method of Agile AI-driven Development",
   "keywords": [
     "agile",
@@ -39,12 +39,14 @@
     "lint:fix": "eslint . --ext .js,.cjs,.mjs,.yaml --fix",
     "lint:md": "markdownlint-cli2 \"**/*.md\"",
     "prepare": "command -v husky >/dev/null 2>&1 && husky || exit 0",
-    "quality": "npm run format:check && npm run lint && npm run lint:md && npm run docs:build && npm run test:install && npm run test:renderer && npm run validate:refs && npm run validate:skills",
+    "quality": "npm run format:check && npm run lint && npm run lint:md && npm run docs:build && npm run test:install && npm run test:urls && npm run test:renderer && npm run validate:refs && npm run validate:skills",
     "rebundle": "node tools/installer/bundlers/bundle-web.js rebundle",
-    "test": "npm run test:refs && npm run test:install && npm run test:renderer && npm run lint && npm run lint:md && npm run format:check",
+    "test": "npm run test:refs && npm run test:install && npm run test:urls && npm run test:channels && npm run test:renderer && npm run lint && npm run lint:md && npm run format:check",
+    "test:channels": "node test/test-installer-channels.js",
     "test:install": "node test/test-installation-components.js",
     "test:refs": "node test/test-file-refs-csv.js",
     "test:renderer": "node test/test-quick-dev-renderer.js",
+    "test:urls": "node test/test-parse-source-urls.js",
     "validate:refs": "node tools/validate-file-refs.js --strict",
     "validate:skills": "node tools/validate-skills.js --strict"
   },
removals.txt (+37)

@@ -15,3 +15,40 @@ bmad-quick-spec
 bmad-quick-flow
 bmad-quick-dev-new-preview
 bmad-init
+
+# Pre-v6.2.0 wrapper skills (module-prefixed naming, dropped in v6.2.0).
+# Users upgrading from v6.0.x / v6.1.x had these installed and the cleanup
+# never knew to remove them; they remained alongside the new self-contained
+# skills causing duplicates and broken-file errors. See issue #2309.
+bmad-agent-bmm-analyst
+bmad-agent-bmm-architect
+bmad-agent-bmm-dev
+bmad-agent-bmm-pm
+bmad-agent-bmm-qa
+bmad-agent-bmm-quick-flow-solo-dev
+bmad-agent-bmm-sm
+bmad-agent-bmm-tech-writer
+bmad-agent-bmm-ux-designer
+bmad-bmm-check-implementation-readiness
+bmad-bmm-code-review
+bmad-bmm-correct-course
+bmad-bmm-create-architecture
+bmad-bmm-create-epics-and-stories
+bmad-bmm-create-prd
+bmad-bmm-create-product-brief
+bmad-bmm-create-story
+bmad-bmm-create-ux-design
+bmad-bmm-dev-story
+bmad-bmm-document-project
+bmad-bmm-domain-research
+bmad-bmm-edit-prd
+bmad-bmm-generate-project-context
+bmad-bmm-market-research
+bmad-bmm-qa-generate-e2e-tests
+bmad-bmm-quick-dev
+bmad-bmm-quick-spec
+bmad-bmm-retrospective
+bmad-bmm-sprint-planning
+bmad-bmm-sprint-status
+bmad-bmm-technical-research
+bmad-bmm-validate-prd
@@ -7,8 +7,8 @@
   "description": "Produces battle-tested PRFAQ document and optional LLM distillate for PRD input.",
   "supports-headless": true,
   "phase-name": "1-analysis",
-  "after": ["brainstorming", "perform-research"],
-  "before": ["create-prd"],
+  "preceded-by": ["brainstorming", "perform-research"],
+  "followed-by": ["create-prd"],
   "is-required": false,
   "output-location": "{planning_artifacts}"
 }
@@ -8,8 +8,8 @@
   "description": "Produces executive product brief and optional LLM distillate for PRD input.",
   "supports-headless": true,
   "phase-name": "1-analysis",
-  "after": ["brainstorming", "perform-research"],
-  "before": ["create-prd"],
+  "preceded-by": ["brainstorming", "perform-research"],
+  "followed-by": ["create-prd"],
   "is-required": true,
   "output-location": "{planning_artifacts}"
 }
@@ -227,37 +227,39 @@ Prepare the content to append to the document:
 
 ### Architecture Completeness Checklist
 
-**✅ Requirements Analysis**
+Mark each item `[x]` only if validation confirms it; leave `[ ]` if it is missing, partial, or unverified. Any unchecked item must be reflected in the Gap Analysis above and in the Overall Status below.
 
-- [x] Project context thoroughly analyzed
-- [x] Scale and complexity assessed
-- [x] Technical constraints identified
-- [x] Cross-cutting concerns mapped
+**Requirements Analysis**
 
-**✅ Architectural Decisions**
+- [ ] Project context thoroughly analyzed
+- [ ] Scale and complexity assessed
+- [ ] Technical constraints identified
+- [ ] Cross-cutting concerns mapped
 
-- [x] Critical decisions documented with versions
-- [x] Technology stack fully specified
-- [x] Integration patterns defined
-- [x] Performance considerations addressed
+**Architectural Decisions**
 
-**✅ Implementation Patterns**
+- [ ] Critical decisions documented with versions
+- [ ] Technology stack fully specified
+- [ ] Integration patterns defined
+- [ ] Performance considerations addressed
 
-- [x] Naming conventions established
-- [x] Structure patterns defined
-- [x] Communication patterns specified
-- [x] Process patterns documented
+**Implementation Patterns**
 
-**✅ Project Structure**
+- [ ] Naming conventions established
+- [ ] Structure patterns defined
+- [ ] Communication patterns specified
+- [ ] Process patterns documented
 
-- [x] Complete directory structure defined
-- [x] Component boundaries established
-- [x] Integration points mapped
-- [x] Requirements to structure mapping complete
+**Project Structure**
+
+- [ ] Complete directory structure defined
+- [ ] Component boundaries established
+- [ ] Integration points mapped
+- [ ] Requirements to structure mapping complete
 
 ### Architecture Readiness Assessment
 
-**Overall Status:** READY FOR IMPLEMENTATION
+**Overall Status:** {{READY FOR IMPLEMENTATION | READY WITH MINOR GAPS | NOT READY}} (choose READY FOR IMPLEMENTATION only when all 16 checklist items are `[x]` and no Critical Gaps remain; choose NOT READY when any Critical Gap is open or any Requirements Analysis or Architectural Decisions item is unchecked; otherwise READY WITH MINOR GAPS)
 
 **Confidence Level:** {{high/medium/low}} based on validation results
@@ -55,7 +55,8 @@ Load {planning_artifacts}/epics.md and review:
 2. **Requirements Grouping**: Group related FRs that deliver cohesive user outcomes
 3. **Incremental Delivery**: Each epic should deliver value independently
 4. **Logical Flow**: Natural progression from user's perspective
-5. **🔗 Dependency-Free Within Epic**: Stories within an epic must NOT depend on future stories
+5. **Dependency-Free Within Epic**: Stories within an epic must NOT depend on future stories
+6. **Implementation Efficiency**: Consider consolidating epics that all modify the same core files into fewer epics
 
 **⚠️ CRITICAL PRINCIPLE:**
 Organize by USER VALUE, not technical layers:
@@ -74,6 +75,18 @@ Organize by USER VALUE, not technical layers:
 - Epic 3: Frontend Components (creates reusable components) - **No user value**
 - Epic 4: Deployment Pipeline (CI/CD setup) - **No user value**
 
+**❌ WRONG Epic Examples (File Churn on Same Component):**
+
+- Epic 1: File Upload (modifies model, controller, web form, web API)
+- Epic 2: File Status (modifies model, controller, web form, web API)
+- Epic 3: File Access permissions (modifies model, controller, web form, web API)
+- All three epics touch the same files — consolidate into one epic with ordered stories
+
+**✅ CORRECT Alternative:**
+
+- Epic 1: File Management Enhancement (upload, status, permissions as stories within one epic)
+- Rationale: Single component, fully pre-designed, no feedback loop between epics
+
 **🔗 DEPENDENCY RULES:**
 
 - Each epic must deliver COMPLETE functionality for its domain
@@ -82,21 +95,38 @@ Organize by USER VALUE, not technical layers:
 
 ### 3. Design Epic Structure Collaboratively
 
-**Step A: Identify User Value Themes**
+**Step A: Assess Context and Identify Themes**
+
+First, assess how much of the solution design is already validated (Architecture, UX, Test Design).
+When the outcome is certain and direction changes between epics are unlikely, prefer fewer but larger epics.
+Split into multiple epics when there is a genuine risk boundary or when early feedback could change direction
+of following epics.
+
+Then, identify user value themes:
 
 - Look for natural groupings in the FRs
 - Identify user journeys or workflows
 - Consider user types and their goals
 
 **Step B: Propose Epic Structure**
-For each proposed epic:
+
+For each proposed epic (considering whether epics share the same core files):
 
 1. **Epic Title**: User-centric, value-focused
 2. **User Outcome**: What users can accomplish after this epic
 3. **FR Coverage**: Which FR numbers this epic addresses
 4. **Implementation Notes**: Any technical or UX considerations
 
-**Step C: Create the epics_list**
+**Step C: Review for File Overlap**
+
+Assess whether multiple proposed epics repeatedly target the same core files. If overlap is significant:
+
+- Distinguish meaningful overlap (same component end-to-end) from incidental sharing
+- Ask whether to consolidate into one epic with ordered stories
+- If confirmed, merge the epic FRs into a single epic, preserving dependency flow: each story must still fit within
+  a single dev agent's context
+
+**Step D: Create the epics_list**
 
 Format the epics_list as:
@@ -90,6 +90,12 @@ Review the complete epic and story breakdown to ensure EVERY FR is covered:
 - Dependencies flow naturally
 - Foundation stories only setup what's needed
 - No big upfront technical work
+- **File Churn Check:** Do multiple epics repeatedly modify the same core files?
+  - Assess whether the overlap pattern suggests unnecessary churn or is incidental
+  - If overlap is significant: Validate that splitting provides genuine value (risk mitigation, feedback loops, context size limits)
+  - If no justification for the split: Recommend consolidation into fewer epics
+  - ❌ WRONG: Multiple epics each modify the same core files with no feedback loop between them
+  - ✅ RIGHT: Epics target distinct files/components, OR consolidation was explicitly considered and rejected with rationale
 
 ### 5. Dependency Validation (CRITICAL)
@@ -7,7 +7,55 @@ description: 'LLM-assisted human-in-the-loop review. Make sense of a change, foc
 
 **Goal:** Guide a human through reviewing a change — from purpose and context into details.
 
-You are assisting the user in reviewing a change.
+**Your Role:** You are assisting the user in reviewing a change.
+
+## Conventions
+
+- Bare paths (e.g. `step-01-orientation.md`) resolve from the skill root.
+- `{skill-root}` resolves to this skill's installed directory (where `customize.toml` lives).
+- `{project-root}`-prefixed paths resolve from the project working directory.
+- `{skill-name}` resolves to the skill directory's basename.
+
+## On Activation
+
+### Step 1: Resolve the Workflow Block
+
+Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow`
+
+**If the script fails**, resolve the `workflow` block yourself by reading these three files in base → team → user order and applying the same structural merge rules as the resolver:
+
+1. `{skill-root}/customize.toml` — defaults
+2. `{project-root}/_bmad/custom/{skill-name}.toml` — team overrides
+3. `{project-root}/_bmad/custom/{skill-name}.user.toml` — personal overrides
+
+Any missing file is skipped. Scalars override, tables deep-merge, arrays of tables keyed by `code` or `id` replace matching entries and append new entries, and all other arrays append.
+
+### Step 2: Execute Prepend Steps
+
+Execute each entry in `{workflow.activation_steps_prepend}` in order before proceeding.
+
+### Step 3: Load Persistent Facts
+
+Treat every entry in `{workflow.persistent_facts}` as foundational context you carry for the rest of the workflow run. Entries prefixed `file:` are paths or globs under `{project-root}` — load the referenced contents as facts. All other entries are facts verbatim.
+
+### Step 4: Load Config
+
+Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
+
+- `implementation_artifacts`
+- `planning_artifacts`
+- `communication_language`
+- `document_output_language`
+
+### Step 5: Greet the User
+
+Greet the user, speaking in `{communication_language}`.
+
+### Step 6: Execute Append Steps
+
+Execute each entry in `{workflow.activation_steps_append}` in order.
+
+Activation is complete. Begin the workflow below.
 
 ## Global Step Rules (apply to every step)
 
@@ -15,15 +63,6 @@ You are assisting the user in reviewing a change.
 - **Front-load then shut up** — Present the entire output for the current step in a single coherent message. Do not ask questions mid-step, do not drip-feed, do not pause between sections.
 - **Language** — Speak in `{communication_language}`. Write any file output in `{document_output_language}`.
 
-## INITIALIZATION
-
-Load and read full config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
-
-- `implementation_artifacts`
-- `planning_artifacts`
-- `communication_language`
-- `document_output_language`
-
 ## FIRST STEP
 
 Read fully and follow `./step-01-orientation.md` to begin.
@@ -0,0 +1,41 @@
+# DO NOT EDIT -- overwritten on every update.
+#
+# Workflow customization surface for bmad-checkpoint-preview. Mirrors the
+# agent customization shape under the [workflow] namespace.
+
+[workflow]
+
+# --- Configurable below. Overrides merge per BMad structural rules: ---
+# scalars: override wins • arrays (persistent_facts, activation_steps_*): append
+# arrays-of-tables with `code`/`id`: replace matching items, append new ones.
+
+# Steps to run before the standard activation (config load, greet).
+# Overrides append. Use for pre-flight loads, compliance checks, etc.
+
+activation_steps_prepend = []
+
+# Steps to run after greet but before the workflow begins.
+# Overrides append. Use for context-heavy setup that should happen
+# once the user has been acknowledged.
+
+activation_steps_append = []
+
+# Persistent facts the workflow keeps in mind for the whole run
+# (standards, compliance constraints, stylistic guardrails).
+# Distinct from the runtime memory sidecar — these are static context
+# loaded on activation. Overrides append.
+#
+# Each entry is either:
+# - a literal sentence, e.g. "All stories must include testable acceptance criteria."
+# - a file reference prefixed with `file:`, e.g. "file:{project-root}/docs/standards.md"
+#   (glob patterns are supported; the file's contents are loaded and treated as facts).
+
+persistent_facts = [
+  "file:{project-root}/**/project-context.md",
+]
+
+# Scalar: executed when the workflow reaches its final step,
+# after the review decision (approve/rework/discuss) is made. Override wins.
+# Leave empty for no custom post-completion behavior.
+
+on_complete = ""
@@ -22,3 +22,9 @@ HALT — do not proceed until the user makes their choice.
 - **Approve**: Acknowledge briefly. If the human wants to patch something before shipping, help apply the fix interactively. If reviewing a PR, offer to approve via `gh pr review --approve` — but confirm with the human before executing, since this is a visible action on a shared resource.
 - **Rework**: Ask what went wrong — was it the approach, the spec, or the implementation? Help the human decide on next steps (revert commit, open an issue, revise the spec, etc.). Help draft specific, actionable feedback tied to `path:line` locations if the change is a PR from someone else.
 - **Discuss**: Open conversation — answer questions, explore concerns, dig into any aspect. After discussion, return to the decision prompt above.
+
+## On Complete
+
+Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow.on_complete`
+
+If the resolved `workflow.on_complete` is non-empty, follow it as the final terminal instruction before exiting.
@@ -3,4 +3,88 @@ name: bmad-code-review
 description: 'Review code changes adversarially using parallel review layers (Blind Hunter, Edge Case Hunter, Acceptance Auditor) with structured triage into actionable categories. Use when the user says "run code review" or "review this code"'
 ---
 
-Follow the instructions in ./workflow.md.
+# Code Review Workflow
+
+**Goal:** Review code changes adversarially using parallel review layers and structured triage.
+
+**Your Role:** You are an elite code reviewer. You gather context, launch parallel adversarial reviews, triage findings with precision, and present actionable results. No noise, no filler.
+
+## Conventions
+
+- Bare paths (e.g. `checklist.md`) resolve from the skill root.
+- `{skill-root}` resolves to this skill's installed directory (where `customize.toml` lives).
+- `{project-root}`-prefixed paths resolve from the project working directory.
+- `{skill-name}` resolves to the skill directory's basename.
+
+## On Activation
+
+### Step 1: Resolve the Workflow Block
+
+Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow`
+
+**If the script fails**, resolve the `workflow` block yourself by reading these three files in base → team → user order and applying the same structural merge rules as the resolver:
+
+1. `{skill-root}/customize.toml` — defaults
+2. `{project-root}/_bmad/custom/{skill-name}.toml` — team overrides
+3. `{project-root}/_bmad/custom/{skill-name}.user.toml` — personal overrides
+
+Any missing file is skipped. Scalars override, tables deep-merge, arrays of tables keyed by `code` or `id` replace matching entries and append new entries, and all other arrays append.
+
+### Step 2: Execute Prepend Steps
+
+Execute each entry in `{workflow.activation_steps_prepend}` in order before proceeding.
+
+### Step 3: Load Persistent Facts
+
+Treat every entry in `{workflow.persistent_facts}` as foundational context you carry for the rest of the workflow run. Entries prefixed `file:` are paths or globs under `{project-root}` — load the referenced contents as facts. All other entries are facts verbatim.
+
+### Step 4: Load Config
+
+Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
+
+- `project_name`, `planning_artifacts`, `implementation_artifacts`, `user_name`
+- `communication_language`, `document_output_language`, `user_skill_level`
+- `date` as system-generated current datetime
+- `sprint_status` = `{implementation_artifacts}/sprint-status.yaml`
+- `project_context` = `**/project-context.md` (load if exists)
+- CLAUDE.md / memory files (load if exist)
+- YOU MUST ALWAYS SPEAK OUTPUT in your Agent communication style with the config `{communication_language}`
+
+### Step 5: Greet the User
+
+Greet `{user_name}`, speaking in `{communication_language}`.
+
+### Step 6: Execute Append Steps
+
+Execute each entry in `{workflow.activation_steps_append}` in order.
+
+Activation is complete. Begin the workflow below.
+
+## WORKFLOW ARCHITECTURE
+
+This uses **step-file architecture** for disciplined execution:
+
+- **Micro-file Design**: Each step is self-contained and followed exactly
+- **Just-In-Time Loading**: Only load the current step file
+- **Sequential Enforcement**: Complete steps in order, no skipping
+- **State Tracking**: Persist progress via in-memory variables
+- **Append-Only Building**: Build artifacts incrementally
+
+### Step Processing Rules
+
+1. **READ COMPLETELY**: Read the entire step file before acting
+2. **FOLLOW SEQUENCE**: Execute sections in order
+3. **WAIT FOR INPUT**: Halt at checkpoints and wait for human
+4. **LOAD NEXT**: When directed, read fully and follow the next step file
+
+### Critical Rules (NO EXCEPTIONS)
+
+- **NEVER** load multiple step files simultaneously
+- **ALWAYS** read entire step file before execution
+- **NEVER** skip steps or optimize the sequence
+- **ALWAYS** follow the exact instructions in the step file
+- **ALWAYS** halt at checkpoints and wait for human input
+
+## FIRST STEP
+
+Read fully and follow: `./steps/step-01-gather-context.md`
@@ -0,0 +1,41 @@
+# DO NOT EDIT -- overwritten on every update.
+#
+# Workflow customization surface for bmad-code-review. Mirrors the
+# agent customization shape under the [workflow] namespace.
+
+[workflow]
+
+# --- Configurable below. Overrides merge per BMad structural rules: ---
+# scalars: override wins • arrays (persistent_facts, activation_steps_*): append
+# arrays-of-tables with `code`/`id`: replace matching items, append new ones.
+
+# Steps to run before the standard activation (config load, greet).
+# Overrides append. Use for pre-flight loads, compliance checks, etc.
+
+activation_steps_prepend = []
+
+# Steps to run after greet but before the workflow begins.
+# Overrides append. Use for context-heavy setup that should happen
+# once the user has been acknowledged.
+
+activation_steps_append = []
+
+# Persistent facts the workflow keeps in mind for the whole run
+# (standards, compliance constraints, stylistic guardrails).
+# Distinct from the runtime memory sidecar — these are static context
+# loaded on activation. Overrides append.
+#
+# Each entry is either:
+# - a literal sentence, e.g. "All stories must include testable acceptance criteria."
+# - a file reference prefixed with `file:`, e.g. "file:{project-root}/docs/standards.md"
+#   (glob patterns are supported; the file's contents are loaded and treated as facts).
+
+persistent_facts = [
+  "file:{project-root}/**/project-context.md",
+]
+
+# Scalar: executed when the workflow reaches its final step,
+# after review findings are presented and sprint status is synced. Override wins.
+# Leave empty for no custom post-completion behavior.
+
+on_complete = ""
@@ -124,3 +124,9 @@ Present the user with follow-up options:
 > 3. **Done** — end the workflow
 
 **HALT** — I am waiting for your choice. Do not proceed until the user selects an option.
+
+## On Complete
+
+Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow.on_complete`
+
+If the resolved `workflow.on_complete` is non-empty, follow it as the final terminal instruction before exiting.
@@ -1,55 +0,0 @@
----
-main_config: '{project-root}/_bmad/bmm/config.yaml'
----
-
-# Code Review Workflow
-
-**Goal:** Review code changes adversarially using parallel review layers and structured triage.
-
-**Your Role:** You are an elite code reviewer. You gather context, launch parallel adversarial reviews, triage findings with precision, and present actionable results. No noise, no filler.
-
-
-## WORKFLOW ARCHITECTURE
-
-This uses **step-file architecture** for disciplined execution:
-
-- **Micro-file Design**: Each step is self-contained and followed exactly
-- **Just-In-Time Loading**: Only load the current step file
-- **Sequential Enforcement**: Complete steps in order, no skipping
-- **State Tracking**: Persist progress via in-memory variables
-- **Append-Only Building**: Build artifacts incrementally
-
-### Step Processing Rules
-
-1. **READ COMPLETELY**: Read the entire step file before acting
-2. **FOLLOW SEQUENCE**: Execute sections in order
-3. **WAIT FOR INPUT**: Halt at checkpoints and wait for human
-4. **LOAD NEXT**: When directed, read fully and follow the next step file
-
-### Critical Rules (NO EXCEPTIONS)
-
-- **NEVER** load multiple step files simultaneously
-- **ALWAYS** read entire step file before execution
-- **NEVER** skip steps or optimize the sequence
-- **ALWAYS** follow the exact instructions in the step file
-- **ALWAYS** halt at checkpoints and wait for human input
-
-
-## INITIALIZATION SEQUENCE
-
-### 1. Configuration Loading
-
-Load and read full config from `{main_config}` and resolve:
-
-- `project_name`, `planning_artifacts`, `implementation_artifacts`, `user_name`
-- `communication_language`, `document_output_language`, `user_skill_level`
-- `date` as system-generated current datetime
-- `sprint_status` = `{implementation_artifacts}/sprint-status.yaml`
-- `project_context` = `**/project-context.md` (load if exists)
-- CLAUDE.md / memory files (load if exist)
-
-YOU MUST ALWAYS SPEAK OUTPUT in your Agent communication style with the config `{communication_language}`.
-
-### 2. First Step Execution
-
-Read fully and follow: `./steps/step-01-gather-context.md` to begin the workflow.
@@ -302,6 +302,18 @@ Activation is complete. Begin the workflow below.
 processes - **Integration Patterns:** External service integrations, data flows <action>Extract any story-specific requirements that the
 developer MUST follow</action>
 <action>Identify any architectural decisions that override previous patterns</action>
+
+<!-- Read existing code being modified — non-negotiable -->
+<critical>📂 READ FILES BEING MODIFIED — skipping this is the primary cause of implementation failures and review cycles</critical>
+<action>From the architecture directory structure, identify every file marked UPDATE (not NEW) that this story will touch</action>
+<action>Read each relevant UPDATE file completely. For each one, document in dev notes:
+- Current state: what it does today (state machine, API calls, data shapes, existing behaviors)
+- What this story changes: the specific sections or behaviors being modified
+- What must be preserved: existing interactions and behaviors the story must not break
+</action>
+<critical>A story implementation must leave the system working end-to-end — not just satisfy its stated ACs.
+If a behavior is required for the feature to work correctly in the existing system, it is a requirement
+whether or not it is explicitly written in the story. The dev agent owns this.</critical>
 </step>
 
 <step n="4" goal="Web research for latest technical specifics">
@ -3,4 +3,483 @@ name: bmad-dev-story
|
||||||
description: 'Execute story implementation following a context filled story spec file. Use when the user says "dev this story [story file]" or "implement the next story in the sprint plan"'
|
description: 'Execute story implementation following a context filled story spec file. Use when the user says "dev this story [story file]" or "implement the next story in the sprint plan"'
|
||||||
---
|
---
|
||||||
|
|
||||||
Follow the instructions in ./workflow.md.
|
# Dev Story Workflow
|
||||||
|
|
||||||
|
**Goal:** Execute story implementation following a context filled story spec file.
|
||||||
|
|
||||||
|
**Your Role:** Developer implementing the story.
|
||||||
|
- Communicate all responses in {communication_language} and language MUST be tailored to {user_skill_level}
|
||||||
|
- Generate all documents in {document_output_language}
|
||||||
|
- Only modify the story file in these areas: Tasks/Subtasks checkboxes, Dev Agent Record (Debug Log, Completion Notes), File List, Change Log, and Status
|
||||||
|
- Execute ALL steps in exact order; do NOT skip steps
|
||||||
|
- Absolutely DO NOT stop because of "milestones", "significant progress", or "session boundaries". Continue in a single execution until the story is COMPLETE (all ACs satisfied and all tasks/subtasks checked) UNLESS a HALT condition is triggered or the USER gives other instruction.
|
||||||
|
- Do NOT schedule a "next session" or request review pauses unless a HALT condition applies. Only Step 9 decides completion.
|
||||||
|
- User skill level ({user_skill_level}) affects conversation style ONLY, not code updates.
|
||||||
|
|
||||||
|
## Conventions
|
||||||
|
|
||||||
|
- Bare paths (e.g. `steps/step-01-init.md`) resolve from the skill root.
|
||||||
|
- `{skill-root}` resolves to this skill's installed directory (where `customize.toml` lives).
|
||||||
|
- `{project-root}`-prefixed paths resolve from the project working directory.
|
||||||
|
- `{skill-name}` resolves to the skill directory's basename.
|
||||||
|
|
||||||
|
## On Activation
|
||||||
|
|
||||||
|
### Step 1: Resolve the Workflow Block
|
||||||
|
|
||||||
|
Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow`
|
||||||
|
|
||||||
|
**If the script fails**, resolve the `workflow` block yourself by reading these three files in base → team → user order and applying the same structural merge rules as the resolver:
|
||||||
|
|
||||||
|
1. `{skill-root}/customize.toml` — defaults
|
||||||
|
2. `{project-root}/_bmad/custom/{skill-name}.toml` — team overrides
|
||||||
|
3. `{project-root}/_bmad/custom/{skill-name}.user.toml` — personal overrides
|
||||||
|
|
||||||
|
Any missing file is skipped. Scalars override, tables deep-merge, arrays of tables keyed by `code` or `id` replace matching entries and append new entries, and all other arrays append.
|
||||||
|
|
||||||
|
### Step 2: Execute Prepend Steps
|
||||||
|
|
||||||
|
Execute each entry in `{workflow.activation_steps_prepend}` in order before proceeding.
|
||||||
|
|
||||||
|
### Step 3: Load Persistent Facts
|
||||||
|
|
||||||
|
Treat every entry in `{workflow.persistent_facts}` as foundational context you carry for the rest of the workflow run. Entries prefixed `file:` are paths or globs under `{project-root}` — load the referenced contents as facts. All other entries are facts verbatim.
|
||||||
|
|
||||||
|
### Step 4: Load Config
|
||||||
|
|
||||||
|
Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
|
||||||
|
|
||||||
|
- `project_name`, `user_name`
|
||||||
|
- `communication_language`, `document_output_language`
|
||||||
|
- `user_skill_level`
|
||||||
|
- `implementation_artifacts`
|
||||||
|
- `date` as system-generated current datetime
|
||||||
|
|
||||||
|
### Step 5: Greet the User
|
||||||
|
|
||||||
|
Greet `{user_name}`, speaking in `{communication_language}`.
|
||||||
|
|
||||||
|
### Step 6: Execute Append Steps
|
||||||
|
|
||||||
|
Execute each entry in `{workflow.activation_steps_append}` in order.
|
||||||
|
|
||||||
|
Activation is complete. Begin the workflow below.
|
||||||
|
|
||||||
|
## Paths
|
||||||
|
|
||||||
|
- `story_file` = `` (explicit story path; auto-discovered if empty)
|
||||||
|
- `sprint_status` = `{implementation_artifacts}/sprint-status.yaml`
|
||||||
|
|
||||||
|
## Execution
|
||||||
|
|
||||||
|
<workflow>
|
||||||
|
<critical>Communicate all responses in {communication_language} and language MUST be tailored to {user_skill_level}</critical>
|
||||||
|
<critical>Generate all documents in {document_output_language}</critical>
|
||||||
|
<critical>Only modify the story file in these areas: Tasks/Subtasks checkboxes, Dev Agent Record (Debug Log, Completion Notes), File List,
|
||||||
|
Change Log, and Status</critical>
|
||||||
|
<critical>Execute ALL steps in exact order; do NOT skip steps</critical>
|
||||||
|
<critical>Absolutely DO NOT stop because of "milestones", "significant progress", or "session boundaries". Continue in a single execution
|
||||||
|
until the story is COMPLETE (all ACs satisfied and all tasks/subtasks checked) UNLESS a HALT condition is triggered or the USER gives
|
||||||
|
other instruction.</critical>
|
||||||
|
<critical>Do NOT schedule a "next session" or request review pauses unless a HALT condition applies. Only Step 9 decides completion.</critical>
|
||||||
|
<critical>User skill level ({user_skill_level}) affects conversation style ONLY, not code updates.</critical>
|
||||||
|
|
||||||
|
<step n="1" goal="Find next ready story and load it" tag="sprint-status">
|
||||||
|
<check if="{{story_path}} is provided">
|
||||||
|
<action>Use {{story_path}} directly</action>
|
||||||
|
<action>Read COMPLETE story file</action>
|
||||||
|
<action>Extract story_key from filename or metadata</action>
|
||||||
|
<goto anchor="task_check" />
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<!-- Sprint-based story discovery -->
|
||||||
|
<check if="{{sprint_status}} file exists">
|
||||||
|
<critical>MUST read COMPLETE sprint-status.yaml file from start to end to preserve order</critical>
|
||||||
|
<action>Load the FULL file: {{sprint_status}}</action>
|
||||||
|
<action>Read ALL lines from beginning to end - do not skip any content</action>
|
||||||
|
<action>Parse the development_status section completely to understand story order</action>
|
||||||
|
|
||||||
|
<action>Find the FIRST story (by reading in order from top to bottom) where:
|
||||||
|
- Key matches pattern: number-number-name (e.g., "1-2-user-auth")
|
||||||
|
- NOT an epic key (epic-X) or retrospective (epic-X-retrospective)
|
||||||
|
- Status value equals "ready-for-dev"
|
||||||
|
</action>
|
||||||
|
|
||||||
|
<check if="no ready-for-dev or in-progress story found">
|
||||||
|
<output>📋 No ready-for-dev stories found in sprint-status.yaml
|
||||||
|
|
||||||
|
**Current Sprint Status:** {{sprint_status_summary}}
|
||||||
|
|
||||||
|
**What would you like to do?**
|
||||||
|
1. Run `create-story` to create next story from epics with comprehensive context
|
||||||
|
2. Run `*validate-create-story` to improve existing stories before development (recommended quality check)
|
||||||
|
3. Specify a particular story file to develop (provide full path)
|
||||||
|
4. Check {{sprint_status}} file to see current sprint status
|
||||||
|
|
||||||
|
💡 **Tip:** Stories in `ready-for-dev` may not have been validated. Consider running `validate-create-story` first for a quality
|
||||||
|
check.
|
||||||
|
</output>
|
||||||
|
<ask>Choose option [1], [2], [3], or [4], or specify story file path:</ask>
|
||||||
|
|
||||||
|
<check if="user chooses '1'">
|
||||||
|
<action>HALT - Run create-story to create next story</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="user chooses '2'">
|
||||||
|
<action>HALT - Run validate-create-story to improve existing stories</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="user chooses '3'">
|
||||||
|
<ask>Provide the story file path to develop:</ask>
|
||||||
|
<action>Store user-provided story path as {{story_path}}</action>
|
||||||
|
<goto anchor="task_check" />
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="user chooses '4'">
|
||||||
|
<output>Loading {{sprint_status}} for detailed status review...</output>
|
||||||
|
<action>Display detailed sprint status analysis</action>
|
||||||
|
<action>HALT - User can review sprint status and provide story path</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="user provides story file path">
|
||||||
|
<action>Store user-provided story path as {{story_path}}</action>
|
||||||
|
<goto anchor="task_check" />
|
||||||
|
</check>
|
||||||
|
</check>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<!-- Non-sprint story discovery -->
|
||||||
|
<check if="{{sprint_status}} file does NOT exist">
|
||||||
|
<action>Search {implementation_artifacts} for stories directly</action>
|
||||||
|
<action>Find stories with "ready-for-dev" status in files</action>
|
||||||
|
<action>Look for story files matching pattern: *-*-*.md</action>
|
||||||
|
<action>Read each candidate story file to check Status section</action>
|
||||||
|
|
||||||
|
<check if="no ready-for-dev stories found in story files">
|
||||||
|
<output>📋 No ready-for-dev stories found
|
||||||
|
|
||||||
|
**Available Options:**
|
||||||
|
1. Run `create-story` to create next story from epics with comprehensive context
|
||||||
|
2. Run `*validate-create-story` to improve existing stories
|
||||||
|
3. Specify which story to develop
|
||||||
|
</output>
|
||||||
|
<ask>What would you like to do? Choose option [1], [2], or [3]:</ask>
|
||||||
|
|
||||||
|
<check if="user chooses '1'">
|
||||||
|
<action>HALT - Run create-story to create next story</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="user chooses '2'">
|
||||||
|
<action>HALT - Run validate-create-story to improve existing stories</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="user chooses '3'">
|
||||||
|
<ask>It's unclear what story you want developed. Please provide the full path to the story file:</ask>
|
||||||
|
<action>Store user-provided story path as {{story_path}}</action>
|
||||||
|
<action>Continue with provided story file</action>
|
||||||
|
</check>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="ready-for-dev story found in files">
|
||||||
|
<action>Use discovered story file and extract story_key</action>
|
||||||
|
</check>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<action>Store the found story_key (e.g., "1-2-user-authentication") for later status updates</action>
|
||||||
|
<action>Find matching story file in {implementation_artifacts} using story_key pattern: {{story_key}}.md</action>
|
||||||
|
<action>Read COMPLETE story file from discovered path</action>
|
||||||
|
|
||||||
|
<anchor id="task_check" />
|
||||||
|
|
||||||
|
<action>Parse sections: Story, Acceptance Criteria, Tasks/Subtasks, Dev Notes, Dev Agent Record, File List, Change Log, Status</action>
|
||||||
|
|
||||||
|
<action>Load comprehensive context from story file's Dev Notes section</action>
|
||||||
|
<action>Extract developer guidance from Dev Notes: architecture requirements, previous learnings, technical specifications</action>
|
||||||
|
<action>Use enhanced story context to inform implementation decisions and approaches</action>
|
||||||
|
|
||||||
|
<action>Identify first incomplete task (unchecked [ ]) in Tasks/Subtasks</action>
|
||||||
|
|
||||||
|
<action if="no incomplete tasks">
|
||||||
|
<goto step="9">Completion sequence</goto>
|
||||||
|
</action>
|
||||||
|
<action if="story file inaccessible">HALT: "Cannot develop story without access to story file"</action>
|
||||||
|
<action if="incomplete task or subtask requirements ambiguous">ASK user to clarify or HALT</action>
|
||||||
|
</step>
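
A minimal sketch of Step 1's ordered scan, assuming a typical sprint-status.yaml shape and PyYAML availability; this is illustrative only, not part of the workflow engine:

```python
import re
import yaml  # assumption: PyYAML is installed

# Read the COMPLETE file; loaded dicts preserve the file's key order.
with open("sprint-status.yaml") as f:
    data = yaml.safe_load(f)

# First key shaped like number-number-name (e.g. "1-2-user-auth") that is
# not an epic/retrospective entry and is marked ready-for-dev.
story_key = next(
    (key for key, status in data.get("development_status", {}).items()
     if re.match(r"^\d+-\d+-", key)
     and not key.startswith("epic-")
     and status == "ready-for-dev"),
    None,
)
```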

<step n="2" goal="Load project context and story information">
<critical>Load all available context to inform implementation</critical>

<action>Load {project_context} for coding standards and project-wide patterns (if exists)</action>
<action>Parse sections: Story, Acceptance Criteria, Tasks/Subtasks, Dev Notes, Dev Agent Record, File List, Change Log, Status</action>
<action>Load comprehensive context from story file's Dev Notes section</action>
<action>Extract developer guidance from Dev Notes: architecture requirements, previous learnings, technical specifications</action>
<action>Use enhanced story context to inform implementation decisions and approaches</action>
<output>✅ **Context Loaded**
Story and project context available for implementation
</output>
</step>

<step n="3" goal="Detect review continuation and extract review context">
<critical>Determine if this is a fresh start or continuation after code review</critical>

<action>Check if "Senior Developer Review (AI)" section exists in the story file</action>
<action>Check if "Review Follow-ups (AI)" subsection exists under Tasks/Subtasks</action>

<check if="Senior Developer Review section exists">
<action>Set review_continuation = true</action>
<action>Extract from "Senior Developer Review (AI)" section:
- Review outcome (Approve/Changes Requested/Blocked)
- Review date
- Total action items with checkboxes (count checked vs unchecked)
- Severity breakdown (High/Med/Low counts)
</action>
<action>Count unchecked [ ] review follow-up tasks in "Review Follow-ups (AI)" subsection</action>
<action>Store list of unchecked review items as {{pending_review_items}}</action>

<output>⏯️ **Resuming Story After Code Review** ({{review_date}})

**Review Outcome:** {{review_outcome}}
**Action Items:** {{unchecked_review_count}} remaining to address
**Priorities:** {{high_count}} High, {{med_count}} Medium, {{low_count}} Low

**Strategy:** Will prioritize review follow-up tasks (marked [AI-Review]) before continuing with regular tasks.
</output>
</check>

<check if="Senior Developer Review section does NOT exist">
<action>Set review_continuation = false</action>
<action>Set {{pending_review_items}} = empty</action>

<output>🚀 **Starting Fresh Implementation**

Story: {{story_key}}
Story Status: {{current_status}}
First incomplete task: {{first_task_description}}
</output>
</check>
</step>

<step n="4" goal="Mark story in-progress" tag="sprint-status">
<check if="{{sprint_status}} file exists">
<action>Load the FULL file: {{sprint_status}}</action>
<action>Read all development_status entries to find {{story_key}}</action>
<action>Get current status value for development_status[{{story_key}}]</action>

<check if="current status == 'ready-for-dev' OR review_continuation == true">
<action>Update the story in the sprint status report to = "in-progress"</action>
<action>Update last_updated field to current date</action>
<output>🚀 Starting work on story {{story_key}}
Status updated: ready-for-dev → in-progress
</output>
</check>

<check if="current status == 'in-progress'">
<output>⏯️ Resuming work on story {{story_key}}
Story is already marked in-progress
</output>
</check>

<check if="current status is neither ready-for-dev nor in-progress">
<output>⚠️ Unexpected story status: {{current_status}}
Expected ready-for-dev or in-progress. Continuing anyway...
</output>
</check>

<action>Store {{current_sprint_status}} for later use</action>
</check>

<check if="{{sprint_status}} file does NOT exist">
<output>ℹ️ No sprint status file exists - story progress will be tracked in story file only</output>
<action>Set {{current_sprint_status}} = "no-sprint-tracking"</action>
</check>
</step>

<step n="5" goal="Implement task following red-green-refactor cycle">
<critical>FOLLOW THE STORY FILE TASKS/SUBTASKS SEQUENCE EXACTLY AS WRITTEN - NO DEVIATION</critical>

<action>Review the current task/subtask from the story file - this is your authoritative implementation guide</action>
<action>Plan implementation following red-green-refactor cycle</action>

<!-- RED PHASE -->
<action>Write FAILING tests first for the task/subtask functionality</action>
<action>Confirm tests fail before implementation - this validates test correctness</action>

<!-- GREEN PHASE -->
<action>Implement MINIMAL code to make tests pass</action>
<action>Run tests to confirm they now pass</action>
<action>Handle error conditions and edge cases as specified in task/subtask</action>

<!-- REFACTOR PHASE -->
<action>Improve code structure while keeping tests green</action>
<action>Ensure code follows architecture patterns and coding standards from Dev Notes</action>

<action>Document technical approach and decisions in Dev Agent Record → Implementation Plan</action>

<action if="new dependencies required beyond story specifications">HALT: "Additional dependencies need user approval"</action>
<action if="3 consecutive implementation failures occur">HALT and request guidance</action>
<action if="required configuration is missing">HALT: "Cannot proceed without necessary configuration files"</action>

<critical>NEVER implement anything not mapped to a specific task/subtask in the story file</critical>
<critical>NEVER proceed to next task until current task/subtask is complete AND tests pass</critical>
<critical>Execute continuously without pausing until all tasks/subtasks are complete or explicit HALT condition</critical>
<critical>Do NOT propose to pause for review until Step 9 completion gates are satisfied</critical>
</step>
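
As a concrete illustration of the red-green-refactor loop in Step 5, here is a self-contained, pytest-runnable sketch for a hypothetical "validate usernames" subtask; the function name and length bounds are invented for the example:

```python
# REFACTOR: named bounds instead of magic numbers, kept after tests stayed green.
MIN_LEN, MAX_LEN = 3, 32

def is_valid_username(name: str) -> bool:
    # GREEN: the minimal code that makes the failing tests below pass.
    return MIN_LEN <= len(name) <= MAX_LEN

# RED: these tests were written first and observed to fail before the
# function existed, which validates the tests themselves.
def test_rejects_short_usernames():
    assert is_valid_username("ab") is False

def test_accepts_normal_usernames():
    assert is_valid_username("bmad_user") is True
```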

<step n="6" goal="Author comprehensive tests">
<action>Create unit tests for business logic and core functionality introduced/changed by the task</action>
<action>Add integration tests for component interactions specified in story requirements</action>
<action>Include end-to-end tests for critical user flows when story requirements demand them</action>
<action>Cover edge cases and error handling scenarios identified in story Dev Notes</action>
</step>

<step n="7" goal="Run validations and tests">
<action>Determine how to run tests for this repo (infer test framework from project structure)</action>
<action>Run all existing tests to ensure no regressions</action>
<action>Run the new tests to verify implementation correctness</action>
<action>Run linting and code quality checks if configured in project</action>
<action>Validate implementation meets ALL story acceptance criteria; enforce quantitative thresholds explicitly</action>
<action if="regression tests fail">STOP and fix before continuing - identify breaking changes immediately</action>
<action if="new tests fail">STOP and fix before continuing - ensure implementation correctness</action>
</step>
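
The "infer the test framework" action in Step 7 can be pictured with a small sketch; the marker files and commands below are common conventions, not an exhaustive or authoritative mapping:

```python
import json
import os

def infer_test_command(root: str = ".") -> str | None:
    """Best-effort guess at how to run this repo's tests."""
    pkg = os.path.join(root, "package.json")
    if os.path.exists(pkg):
        with open(pkg) as f:
            if "test" in json.load(f).get("scripts", {}):
                return "npm test"
    if any(os.path.exists(os.path.join(root, p))
           for p in ("pytest.ini", "pyproject.toml", "setup.cfg")):
        return "python3 -m pytest"
    if os.path.exists(os.path.join(root, "Cargo.toml")):
        return "cargo test"
    if os.path.exists(os.path.join(root, "go.mod")):
        return "go test ./..."
    return None  # fall back to asking the user
```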

<step n="8" goal="Validate and mark task complete ONLY when fully done">
<critical>NEVER mark a task complete unless ALL conditions are met - NO LYING OR CHEATING</critical>

<!-- VALIDATION GATES -->
<action>Verify ALL tests for this task/subtask ACTUALLY EXIST and PASS 100%</action>
<action>Confirm implementation matches EXACTLY what the task/subtask specifies - no extra features</action>
<action>Validate that ALL acceptance criteria related to this task are satisfied</action>
<action>Run full test suite to ensure NO regressions introduced</action>

<!-- REVIEW FOLLOW-UP HANDLING -->
<check if="task is review follow-up (has [AI-Review] prefix)">
<action>Extract review item details (severity, description, related AC/file)</action>
<action>Add to resolution tracking list: {{resolved_review_items}}</action>

<!-- Mark task in Review Follow-ups section -->
<action>Mark task checkbox [x] in "Tasks/Subtasks → Review Follow-ups (AI)" section</action>

<!-- CRITICAL: Also mark corresponding action item in review section -->
<action>Find matching action item in "Senior Developer Review (AI) → Action Items" section by matching description</action>
<action>Mark that action item checkbox [x] as resolved</action>

<action>Add to Dev Agent Record → Completion Notes: "✅ Resolved review finding [{{severity}}]: {{description}}"</action>
</check>

<!-- ONLY MARK COMPLETE IF ALL VALIDATION GATES PASS -->
<check if="ALL validation gates pass AND tests ACTUALLY exist and pass">
<action>ONLY THEN mark the task (and subtasks) checkbox with [x]</action>
<action>Update File List section with ALL new, modified, or deleted files (paths relative to repo root)</action>
<action>Add completion notes to Dev Agent Record summarizing what was ACTUALLY implemented and tested</action>
</check>

<check if="ANY validation fails">
<action>DO NOT mark task complete - fix issues first</action>
<action>HALT if unable to fix validation failures</action>
</check>

<check if="review_continuation == true and {{resolved_review_items}} is not empty">
<action>Count total resolved review items in this session</action>
<action>Add Change Log entry: "Addressed code review findings - {{resolved_count}} items resolved (Date: {{date}})"</action>
</check>

<action>Save the story file</action>
<action>Determine if more incomplete tasks remain</action>
<action if="more tasks remain">
<goto step="5">Next task</goto>
</action>
<action if="no tasks remain">
<goto step="9">Completion</goto>
</action>
</step>

<step n="9" goal="Story completion and mark for review" tag="sprint-status">
<action>Verify ALL tasks and subtasks are marked [x] (re-scan the story document now)</action>
<action>Run the full regression suite (do not skip)</action>
<action>Confirm File List includes every changed file</action>
<action>Execute enhanced definition-of-done validation</action>
<action>Update the story Status to: "review"</action>

<!-- Enhanced Definition of Done Validation -->
<action>Validate definition-of-done checklist with essential requirements:
- All tasks/subtasks marked complete with [x]
- Implementation satisfies every Acceptance Criterion
- Unit tests for core functionality added/updated
- Integration tests for component interactions added when required
- End-to-end tests for critical flows added when story demands them
- All tests pass (no regressions, new tests successful)
- Code quality checks pass (linting, static analysis if configured)
- File List includes every new/modified/deleted file (relative paths)
- Dev Agent Record contains implementation notes
- Change Log includes summary of changes
- Only permitted story sections were modified
</action>

<!-- Mark story ready for review - sprint status conditional -->
<check if="{{sprint_status}} file exists AND {{current_sprint_status}} != 'no-sprint-tracking'">
<action>Load the FULL file: {{sprint_status}}</action>
<action>Find development_status key matching {{story_key}}</action>
<action>Verify current status is "in-progress" (expected previous state)</action>
<action>Update development_status[{{story_key}}] = "review"</action>
<action>Update last_updated field to current date</action>
<action>Save file, preserving ALL comments and structure including STATUS DEFINITIONS</action>
<output>✅ Story status updated to "review" in sprint-status.yaml</output>
</check>

<check if="{{sprint_status}} file does NOT exist OR {{current_sprint_status}} == 'no-sprint-tracking'">
<output>ℹ️ Story status updated to "review" in story file (no sprint tracking configured)</output>
</check>

<check if="story key not found in sprint status">
<output>⚠️ Story file updated, but sprint-status update failed: {{story_key}} not found

Story status is set to "review" in file, but sprint-status.yaml may be out of sync.
</output>
</check>

<!-- Final validation gates -->
<action if="any task is incomplete">HALT - Complete remaining tasks before marking ready for review</action>
<action if="regression failures exist">HALT - Fix regression issues before completing</action>
<action if="File List is incomplete">HALT - Update File List with all changed files</action>
<action if="definition-of-done validation fails">HALT - Address DoD failures before completing</action>
</step>

<step n="10" goal="Completion communication and user support">
<action>Execute the enhanced definition-of-done checklist using the validation framework</action>
<action>Prepare a concise summary in Dev Agent Record → Completion Notes</action>

<action>Communicate to {user_name} that story implementation is complete and ready for review</action>
<action>Summarize key accomplishments: story ID, story key, title, key changes made, tests added, files modified</action>
<action>Provide the story file path and current status (now "review")</action>

<action>Based on {user_skill_level}, ask if user needs any explanations about:
- What was implemented and how it works
- Why certain technical decisions were made
- How to test or verify the changes
- Any patterns, libraries, or approaches used
- Anything else they'd like clarified
</action>

<check if="user asks for explanations">
<action>Provide clear, contextual explanations tailored to {user_skill_level}</action>
<action>Use examples and references to specific code when helpful</action>
</check>

<action>Once explanations are complete (or user indicates no questions), suggest logical next steps</action>
<action>Recommended next steps (flexible based on project setup):
- Review the implemented story and test the changes
- Verify all acceptance criteria are met
- Ensure deployment readiness if applicable
- Run `code-review` workflow for peer review
- Optional: If Test Architect module installed, run `/bmad:tea:automate` to expand guardrail tests
</action>

<output>💡 **Tip:** For best results, run `code-review` using a **different** LLM than the one that implemented this story.</output>
<check if="{{sprint_status}} file exists">
<action>Suggest checking {{sprint_status}} to see project progress</action>
</check>
<action>Remain flexible - allow user to choose their own path or ask for other assistance</action>
<action>Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow.on_complete` — if the resolved value is non-empty, follow it as the final terminal instruction before exiting.</action>
</step>

</workflow>
@ -0,0 +1,41 @@
# DO NOT EDIT -- overwritten on every update.
#
# Workflow customization surface for bmad-dev-story. Mirrors the
# agent customization shape under the [workflow] namespace.

[workflow]

# --- Configurable below. Overrides merge per BMad structural rules: ---
# scalars: override wins • arrays (persistent_facts, activation_steps_*): append
# arrays-of-tables with `code`/`id`: replace matching items, append new ones.

# Steps to run before the standard activation (config load, greet).
# Overrides append. Use for pre-flight loads, compliance checks, etc.
activation_steps_prepend = []

# Steps to run after greet but before the workflow begins.
# Overrides append. Use for context-heavy setup that should happen
# once the user has been acknowledged.
activation_steps_append = []

# Persistent facts the workflow keeps in mind for the whole run
# (standards, compliance constraints, stylistic guardrails).
# Distinct from the runtime memory sidecar — these are static context
# loaded on activation. Overrides append.
#
# Each entry is either:
# - a literal sentence, e.g. "All stories must include testable acceptance criteria."
# - a file reference prefixed with `file:`, e.g. "file:{project-root}/docs/standards.md"
#   (glob patterns are supported; the file's contents are loaded and treated as facts).
persistent_facts = [
  "file:{project-root}/**/project-context.md",
]

# Scalar: executed when the workflow reaches its final step,
# after the story implementation is complete and status is updated. Override wins.
# Leave empty for no custom post-completion behavior.
on_complete = ""
@ -1,450 +0,0 @@
# Dev Story Workflow

**Goal:** Execute story implementation following a context-filled story spec file.

**Your Role:** Developer implementing the story.

- Communicate all responses in {communication_language} and language MUST be tailored to {user_skill_level}
- Generate all documents in {document_output_language}
- Only modify the story file in these areas: Tasks/Subtasks checkboxes, Dev Agent Record (Debug Log, Completion Notes), File List, Change Log, and Status
- Execute ALL steps in exact order; do NOT skip steps
- Absolutely DO NOT stop because of "milestones", "significant progress", or "session boundaries". Continue in a single execution until the story is COMPLETE (all ACs satisfied and all tasks/subtasks checked) UNLESS a HALT condition is triggered or the USER gives other instruction.
- Do NOT schedule a "next session" or request review pauses unless a HALT condition applies. Only Step 6 decides completion.
- User skill level ({user_skill_level}) affects conversation style ONLY, not code updates.

---

## INITIALIZATION

### Configuration Loading

Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:

- `project_name`, `user_name`
- `communication_language`, `document_output_language`
- `user_skill_level`
- `implementation_artifacts`
- `date` as system-generated current datetime

### Paths

- `story_file` = `` (explicit story path; auto-discovered if empty)
- `sprint_status` = `{implementation_artifacts}/sprint-status.yaml`

### Context

- `project_context` = `**/project-context.md` (load if exists)

---
@ -0,0 +1,41 @@
# DO NOT EDIT -- overwritten on every update.
#
# Workflow customization surface for bmad-quick-dev. Mirrors the
# agent customization shape under the [workflow] namespace.

[workflow]

# --- Configurable below. Overrides merge per BMad structural rules: ---
# scalars: override wins • arrays (persistent_facts, activation_steps_*): append
# arrays-of-tables with `code`/`id`: replace matching items, append new ones.

# Steps to run before the standard activation (config load, greet).
# Overrides append. Use for pre-flight loads, compliance checks, etc.
activation_steps_prepend = []

# Steps to run after greet but before the workflow begins.
# Overrides append. Use for context-heavy setup that should happen
# once the user has been acknowledged.
activation_steps_append = []

# Persistent facts the workflow keeps in mind for the whole run
# (standards, compliance constraints, stylistic guardrails).
# Distinct from the runtime memory sidecar — these are static context
# loaded on activation. Overrides append.
#
# Each entry is either:
# - a literal sentence, e.g. "All stories must include testable acceptance criteria."
# - a file reference prefixed with `file:`, e.g. "file:{project-root}/docs/standards.md"
#   (glob patterns are supported; the file's contents are loaded and treated as facts).
persistent_facts = [
  "file:{project-root}/**/project-context.md",
]

# Scalar: executed when the workflow reaches its final step,
# after implementation is complete and explanations are provided. Override wins.
# Leave empty for no custom post-completion behavior.
on_complete = ""
@ -70,3 +70,9 @@ Display summary of your work to the user, including the commit hash if one was c
- Offer to push and/or create a pull request.

Workflow complete.

## On Complete

Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow.on_complete`

If the resolved `workflow.on_complete` is non-empty, follow it as the final terminal instruction before exiting.
@ -59,3 +59,9 @@ If version control is available and the tree is dirty, create a local commit wit
HALT and wait for human input.

Workflow complete.

## On Complete

Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow.on_complete`

If the resolved `workflow.on_complete` is non-empty, follow it as the final terminal instruction before exiting.
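
One plausible shape for this resolution step, sketched in Python (the agent would normally run the command directly; the paths below are placeholders standing in for {project-root} and {skill-root}):

```python
import subprocess

# Placeholder paths for illustration only.
result = subprocess.run(
    ["python3", "_bmad/scripts/resolve_customization.py",
     "--skill", "_bmad/skills/bmad-quick-dev",
     "--key", "workflow.on_complete"],
    capture_output=True, text=True, check=True,
)
on_complete = result.stdout.strip()
if on_complete:
    # Non-empty: follow it as the final terminal instruction before exiting.
    print(on_complete)
```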
@ -4,7 +4,6 @@
**CRITICAL:** If a step says "read fully and follow step-XX", you read and follow step-XX. No exceptions.

## READY FOR DEVELOPMENT STANDARD

A specification is "Ready for Development" when:

@ -14,7 +13,6 @@ A specification is "Ready for Development" when:
- **Testable**: All ACs use Given/When/Then.
- **Complete**: No placeholders or TBDs.

## SCOPE STANDARD

A specification should target a **single user-facing goal** within **900–1600 tokens**:

@ -25,6 +23,58 @@ A specification should target a **single user-facing goal** within **900–1600
- **900–1600 tokens**: Optimal range for LLM consumption. Below 900 risks ambiguity; above 1600 risks context-rot in implementation agents.
- **Neither limit is a gate.** Both are proposals with user override.

## Conventions

- Bare paths (e.g. `step-01-clarify-and-route.md`) resolve from the skill root.
- `{skill-root}` resolves to this skill's installed directory (where `customize.toml` lives).
- `{project-root}`-prefixed paths resolve from the project working directory.
- `{skill-name}` resolves to the skill directory's basename.
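
A small sketch of how these conventions could be applied to a single path; the function is illustrative, not the framework's actual resolver, and assumes `project_root`/`skill_root` are absolute paths:

```python
import os

def resolve_path(path: str, project_root: str, skill_root: str) -> str:
    """Apply the placeholder conventions above to one path string."""
    path = path.replace("{project-root}", project_root)
    path = path.replace("{skill-root}", skill_root)
    path = path.replace("{skill-name}", os.path.basename(skill_root))
    # Bare (still-relative) paths resolve from the skill root.
    if not os.path.isabs(path):
        path = os.path.join(skill_root, path)
    return path
```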
## On Activation

### Step 1: Resolve the Workflow Block

Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow`

**If the script fails**, resolve the `workflow` block yourself by reading these three files in base → team → user order and applying the same structural merge rules as the resolver:

1. `{skill-root}/customize.toml` — defaults
2. `{project-root}/_bmad/custom/{skill-name}.toml` — team overrides
3. `{project-root}/_bmad/custom/{skill-name}.user.toml` — personal overrides

Any missing file is skipped. Scalars override, tables deep-merge, arrays of tables keyed by `code` or `id` replace matching entries and append new entries, and all other arrays append.
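
A hedged sketch of that fallback merge; the function name is illustrative and this is not the resolver's real code, but it follows the rules stated above:

```python
def merge(base, override):
    """Scalars: override wins. Tables: deep-merge. Arrays of tables keyed by
    `code`/`id`: replace matching entries, append new ones. Other arrays: append."""
    if isinstance(base, dict) and isinstance(override, dict):
        out = dict(base)
        for key, value in override.items():
            out[key] = merge(base[key], value) if key in base else value
        return out
    if isinstance(base, list) and isinstance(override, list):
        def key_of(item):
            return (item.get("code") or item.get("id")) if isinstance(item, dict) else None
        if any(key_of(i) is not None for i in base + override):
            out = list(base)
            for item in override:
                hit = next((i for i, b in enumerate(out)
                            if key_of(b) is not None and key_of(b) == key_of(item)), None)
                if hit is None:
                    out.append(item)   # append new entry
                else:
                    out[hit] = item    # replace matching entry
            return out
        return base + override         # all other arrays append
    return override                    # scalars (and type mismatches): override wins
```

Applied left to right over base, then team, then user, e.g. `functools.reduce(merge, [base_cfg, team_cfg, user_cfg])`.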

### Step 2: Execute Prepend Steps

Execute each entry in `{workflow.activation_steps_prepend}` in order before proceeding.

### Step 3: Load Persistent Facts

Treat every entry in `{workflow.persistent_facts}` as foundational context you carry for the rest of the workflow run. Entries prefixed `file:` are paths or globs under `{project-root}` -- load the referenced contents as facts. All other entries are facts verbatim.
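
A minimal sketch of fact loading under these rules, assuming entries shaped like the defaults in `customize.toml`; variable names are illustrative:

```python
import glob
import os

persistent_facts = [
    "file:{project-root}/**/project-context.md",               # file reference (glob)
    "All stories must include testable acceptance criteria.",  # literal fact
]

project_root = os.getcwd()
facts = []
for entry in persistent_facts:
    if entry.startswith("file:"):
        pattern = entry[len("file:"):].replace("{project-root}", project_root)
        for path in sorted(glob.glob(pattern, recursive=True)):
            with open(path) as f:
                facts.append(f.read())  # the file's contents become facts
    else:
        facts.append(entry)             # literal sentences are facts verbatim
```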

### Step 4: Load Config

Load config from `{{.main_config}}` and resolve:

- `project_name`, `planning_artifacts`, `implementation_artifacts`, `user_name`
- `communication_language`, `document_output_language`, `user_skill_level`
- `date` as system-generated current datetime
- `sprint_status` = `{{.sprint_status}}`
- `project_context` = `**/project-context.md` (load if exists)
- CLAUDE.md / memory files (load if exist)
- YOU MUST ALWAYS SPEAK OUTPUT in your Agent communication style with the config `{{.communication_language}}`
- Language MUST be tailored to `{{.user_skill_level}}`
- Generate all documents in `{{.document_output_language}}`

### Step 5: Greet the User

Greet `{{.user_name}}`, speaking in `{{.communication_language}}`.

### Step 6: Execute Append Steps

Execute each entry in `{workflow.activation_steps_append}` in order.

Activation is complete. Begin the workflow below.

## WORKFLOW ARCHITECTURE
@ -51,22 +101,6 @@ This uses **step-file architecture** for disciplined execution:
- **ALWAYS** follow the exact instructions in the step file
- **ALWAYS** halt at checkpoints and wait for human input

## FIRST STEP
## INITIALIZATION SEQUENCE

### 1. Configuration Loading

Load and read full config from `{{.main_config}}` and resolve:

- `project_name`, `planning_artifacts`, `implementation_artifacts`, `user_name`
- `communication_language`, `document_output_language`, `user_skill_level`
- `date` as system-generated current datetime
- `sprint_status` = `{{.sprint_status}}`
- `project_context` = `**/project-context.md` (load if exists)
- CLAUDE.md / memory files (load if exist)

YOU MUST ALWAYS SPEAK OUTPUT in your Agent communication style with the config `{{.communication_language}}`.

### 2. First Step Execution

Read fully and follow: `./step-01-clarify-and-route.md` to begin the workflow.
@ -3,4 +3,297 @@ name: bmad-sprint-planning
description: 'Generate sprint status tracking from epics. Use when the user says "run sprint planning" or "generate sprint plan"'
---

Follow the instructions in ./workflow.md.
# Sprint Planning Workflow

**Goal:** Generate sprint status tracking from epics, detecting current story statuses and building a complete sprint-status.yaml file.

**Your Role:** You are a Developer generating and maintaining sprint tracking. Parse epic files, detect story statuses, and produce a structured sprint-status.yaml.

## Conventions

- Bare paths (e.g. `checklist.md`) resolve from the skill root.
- `{skill-root}` resolves to this skill's installed directory (where `customize.toml` lives).
- `{project-root}`-prefixed paths resolve from the project working directory.
- `{skill-name}` resolves to the skill directory's basename.

## On Activation

### Step 1: Resolve the Workflow Block

Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow`

**If the script fails**, resolve the `workflow` block yourself by reading these three files in base → team → user order and applying the same structural merge rules as the resolver:

1. `{skill-root}/customize.toml` — defaults
2. `{project-root}/_bmad/custom/{skill-name}.toml` — team overrides
3. `{project-root}/_bmad/custom/{skill-name}.user.toml` — personal overrides

Any missing file is skipped. Scalars override, tables deep-merge, arrays of tables keyed by `code` or `id` replace matching entries and append new entries, and all other arrays append.

### Step 2: Execute Prepend Steps

Execute each entry in `{workflow.activation_steps_prepend}` in order before proceeding.

### Step 3: Load Persistent Facts

Treat every entry in `{workflow.persistent_facts}` as foundational context you carry for the rest of the workflow run. Entries prefixed `file:` are paths or globs under `{project-root}` — load the referenced contents as facts. All other entries are facts verbatim.

### Step 4: Load Config

Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:

- `project_name`, `user_name`
- `communication_language`, `document_output_language`
- `implementation_artifacts`
- `planning_artifacts`
- `date` as system-generated current datetime
- YOU MUST ALWAYS SPEAK OUTPUT in your Agent communication style, using the config `{communication_language}`
- Generate all documents in `{document_output_language}`

### Step 5: Greet the User

Greet `{user_name}`, speaking in `{communication_language}`.

### Step 6: Execute Append Steps

Execute each entry in `{workflow.activation_steps_append}` in order.

Activation is complete. Begin the workflow below.

## Paths

- `tracking_system` = `file-system`
- `project_key` = `NOKEY`
- `story_location` = `{implementation_artifacts}`
- `story_location_absolute` = `{implementation_artifacts}`
- `epics_location` = `{planning_artifacts}`
- `epics_pattern` = `*epic*.md`
- `status_file` = `{implementation_artifacts}/sprint-status.yaml`

## Input Files

| Input | Path | Load Strategy |
|-------|------|---------------|
| Epics | `{planning_artifacts}/*epic*.md` (whole) or `{planning_artifacts}/*epic*/*.md` (sharded) | FULL_LOAD |

## Execution

### Document Discovery - Full Epic Loading

**Strategy**: Sprint planning needs ALL epics and stories to build complete status tracking.

**Epic Discovery Process** (a sketch of the search order follows the fuzzy-matching note below):

1. **Search for whole document first** - Look for `epics.md`, `bmm-epics.md`, or any `*epic*.md` file
2. **Check for sharded version** - If the whole document is not found, look for `epics/index.md`
3. **If sharded version found**:
   - Read `index.md` to understand the document structure
   - Read ALL epic section files listed in the index (e.g., `epic-1.md`, `epic-2.md`, etc.)
   - Process all epics and their stories from the combined content
   - This ensures complete sprint status coverage
4. **Priority**: If both whole and sharded versions exist, use the whole document

**Fuzzy matching**: Be flexible with document names - users may use variations like `epics.md`, `bmm-epics.md`, `user-stories.md`, etc.
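
A sketch of that search order, under the assumption that sharded sections follow the `epic-*.md` naming used in the examples:

```python
# Sketch of the discovery order; paths and patterns come from the table above.
from pathlib import Path

def discover_epic_files(epics_location):
    root = Path(epics_location)
    whole = sorted(root.glob("*epic*.md"))
    if whole:                      # priority: the whole document wins
        return whole
    index = root / "epics" / "index.md"
    if index.exists():             # sharded version: index plus all section files
        return [index] + sorted((root / "epics").glob("epic-*.md"))
    return []                      # nothing found; fall back to fuzzy matching
```
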
<workflow>

<step n="1" goal="Parse epic files and extract all work items">
<action>Load {project_context} for project-wide patterns and conventions (if exists)</action>
<action>Communicate in {communication_language} with {user_name}</action>
<action>Look for all files matching `{epics_pattern}` in {epics_location}</action>
<action>Could be a single `epics.md` file or multiple `epic-1.md`, `epic-2.md` files</action>

<action>For each epic file found, extract:</action>

- Epic numbers from headers like `## Epic 1:` or `## Epic 2:`
- Story IDs and titles from patterns like `### Story 1.1: User Authentication`
- Convert story format from `Epic.Story: Title` to a kebab-case key: `epic-story-title`

**Story ID Conversion Rules** (a sketch follows the list):

- Original: `### Story 1.1: User Authentication`
- Replace the period with a dash: `1-1`
- Convert the title to kebab-case: `user-authentication`
- Final key: `1-1-user-authentication`
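
A minimal sketch of the conversion (the header regex is an assumption about how story headers are formatted):

```python
# Convert "### Story <epic>.<story>: <Title>" to a kebab-case key.
import re

def story_key(header):
    m = re.match(r"###\s*Story\s+(\d+)\.(\d+):\s*(.+)", header)
    if not m:
        return None
    epic, story, title = m.groups()
    kebab = re.sub(r"[^a-z0-9]+", "-", title.lower()).strip("-")
    return f"{epic}-{story}-{kebab}"

assert story_key("### Story 1.1: User Authentication") == "1-1-user-authentication"
```
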

<action>Build complete inventory of all epics and stories from all epic files</action>
</step>

<step n="2" goal="Build sprint status structure">
<action>For each epic found, create entries in this order:</action>

1. **Epic entry** - Key: `epic-{num}`, Default status: `backlog`
2. **Story entries** - Key: `{epic}-{story}-{title}`, Default status: `backlog`
3. **Retrospective entry** - Key: `epic-{num}-retrospective`, Default status: `optional`

**Example structure:**

```yaml
development_status:
  epic-1: backlog
  1-1-user-authentication: backlog
  1-2-account-management: backlog
  epic-1-retrospective: optional
```

</step>

<step n="3" goal="Apply intelligent status detection">
<action>For each story, detect current status by checking files:</action>

**Story file detection:**

- Check: `{story_location_absolute}/{story-key}.md` (e.g., `stories/1-1-user-authentication.md`)
- If it exists → upgrade status to at least `ready-for-dev`

**Preservation rule:**

- If an existing `{status_file}` has a more advanced status, preserve it
- Never downgrade status (e.g., don't change `done` to `ready-for-dev`)

**Status Flow Reference** (a detection sketch follows):

- Epic: `backlog` → `in-progress` → `done`
- Story: `backlog` → `ready-for-dev` → `in-progress` → `review` → `done`
- Retrospective: `optional` ↔ `done`
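
The preservation rule amounts to an upgrade-only merge. A sketch, assuming story statuses are ranked in the flow order above:

```python
# Upgrade-only detection: rank comes from the story flow; never move a status backwards.
from pathlib import Path

STORY_RANK = ["backlog", "ready-for-dev", "in-progress", "review", "done"]

def detect_story_status(story_key, story_location, existing="backlog"):
    # A story file on disk means the story is at least ready-for-dev.
    detected = "ready-for-dev" if (Path(story_location) / f"{story_key}.md").exists() else "backlog"
    return max(existing, detected, key=STORY_RANK.index)  # preservation rule
```
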
</step>

<step n="4" goal="Generate sprint status file">
<action>Create or update {status_file} with:</action>

**File Structure:**

```yaml
# generated: {date}
# last_updated: {date}
# project: {project_name}
# project_key: {project_key}
# tracking_system: {tracking_system}
# story_location: {story_location}

# STATUS DEFINITIONS:
# ==================
# Epic Status:
# - backlog: Epic not yet started
# - in-progress: Epic actively being worked on
# - done: All stories in epic completed
#
# Epic Status Transitions:
# - backlog → in-progress: Automatically when first story is created (via create-story)
# - in-progress → done: Manually when all stories reach 'done' status
#
# Story Status:
# - backlog: Story only exists in epic file
# - ready-for-dev: Story file created in stories folder
# - in-progress: Developer actively working on implementation
# - review: Ready for code review (via Dev's code-review workflow)
# - done: Story completed
#
# Retrospective Status:
# - optional: Can be completed but not required
# - done: Retrospective has been completed
#
# WORKFLOW NOTES:
# ===============
# - Epic transitions to 'in-progress' automatically when first story is created
# - Stories can be worked in parallel if team capacity allows
# - Developer typically creates next story after previous one is 'done' to incorporate learnings
# - Dev moves story to 'review', then runs code-review (fresh context, different LLM recommended)

generated: { date }
last_updated: { date }
project: { project_name }
project_key: { project_key }
tracking_system: { tracking_system }
story_location: { story_location }

development_status:
  # All epics, stories, and retrospectives in order
```

<action>Write the complete sprint status YAML to {status_file}</action>
<action>CRITICAL: Metadata appears TWICE - once as comments (#) for documentation, once as YAML key:value fields for parsing</action>
<action>Ensure all items are ordered: epic, its stories, its retrospective, next epic...</action>
</step>

<step n="5" goal="Validate and report">
<action>Perform validation checks (a sketch follows the list):</action>

- [ ] Every epic in epic files appears in {status_file}
- [ ] Every story in epic files appears in {status_file}
- [ ] Every epic has a corresponding retrospective entry
- [ ] No items in {status_file} that don't exist in epic files
- [ ] All status values are legal (match state machine definitions)
- [ ] File is valid YAML syntax
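
A sketch of how these checks could be automated (assumes PyYAML and the epic/story inventory from step 1; this is not part of the shipped scripts):

```python
# Cross-check the generated file against the inventory extracted from epic files.
import yaml  # PyYAML, assumed available

VALID = {"backlog", "ready-for-dev", "in-progress", "review", "done", "optional"}

def validate(status_file, epic_nums, story_keys):
    with open(status_file) as f:
        data = yaml.safe_load(f)           # raises on invalid YAML syntax
    status = data["development_status"]
    expected = (
        {f"epic-{e}" for e in epic_nums}
        | set(story_keys)
        | {f"epic-{e}-retrospective" for e in epic_nums}
    )
    missing = expected - status.keys()      # in epic files but not tracked
    orphaned = status.keys() - expected     # tracked but absent from epic files
    illegal = {k: v for k, v in status.items() if v not in VALID}
    return missing, orphaned, illegal
```
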

<action>Count totals:</action>

- Total epics: {{epic_count}}
- Total stories: {{story_count}}
- Epics in-progress: {{in_progress_count}}
- Stories done: {{done_count}}

<action>Display completion summary to {user_name} in {communication_language}:</action>

**Sprint Status Generated Successfully**

- **File Location:** {status_file}
- **Total Epics:** {{epic_count}}
- **Total Stories:** {{story_count}}
- **Epics In Progress:** {{in_progress_count}}
- **Stories Completed:** {{done_count}}

**Next Steps:**

1. Review the generated {status_file}
2. Use this file to track development progress
3. Agents will update statuses as they work
4. Re-run this workflow to refresh auto-detected statuses

<action>Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow.on_complete` — if the resolved value is non-empty, follow it as the final terminal instruction before exiting.</action>
</step>

</workflow>

## Additional Documentation

### Status State Machine

**Epic Status Flow:**

```
backlog → in-progress → done
```

- **backlog**: Epic not yet started
- **in-progress**: Epic actively being worked on (stories being created/implemented)
- **done**: All stories in epic completed

**Story Status Flow:**

```
backlog → ready-for-dev → in-progress → review → done
```

- **backlog**: Story only exists in epic file
- **ready-for-dev**: Story file created (e.g., `stories/1-3-plant-naming.md`)
- **in-progress**: Developer actively working
- **review**: Ready for code review (via Dev's code-review workflow)
- **done**: Completed

**Retrospective Status:**

```
optional ↔ done
```

- **optional**: Ready to be conducted but not required
- **done**: Finished

### Guidelines

1. **Epic Activation**: Mark epic as `in-progress` when starting work on its first story
2. **Sequential Default**: Stories are typically worked in order, but parallel work is supported
3. **Parallel Work Supported**: Multiple stories can be `in-progress` if team capacity allows
4. **Review Before Done**: Stories should pass through `review` before `done`
5. **Learning Transfer**: Developer typically creates next story after previous one is `done` to incorporate learnings

@ -0,0 +1,41 @@

# DO NOT EDIT -- overwritten on every update.
#
# Workflow customization surface for bmad-sprint-planning. Mirrors the
# agent customization shape under the [workflow] namespace.

[workflow]

# --- Configurable below. Overrides merge per BMad structural rules: ---
# scalars: override wins • arrays (persistent_facts, activation_steps_*): append
# arrays-of-tables with `code`/`id`: replace matching items, append new ones.

# Steps to run before the standard activation (config load, greet).
# Overrides append. Use for pre-flight loads, compliance checks, etc.

activation_steps_prepend = []

# Steps to run after greet but before the workflow begins.
# Overrides append. Use for context-heavy setup that should happen
# once the user has been acknowledged.

activation_steps_append = []

# Persistent facts the workflow keeps in mind for the whole run
# (standards, compliance constraints, stylistic guardrails).
# Distinct from the runtime memory sidecar — these are static context
# loaded on activation. Overrides append.
#
# Each entry is either:
# - a literal sentence, e.g. "All stories must include testable acceptance criteria."
# - a file reference prefixed with `file:`, e.g. "file:{project-root}/docs/standards.md"
#   (glob patterns are supported; the file's contents are loaded and treated as facts).

persistent_facts = [
  "file:{project-root}/**/project-context.md",
]

# Scalar: executed when the workflow reaches its final step,
# after sprint-status.yaml is generated and validated. Override wins.
# Leave empty for no custom post-completion behavior.

on_complete = ""

@ -1,263 +0,0 @@

# Sprint Planning Workflow

**Goal:** Generate sprint status tracking from epics, detecting current story statuses and building a complete sprint-status.yaml file.

**Your Role:** You are a Developer generating and maintaining sprint tracking. Parse epic files, detect story statuses, and produce a structured sprint-status.yaml.

---

## INITIALIZATION

### Configuration Loading

Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:

- `project_name`, `user_name`
- `communication_language`, `document_output_language`
- `implementation_artifacts`
- `planning_artifacts`
- `date` as system-generated current datetime
- YOU MUST ALWAYS SPEAK OUTPUT in your Agent communication style, using the config `{communication_language}`

### Paths

- `tracking_system` = `file-system`
- `project_key` = `NOKEY`
- `story_location` = `{implementation_artifacts}`
- `story_location_absolute` = `{implementation_artifacts}`
- `epics_location` = `{planning_artifacts}`
- `epics_pattern` = `*epic*.md`
- `status_file` = `{implementation_artifacts}/sprint-status.yaml`

### Input Files

| Input | Path | Load Strategy |
|-------|------|---------------|
| Epics | `{planning_artifacts}/*epic*.md` (whole) or `{planning_artifacts}/*epic*/*.md` (sharded) | FULL_LOAD |

### Context

- `project_context` = `**/project-context.md` (load if exists)

---

## EXECUTION

### Document Discovery - Full Epic Loading

**Strategy**: Sprint planning needs ALL epics and stories to build complete status tracking.

**Epic Discovery Process:**

1. **Search for whole document first** - Look for `epics.md`, `bmm-epics.md`, or any `*epic*.md` file
2. **Check for sharded version** - If the whole document is not found, look for `epics/index.md`
3. **If sharded version found**:
   - Read `index.md` to understand the document structure
   - Read ALL epic section files listed in the index (e.g., `epic-1.md`, `epic-2.md`, etc.)
   - Process all epics and their stories from the combined content
   - This ensures complete sprint status coverage
4. **Priority**: If both whole and sharded versions exist, use the whole document

**Fuzzy matching**: Be flexible with document names - users may use variations like `epics.md`, `bmm-epics.md`, `user-stories.md`, etc.

<workflow>

<step n="1" goal="Parse epic files and extract all work items">
<action>Load {project_context} for project-wide patterns and conventions (if exists)</action>
<action>Communicate in {communication_language} with {user_name}</action>
<action>Look for all files matching `{epics_pattern}` in {epics_location}</action>
<action>Could be a single `epics.md` file or multiple `epic-1.md`, `epic-2.md` files</action>

<action>For each epic file found, extract:</action>

- Epic numbers from headers like `## Epic 1:` or `## Epic 2:`
- Story IDs and titles from patterns like `### Story 1.1: User Authentication`
- Convert story format from `Epic.Story: Title` to a kebab-case key: `epic-story-title`

**Story ID Conversion Rules:**

- Original: `### Story 1.1: User Authentication`
- Replace the period with a dash: `1-1`
- Convert the title to kebab-case: `user-authentication`
- Final key: `1-1-user-authentication`

<action>Build complete inventory of all epics and stories from all epic files</action>
</step>

<step n="2" goal="Build sprint status structure">
<action>For each epic found, create entries in this order:</action>

1. **Epic entry** - Key: `epic-{num}`, Default status: `backlog`
2. **Story entries** - Key: `{epic}-{story}-{title}`, Default status: `backlog`
3. **Retrospective entry** - Key: `epic-{num}-retrospective`, Default status: `optional`

**Example structure:**

```yaml
development_status:
  epic-1: backlog
  1-1-user-authentication: backlog
  1-2-account-management: backlog
  epic-1-retrospective: optional
```

</step>

<step n="3" goal="Apply intelligent status detection">
<action>For each story, detect current status by checking files:</action>

**Story file detection:**

- Check: `{story_location_absolute}/{story-key}.md` (e.g., `stories/1-1-user-authentication.md`)
- If it exists → upgrade status to at least `ready-for-dev`

**Preservation rule:**

- If an existing `{status_file}` has a more advanced status, preserve it
- Never downgrade status (e.g., don't change `done` to `ready-for-dev`)

**Status Flow Reference:**

- Epic: `backlog` → `in-progress` → `done`
- Story: `backlog` → `ready-for-dev` → `in-progress` → `review` → `done`
- Retrospective: `optional` ↔ `done`
</step>

<step n="4" goal="Generate sprint status file">
<action>Create or update {status_file} with:</action>

**File Structure:**

```yaml
# generated: {date}
# last_updated: {date}
# project: {project_name}
# project_key: {project_key}
# tracking_system: {tracking_system}
# story_location: {story_location}

# STATUS DEFINITIONS:
# ==================
# Epic Status:
# - backlog: Epic not yet started
# - in-progress: Epic actively being worked on
# - done: All stories in epic completed
#
# Epic Status Transitions:
# - backlog → in-progress: Automatically when first story is created (via create-story)
# - in-progress → done: Manually when all stories reach 'done' status
#
# Story Status:
# - backlog: Story only exists in epic file
# - ready-for-dev: Story file created in stories folder
# - in-progress: Developer actively working on implementation
# - review: Ready for code review (via Dev's code-review workflow)
# - done: Story completed
#
# Retrospective Status:
# - optional: Can be completed but not required
# - done: Retrospective has been completed
#
# WORKFLOW NOTES:
# ===============
# - Epic transitions to 'in-progress' automatically when first story is created
# - Stories can be worked in parallel if team capacity allows
# - Developer typically creates next story after previous one is 'done' to incorporate learnings
# - Dev moves story to 'review', then runs code-review (fresh context, different LLM recommended)

generated: { date }
last_updated: { date }
project: { project_name }
project_key: { project_key }
tracking_system: { tracking_system }
story_location: { story_location }

development_status:
  # All epics, stories, and retrospectives in order
```

<action>Write the complete sprint status YAML to {status_file}</action>
<action>CRITICAL: Metadata appears TWICE - once as comments (#) for documentation, once as YAML key:value fields for parsing</action>
<action>Ensure all items are ordered: epic, its stories, its retrospective, next epic...</action>
</step>

<step n="5" goal="Validate and report">
<action>Perform validation checks:</action>

- [ ] Every epic in epic files appears in {status_file}
- [ ] Every story in epic files appears in {status_file}
- [ ] Every epic has a corresponding retrospective entry
- [ ] No items in {status_file} that don't exist in epic files
- [ ] All status values are legal (match state machine definitions)
- [ ] File is valid YAML syntax

<action>Count totals:</action>

- Total epics: {{epic_count}}
- Total stories: {{story_count}}
- Epics in-progress: {{in_progress_count}}
- Stories done: {{done_count}}

<action>Display completion summary to {user_name} in {communication_language}:</action>

**Sprint Status Generated Successfully**

- **File Location:** {status_file}
- **Total Epics:** {{epic_count}}
- **Total Stories:** {{story_count}}
- **Epics In Progress:** {{in_progress_count}}
- **Stories Completed:** {{done_count}}

**Next Steps:**

1. Review the generated {status_file}
2. Use this file to track development progress
3. Agents will update statuses as they work
4. Re-run this workflow to refresh auto-detected statuses
</step>

</workflow>

## Additional Documentation

### Status State Machine

**Epic Status Flow:**

```
backlog → in-progress → done
```

- **backlog**: Epic not yet started
- **in-progress**: Epic actively being worked on (stories being created/implemented)
- **done**: All stories in epic completed

**Story Status Flow:**

```
backlog → ready-for-dev → in-progress → review → done
```

- **backlog**: Story only exists in epic file
- **ready-for-dev**: Story file created (e.g., `stories/1-3-plant-naming.md`)
- **in-progress**: Developer actively working
- **review**: Ready for code review (via Dev's code-review workflow)
- **done**: Completed

**Retrospective Status:**

```
optional ↔ done
```

- **optional**: Ready to be conducted but not required
- **done**: Finished

### Guidelines

1. **Epic Activation**: Mark epic as `in-progress` when starting work on its first story
2. **Sequential Default**: Stories are typically worked in order, but parallel work is supported
3. **Parallel Work Supported**: Multiple stories can be `in-progress` if team capacity allows
4. **Review Before Done**: Stories should pass through `review` before `done`
5. **Learning Transfer**: Developer typically creates next story after previous one is `done` to incorporate learnings

@ -3,4 +3,295 @@ name: bmad-sprint-status
description: 'Summarize sprint status and surface risks. Use when the user says "check sprint status" or "show sprint status"'
---

Follow the instructions in ./workflow.md.
# Sprint Status Workflow

**Goal:** Summarize sprint status, surface risks, and recommend the next workflow action.

**Your Role:** You are a Developer providing clear, actionable sprint visibility. No time estimates — focus on status, risks, and next steps.

## Conventions

- Bare paths (e.g. `checklist.md`) resolve from the skill root.
- `{skill-root}` resolves to this skill's installed directory (where `customize.toml` lives).
- `{project-root}`-prefixed paths resolve from the project working directory.
- `{skill-name}` resolves to the skill directory's basename.

## On Activation

### Step 1: Resolve the Workflow Block

Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow`

**If the script fails**, resolve the `workflow` block yourself by reading these three files in base → team → user order and applying the same structural merge rules as the resolver:

1. `{skill-root}/customize.toml` — defaults
2. `{project-root}/_bmad/custom/{skill-name}.toml` — team overrides
3. `{project-root}/_bmad/custom/{skill-name}.user.toml` — personal overrides

Any missing file is skipped. Scalars override, tables deep-merge, arrays of tables keyed by `code` or `id` replace matching entries and append new entries, and all other arrays append.

### Step 2: Execute Prepend Steps

Execute each entry in `{workflow.activation_steps_prepend}` in order before proceeding.

### Step 3: Load Persistent Facts

Treat every entry in `{workflow.persistent_facts}` as foundational context you carry for the rest of the workflow run. Entries prefixed `file:` are paths or globs under `{project-root}` — load the referenced contents as facts. All other entries are facts verbatim.

### Step 4: Load Config

Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:

- `project_name`, `user_name`
- `communication_language`, `document_output_language`
- `implementation_artifacts`
- `date` as system-generated current datetime
- YOU MUST ALWAYS SPEAK OUTPUT in your Agent communication style, using the config `{communication_language}`

### Step 5: Greet the User

Greet `{user_name}`, speaking in `{communication_language}`.

### Step 6: Execute Append Steps

Execute each entry in `{workflow.activation_steps_append}` in order.

Activation is complete. Begin the workflow below.

## Paths

- `sprint_status_file` = `{implementation_artifacts}/sprint-status.yaml`

## Input Files

| Input | Path | Load Strategy |
|-------|------|---------------|
| Sprint status | `{sprint_status_file}` | FULL_LOAD |

## Execution

<workflow>

<step n="0" goal="Determine execution mode">
<action>Set mode = {{mode}} if provided by caller; otherwise mode = "interactive"</action>

<check if="mode == data">
<action>Jump to Step 20</action>
</check>

<check if="mode == validate">
<action>Jump to Step 30</action>
</check>

<check if="mode == interactive">
<action>Continue to Step 1</action>
</check>
</step>

<step n="1" goal="Locate sprint status file">
<action>Load {project_context} for project-wide patterns and conventions (if exists)</action>
<action>Try {sprint_status_file}</action>
<check if="file not found">
<output>sprint-status.yaml not found.
Run `/bmad:bmm:workflows:sprint-planning` to generate it, then rerun sprint-status.</output>
<action>Exit workflow</action>
</check>
<action>Continue to Step 2</action>
</step>

<step n="2" goal="Read and parse sprint-status.yaml">
<action>Read the FULL file: {sprint_status_file}</action>
<action>Parse fields: generated, last_updated, project, project_key, tracking_system, story_location</action>
<action>Parse development_status map. Classify keys (a sketch follows the counts below):</action>

- Epics: keys starting with "epic-" (and not ending with "-retrospective")
- Retrospectives: keys ending with "-retrospective"
- Stories: everything else (e.g., 1-2-login-form)

<action>Map legacy story status "drafted" → "ready-for-dev"</action>
<action>Count story statuses: backlog, ready-for-dev, in-progress, review, done</action>
<action>Map legacy epic status "contexted" → "in-progress"</action>
<action>Count epic statuses: backlog, in-progress, done</action>
<action>Count retrospective statuses: optional, done</action>
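
A sketch of the classification and counting (note that the retrospective check must run before the `epic-` prefix check, since retrospective keys also start with `epic-`):

```python
# Classify development_status keys and tally statuses per category.
from collections import Counter

LEGACY = {"drafted": "ready-for-dev", "contexted": "in-progress"}

def classify_and_count(development_status):
    epics, stories, retros = Counter(), Counter(), Counter()
    for key, status in development_status.items():
        status = LEGACY.get(status, status)  # map legacy statuses first
        if key.endswith("-retrospective"):
            retros[status] += 1
        elif key.startswith("epic-"):
            epics[status] += 1
        else:
            stories[status] += 1             # e.g., "1-2-login-form"
    return epics, stories, retros
```
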

<action>Validate all statuses against known values:</action>

- Valid story statuses: backlog, ready-for-dev, in-progress, review, done, drafted (legacy)
- Valid epic statuses: backlog, in-progress, done, contexted (legacy)
- Valid retrospective statuses: optional, done

<check if="any status is unrecognized">
<output>
**Unknown status detected:**
{{#each invalid_entries}}
- `{{key}}`: "{{status}}" (not recognized)
{{/each}}

**Valid statuses:**

- Stories: backlog, ready-for-dev, in-progress, review, done
- Epics: backlog, in-progress, done
- Retrospectives: optional, done
</output>
<ask>How should these be corrected?
{{#each invalid_entries}}
{{@index}}. {{key}}: "{{status}}" → [select valid status]
{{/each}}

Enter corrections (e.g., "1=in-progress, 2=backlog") or "skip" to continue without fixing:</ask>
<check if="user provided corrections">
<action>Update sprint-status.yaml with corrected values</action>
<action>Re-parse the file with corrected statuses</action>
</check>
</check>

<action>Detect risks (a sketch of these heuristics follows the list):</action>

- IF any story has status "review": suggest `/bmad:bmm:workflows:code-review`
- IF any story has status "in-progress" AND no stories have status "ready-for-dev": recommend staying focused on the active story
- IF all epics have status "backlog" AND no stories have status "ready-for-dev": prompt `/bmad:bmm:workflows:create-story`
- IF the `last_updated` timestamp is more than 7 days old (if `last_updated` is missing, fall back to `generated`): warn "sprint-status.yaml may be stale"
- IF any story key doesn't match an epic pattern (e.g., story "5-1-..." but no "epic-5"): warn "orphaned story detected"
- IF any epic has status in-progress but has no associated stories: warn "in-progress epic has no stories"
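
A sketch of these heuristics (function and parameter names are invented, and the ISO date format is an assumption about the `generated`/`last_updated` fields):

```python
# Risk heuristics over the parsed sprint status; inputs come from step 2.
from datetime import datetime, timedelta

def detect_risks(story_counts, epic_status, story_keys, last_updated, generated):
    risks = []
    if story_counts.get("review", 0):
        risks.append("stories in review: suggest /bmad:bmm:workflows:code-review")
    if story_counts.get("in-progress", 0) and not story_counts.get("ready-for-dev", 0):
        risks.append("stay focused on the active story")
    if all(s == "backlog" for s in epic_status.values()) and not story_counts.get("ready-for-dev", 0):
        risks.append("prompt /bmad:bmm:workflows:create-story")
    stamp = last_updated or generated  # fall back to `generated` when missing
    if stamp and datetime.now() - datetime.fromisoformat(stamp) > timedelta(days=7):
        risks.append("sprint-status.yaml may be stale")
    for key in story_keys:
        if f"epic-{key.split('-', 1)[0]}" not in epic_status:
            risks.append(f"orphaned story detected: {key}")
    for epic, status in epic_status.items():
        num = epic.removeprefix("epic-")
        if status == "in-progress" and not any(k.startswith(f"{num}-") for k in story_keys):
            risks.append(f"in-progress epic has no stories: {epic}")
    return risks
```
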
</step>

<step n="3" goal="Select next action recommendation">
<action>Pick the next recommended workflow using this priority (a sketch follows):</action>
<note>When selecting the "first" story: sort by epic number, then story number (e.g., 1-1 before 1-2 before 2-1)</note>

1. If any story status == in-progress → recommend `dev-story` for the first in-progress story
2. Else if any story status == review → recommend `code-review` for the first review story
3. Else if any story status == ready-for-dev → recommend `dev-story`
4. Else if any story status == backlog → recommend `create-story`
5. Else if any retrospective status == optional → recommend `retrospective`
6. Else → All implementation items done; congratulate the user - you both did amazing work together!

<action>Store the selected recommendation as: next_story_id, next_workflow_id, next_agent (DEV)</action>
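
A sketch of the priority selection, including the numeric "first story" ordering from the note above (names are illustrative; keys follow the `<epic>-<story>-<title>` format):

```python
# Pick the next workflow by priority; within a status, take the numerically first story.
def numeric_order(key):
    epic, story = key.split("-", 2)[:2]
    return (int(epic), int(story))  # 1-1 before 1-2 before 2-1

def recommend(story_status, retro_status):
    priority = [("in-progress", "dev-story"), ("review", "code-review"),
                ("ready-for-dev", "dev-story"), ("backlog", "create-story")]
    for status, workflow in priority:
        matches = sorted((k for k, s in story_status.items() if s == status), key=numeric_order)
        if matches:
            return workflow, matches[0]
    if "optional" in retro_status.values():
        return "retrospective", None
    return None, None  # everything done: congratulate the user
```
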
</step>

<step n="4" goal="Display summary">
<output>
## Sprint Status

- Project: {{project}} ({{project_key}})
- Tracking: {{tracking_system}}
- Status file: {sprint_status_file}

**Stories:** backlog {{count_backlog}}, ready-for-dev {{count_ready}}, in-progress {{count_in_progress}}, review {{count_review}}, done {{count_done}}

**Epics:** backlog {{epic_backlog}}, in-progress {{epic_in_progress}}, done {{epic_done}}

**Next Recommendation:** /bmad:bmm:workflows:{{next_workflow_id}} ({{next_story_id}})

{{#if risks}}
**Risks:**
{{#each risks}}
- {{this}}
{{/each}}
{{/if}}
</output>
</step>

<step n="5" goal="Offer actions">
<ask>Pick an option:
1) Run recommended workflow now
2) Show all stories grouped by status
3) Show raw sprint-status.yaml
4) Exit
Choice:</ask>

<check if="choice == 1">
<output>Run `/bmad:bmm:workflows:{{next_workflow_id}}`.
If the command targets a story, set `story_key={{next_story_id}}` when prompted.</output>
</check>

<check if="choice == 2">
<output>
### Stories by Status

- In Progress: {{stories_in_progress}}
- Review: {{stories_in_review}}
- Ready for Dev: {{stories_ready_for_dev}}
- Backlog: {{stories_backlog}}
- Done: {{stories_done}}
</output>
</check>

<check if="choice == 3">
<action>Display the full contents of {sprint_status_file}</action>
</check>

<check if="choice == 4">
<action>Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow.on_complete` — if the resolved value is non-empty, follow it as the final terminal instruction before exiting.</action>
<action>Exit workflow</action>
</check>
</step>

<!-- ========================= -->
<!-- Data mode for other flows -->
<!-- ========================= -->

<step n="20" goal="Data mode output">
<action>Load and parse {sprint_status_file} the same as Step 2</action>
<action>Compute the recommendation the same as Step 3</action>
<template-output>next_workflow_id = {{next_workflow_id}}</template-output>
<template-output>next_story_id = {{next_story_id}}</template-output>
<template-output>count_backlog = {{count_backlog}}</template-output>
<template-output>count_ready = {{count_ready}}</template-output>
<template-output>count_in_progress = {{count_in_progress}}</template-output>
<template-output>count_review = {{count_review}}</template-output>
<template-output>count_done = {{count_done}}</template-output>
<template-output>epic_backlog = {{epic_backlog}}</template-output>
<template-output>epic_in_progress = {{epic_in_progress}}</template-output>
<template-output>epic_done = {{epic_done}}</template-output>
<template-output>risks = {{risks}}</template-output>
<action>Return to caller</action>
</step>

<!-- ========================= -->
<!-- Validate mode -->
<!-- ========================= -->

<step n="30" goal="Validate sprint-status file">
<action>Check that {sprint_status_file} exists</action>
<check if="missing">
<template-output>is_valid = false</template-output>
<template-output>error = "sprint-status.yaml missing"</template-output>
<template-output>suggestion = "Run sprint-planning to create it"</template-output>
<action>Return</action>
</check>

<action>Read and parse {sprint_status_file}</action>

<action>Validate that required metadata fields exist: generated, project, project_key, tracking_system, story_location (last_updated is optional for backward compatibility)</action>
<check if="any required field missing">
<template-output>is_valid = false</template-output>
<template-output>error = "Missing required field(s): {{missing_fields}}"</template-output>
<template-output>suggestion = "Re-run sprint-planning or add missing fields manually"</template-output>
<action>Return</action>
</check>

<action>Verify the development_status section exists with at least one entry</action>
<check if="development_status missing or empty">
<template-output>is_valid = false</template-output>
<template-output>error = "development_status missing or empty"</template-output>
<template-output>suggestion = "Re-run sprint-planning or repair the file manually"</template-output>
<action>Return</action>
</check>

<action>Validate all status values against known valid statuses:</action>

- Stories: backlog, ready-for-dev, in-progress, review, done (legacy: drafted)
- Epics: backlog, in-progress, done (legacy: contexted)
- Retrospectives: optional, done

<check if="any invalid status found">
<template-output>is_valid = false</template-output>
<template-output>error = "Invalid status values: {{invalid_entries}}"</template-output>
<template-output>suggestion = "Fix invalid statuses in sprint-status.yaml"</template-output>
<action>Return</action>
</check>

<template-output>is_valid = true</template-output>
<template-output>message = "sprint-status.yaml valid: metadata complete, all statuses recognized"</template-output>
<action>Run: `python3 {project-root}/_bmad/scripts/resolve_customization.py --skill {skill-root} --key workflow.on_complete` — if the resolved value is non-empty, follow it as the final terminal instruction before exiting.</action>
</step>

</workflow>

@ -0,0 +1,41 @@

# DO NOT EDIT -- overwritten on every update.
#
# Workflow customization surface for bmad-sprint-status. Mirrors the
# agent customization shape under the [workflow] namespace.

[workflow]

# --- Configurable below. Overrides merge per BMad structural rules: ---
# scalars: override wins • arrays (persistent_facts, activation_steps_*): append
# arrays-of-tables with `code`/`id`: replace matching items, append new ones.

# Steps to run before the standard activation (config load, greet).
# Overrides append. Use for pre-flight loads, compliance checks, etc.

activation_steps_prepend = []

# Steps to run after greet but before the workflow begins.
# Overrides append. Use for context-heavy setup that should happen
# once the user has been acknowledged.

activation_steps_append = []

# Persistent facts the workflow keeps in mind for the whole run
# (standards, compliance constraints, stylistic guardrails).
# Distinct from the runtime memory sidecar — these are static context
# loaded on activation. Overrides append.
#
# Each entry is either:
# - a literal sentence, e.g. "All stories must include testable acceptance criteria."
# - a file reference prefixed with `file:`, e.g. "file:{project-root}/docs/standards.md"
#   (glob patterns are supported; the file's contents are loaded and treated as facts).

persistent_facts = [
  "file:{project-root}/**/project-context.md",
]

# Scalar: executed when the workflow reaches its final step,
# after sprint status is summarized and risks are surfaced. Override wins.
# Leave empty for no custom post-completion behavior.

on_complete = ""

@ -1,261 +0,0 @@
|
||||||
# Sprint Status Workflow
|
|
||||||
|
|
||||||
**Goal:** Summarize sprint status, surface risks, and recommend the next workflow action.
|
|
||||||
|
|
||||||
**Your Role:** You are a Developer providing clear, actionable sprint visibility. No time estimates — focus on status, risks, and next steps.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## INITIALIZATION
|
|
||||||
|
|
||||||
### Configuration Loading
|
|
||||||
|
|
||||||
Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
|
|
||||||
|
|
||||||
- `project_name`, `user_name`
|
|
||||||
- `communication_language`, `document_output_language`
|
|
||||||
- `implementation_artifacts`
|
|
||||||
- `date` as system-generated current datetime
|
|
||||||
- YOU MUST ALWAYS SPEAK OUTPUT in your Agent communication style with the config `{communication_language}`
|
|
||||||
|
|
||||||
### Paths
|
|
||||||
|
|
||||||
- `sprint_status_file` = `{implementation_artifacts}/sprint-status.yaml`
|
|
||||||
|
|
||||||
### Input Files
|
|
||||||
|
|
||||||
| Input | Path | Load Strategy |
|
|
||||||
|-------|------|---------------|
|
|
||||||
| Sprint status | `{sprint_status_file}` | FULL_LOAD |
|
|
||||||
|
|
||||||
### Context
|
|
||||||
|
|
||||||
- `project_context` = `**/project-context.md` (load if exists)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## EXECUTION
|
|
||||||
|
|
||||||
<workflow>
|
|
||||||
|
|
||||||
<step n="0" goal="Determine execution mode">
|
|
||||||
<action>Set mode = {{mode}} if provided by caller; otherwise mode = "interactive"</action>
|
|
||||||
|
|
||||||
<check if="mode == data">
|
|
||||||
<action>Jump to Step 20</action>
|
|
||||||
</check>
|
|
||||||
|
|
||||||
<check if="mode == validate">
|
|
||||||
<action>Jump to Step 30</action>
|
|
||||||
</check>
|
|
||||||
|
|
||||||
<check if="mode == interactive">
|
|
||||||
<action>Continue to Step 1</action>
|
|
||||||
</check>
|
|
||||||
</step>
|
|
||||||
|
|
||||||
<step n="1" goal="Locate sprint status file">
|
|
||||||
<action>Load {project_context} for project-wide patterns and conventions (if exists)</action>
|
|
||||||
<action>Try {sprint_status_file}</action>
|
|
||||||
<check if="file not found">
|
|
||||||
<output>❌ sprint-status.yaml not found.
|
|
||||||
Run `/bmad:bmm:workflows:sprint-planning` to generate it, then rerun sprint-status.</output>
|
|
||||||
<action>Exit workflow</action>
|
|
||||||
</check>
|
|
||||||
<action>Continue to Step 2</action>
|
|
||||||
</step>
|
|
||||||
|
|
||||||
<step n="2" goal="Read and parse sprint-status.yaml">
|
|
||||||
<action>Read the FULL file: {sprint_status_file}</action>
|
|
||||||
<action>Parse fields: generated, last_updated, project, project_key, tracking_system, story_location</action>
|
|
||||||
<action>Parse development_status map. Classify keys:</action>
|
|
||||||
- Epics: keys starting with "epic-" (and not ending with "-retrospective")
|
|
||||||
- Retrospectives: keys ending with "-retrospective"
|
|
||||||
- Stories: everything else (e.g., 1-2-login-form)
|
|
||||||
<action>Map legacy story status "drafted" → "ready-for-dev"</action>
|
|
||||||
<action>Count story statuses: backlog, ready-for-dev, in-progress, review, done</action>
|
|
||||||
<action>Map legacy epic status "contexted" → "in-progress"</action>
|
|
||||||
<action>Count epic statuses: backlog, in-progress, done</action>
|
|
||||||
<action>Count retrospective statuses: optional, done</action>
|
|
||||||
|
|
||||||
<action>Validate all statuses against known values:</action>
|
|
||||||
|
|
||||||
- Valid story statuses: backlog, ready-for-dev, in-progress, review, done, drafted (legacy)
|
|
||||||
- Valid epic statuses: backlog, in-progress, done, contexted (legacy)
|
|
||||||
- Valid retrospective statuses: optional, done
|
|
||||||
|
|
||||||
<check if="any status is unrecognized">
|
|
||||||
<output>
|
|
||||||
⚠️ **Unknown status detected:**
|
|
||||||
{{#each invalid_entries}}
|
|
||||||
|
|
||||||
- `{{key}}`: "{{status}}" (not recognized)
|
|
||||||
{{/each}}
|
|
||||||
|
|
||||||
**Valid statuses:**
|
|
||||||
|
|
||||||
- Stories: backlog, ready-for-dev, in-progress, review, done
|
|
||||||
- Epics: backlog, in-progress, done
|
|
||||||
- Retrospectives: optional, done
|
|
||||||
</output>
|
|
||||||
<ask>How should these be corrected?
|
|
||||||
{{#each invalid_entries}}
|
|
||||||
{{@index}}. {{key}}: "{{status}}" → [select valid status]
|
|
||||||
{{/each}}
|
|
||||||
|
|
||||||
Enter corrections (e.g., "1=in-progress, 2=backlog") or "skip" to continue without fixing:</ask>
|
|
||||||
<check if="user provided corrections">
|
|
||||||
<action>Update sprint-status.yaml with corrected values</action>
|
|
||||||
<action>Re-parse the file with corrected statuses</action>
|
|
||||||
</check>
|
|
||||||
</check>
|
|
||||||
|
|
||||||
<action>Detect risks:</action>
|
|
||||||
|
|
||||||
- IF any story has status "review": suggest `/bmad:bmm:workflows:code-review`
|
|
||||||
- IF any story has status "in-progress" AND no stories have status "ready-for-dev": recommend staying focused on active story
|
|
||||||
- IF all epics have status "backlog" AND no stories have status "ready-for-dev": prompt `/bmad:bmm:workflows:create-story`
|
|
||||||
- IF `last_updated` timestamp is more than 7 days old (or `last_updated` is missing, fall back to `generated`): warn "sprint-status.yaml may be stale"
|
|
||||||
- IF any story key doesn't match an epic pattern (e.g., story "5-1-..." but no "epic-5"): warn "orphaned story detected"
|
|
||||||
- IF any epic has status in-progress but has no associated stories: warn "in-progress epic has no stories"
|
|
||||||
</step>
|
|
||||||
|
|
||||||
<step n="3" goal="Select next action recommendation">
|
|
||||||
<action>Pick the next recommended workflow using priority:</action>
|
|
||||||
<note>When selecting "first" story: sort by epic number, then story number (e.g., 1-1 before 1-2 before 2-1)</note>
|
|
||||||
1. If any story status == in-progress → recommend `dev-story` for the first in-progress story
|
|
||||||
2. Else if any story status == review → recommend `code-review` for the first review story
|
|
||||||
3. Else if any story status == ready-for-dev → recommend `dev-story`
|
|
||||||
4. Else if any story status == backlog → recommend `create-story`
|
|
||||||
5. Else if any retrospective status == optional → recommend `retrospective`
|
|
||||||
6. Else → All implementation items done; congratulate the user - you both did amazing work together!
|
|
||||||
<action>Store selected recommendation as: next_story_id, next_workflow_id, next_agent (DEV)</action>
|
|
||||||
</step>
|
|
||||||
|
|
||||||
<step n="4" goal="Display summary">
|
|
||||||
<output>
|
|
||||||
## 📊 Sprint Status
|
|
||||||
|
|
||||||
- Project: {{project}} ({{project_key}})
|
|
||||||
- Tracking: {{tracking_system}}
|
|
||||||
- Status file: {sprint_status_file}
|
|
||||||
|
**Stories:** backlog {{count_backlog}}, ready-for-dev {{count_ready}}, in-progress {{count_in_progress}}, review {{count_review}}, done {{count_done}}

**Epics:** backlog {{epic_backlog}}, in-progress {{epic_in_progress}}, done {{epic_done}}

**Next Recommendation:** /bmad:bmm:workflows:{{next_workflow_id}} ({{next_story_id}})

{{#if risks}}
**Risks:**
{{#each risks}}
- {{this}}
{{/each}}
{{/if}}
</output>
</step>

<step n="5" goal="Offer actions">
<ask>Pick an option:
1) Run recommended workflow now
2) Show all stories grouped by status
3) Show raw sprint-status.yaml
4) Exit
Choice:</ask>

<check if="choice == 1">
<output>Run `/bmad:bmm:workflows:{{next_workflow_id}}`.
If the command targets a story, set `story_key={{next_story_id}}` when prompted.</output>
</check>

<check if="choice == 2">
<output>
### Stories by Status
- In Progress: {{stories_in_progress}}
- Review: {{stories_in_review}}
- Ready for Dev: {{stories_ready_for_dev}}
- Backlog: {{stories_backlog}}
- Done: {{stories_done}}
</output>
</check>

<check if="choice == 3">
<action>Display the full contents of {sprint_status_file}</action>
</check>

<check if="choice == 4">
<action>Exit workflow</action>
</check>
</step>

<!-- ========================= -->
<!-- Data mode for other flows -->
<!-- ========================= -->

<step n="20" goal="Data mode output">
<action>Load and parse {sprint_status_file} same as Step 2</action>
<action>Compute recommendation same as Step 3</action>
<template-output>next_workflow_id = {{next_workflow_id}}</template-output>
<template-output>next_story_id = {{next_story_id}}</template-output>
<template-output>count_backlog = {{count_backlog}}</template-output>
<template-output>count_ready = {{count_ready}}</template-output>
<template-output>count_in_progress = {{count_in_progress}}</template-output>
<template-output>count_review = {{count_review}}</template-output>
<template-output>count_done = {{count_done}}</template-output>
<template-output>epic_backlog = {{epic_backlog}}</template-output>
<template-output>epic_in_progress = {{epic_in_progress}}</template-output>
<template-output>epic_done = {{epic_done}}</template-output>
<template-output>risks = {{risks}}</template-output>
<action>Return to caller</action>
</step>

<!-- ========================= -->
<!-- Validate mode -->
<!-- ========================= -->

<step n="30" goal="Validate sprint-status file">
<action>Check that {sprint_status_file} exists</action>
<check if="missing">
<template-output>is_valid = false</template-output>
<template-output>error = "sprint-status.yaml missing"</template-output>
<template-output>suggestion = "Run sprint-planning to create it"</template-output>
<action>Return</action>
</check>

<action>Read and parse {sprint_status_file}</action>

<action>Validate required metadata fields exist: generated, project, project_key, tracking_system, story_location (last_updated is optional for backward compatibility)</action>
<check if="any required field missing">
<template-output>is_valid = false</template-output>
<template-output>error = "Missing required field(s): {{missing_fields}}"</template-output>
<template-output>suggestion = "Re-run sprint-planning or add missing fields manually"</template-output>
<action>Return</action>
</check>

<action>Verify development_status section exists with at least one entry</action>
<check if="development_status missing or empty">
<template-output>is_valid = false</template-output>
<template-output>error = "development_status missing or empty"</template-output>
<template-output>suggestion = "Re-run sprint-planning or repair the file manually"</template-output>
<action>Return</action>
</check>

<action>Validate all status values against known valid statuses:</action>

- Stories: backlog, ready-for-dev, in-progress, review, done (legacy: drafted)
- Epics: backlog, in-progress, done (legacy: contexted)
- Retrospectives: optional, done

<check if="any invalid status found">
<template-output>is_valid = false</template-output>
<template-output>error = "Invalid status values: {{invalid_entries}}"</template-output>
<template-output>suggestion = "Fix invalid statuses in sprint-status.yaml"</template-output>
<action>Return</action>
</check>

<template-output>is_valid = true</template-output>
<template-output>message = "sprint-status.yaml valid: metadata complete, all statuses recognized"</template-output>
</step>

</workflow>

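The step-30 checks map almost one-to-one onto code. As a minimal sketch only, assuming the `js-yaml` package and checking only the story statuses for brevity (the epic and retrospective lists would extend the same set), the validation could look like:

```js
// Hedged sketch of the step-30 validation; field and status names are taken
// from the workflow text above, everything else here is assumed.
const fs = require('fs');
const yaml = require('js-yaml');

const REQUIRED_META = ['generated', 'project', 'project_key', 'tracking_system', 'story_location'];
const STORY_STATUSES = new Set(['backlog', 'ready-for-dev', 'in-progress', 'review', 'done', 'drafted']);

function validateSprintStatus(file) {
  if (!fs.existsSync(file)) {
    return { is_valid: false, error: 'sprint-status.yaml missing', suggestion: 'Run sprint-planning to create it' };
  }
  const doc = yaml.load(fs.readFileSync(file, 'utf8')) || {};
  const missing = REQUIRED_META.filter((field) => !(field in doc));
  if (missing.length > 0) {
    return { is_valid: false, error: `Missing required field(s): ${missing.join(', ')}` };
  }
  const dev = doc.development_status;
  if (!dev || Object.keys(dev).length === 0) {
    return { is_valid: false, error: 'development_status missing or empty' };
  }
  const invalid = Object.entries(dev).filter(([, status]) => !STORY_STATUSES.has(status));
  if (invalid.length > 0) {
    return { is_valid: false, error: `Invalid status values: ${invalid.map(([key]) => key).join(', ')}` };
  }
  return { is_valid: true, message: 'sprint-status.yaml valid: metadata complete, all statuses recognized' };
}
```
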
@@ -1,33 +1,33 @@
-module,skill,display-name,menu-code,description,action,args,phase,after,before,required,output-location,outputs
+module,skill,display-name,menu-code,description,action,args,phase,preceded-by,followed-by,required,output-location,outputs
 BMad Method,_meta,,,,,,,,,false,https://docs.bmad-method.org/llms.txt,
-BMad Method,bmad-document-project,Document Project,DP,Analyze an existing project to produce useful documentation.,,anytime,,,false,project-knowledge,*
+BMad Method,bmad-document-project,Document Project,DP,Analyze an existing project to produce useful documentation.,,,anytime,,,false,project-knowledge,*
-BMad Method,bmad-generate-project-context,Generate Project Context,GPC,Scan existing codebase to generate a lean LLM-optimized project-context.md. Essential for brownfield projects.,,anytime,,,false,output_folder,project context
+BMad Method,bmad-generate-project-context,Generate Project Context,GPC,Scan existing codebase to generate a lean LLM-optimized project-context.md. Essential for brownfield projects.,,,anytime,,,false,output_folder,project context
-BMad Method,bmad-quick-dev,Quick Dev,QQ,Unified intent-in code-out workflow: clarify plan implement review and present.,,anytime,,,false,implementation_artifacts,spec and project implementation
+BMad Method,bmad-quick-dev,Quick Dev,QQ,Unified intent-in code-out workflow: clarify plan implement review and present.,,,anytime,,,false,implementation_artifacts,spec and project implementation
-BMad Method,bmad-correct-course,Correct Course,CC,Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories.,,anytime,,,false,planning_artifacts,change proposal
+BMad Method,bmad-correct-course,Correct Course,CC,Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories.,,,anytime,,,false,planning_artifacts,change proposal
 BMad Method,bmad-agent-tech-writer,Write Document,WD,"Describe in detail what you want, and the agent will follow documentation best practices. Multi-turn conversation with subprocess for research/review.",write,,anytime,,,false,project-knowledge,document
 BMad Method,bmad-agent-tech-writer,Update Standards,US,Update agent memory documentation-standards.md with your specific preferences if you discover missing document conventions.,update-standards,,anytime,,,false,_bmad/_memory/tech-writer-sidecar,standards
 BMad Method,bmad-agent-tech-writer,Mermaid Generate,MG,Create a Mermaid diagram based on user description. Will suggest diagram types if not specified.,mermaid,,anytime,,,false,planning_artifacts,mermaid diagram
 BMad Method,bmad-agent-tech-writer,Validate Document,VD,Review the specified document against documentation standards and best practices. Returns specific actionable improvement suggestions organized by priority.,validate,[path],anytime,,,false,planning_artifacts,validation report
 BMad Method,bmad-agent-tech-writer,Explain Concept,EC,Create clear technical explanations with examples and diagrams for complex concepts.,explain,[topic],anytime,,,false,project_knowledge,explanation
-BMad Method,bmad-brainstorming,Brainstorm Project,BP,Expert guided facilitation through a single or multiple techniques.,,1-analysis,,,false,planning_artifacts,brainstorming session
+BMad Method,bmad-brainstorming,Brainstorm Project,BP,Expert guided facilitation through a single or multiple techniques.,,,1-analysis,,,false,planning_artifacts,brainstorming session
-BMad Method,bmad-market-research,Market Research,MR,"Market analysis competitive landscape customer needs and trends.",,1-analysis,,,false,"planning_artifacts|project-knowledge",research documents
+BMad Method,bmad-market-research,Market Research,MR,Market analysis competitive landscape customer needs and trends.,,,1-analysis,,,false,planning_artifacts|project-knowledge,research documents
-BMad Method,bmad-domain-research,Domain Research,DR,Industry domain deep dive subject matter expertise and terminology.,,1-analysis,,,false,"planning_artifacts|project_knowledge",research documents
+BMad Method,bmad-domain-research,Domain Research,DR,Industry domain deep dive subject matter expertise and terminology.,,,1-analysis,,,false,planning_artifacts|project_knowledge,research documents
-BMad Method,bmad-technical-research,Technical Research,TR,Technical feasibility architecture options and implementation approaches.,,1-analysis,,,false,"planning_artifacts|project_knowledge",research documents
+BMad Method,bmad-technical-research,Technical Research,TR,Technical feasibility architecture options and implementation approaches.,,,1-analysis,,,false,planning_artifacts|project_knowledge,research documents
 BMad Method,bmad-product-brief,Create Brief,CB,An expert guided experience to nail down your product idea in a brief. a gentler approach than PRFAQ when you are already sure of your concept and nothing will sway you.,,-A,1-analysis,,,false,planning_artifacts,product brief
 BMad Method,bmad-prfaq,PRFAQ Challenge,WB,Working Backwards guided experience to forge and stress-test your product concept to ensure you have a great product that users will love and need through the PRFAQ gauntlet to determine feasibility and alignment with user needs. alternative to product brief.,,-H,1-analysis,,,false,planning_artifacts,prfaq document
-BMad Method,bmad-create-prd,Create PRD,CP,Expert led facilitation to produce your Product Requirements Document.,,2-planning,,,true,planning_artifacts,prd
+BMad Method,bmad-create-prd,Create PRD,CP,Expert led facilitation to produce your Product Requirements Document.,,,2-planning,,,true,planning_artifacts,prd
 BMad Method,bmad-validate-prd,Validate PRD,VP,,,[path],2-planning,bmad-create-prd,,false,planning_artifacts,prd validation report
 BMad Method,bmad-edit-prd,Edit PRD,EP,,,[path],2-planning,bmad-validate-prd,,false,planning_artifacts,updated prd
-BMad Method,bmad-create-ux-design,Create UX,CU,"Guidance through realizing the plan for your UX, strongly recommended if a UI is a primary piece of the proposed project.",,2-planning,bmad-create-prd,,false,planning_artifacts,ux design
+BMad Method,bmad-create-ux-design,Create UX,CU,"Guidance through realizing the plan for your UX, strongly recommended if a UI is a primary piece of the proposed project.",,,2-planning,bmad-create-prd,,false,planning_artifacts,ux design
-BMad Method,bmad-create-architecture,Create Architecture,CA,Guided workflow to document technical decisions.,,3-solutioning,,,true,planning_artifacts,architecture
+BMad Method,bmad-create-architecture,Create Architecture,CA,Guided workflow to document technical decisions.,,,3-solutioning,,,true,planning_artifacts,architecture
-BMad Method,bmad-create-epics-and-stories,Create Epics and Stories,CE,,,3-solutioning,bmad-create-architecture,,true,planning_artifacts,epics and stories
+BMad Method,bmad-create-epics-and-stories,Create Epics and Stories,CE,,,,3-solutioning,bmad-create-architecture,,true,planning_artifacts,epics and stories
-BMad Method,bmad-check-implementation-readiness,Check Implementation Readiness,IR,Ensure PRD UX Architecture and Epics Stories are aligned.,,3-solutioning,bmad-create-epics-and-stories,,true,planning_artifacts,readiness report
+BMad Method,bmad-check-implementation-readiness,Check Implementation Readiness,IR,Ensure PRD UX Architecture and Epics Stories are aligned.,,,3-solutioning,bmad-create-epics-and-stories,,true,planning_artifacts,readiness report
-BMad Method,bmad-sprint-planning,Sprint Planning,SP,Kicks off implementation by producing a plan the implementation agents will follow in sequence for every story.,,4-implementation,,,true,implementation_artifacts,sprint status
+BMad Method,bmad-sprint-planning,Sprint Planning,SP,Kicks off implementation by producing a plan the implementation agents will follow in sequence for every story.,,,4-implementation,,,true,implementation_artifacts,sprint status
-BMad Method,bmad-sprint-status,Sprint Status,SS,Anytime: Summarize sprint status and route to next workflow.,,4-implementation,bmad-sprint-planning,,false,,
+BMad Method,bmad-sprint-status,Sprint Status,SS,Anytime: Summarize sprint status and route to next workflow.,,,4-implementation,bmad-sprint-planning,,false,,
-BMad Method,bmad-create-story,Create Story,CS,"Story cycle start: Prepare first found story in the sprint plan that is next or a specific epic/story designation.",create,,4-implementation,bmad-sprint-planning,bmad-create-story:validate,true,implementation_artifacts,story
+BMad Method,bmad-create-story,Create Story,CS,Story cycle start: Prepare first found story in the sprint plan that is next or a specific epic/story designation.,create,,4-implementation,bmad-sprint-planning,bmad-create-story:validate,true,implementation_artifacts,story
 BMad Method,bmad-create-story,Validate Story,VS,Validates story readiness and completeness before development work begins.,validate,,4-implementation,bmad-create-story:create,bmad-dev-story,false,implementation_artifacts,story validation report
-BMad Method,bmad-dev-story,Dev Story,DS,Story cycle: Execute story implementation tasks and tests then CR then back to DS if fixes needed.,,4-implementation,bmad-create-story:validate,,true,,
+BMad Method,bmad-dev-story,Dev Story,DS,Story cycle: Execute story implementation tasks and tests then CR then back to DS if fixes needed.,,,4-implementation,bmad-create-story:validate,,true,,
-BMad Method,bmad-code-review,Code Review,CR,Story cycle: If issues back to DS if approved then next CS or ER if epic complete.,,4-implementation,bmad-dev-story,,false,,
+BMad Method,bmad-code-review,Code Review,CR,Story cycle: If issues back to DS if approved then next CS or ER if epic complete.,,,4-implementation,bmad-dev-story,,false,,
-BMad Method,bmad-checkpoint-preview,Checkpoint,CK,Guided walkthrough of a change from purpose and context into details. Use for human review of commits branches or PRs.,,4-implementation,,,false,,
+BMad Method,bmad-checkpoint-preview,Checkpoint,CK,Guided walkthrough of a change from purpose and context into details. Use for human review of commits branches or PRs.,,,4-implementation,,,false,,
-BMad Method,bmad-qa-generate-e2e-tests,QA Automation Test,QA,Generate automated API and E2E tests for implemented code. NOT for code review or story validation — use CR for that.,,4-implementation,bmad-dev-story,,false,implementation_artifacts,test suite
+BMad Method,bmad-qa-generate-e2e-tests,QA Automation Test,QA,Generate automated API and E2E tests for implemented code. NOT for code review or story validation — use CR for that.,,,4-implementation,bmad-dev-story,,false,implementation_artifacts,test suite
-BMad Method,bmad-retrospective,Retrospective,ER,Optional at epic end: Review completed work lessons learned and next epic or if major issues consider CC.,,4-implementation,bmad-code-review,,false,implementation_artifacts,retrospective
+BMad Method,bmad-retrospective,Retrospective,ER,Optional at epic end: Review completed work lessons learned and next epic or if major issues consider CC.,,,4-implementation,bmad-code-review,,false,implementation_artifacts,retrospective

@@ -5,15 +5,11 @@ default_selected: true # This module will be selected by default for new install
 # Variables from Core Config inserted:
 ## user_name
+## project_name
 ## communication_language
 ## document_output_language
 ## output_folder

-project_name:
-  prompt: "What is your project called?"
-  default: "{directory_name}"
-  result: "{value}"
-
 user_skill_level:
   prompt:
     - "What is your development experience level?"

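These config entries share one shape: a `prompt` shown to the user, a `default` whose `{directory_name}`-style placeholders are expanded from context, and a `result` template that references the raw answer as `{value}`. The installer's actual resolution code is not part of this diff; a rough sketch under those assumptions:

```js
// Hedged sketch: resolve one prompt/default/result config entry.
// `askUser` is a hypothetical prompt function; placeholder semantics are assumed.
async function resolveEntry(entry, context, askUser) {
  // Expand placeholders like {directory_name} in the default value.
  const fallback = entry.default.replace(/\{(\w+)\}/g, (_, key) => context[key] ?? '');
  const answer = (await askUser(entry.prompt, fallback)) || fallback;
  // result templates reference the accepted answer as {value}.
  return entry.result.replace('{value}', answer);
}

// e.g. resolveEntry({ prompt: 'What is your project called?', default: '{directory_name}',
//   result: '{value}' }, { directory_name: 'my-app' }, ask) yields the answer, or 'my-app' if accepted.
```
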
@@ -139,7 +139,7 @@ parts: 1
 ## Solution Architecture
 - Plugins: skill bundles with Anthropic plugin standard as base format + bmad-manifest.json extending for BMAD-specific metadata (installer options, capabilities, help integration, phase ordering, dependencies)
-- Existing manifest example: `{"module-code":"bmm","replaces-skill":"bmad-create-product-brief","capabilities":[{"name":"create-brief","menu-code":"CB","supports-headless":true,"phase-name":"1-analysis","after":["brainstorming"],"before":["create-prd"],"is-required":true}]}`
+- Existing manifest example: `{"module-code":"bmm","replaces-skill":"bmad-create-product-brief","capabilities":[{"name":"create-brief","menu-code":"CB","supports-headless":true,"phase-name":"1-analysis","preceded-by":["brainstorming"],"followed-by":["create-prd"],"is-required":true}]}`
 - Vercel skills CLI handles platform translation; integration pattern (wrap/fork/call) is PRD decision
 - bmad-setup: global skill scanning installed bmad-manifest.json files, registering capabilities, configuring project settings; always included as base skill in every bundle (solves bootstrapping)
 - bmad-update: plugin update path without full reinstall; technical approach (diff/replace/preserve customizations) is PRD decision

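Since the manifest's `preceded-by` arrays carry per-capability ordering hints, a consumer needs only a small topological pass to turn them into a menu order. A sketch of how a tool like bmad-setup might do this (illustrative only; the real registration code is not shown in this diff, and `followed-by` handling, which would work symmetrically, is omitted):

```js
// Hedged sketch: order capabilities so anything named in a capability's
// "preceded-by" sorts ahead of it. Plain depth-first topological ordering;
// cycle detection is omitted for brevity.
function orderCapabilities(capabilities) {
  const byName = new Map(capabilities.map((c) => [c.name, c]));
  const seen = new Set();
  const ordered = [];
  function visit(cap) {
    if (!cap || seen.has(cap.name)) return; // unknown deps (other modules) are skipped
    seen.add(cap.name);
    for (const dep of cap['preceded-by'] ?? []) visit(byName.get(dep));
    ordered.push(cap);
  }
  for (const cap of capabilities) visit(cap);
  return ordered;
}
```

With the example manifest above, `create-brief` would sort after `brainstorming` whenever both capabilities are registered.
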
@@ -33,16 +33,16 @@ When this skill completes, the user should:
 The catalog uses this format:

 ```
-module,skill,display-name,menu-code,description,action,args,phase,after,before,required,output-location,outputs
+module,skill,display-name,menu-code,description,action,args,phase,preceded-by,followed-by,required,output-location,outputs
 ```

 **Phases** determine the high-level flow:
 - `anytime` — available regardless of workflow state
 - Numbered phases (`1-analysis`, `2-planning`, etc.) flow in order; naming varies by module

-**Dependencies** determine ordering within and across phases:
+**Sequencing** determines recommended ordering within and across phases (these are soft suggestions, not hard gates — see `required` for gating):
-- `after` — skills that should ideally complete before this one
+- `preceded-by` — skills that should ideally complete before this one
-- `before` — skills that should run after this one
+- `followed-by` — skills that should ideally run after this one
 - Format: `skill-name` for single-action skills, `skill-name:action` for multi-action skills

 **Required gates**:

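Several catalog rows quote the description field because it contains commas, so a naive `split(',')` would misparse them. A sketch of a row parser that honors the quoting seen in these catalogs (assumed for illustration; the skill's actual parser is not shown, and doubled-quote escapes are ignored since the data never uses them):

```js
// Hedged sketch: split one catalog row into the 13 columns named in the
// header, treating double-quoted fields as opaque (commas inside them kept).
const COLUMNS = [
  'module', 'skill', 'display-name', 'menu-code', 'description', 'action', 'args',
  'phase', 'preceded-by', 'followed-by', 'required', 'output-location', 'outputs',
];

function parseCatalogRow(line) {
  const fields = [];
  let current = '';
  let inQuotes = false;
  for (const ch of line) {
    if (ch === '"') inQuotes = !inQuotes;            // toggle quoted state, drop the quote
    else if (ch === ',' && !inQuotes) { fields.push(current); current = ''; }
    else current += ch;
  }
  fields.push(current);
  // Sequencing cells hold "skill-name" or "skill-name:action" per the doc above.
  return Object.fromEntries(COLUMNS.map((name, i) => [name, fields[i] ?? '']));
}
```
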
@@ -1,13 +1,13 @@
-module,skill,display-name,menu-code,description,action,args,phase,after,before,required,output-location,outputs
+module,skill,display-name,menu-code,description,action,args,phase,preceded-by,followed-by,required,output-location,outputs
 Core,_meta,,,,,,,,,false,https://docs.bmad-method.org/llms.txt,
-Core,bmad-brainstorming,Brainstorming,BSP,Use early in ideation or when stuck generating ideas.,,anytime,,,false,{output_folder}/brainstorming,brainstorming session
+Core,bmad-brainstorming,Brainstorming,BSP,Use early in ideation or when stuck generating ideas.,,,anytime,,,false,{output_folder}/brainstorming,brainstorming session
-Core,bmad-party-mode,Party Mode,PM,Orchestrate multi-agent discussions when you need multiple perspectives or want agents to collaborate.,,anytime,,,false,,
+Core,bmad-party-mode,Party Mode,PM,Orchestrate multi-agent discussions when you need multiple perspectives or want agents to collaborate.,,,anytime,,,false,,
-Core,bmad-help,BMad Help,BH,,,anytime,,,false,,
+Core,bmad-help,BMad Help,BH,,,,anytime,,,false,,
-Core,bmad-index-docs,Index Docs,ID,Use when LLM needs to understand available docs without loading everything.,,anytime,,,false,,
+Core,bmad-index-docs,Index Docs,ID,Use when LLM needs to understand available docs without loading everything.,,,anytime,,,false,,
-Core,bmad-shard-doc,Shard Document,SD,Use when doc becomes too large (>500 lines) to manage effectively.,[path],anytime,,,false,,
+Core,bmad-shard-doc,Shard Document,SD,Use when doc becomes too large (>500 lines) to manage effectively.,,[path],anytime,,,false,,
-Core,bmad-editorial-review-prose,Editorial Review - Prose,EP,Use after drafting to polish written content.,[path],anytime,,,false,report located with target document,three-column markdown table with suggested fixes
+Core,bmad-editorial-review-prose,Editorial Review - Prose,EP,Use after drafting to polish written content.,,[path],anytime,,,false,report located with target document,three-column markdown table with suggested fixes
-Core,bmad-editorial-review-structure,Editorial Review - Structure,ES,Use when doc produced from multiple subprocesses or needs structural improvement.,[path],anytime,,,false,report located with target document,
+Core,bmad-editorial-review-structure,Editorial Review - Structure,ES,Use when doc produced from multiple subprocesses or needs structural improvement.,,[path],anytime,,,false,report located with target document,
-Core,bmad-review-adversarial-general,Adversarial Review,AR,"Use for quality assurance or before finalizing deliverables. Code Review in other modules runs this automatically, but also useful for document reviews.",[path],anytime,,,false,,
+Core,bmad-review-adversarial-general,Adversarial Review,AR,"Use for quality assurance or before finalizing deliverables. Code Review in other modules runs this automatically, but also useful for document reviews.",,[path],anytime,,,false,,
-Core,bmad-review-edge-case-hunter,Edge Case Hunter Review,ECH,Use alongside adversarial review for orthogonal coverage — method-driven not attitude-driven.,[path],anytime,,,false,,
+Core,bmad-review-edge-case-hunter,Edge Case Hunter Review,ECH,Use alongside adversarial review for orthogonal coverage — method-driven not attitude-driven.,,[path],anytime,,,false,,
-Core,bmad-distillator,Distillator,DG,Use when you need token-efficient distillates that preserve all information for downstream LLM consumption.,[path],anytime,,,false,adjacent to source document or specified output_path,distillate markdown file(s)
+Core,bmad-distillator,Distillator,DG,Use when you need token-efficient distillates that preserve all information for downstream LLM consumption.,,[path],anytime,,,false,adjacent to source document or specified output_path,distillate markdown file(s)
-Core,bmad-customize,BMad Customize,BC,"Use when you want to change how an agent or workflow behaves — add persistent facts, swap templates, insert activation hooks, or customize menus. Scans what's customizable, picks the right scope (agent vs workflow), writes the override to _bmad/custom/, and verifies the merge. No TOML hand-authoring required.",,anytime,,,false,{project-root}/_bmad/custom,TOML override files
+Core,bmad-customize,BMad Customize,BC,"Use when you want to change how an agent or workflow behaves — add persistent facts, swap templates, insert activation hooks, or customize menus. Scans what's customizable, picks the right scope (agent vs workflow), writes the override to _bmad/custom/, and verifies the merge. No TOML hand-authoring required.",,,anytime,,,false,{project-root}/_bmad/custom,TOML override files

@@ -11,6 +11,11 @@ user_name:
   default: "BMad"
   result: "{value}"

+project_name:
+  prompt: "What is your project called?"
+  default: "{directory_name}"
+  result: "{value}"
+
 communication_language:
   prompt: "What language should agents use when chatting with you?"
   scope: user

File diff suppressed because it is too large

@@ -0,0 +1,348 @@
/**
 * Installer Channel Resolution Tests
 *
 * Unit tests for the pure planning/resolution modules:
 * - tools/installer/modules/channel-plan.js
 * - tools/installer/modules/channel-resolver.js
 *
 * Neither module does I/O outside of GitHub tag lookups (which we don't
 * exercise here) and semver math. All tests are deterministic.
 *
 * Usage: node test/test-installer-channels.js
 */

const {
  parseChannelOptions,
  decideChannelForModule,
  buildPlan,
  orphanPinWarnings,
  bundledTargetWarnings,
  parsePinSpec,
} = require('../tools/installer/modules/channel-plan');

const { parseGitHubRepo, normalizeStableTag, classifyUpgrade, releaseNotesUrl } = require('../tools/installer/modules/channel-resolver');

const colors = {
  reset: '\u001B[0m',
  green: '\u001B[32m',
  red: '\u001B[31m',
  yellow: '\u001B[33m',
  cyan: '\u001B[36m',
  dim: '\u001B[2m',
};

let passed = 0;
let failed = 0;

function assert(condition, testName, errorMessage = '') {
  if (condition) {
    console.log(`${colors.green}✓${colors.reset} ${testName}`);
    passed++;
  } else {
    console.log(`${colors.red}✗${colors.reset} ${testName}`);
    if (errorMessage) {
      console.log(` ${colors.dim}${errorMessage}${colors.reset}`);
    }
    failed++;
  }
}

function assertEqual(actual, expected, testName) {
  const ok = actual === expected;
  assert(ok, testName, ok ? '' : `expected ${JSON.stringify(expected)}, got ${JSON.stringify(actual)}`);
}

function section(title) {
  console.log(`\n${colors.cyan}── ${title} ──${colors.reset}`);
}

function runTests() {
  // ─────────────────────────────────────────────────────────────────────────
  // channel-plan.js :: parsePinSpec
  // ─────────────────────────────────────────────────────────────────────────
  section('channel-plan :: parsePinSpec');

  {
    const r = parsePinSpec('bmb=v1.2.3');
    assert(r && r.code === 'bmb' && r.tag === 'v1.2.3', 'valid CODE=TAG');
  }
  {
    const r = parsePinSpec(' cis = v0.1.0 ');
    assert(r && r.code === 'cis' && r.tag === 'v0.1.0', 'trims whitespace around code and tag');
  }
  assert(parsePinSpec('') === null, 'empty string returns null');
  assert(parsePinSpec('bmb') === null, 'missing = returns null');
  assert(parsePinSpec('=v1.0.0') === null, 'leading = returns null');
  assert(parsePinSpec('bmb=') === null, 'trailing = returns null');
  assert(parsePinSpec(null) === null, 'null input returns null');
  let undef;
  assert(parsePinSpec(undef) === null, 'undefined input returns null');
  assert(parsePinSpec(42) === null, 'non-string input returns null');

  // ─────────────────────────────────────────────────────────────────────────
  // channel-plan.js :: parseChannelOptions
  // ─────────────────────────────────────────────────────────────────────────
  section('channel-plan :: parseChannelOptions');

  {
    const r = parseChannelOptions({});
    assert(r.global === null, 'empty: global is null');
    assert(r.nextSet instanceof Set && r.nextSet.size === 0, 'empty: nextSet is empty Set');
    assert(r.pins instanceof Map && r.pins.size === 0, 'empty: pins is empty Map');
    assert(Array.isArray(r.warnings) && r.warnings.length === 0, 'empty: no warnings');
    assert(r.acceptBypass === false, 'empty: acceptBypass false by default');
  }
  {
    const r = parseChannelOptions({ channel: 'stable' });
    assertEqual(r.global, 'stable', '--channel=stable sets global');
  }
  {
    const r = parseChannelOptions({ channel: 'NEXT' });
    assertEqual(r.global, 'next', '--channel is case-insensitive');
  }
  {
    const r = parseChannelOptions({ allStable: true });
    assertEqual(r.global, 'stable', '--all-stable sets global stable');
  }
  {
    const r = parseChannelOptions({ allNext: true });
    assertEqual(r.global, 'next', '--all-next sets global next');
  }
  {
    const r = parseChannelOptions({ channel: 'bogus' });
    assert(r.global === null, 'invalid --channel value is rejected (global stays null)');
    assert(
      r.warnings.some((w) => w.includes("Ignoring invalid --channel value 'bogus'")),
      'invalid --channel produces a warning',
    );
  }
  {
    // --all-stable and --all-next conflict → warning, first-wins
    const r = parseChannelOptions({ allStable: true, allNext: true });
    assertEqual(r.global, 'stable', 'conflict: first flag (--all-stable) wins');
    assert(
      r.warnings.some((w) => w.includes('Conflicting channel flags')),
      'conflict produces warning',
    );
  }
  {
    const r = parseChannelOptions({ next: ['bmb', 'cis', ' '] });
    assert(r.nextSet.has('bmb') && r.nextSet.has('cis'), '--next=CODE adds to nextSet');
    assert(!r.nextSet.has(''), 'blank --next entries are skipped');
  }
  {
    const r = parseChannelOptions({ pin: ['bmb=v1.0.0', 'cis=v2.0.0'] });
    assertEqual(r.pins.get('bmb'), 'v1.0.0', '--pin bmb=v1.0.0 recorded');
    assertEqual(r.pins.get('cis'), 'v2.0.0', '--pin cis=v2.0.0 recorded');
  }
  {
    const r = parseChannelOptions({ pin: ['bmb=v1.0.0', 'bmb=v1.1.0'] });
    assertEqual(r.pins.get('bmb'), 'v1.1.0', 'duplicate --pin: last wins');
    assert(
      r.warnings.some((w) => w.includes('--pin specified multiple times')),
      'duplicate --pin produces warning',
    );
  }
  {
    const r = parseChannelOptions({ pin: ['malformed-no-equals'] });
    assert(r.pins.size === 0, 'malformed --pin is ignored');
    assert(
      r.warnings.some((w) => w.includes('malformed --pin')),
      'malformed --pin warns',
    );
  }
  {
    const r = parseChannelOptions({ yes: true });
    assertEqual(r.acceptBypass, true, '--yes sets acceptBypass so curator-bypass prompt is auto-confirmed');
  }
  {
    const r = parseChannelOptions({ acceptBypass: true });
    assertEqual(r.acceptBypass, true, 'explicit acceptBypass: true honored');
  }

  // ─────────────────────────────────────────────────────────────────────────
  // channel-plan.js :: decideChannelForModule (precedence)
  // ─────────────────────────────────────────────────────────────────────────
  section('channel-plan :: decideChannelForModule (precedence)');

  const emptyOpts = parseChannelOptions({});

  {
    const r = decideChannelForModule({ code: 'bmb', channelOptions: emptyOpts });
    assertEqual(r.channel, 'stable', 'no signal → stable default');
    assertEqual(r.source, 'default', 'source: default');
  }
  {
    const r = decideChannelForModule({ code: 'bmb', channelOptions: emptyOpts, registryDefault: 'next' });
    assertEqual(r.channel, 'next', 'registry default applied when no flags');
    assertEqual(r.source, 'registry', 'source: registry');
  }
  {
    const r = decideChannelForModule({ code: 'bmb', channelOptions: emptyOpts, registryDefault: 'bogus' });
    assertEqual(r.channel, 'stable', 'invalid registry default ignored, falls to stable');
  }
  {
    const opts = parseChannelOptions({ channel: 'next' });
    const r = decideChannelForModule({ code: 'bmb', channelOptions: opts, registryDefault: 'stable' });
    assertEqual(r.channel, 'next', 'global --channel beats registry default');
    assertEqual(r.source, 'flag:--channel', 'source reflects --channel origin');
  }
  {
    const opts = parseChannelOptions({ channel: 'stable', next: ['bmb'] });
    const r = decideChannelForModule({ code: 'bmb', channelOptions: opts });
    assertEqual(r.channel, 'next', '--next=bmb beats --channel=stable for bmb');
    assertEqual(r.source, 'flag:--next', 'source: flag:--next');
  }
  {
    const opts = parseChannelOptions({ channel: 'next', pin: ['bmb=v1.0.0'] });
    const r = decideChannelForModule({ code: 'bmb', channelOptions: opts });
    assertEqual(r.channel, 'pinned', '--pin beats --channel');
    assertEqual(r.pin, 'v1.0.0', 'pin value carried through');
    assertEqual(r.source, 'flag:--pin', 'source: flag:--pin');
  }
  {
    const opts = parseChannelOptions({ next: ['bmb'], pin: ['bmb=v1.0.0'] });
    const r = decideChannelForModule({ code: 'bmb', channelOptions: opts });
    assertEqual(r.channel, 'pinned', '--pin beats --next for same code');
  }

  // ─────────────────────────────────────────────────────────────────────────
  // channel-plan.js :: buildPlan, orphanPinWarnings, bundledTargetWarnings
  // ─────────────────────────────────────────────────────────────────────────
  section('channel-plan :: buildPlan / warnings');

  {
    const opts = parseChannelOptions({ allStable: true, pin: ['bmb=v1.0.0'] });
    const plan = buildPlan({
      modules: [
        { code: 'bmb', defaultChannel: 'stable' },
        { code: 'cis', defaultChannel: 'stable' },
      ],
      channelOptions: opts,
    });
    assertEqual(plan.get('bmb').channel, 'pinned', 'buildPlan: bmb pinned');
    assertEqual(plan.get('cis').channel, 'stable', 'buildPlan: cis stable via global');
  }
  {
    const opts = parseChannelOptions({ pin: ['ghost=v1.0.0', 'bmb=v1.0.0'], next: ['gds'] });
    const warnings = orphanPinWarnings(opts, ['bmb']);
    assert(
      warnings.some((w) => w.includes("--pin for 'ghost'")),
      'orphanPinWarnings: flags pin for unselected module',
    );
    assert(
      warnings.some((w) => w.includes("--next for 'gds'")),
      'orphanPinWarnings: flags --next for unselected module',
    );
    assert(!warnings.some((w) => w.includes("'bmb'")), 'orphanPinWarnings: no warning for selected module');
  }
  {
    const opts = parseChannelOptions({ pin: ['bmm=v1.0.0'], next: ['core'] });
    const warnings = bundledTargetWarnings(opts, ['core', 'bmm']);
    assert(
      warnings.some((w) => w.includes('bundled module')),
      'bundledTargetWarnings: warns bundled pin',
    );
    assert(warnings.length === 2, 'bundledTargetWarnings: both pin and next warned');
  }

  // ─────────────────────────────────────────────────────────────────────────
  // channel-resolver.js :: parseGitHubRepo
  // ─────────────────────────────────────────────────────────────────────────
  section('channel-resolver :: parseGitHubRepo');

  {
    const r = parseGitHubRepo('https://github.com/bmad-code-org/BMAD-METHOD');
    assert(r && r.owner === 'bmad-code-org' && r.repo === 'BMAD-METHOD', 'https URL basic');
  }
  {
    const r = parseGitHubRepo('https://github.com/bmad-code-org/BMAD-METHOD.git');
    assert(r && r.repo === 'BMAD-METHOD', '.git suffix stripped');
  }
  {
    const r = parseGitHubRepo('https://github.com/bmad-code-org/BMAD-METHOD/');
    assert(r && r.repo === 'BMAD-METHOD', 'trailing slash stripped');
  }
  {
    const r = parseGitHubRepo('https://github.com/org/repo/tree/main/subdir');
    assert(r && r.owner === 'org' && r.repo === 'repo', 'deep path yields owner/repo');
  }
  {
    const r = parseGitHubRepo('git@github.com:org/repo.git');
    assert(r && r.owner === 'org' && r.repo === 'repo', 'SSH URL parsed');
  }
  assert(parseGitHubRepo('https://gitlab.com/foo/bar') === null, 'non-github URL returns null');
  assert(parseGitHubRepo('') === null, 'empty string returns null');
  assert(parseGitHubRepo(null) === null, 'null input returns null');
  assert(parseGitHubRepo(123) === null, 'non-string input returns null');

  // ─────────────────────────────────────────────────────────────────────────
  // channel-resolver.js :: normalizeStableTag
  // ─────────────────────────────────────────────────────────────────────────
  section('channel-resolver :: normalizeStableTag');

  assertEqual(normalizeStableTag('v1.2.3'), '1.2.3', 'strips leading v');
  assertEqual(normalizeStableTag('1.2.3'), '1.2.3', 'bare semver accepted');
  assertEqual(normalizeStableTag('v1.2.3-alpha.1'), null, 'prerelease -alpha excluded');
  assertEqual(normalizeStableTag('v1.2.3-beta'), null, 'prerelease -beta excluded');
  assertEqual(normalizeStableTag('v1.2.3-rc.1'), null, 'prerelease -rc excluded');
  assertEqual(normalizeStableTag('not-a-version'), null, 'invalid string returns null');
  assertEqual(normalizeStableTag('v1.2'), null, 'incomplete semver returns null');
  assertEqual(normalizeStableTag(null), null, 'null returns null');
  assertEqual(normalizeStableTag(123), null, 'non-string returns null');

  // ─────────────────────────────────────────────────────────────────────────
  // channel-resolver.js :: classifyUpgrade
  // ─────────────────────────────────────────────────────────────────────────
  section('channel-resolver :: classifyUpgrade');

  assertEqual(classifyUpgrade('v1.2.3', 'v1.2.3'), 'none', 'equal versions → none');
  assertEqual(classifyUpgrade('v1.2.3', 'v1.2.2'), 'none', 'downgrade → none');
  assertEqual(classifyUpgrade('v1.2.3', 'v1.2.4'), 'patch', 'patch bump');
  assertEqual(classifyUpgrade('v1.2.3', 'v1.3.0'), 'minor', 'minor bump');
  assertEqual(classifyUpgrade('v1.2.3', 'v2.0.0'), 'major', 'major bump');
  assertEqual(classifyUpgrade('1.2.3', '1.2.4'), 'patch', 'unprefixed versions work');
  assertEqual(classifyUpgrade('main', 'v1.2.3'), 'unknown', 'non-semver current → unknown');
  assertEqual(classifyUpgrade('v1.2.3', 'main'), 'unknown', 'non-semver next → unknown');
  assertEqual(classifyUpgrade('', ''), 'unknown', 'both empty → unknown');

  // ─────────────────────────────────────────────────────────────────────────
  // channel-resolver.js :: releaseNotesUrl
  // ─────────────────────────────────────────────────────────────────────────
  section('channel-resolver :: releaseNotesUrl');

  assertEqual(
    releaseNotesUrl('https://github.com/bmad-code-org/BMAD-METHOD', 'v1.2.3'),
    'https://github.com/bmad-code-org/BMAD-METHOD/releases/tag/v1.2.3',
    'builds standard release URL',
  );
  assertEqual(releaseNotesUrl('https://gitlab.com/foo/bar', 'v1.0.0'), null, 'non-github repo → null');
  assertEqual(releaseNotesUrl('https://github.com/foo/bar', null), null, 'null tag → null');
  assertEqual(releaseNotesUrl('', 'v1.0.0'), null, 'empty URL → null');

  // ─────────────────────────────────────────────────────────────────────────
  // Summary
  // ─────────────────────────────────────────────────────────────────────────
  console.log('');
  console.log(`${colors.cyan}========================================`);
  console.log('Test Results:');
  console.log(` Passed: ${colors.green}${passed}${colors.reset}`);
  console.log(` Failed: ${colors.red}${failed}${colors.reset}`);
  console.log(`========================================${colors.reset}\n`);

  if (failed === 0) {
    console.log(`${colors.green}✨ All channel resolution tests passed!${colors.reset}\n`);
    process.exit(0);
  } else {
    console.log(`${colors.red}❌ Some channel resolution tests failed${colors.reset}\n`);
    process.exit(1);
  }
}

try {
  runTests();
} catch (error) {
  console.error(`${colors.red}Test runner failed:${colors.reset}`, error.message);
  console.error(error.stack);
  process.exit(1);
}

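The assertions above pin the resolver helpers' contracts down tightly enough to sketch plausible implementations. Assuming the `semver` npm package (the file header only says the module does "semver math"; the actual channel-resolver source is not part of this diff), something like the following would satisfy every `normalizeStableTag` and `classifyUpgrade` case:

```js
// Hedged sketches reverse-engineered from the assertions above, not the
// actual channel-resolver source.
const semver = require('semver');

function normalizeStableTagSketch(tag) {
  if (typeof tag !== 'string') return null;
  const version = semver.valid(tag); // accepts 'v1.2.3' and '1.2.3', rejects 'v1.2'
  if (!version) return null;
  return semver.prerelease(version) ? null : version; // exclude -alpha/-beta/-rc tags
}

function classifyUpgradeSketch(current, next) {
  const a = semver.valid(current);
  const b = semver.valid(next);
  if (!a || !b) return 'unknown'; // branch names, empty strings, etc.
  if (!semver.gt(b, a)) return 'none'; // equal versions and downgrades
  return semver.diff(a, b); // 'major' | 'minor' | 'patch'
}
```
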
@ -0,0 +1,294 @@
|
||||||
|
/**
|
||||||
|
* parseSource() URL parsing tests
|
||||||
|
*
|
||||||
|
* Verifies that CustomModuleManager.parseSource() correctly handles Git URLs
|
||||||
|
* across arbitrary hosts and path shapes (deep paths, nested groups, browse
|
||||||
|
* links, repo names containing dots, etc.) using host-agnostic rules.
|
||||||
|
*
|
||||||
|
* Usage: node test/test-parse-source-urls.js
|
||||||
|
*/
|
||||||
|
|
||||||
|
const { CustomModuleManager } = require('../tools/installer/modules/custom-module-manager');
|
||||||
|
|
||||||
|
// ANSI colors
|
||||||
|
const colors = {
|
||||||
|
reset: '\u001B[0m',
|
||||||
|
green: '\u001B[32m',
|
||||||
|
red: '\u001B[31m',
|
||||||
|
cyan: '\u001B[36m',
|
||||||
|
dim: '\u001B[2m',
|
||||||
|
};
|
||||||
|
|
||||||
|
let passed = 0;
|
||||||
|
let failed = 0;
|
||||||
|
|
||||||
|
function assert(condition, testName, errorMessage = '') {
|
||||||
|
if (condition) {
|
||||||
|
console.log(`${colors.green}✓${colors.reset} ${testName}`);
|
||||||
|
passed++;
|
||||||
|
} else {
|
||||||
|
console.log(`${colors.red}✗${colors.reset} ${testName}`);
|
||||||
|
if (errorMessage) {
|
||||||
|
console.log(` ${colors.dim}${errorMessage}${colors.reset}`);
|
||||||
|
}
|
||||||
|
failed++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const manager = new CustomModuleManager();
|
||||||
|
|
||||||
|
// ─── Deep path shapes (4+ segments) ─────────────────────────────────────────
|
||||||
|
|
||||||
|
console.log(`\n${colors.cyan}Deep path shapes${colors.reset}\n`);
|
||||||
|
|
||||||
|
{
|
||||||
|
// Hosts that expose the repo at a nested path like /<org>/<project>/<marker>/<repo>.
|
||||||
|
// The parser must preserve the full path (no stripping of intermediate segments).
|
||||||
|
const result = manager.parseSource('https://git.example.com/myorg/MyProject/_git/my-module');
|
||||||
|
assert(result.isValid === true, 'nested-path URL is valid');
|
||||||
|
assert(result.type === 'url', 'nested-path type is url');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://git.example.com/myorg/MyProject/_git/my-module',
|
||||||
|
'nested-path cloneUrl preserves full path',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
assert(result.subdir === null, 'nested-path URL has no subdir');
|
||||||
|
assert(
|
||||||
|
result.cacheKey === 'git.example.com/myorg/MyProject/_git/my-module',
|
||||||
|
'nested-path cacheKey includes full repo path',
|
||||||
|
`Got: ${result.cacheKey}`,
|
||||||
|
);
|
||||||
|
assert(result.displayName === '_git/my-module', 'nested-path displayName uses last two segments', `Got: ${result.displayName}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
const result = manager.parseSource('https://git.example.com/myorg/MyProject/_git/my-module.git');
|
||||||
|
assert(result.isValid === true, 'nested-path URL with .git suffix is valid');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://git.example.com/myorg/MyProject/_git/my-module',
|
||||||
|
'nested-path .git suffix stripped from cloneUrl',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Browse links that use ?path=/... to point at a subdirectory.
|
||||||
|
const result = manager.parseSource('https://git.example.com/myorg/MyProject/_git/my-module?path=/path/to/subdir');
|
||||||
|
assert(result.isValid === true, 'URL with ?path= is valid');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://git.example.com/myorg/MyProject/_git/my-module',
|
||||||
|
'?path= cloneUrl excludes subdir',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
assert(result.subdir === 'path/to/subdir', '?path= subdir correctly extracted', `Got: ${result.subdir}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Azure DevOps URLs (Issue #2268) ────────────────────────────────────────
|
||||||
|
|
||||||
|
console.log(`\n${colors.cyan}Azure DevOps URLs (Issue #2268)${colors.reset}\n`);
|
||||||
|
|
||||||
|
{
|
||||||
|
// Modern dev.azure.com format — the exact URL from the bug report.
|
||||||
|
const result = manager.parseSource('https://dev.azure.com/myorg/MyProject/_git/my-module');
|
||||||
|
assert(result.isValid === true, 'ADO modern URL is valid');
|
||||||
|
assert(result.type === 'url', 'ADO modern type is url');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://dev.azure.com/myorg/MyProject/_git/my-module',
|
||||||
|
'ADO modern cloneUrl preserves full _git path',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
assert(
|
||||||
|
result.cacheKey === 'dev.azure.com/myorg/MyProject/_git/my-module',
|
||||||
|
'ADO modern cacheKey includes full path',
|
||||||
|
`Got: ${result.cacheKey}`,
|
||||||
|
);
|
||||||
|
assert(result.subdir === null, 'ADO modern URL has no subdir');
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Modern format with .git suffix
|
||||||
|
const result = manager.parseSource('https://dev.azure.com/myorg/MyProject/_git/my-module.git');
|
||||||
|
assert(result.isValid === true, 'ADO modern .git suffix is valid');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://dev.azure.com/myorg/MyProject/_git/my-module',
|
||||||
|
'ADO modern .git suffix stripped from cloneUrl',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Modern format with ?path= subdir (browse link)
|
||||||
|
const result = manager.parseSource('https://dev.azure.com/myorg/MyProject/_git/my-module?path=/src/skills');
|
||||||
|
assert(result.isValid === true, 'ADO modern ?path= is valid');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://dev.azure.com/myorg/MyProject/_git/my-module',
|
||||||
|
'ADO modern ?path= cloneUrl excludes subdir',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
assert(result.subdir === 'src/skills', 'ADO modern ?path= subdir extracted', `Got: ${result.subdir}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Legacy visualstudio.com format
|
||||||
|
const result = manager.parseSource('https://myorg.visualstudio.com/MyProject/_git/my-module');
|
||||||
|
assert(result.isValid === true, 'ADO legacy URL is valid');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://myorg.visualstudio.com/MyProject/_git/my-module',
|
||||||
|
'ADO legacy cloneUrl preserves full path',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
assert(
|
||||||
|
result.cacheKey === 'myorg.visualstudio.com/MyProject/_git/my-module',
|
||||||
|
'ADO legacy cacheKey includes full path',
|
||||||
|
`Got: ${result.cacheKey}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Legacy format with .git suffix
|
||||||
|
const result = manager.parseSource('https://myorg.visualstudio.com/MyProject/_git/my-module.git');
|
||||||
|
assert(result.isValid === true, 'ADO legacy .git suffix is valid');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://myorg.visualstudio.com/MyProject/_git/my-module',
|
||||||
|
'ADO legacy .git suffix stripped from cloneUrl',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Legacy format with ?path= subdir
|
||||||
|
const result = manager.parseSource('https://myorg.visualstudio.com/MyProject/_git/my-module?path=/src');
|
||||||
|
assert(result.isValid === true, 'ADO legacy ?path= is valid');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://myorg.visualstudio.com/MyProject/_git/my-module',
|
||||||
|
'ADO legacy ?path= cloneUrl excludes subdir',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
assert(result.subdir === 'src', 'ADO legacy ?path= subdir extracted', `Got: ${result.subdir}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Subdomain hosts ────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
console.log(`\n${colors.cyan}Subdomain hosts${colors.reset}\n`);
|
||||||
|
|
||||||
|
{
|
||||||
|
const result = manager.parseSource('https://myorg.example.com/MyProject/_git/my-module');
|
||||||
|
assert(result.isValid === true, 'subdomain URL is valid');
|
||||||
|
assert(result.type === 'url', 'subdomain type is url');
|
||||||
|
assert(
|
||||||
|
result.cloneUrl === 'https://myorg.example.com/MyProject/_git/my-module',
|
||||||
|
'subdomain cloneUrl preserves full path',
|
||||||
|
`Got: ${result.cloneUrl}`,
|
||||||
|
);
|
||||||
|
assert(result.subdir === null, 'subdomain URL has no subdir');
|
||||||
|
assert(
|
||||||
|
result.cacheKey === 'myorg.example.com/MyProject/_git/my-module',
|
||||||
|
'subdomain cacheKey includes full repo path',
|
||||||
|
`Got: ${result.cacheKey}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Simple owner/repo URLs (regression) ────────────────────────────────────
|
||||||
|
|
||||||
|
console.log(`\n${colors.cyan}Simple owner/repo URLs (regression check)${colors.reset}\n`);
|
||||||
|
|
||||||
|
{
|
||||||
|
const result = manager.parseSource('https://github.com/owner/repo');
|
||||||
|
assert(result.isValid === true, 'GitHub basic URL still valid');
|
||||||
|
assert(result.cloneUrl === 'https://github.com/owner/repo', 'GitHub cloneUrl unchanged', `Got: ${result.cloneUrl}`);
|
||||||
|
assert(result.cacheKey === 'github.com/owner/repo', 'GitHub cacheKey unchanged', `Got: ${result.cacheKey}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
const result = manager.parseSource('https://github.com/owner/repo/tree/main/subdir');
|
||||||
|
assert(result.isValid === true, 'GitHub URL with tree path still valid');
|
||||||
|
+  assert(result.cloneUrl === 'https://github.com/owner/repo', 'GitHub tree URL cloneUrl correct', `Got: ${result.cloneUrl}`);
+  assert(result.subdir === 'subdir', 'GitHub tree subdir still extracted', `Got: ${result.subdir}`);
+}
+
+{
+  const result = manager.parseSource('git@github.com:owner/repo.git');
+  assert(result.isValid === true, 'SSH URL still valid');
+  assert(result.cloneUrl === 'git@github.com:owner/repo.git', 'SSH cloneUrl unchanged', `Got: ${result.cloneUrl}`);
+}
+
+// ─── Generic URL handling (any host, any path depth) ────────────────────────
+
+console.log(`\n${colors.cyan}Generic URL handling${colors.reset}\n`);
+
+{
+  // GitLab nested groups — the old 2-segment regex would have failed this.
+  const result = manager.parseSource('https://gitlab.com/group/subgroup/repo');
+  assert(result.isValid === true, 'GitLab nested-group URL is valid');
+  assert(
+    result.cloneUrl === 'https://gitlab.com/group/subgroup/repo',
+    'GitLab nested-group cloneUrl preserves full path',
+    `Got: ${result.cloneUrl}`,
+  );
+  assert(
+    result.cacheKey === 'gitlab.com/group/subgroup/repo',
+    'GitLab nested-group cacheKey includes full path',
+    `Got: ${result.cacheKey}`,
+  );
+  assert(result.displayName === 'subgroup/repo', 'GitLab nested-group displayName uses last two segments', `Got: ${result.displayName}`);
+}
+
+{
+  const result = manager.parseSource('https://gitlab.com/group/subgroup/repo/-/tree/main/src/module');
+  assert(result.isValid === true, 'GitLab nested-group tree URL is valid');
+  assert(
+    result.cloneUrl === 'https://gitlab.com/group/subgroup/repo',
+    'GitLab nested-group tree cloneUrl excludes subdir',
+    `Got: ${result.cloneUrl}`,
+  );
+  assert(result.subdir === 'src/module', 'GitLab nested-group tree subdir extracted', `Got: ${result.subdir}`);
+}
+
+{
+  // Self-hosted host with a repo name containing dots — the old regex
+  // explicitly excluded dots from the repo segment.
+  const result = manager.parseSource('https://git.example.com/owner/my.repo.name');
+  assert(result.isValid === true, 'repo name with dots is valid');
+  assert(
+    result.cloneUrl === 'https://git.example.com/owner/my.repo.name',
+    'repo name with dots preserved in cloneUrl',
+    `Got: ${result.cloneUrl}`,
+  );
+  assert(result.displayName === 'owner/my.repo.name', 'repo name with dots preserved in displayName', `Got: ${result.displayName}`);
+}
+
+{
+  // Browser URL pointing at a ref with NO trailing subdir must still strip
+  // the /tree/<ref> segment from the clone URL.
+  const result = manager.parseSource('https://github.com/owner/repo/tree/main');
+  assert(result.isValid === true, 'tree URL without subdir is valid');
+  assert(
+    result.cloneUrl === 'https://github.com/owner/repo',
+    'tree URL without subdir strips ref from cloneUrl',
+    `Got: ${result.cloneUrl}`,
+  );
+  assert(result.subdir === null, 'tree URL without subdir yields null subdir', `Got: ${result.subdir}`);
+  assert(result.displayName === 'owner/repo', 'tree URL without subdir displayName is owner/repo', `Got: ${result.displayName}`);
+}
+
+{
+  // Same shape for GitLab's /-/tree form and Gitea's /src/branch form.
+  const gitlab = manager.parseSource('https://gitlab.com/group/repo/-/tree/main');
+  assert(
+    gitlab.cloneUrl === 'https://gitlab.com/group/repo' && gitlab.subdir === null,
+    'GitLab /-/tree/<ref> without subdir strips ref',
+    `Got: ${gitlab.cloneUrl} subdir=${gitlab.subdir}`,
+  );
+
+  const gitea = manager.parseSource('https://gitea.example.com/owner/repo/src/branch/main');
+  assert(
+    gitea.cloneUrl === 'https://gitea.example.com/owner/repo' && gitea.subdir === null,
+    'Gitea /src/branch/<ref> without subdir strips ref',
+    `Got: ${gitea.cloneUrl} subdir=${gitea.subdir}`,
+  );
+}
+
+// ─── Summary ────────────────────────────────────────────────────────────────
+
+console.log(`\n${colors.cyan}Results: ${passed} passed, ${failed} failed${colors.reset}\n`);
+process.exit(failed > 0 ? 1 : 0);
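The assertions above pin down host-agnostic parsing: nested GitLab groups, dotted repo names, and ref-segment stripping for GitHub, GitLab, and Gitea browser URLs. A minimal sketch of a parser that would satisfy them — illustrative only, not the actual external-manager implementation, and the helper name is invented:

```js
// Hypothetical sketch of the behavior the test suite above exercises.
function parseSourceSketch(source) {
  if (source.startsWith('git@')) {
    // SSH remotes pass through untouched.
    return { isValid: true, cloneUrl: source, subdir: null };
  }
  let url;
  try {
    url = new URL(source);
  } catch {
    return { isValid: false };
  }
  const segments = url.pathname.split('/').filter(Boolean);
  if (segments.length < 2) return { isValid: false };

  // Scan past owner/repo for a ref marker; everything before it is the repo
  // path, everything after the ref name is the subdir (or null if absent).
  const markers = [['-', 'tree'], ['src', 'branch'], ['tree']];
  let repoSegs = segments;
  let subdir = null;
  outer: for (let i = 2; i < segments.length; i++) {
    for (const m of markers) {
      if (m.every((part, j) => segments[i + j] === part)) {
        repoSegs = segments.slice(0, i);
        subdir = segments.slice(i + m.length + 1).join('/') || null;
        break outer;
      }
    }
  }
  const repoPath = repoSegs.join('/').replace(/\.git$/, '');
  return {
    isValid: true,
    cloneUrl: `${url.origin}/${repoPath}`,
    cacheKey: `${url.host}/${repoPath}`,
    displayName: repoSegs.slice(-2).join('/'),
    subdir,
  };
}
```

Because the scan starts at index 2, a repo literally named `tree` under `owner/tree` never trips the marker check — the same edge the real parser has to respect.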
@ -222,7 +222,6 @@ Support assumption: full Agent Skills support. Gemini CLI docs confirm workspace

 - [x] Confirm Gemini CLI native skills path is `.gemini/skills/{skill-name}/SKILL.md` (per [geminicli.com/docs/cli/skills](https://geminicli.com/docs/cli/skills/))
 - [x] Implement native skills output — target_dir `.gemini/skills`, skill_format true, template_type default (replaces TOML templates)
-- [x] Add legacy cleanup for `.gemini/commands` (via `legacy_targets`)
 - [x] Test fresh install — skills written to `.gemini/skills/bmad-master/SKILL.md` with correct frontmatter
 - [x] Test reinstall/upgrade from legacy TOML command output — legacy dir removed, skills installed
 - [x] Confirm no ancestor conflict protection is needed — Gemini CLI uses workspace > user > extension precedence, no ancestor directory inheritance

@ -236,7 +235,6 @@ Support assumption: full Agent Skills support. iFlow docs confirm workspace skil

 - [x] Confirm iFlow native skills path is `.iflow/skills/{skill-name}/SKILL.md`
 - [x] Implement native skills output — target_dir `.iflow/skills`, skill_format true, template_type default
-- [x] Add legacy cleanup for `.iflow/commands` (via `legacy_targets`)
 - [x] Test fresh install — skills written to `.iflow/skills/bmad-master/SKILL.md`
 - [x] Test legacy cleanup — legacy commands dir removed
 - [x] Implement/extend automated tests — 6 assertions in test suite 24

@ -249,7 +247,6 @@ Support assumption: full Agent Skills support. Qwen Code supports workspace skil

 - [x] Confirm QwenCoder native skills path is `.qwen/skills/{skill-name}/SKILL.md`
 - [x] Implement native skills output — target_dir `.qwen/skills`, skill_format true, template_type default
-- [x] Add legacy cleanup for `.qwen/commands` (via `legacy_targets`)
 - [x] Test fresh install — skills written to `.qwen/skills/bmad-master/SKILL.md`
 - [x] Test legacy cleanup — legacy commands dir removed
 - [x] Implement/extend automated tests — 6 assertions in test suite 25

@ -262,7 +259,6 @@ Support assumption: full Agent Skills support. Rovo Dev now supports workspace s

 - [x] Confirm Rovo Dev native skills path is `.rovodev/skills/{skill-name}/SKILL.md` (per Atlassian blog)
 - [x] Replace 257-line custom `rovodev.js` with config-driven entry in `platform-codes.yaml`
-- [x] Add legacy cleanup for `.rovodev/workflows` (via `legacy_targets`) and BMAD entries in `prompts.yml` (via `cleanupRovoDevPrompts()` in `_config-driven.js`)
 - [x] Test fresh install — skills written to `.rovodev/skills/bmad-master/SKILL.md`
 - [x] Test legacy cleanup — legacy workflows dir removed, `prompts.yml` BMAD entries stripped while preserving user entries
 - [x] Implement/extend automated tests — 8 assertions in test suite 26
@ -23,13 +23,10 @@ checkForUpdate().catch(() => {

 async function checkForUpdate() {
   try {
-    // For beta versions, check the beta tag; otherwise check latest
-    const isBeta =
-      packageJson.version.includes('Beta') ||
-      packageJson.version.includes('beta') ||
-      packageJson.version.includes('alpha') ||
-      packageJson.version.includes('rc');
-    const tag = isBeta ? 'beta' : 'latest';
+    // Prereleases (e.g. 6.5.1-next.0) live on the `next` dist-tag; stable
+    // releases live on `latest`. semver.prerelease() returns null for stable,
+    // so this correctly routes pre-1.0-next/rc/etc. without string matching.
+    const tag = semver.prerelease(packageJson.version) ? 'next' : 'latest';

     const result = execSync(`npm view ${packageName}@${tag} version`, {
       encoding: 'utf8',
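For reference, the `semver.prerelease()` contract the new routing relies on (return values per the semver package's documented behavior):

```js
const semver = require('semver');

semver.prerelease('6.5.1-next.0'); // ['next', 0] → check the 'next' dist-tag
semver.prerelease('7.0.0-rc.1');   // ['rc', 1]   → also routes to 'next'
semver.prerelease('6.6.0');        // null        → check 'latest'
```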
@ -15,7 +15,18 @@ module.exports = {
     ['--modules <modules>', 'Comma-separated list of module IDs to install (e.g., "bmm,bmb")'],
     [
       '--tools <tools>',
-      'Comma-separated list of tool/IDE IDs to configure (e.g., "claude-code,cursor"). Use "none" to skip tool configuration.',
+      'Comma-separated list of tool/IDE IDs to configure (e.g., "claude-code,cursor"). Required for fresh non-interactive (--yes) installs. Run with --list-tools to see all valid IDs.',
+    ],
+    ['--list-tools', 'Print all supported tool/IDE IDs (with target directories) and exit.'],
+    [
+      '--set <spec>',
+      'Set a module config option non-interactively. Spec format: <module>.<key>=<value> (e.g. bmm.project_knowledge=research). Repeatable. Run --list-options to see available keys.',
+      (value, prev) => [...(prev || []), value],
+      [],
+    ],
+    [
+      '--list-options [module]',
+      'List available --set keys for all locally-known official modules, or for a single module by code, then exit.',
     ],
     ['--action <type>', 'Action type for existing installations: install, update, or quick-update'],
     ['--user-name <name>', 'Name for agents to use (default: system username)'],
@ -24,15 +35,65 @@ module.exports = {
     ['--output-folder <path>', 'Output folder path relative to project root (default: _bmad-output)'],
     ['--custom-source <sources>', 'Comma-separated Git URLs or local paths to install custom modules from'],
     ['-y, --yes', 'Accept all defaults and skip prompts where possible'],
+    [
+      '--channel <channel>',
+      'Apply channel (stable|next) to all external modules being installed. --all-stable and --all-next are aliases.',
+    ],
+    ['--all-stable', 'Alias for --channel=stable. Resolves externals to the highest stable release tag.'],
+    ['--all-next', 'Alias for --channel=next. Resolves externals to main HEAD.'],
+    ['--next <code>', 'Install module <code> from main HEAD (next channel). Repeatable.', (value, prev) => [...(prev || []), value], []],
+    [
+      '--pin <spec>',
+      'Pin module to a specific tag: --pin CODE=TAG (e.g. --pin bmb=v1.7.0). Repeatable.',
+      (value, prev) => [...(prev || []), value],
+      [],
+    ],
   ],
   action: async (options) => {
     try {
+      if (options.listTools) {
+        const { formatPlatformList } = require('../ide/platform-codes');
+        process.stdout.write((await formatPlatformList()) + '\n');
+        process.exit(0);
+      }
+
+      if (options.listOptions !== undefined) {
+        const { formatOptionsList } = require('../list-options');
+        const moduleArg = options.listOptions === true ? null : options.listOptions;
+        const { text, ok } = await formatOptionsList(moduleArg);
+        const stream = ok ? process.stdout : process.stderr;
+        // process.exit() forces immediate termination and can truncate the
+        // buffered write when stdout/stderr is piped or captured by CI. Wait
+        // for the write to flush, then set process.exitCode and return so the
+        // event loop drains naturally. Non-zero exit when a single-module
+        // lookup misses so a CI typo like `--list-options bmn` doesn't look
+        // successful in scripts.
+        await new Promise((resolve, reject) => {
+          stream.write(text + '\n', (error) => (error ? reject(error) : resolve()));
+        });
+        process.exitCode = ok ? 0 : 1;
+        return;
+      }
+
       // Set debug flag as environment variable for all components
       if (options.debug) {
         process.env.BMAD_DEBUG_MANIFEST = 'true';
         await prompts.log.info('Debug mode enabled');
       }
+
+      // Validate --set syntax up-front so malformed entries fail fast,
+      // before we touch the network or filesystem. Parsed entries are
+      // re-derived inside ui.js where overrides are seeded.
+      if (options.set && options.set.length > 0) {
+        const { parseSetEntries } = require('../set-overrides');
+        try {
+          parseSetEntries(options.set);
+        } catch (error) {
+          await prompts.log.error(error.message);
+          process.exit(1);
+        }
+      }
+
       const config = await ui.promptInstall(options);

       // Handle cancel
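`parseSetEntries` itself isn't part of this diff. A hypothetical sketch of the validation contract implied by the flag help — `<module>.<key>=<value>`, repeatable, grouped per module; the function name, regex, and return shape are assumptions:

```js
// Sketch only — the real implementation lives in tools/installer/set-overrides.js.
function parseSetEntriesSketch(entries) {
  const overrides = {};
  for (const entry of entries) {
    const match = /^([\w-]+)\.([\w-]+)=(.*)$/.exec(entry);
    if (!match) {
      throw new Error(`Invalid --set spec "${entry}"; expected <module>.<key>=<value>`);
    }
    const [, module, key, value] = match;
    (overrides[module] ??= {})[key] = value;
  }
  return overrides;
}

parseSetEntriesSketch(['bmm.project_knowledge=research']);
// → { bmm: { project_knowledge: 'research' } }
```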
@ -41,8 +102,13 @@ module.exports = {
         process.exit(0);
       }

-      // Handle quick update separately
+      // Handle quick update separately. --set is a post-install TOML patch so
+      // it works the same way for quick-update as for a regular install — the
+      // installer runs, then `applySetOverrides` patches the central config
+      // files. Pass the parsed overrides through.
       if (config.actionType === 'quick-update') {
+        const { parseSetEntries } = require('../set-overrides');
+        config.setOverrides = parseSetEntries(options.set || []);
         const result = await installer.quickUpdate(config);
         await prompts.log.success('Quick update complete!');
         await prompts.log.info(`Updated ${result.moduleCount} modules with preserved settings (${result.modules.join(', ')})`);
@ -68,7 +134,7 @@ module.exports = {
       } else {
         await prompts.log.error(`Installation failed: ${error.message}`);
       }
-      if (error.stack) {
+      if (error.stack && !error.expected) {
         await prompts.log.message(error.stack);
       }
     } catch {
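The new `!error.expected` guard implies errors can opt out of stack dumps. One way that flag might be set upstream — a sketch, not the installer's actual error type:

```js
// Hypothetical: mark user-facing validation failures so the catch block
// above prints the message but skips the noisy stack trace.
class ExpectedError extends Error {
  constructor(message) {
    super(message);
    this.expected = true;
  }
}

function requireTools(options) {
  if (!options.tools) {
    throw new ExpectedError('--tools is required for fresh non-interactive (--yes) installs');
  }
}
```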
@ -3,7 +3,19 @@
  * User input comes from either UI answers or headless CLI flags.
  */
 class Config {
-  constructor({ directory, modules, ides, skipPrompts, verbose, actionType, coreConfig, moduleConfigs, quickUpdate }) {
+  constructor({
+    directory,
+    modules,
+    ides,
+    skipPrompts,
+    verbose,
+    actionType,
+    coreConfig,
+    moduleConfigs,
+    quickUpdate,
+    channelOptions,
+    setOverrides,
+  }) {
     this.directory = directory;
     this.modules = Object.freeze([...modules]);
     this.ides = Object.freeze([...ides]);

@ -13,6 +25,13 @@ class Config {
     this.coreConfig = coreConfig;
     this.moduleConfigs = moduleConfigs;
     this._quickUpdate = quickUpdate;
+    // channelOptions carry a Map + Set; don't deep-freeze.
+    this.channelOptions = channelOptions || null;
+    // Parsed `--set <module>.<key>=<value>` overrides, applied as a TOML
+    // patch AFTER the install finishes. Shape: { moduleCode: { key: value } }.
+    // Intentionally NOT integrated with the prompt/template/schema flow; see
+    // `tools/installer/set-overrides.js` for the rationale and tradeoffs.
+    this.setOverrides = setOverrides || {};
     Object.freeze(this);
   }

@ -37,6 +56,8 @@ class Config {
       coreConfig: userInput.coreConfig || {},
       moduleConfigs: userInput.moduleConfigs || null,
       quickUpdate: userInput._quickUpdate || false,
+      channelOptions: userInput.channelOptions || null,
+      setOverrides: userInput.setOverrides || {},
     });
   }
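Why the comment warns against deep-freezing: `Object.freeze` is shallow, so the frozen `Config` keeps the `Map`/`Set` inside `channelOptions` mutable — presumably so later resolution steps can keep recording into them. A minimal demonstration:

```js
const cfg = Object.freeze({ channelOptions: { pins: new Map() } });

cfg.channelOptions.pins.set('bmb', 'v1.7.0'); // fine — only the top level is frozen
// cfg.channelOptions = null;                 // TypeError in strict mode

console.log(cfg.channelOptions.pins.get('bmb')); // 'v1.7.0'
```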
@ -11,8 +11,11 @@ const prompts = require('../prompts');
 const { BMAD_FOLDER_NAME } = require('../ide/shared/path-utils');
 const { InstallPaths } = require('./install-paths');
 const { ExternalModuleManager } = require('../modules/external-manager');
+const { resolveModuleVersion } = require('../modules/version-resolver');
+const { MODULE_HELP_CSV_HEADER } = require('../modules/module-help-schema');

 const { ExistingInstall } = require('./existing-install');
+const { warnPreNativeSkillsLegacy } = require('./legacy-warnings');

 class Installer {
   constructor() {

@ -24,44 +27,6 @@ class Installer {
     this.bmadFolderName = BMAD_FOLDER_NAME;
   }

-  /**
-   * Read the module version from .claude-plugin/marketplace.json
-   * Walks up from sourcePath looking for .claude-plugin/marketplace.json
-   * @param {string} sourcePath - Module source directory
-   * @returns {string} Version string or empty string
-   */
-  async _getMarketplaceVersion(sourcePath) {
-    let dir = sourcePath;
-    for (let i = 0; i < 5; i++) {
-      const marketplacePath = path.join(dir, '.claude-plugin', 'marketplace.json');
-      if (await fs.pathExists(marketplacePath)) {
-        try {
-          const data = JSON.parse(await fs.readFile(marketplacePath, 'utf8'));
-          return this._extractMarketplaceVersion(data);
-        } catch {
-          return '';
-        }
-      }
-      const parent = path.dirname(dir);
-      if (parent === dir) break;
-      dir = parent;
-    }
-    return '';
-  }
-
-  /**
-   * Extract the highest version from marketplace.json plugins array
-   */
-  _extractMarketplaceVersion(data) {
-    const plugins = data?.plugins;
-    if (!Array.isArray(plugins) || plugins.length === 0) return '';
-    let best = '';
-    for (const p of plugins) {
-      if (p.version && (!best || p.version > best)) best = p.version;
-    }
-    return best;
-  }
-
   /**
    * Main installation method
    * @param {Object} config - Installation configuration

@ -78,6 +43,16 @@ class Installer {
     const officialModules = await OfficialModules.build(config, paths);
     const existingInstall = await ExistingInstall.detect(paths.bmadDir);

+    try {
+      await warnPreNativeSkillsLegacy({
+        projectRoot: paths.projectRoot,
+        existingVersion: existingInstall.installed ? existingInstall.version : null,
+      });
+    } catch (error) {
+      // Legacy-dir scan is informational; never let it abort install.
+      await prompts.log.warn(`Warning: Could not check for legacy BMAD entries: ${error.message}`);
+    }
+
     if (existingInstall.installed) {
       await this._removeDeselectedModules(existingInstall, config, paths);
       updateState = await this._prepareUpdateState(paths, config, existingInstall, officialModules);
@ -220,15 +195,16 @@ class Installer {

     if (toRemove.length === 0) return;

-    await this.ideManager.ensureInitialized();
-    for (const ide of toRemove) {
-      try {
-        const handler = this.ideManager.handlers.get(ide);
-        if (handler) {
-          await handler.cleanup(paths.projectRoot);
-        }
-      } catch (error) {
-        await prompts.log.warn(`Warning: Failed to remove ${ide}: ${error.message}`);
+    // Pass the newly-selected list as remainingIdes so cleanupByList skips
+    // target_dir wipes for IDEs whose directory is still owned by a peer
+    // (e.g. removing 'cursor' while 'gemini' remains — both share .agents/skills).
+    const results = await this.ideManager.cleanupByList(paths.projectRoot, toRemove, {
+      remainingIdes: [...newlySelected],
+    });
+
+    for (const result of results || []) {
+      if (result && result.success === false) {
+        await prompts.log.warn(`Warning: Failed to remove ${result.ide}: ${result.error || 'unknown error'}`);
       }
     }
   }
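`cleanupByList` lives in the IDE manager, which this diff doesn't show. An illustrative sketch of the shared-directory guard the comment describes — `handlers`, `targetDir`, and the two remove methods are invented names:

```js
// Sketch only: wipe a tool's directory only when no surviving IDE shares it.
async function cleanupByListSketch(projectRoot, toRemove, { remainingIdes = [] } = {}, handlers) {
  const keepDirs = new Set(remainingIdes.map((ide) => handlers.get(ide)?.targetDir).filter(Boolean));
  const results = [];
  for (const ide of toRemove) {
    const handler = handlers.get(ide);
    if (!handler) continue;
    try {
      await handler.removeOwnedEntries(projectRoot); // always drop BMAD-owned files
      if (!keepDirs.has(handler.targetDir)) {
        await handler.removeTargetDir(projectRoot); // safe: no peer shares this dir
      }
      results.push({ ide, success: true });
    } catch (error) {
      results.push({ ide, success: false, error: error.message });
    }
  }
  return results;
}
```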
@ -335,6 +311,19 @@ class Installer {
       moduleConfigs,
     });

+    // Apply post-install --set TOML patches. Runs after writeCentralConfig
+    // (inside generateManifests above) so the patch operates on the
+    // freshly written `_bmad/config.toml` / `_bmad/config.user.toml`.
+    // See `tools/installer/set-overrides.js` for routing rules.
+    if (config.setOverrides && Object.keys(config.setOverrides).length > 0) {
+      const { applySetOverrides } = require('../set-overrides');
+      const applied = await applySetOverrides(config.setOverrides, paths.bmadDir);
+      if (applied.length > 0) {
+        const summary = applied.map((a) => `${a.module}.${a.key} → ${a.file}`).join(', ');
+        await prompts.log.info(`Applied --set overrides: ${summary}`);
+      }
+    }
+
     message('Generating help catalog...');
     await this.mergeModuleHelpCatalogs(paths.bmadDir, manifestGen.agents);
     addResult('Help catalog', 'ok');
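A rough sketch of what the patch step could do on disk — the real `applySetOverrides` with its team/user routing is in `set-overrides.js`, not in this diff; the single-file, regex-level patch below is a deliberate simplification:

```js
const fsp = require('node:fs/promises');
const path = require('node:path');

// Sketch: patch `key = value` lines inside [modules.<code>] sections.
async function applySetOverridesSketch(overrides, bmadDir) {
  const file = path.join(bmadDir, 'config.toml'); // real code also routes keys to config.user.toml
  let text = await fsp.readFile(file, 'utf8');
  const applied = [];
  for (const [module, kv] of Object.entries(overrides)) {
    for (const [key, value] of Object.entries(kv)) {
      const pattern = new RegExp(`(\\[modules\\.${module}\\][^\\[]*?^${key}\\s*=\\s*).*$`, 'm');
      if (pattern.test(text)) {
        text = text.replace(pattern, `$1"${value}"`);
        applied.push({ module, key, file: 'config.toml' });
      }
    }
  }
  await fsp.writeFile(file, text, 'utf8');
  return applied;
}
```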
@ -379,13 +368,14 @@ class Installer {
       return;
     }

-    for (const ide of validIdes) {
-      const setupResult = await this.ideManager.setup(ide, paths.projectRoot, paths.bmadDir, {
+    const setupResults = await this.ideManager.setupBatch(validIdes, paths.projectRoot, paths.bmadDir, {
       selectedModules: allModules || [],
       verbose: config.verbose,
       previousSkillIds,
     });

+    for (const setupResult of setupResults) {
+      const ide = setupResult.ide;
       if (setupResult.success) {
         addResult(ide, 'ok', setupResult.detail || '');
       } else {

@ -638,19 +628,40 @@ class Installer {
             moduleConfig: moduleConfig,
             installer: this,
             silent: true,
+            channelOptions: config.channelOptions,
           },
         );

-        // Get display name from source module.yaml; version from resolution cache or marketplace.json
-        const sourcePath = await officialModules.findModuleSource(moduleName, { silent: true });
+        // Get display name from source module.yaml and resolve the freshest version metadata we can find locally.
+        const sourcePath = await officialModules.findModuleSource(moduleName, {
+          silent: true,
+          channelOptions: config.channelOptions,
+        });
         const moduleInfo = sourcePath ? await officialModules.getModuleInfo(sourcePath, moduleName, '') : null;
         const displayName = moduleInfo?.name || moduleName;

-        // Prefer version from resolution cache (accurate for custom/local modules),
-        // fall back to marketplace.json walk-up for official modules
+        const externalResolution = officialModules.externalModuleManager.getResolution(moduleName);
+        let communityResolution = null;
+        if (!externalResolution) {
+          const { CommunityModuleManager } = require('../modules/community-manager');
+          communityResolution = new CommunityModuleManager().getResolution(moduleName);
+        }
+        const resolution = externalResolution || communityResolution;
         const cachedResolution = CustomModuleManager._resolutionCache.get(moduleName);
-        const version = cachedResolution?.version || (sourcePath ? await this._getMarketplaceVersion(sourcePath) : '');
-        addResult(displayName, 'ok', '', { moduleCode: moduleName, newVersion: version });
+        const versionInfo = await resolveModuleVersion(moduleName, {
+          moduleSourcePath: sourcePath,
+          fallbackVersion: resolution?.version || cachedResolution?.version,
+          marketplacePluginNames: cachedResolution?.pluginName ? [cachedResolution.pluginName] : [],
+        });
+        // Prefer the git tag recorded by the resolution (e.g. "v1.7.0") over
+        // the on-disk package.json (which may be ahead of the released tag).
+        const version = resolution?.version || versionInfo.version || '';
+        addResult(displayName, 'ok', '', {
+          moduleCode: moduleName,
+          newVersion: version,
+          newChannel: resolution?.channel || null,
+          newSha: resolution?.sha || null,
+        });
       }
     }

@ -926,29 +937,15 @@ class Installer {
   /**
    * Merge all module-help.csv files into a single bmad-help.csv.
    * Scans all installed modules for module-help.csv and merges them.
-   * Enriches agent info from the in-memory agent list produced by ManifestGenerator.
-   * Output is written to _bmad/_config/bmad-help.csv.
+   * Output preserves the source schema verbatim — see schema below.
    * @param {string} bmadDir - BMAD installation directory
-   * @param {Array<Object>} agentEntries - Agents collected from module.yaml (code, name, title, icon, module, ...)
+   * @param {Array<Object>} _agentEntries - Unused; retained for call-site compatibility
    */
-  async mergeModuleHelpCatalogs(bmadDir, agentEntries = []) {
+  async mergeModuleHelpCatalogs(bmadDir, _agentEntries = []) {
     const allRows = [];
-    const headerRow =
-      'module,phase,name,code,sequence,workflow-file,command,required,agent-name,agent-command,agent-display-name,agent-title,options,description,output-location,outputs';
-
-    // Build agent lookup from the in-memory list (agent code → command + display fields).
-    const agentInfo = new Map();
-    for (const agent of agentEntries) {
-      if (!agent || !agent.code) continue;
-      const agentCommand = agent.module ? `bmad:${agent.module}:agent:${agent.code}` : `bmad:agent:${agent.code}`;
-      const displayName = agent.name || agent.code;
-      const titleCombined = agent.icon && agent.title ? `${agent.icon} ${agent.title}` : agent.title || agent.code;
-      agentInfo.set(agent.code, {
-        command: agentCommand,
-        displayName,
-        title: titleCombined,
-      });
-    }
+    const headerRow = MODULE_HELP_CSV_HEADER;
+    const COLUMN_COUNT = 13;
+    const PHASE_INDEX = 7;

     // Get all installed module directories
     const entries = await fs.readdir(bmadDir, { withFileTypes: true });
@ -979,72 +976,37 @@ class Installer {
         const content = await fs.readFile(helpFilePath, 'utf8');
         const lines = content.split('\n').filter((line) => line.trim() && !line.startsWith('#'));

+        let headerWarned = false;
         for (const line of lines) {
-          // Skip header row
+          // Header row: warn on drift from canonical schema, then skip.
+          // Data rows are loaded positionally regardless, so the warning
+          // is advisory — the maintainer should rename their columns.
           if (line.startsWith('module,')) {
+            if (!headerWarned && line.trim() !== headerRow) {
+              await prompts.log.warn(
+                `  ${moduleName}/module-help.csv header does not match canonical schema. ` +
+                  `Expected: ${headerRow} | Found: ${line.trim()} | Data loaded positionally.`,
+              );
+              headerWarned = true;
+            }
             continue;
           }

           // Parse the line - handle quoted fields with commas
           const columns = this.parseCSVLine(line);
-          if (columns.length >= 12) {
-            // Map old schema to new schema
-            // Old: module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs
-            // New: module,phase,name,code,sequence,workflow-file,command,required,agent-name,agent-command,agent-display-name,agent-title,options,description,output-location,outputs
-
-            const [
-              module,
-              phase,
-              name,
-              code,
-              sequence,
-              workflowFile,
-              command,
-              required,
-              agentName,
-              options,
-              description,
-              outputLocation,
-              outputs,
-            ] = columns;
-
-            // Pass through _meta rows as-is (module metadata, not a skill)
-            if (phase === '_meta') {
-              const finalModule = (!module || module.trim() === '') && moduleName !== 'core' ? moduleName : module || '';
-              const metaRow = [finalModule, '_meta', '', '', '', '', '', 'false', '', '', '', '', '', '', outputLocation || '', ''];
-              allRows.push(metaRow.map((c) => this.escapeCSVField(c)).join(','));
-              continue;
-            }
-
-            // If module column is empty, set it to this module's name (except for core which stays empty for universal tools)
-            const finalModule = (!module || module.trim() === '') && moduleName !== 'core' ? moduleName : module || '';
-
-            // Lookup agent info
-            const cleanAgentName = agentName ? agentName.trim() : '';
-            const agentData = agentInfo.get(cleanAgentName) || { command: '', displayName: '', title: '' };
-
-            // Build new row with agent info
-            const newRow = [
-              finalModule,
-              phase || '',
-              name || '',
-              code || '',
-              sequence || '',
-              workflowFile || '',
-              command || '',
-              required || 'false',
-              cleanAgentName,
-              agentData.command,
-              agentData.displayName,
-              agentData.title,
-              options || '',
-              description || '',
-              outputLocation || '',
-              outputs || '',
-            ];
-
-            allRows.push(newRow.map((c) => this.escapeCSVField(c)).join(','));
-          }
+          if (columns.length < COLUMN_COUNT - 1) continue;
+
+          // Pad short rows; truncate over-long rows
+          const padded = columns.slice(0, COLUMN_COUNT);
+          while (padded.length < COLUMN_COUNT) padded.push('');
+
+          // If module column is empty, fill with this module's name
+          // (core stays empty so its rows render as universal tools)
+          if ((!padded[0] || padded[0].trim() === '') && moduleName !== 'core') {
+            padded[0] = moduleName;
+          }
+
+          allRows.push(padded.map((c) => this.escapeCSVField(c)).join(','));
         }

         if (process.env.BMAD_VERBOSE_INSTALL === 'true') {
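The pad/truncate normalization above, run in isolation (row values invented): rows one column short still pass the `< COLUMN_COUNT - 1` gate and get padded, while over-long rows are truncated:

```js
const COLUMN_COUNT = 13;

function normalize(columns) {
  const padded = columns.slice(0, COLUMN_COUNT); // drop anything past the schema
  while (padded.length < COLUMN_COUNT) padded.push(''); // fill missing trailing cells
  return padded;
}

console.log(normalize(new Array(12).fill('x')).length); // 13 — short row padded
console.log(normalize(new Array(15).fill('x')).length); // 13 — long row truncated
```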
@ -1056,44 +1018,34 @@ class Installer {
       }
     }

-    // Sort by module, then phase, then sequence
-    allRows.sort((a, b) => {
-      const colsA = this.parseCSVLine(a);
-      const colsB = this.parseCSVLine(b);
-
-      // Module comparison (empty module/universal tools come first)
-      const moduleA = (colsA[0] || '').toLowerCase();
-      const moduleB = (colsB[0] || '').toLowerCase();
-      if (moduleA !== moduleB) {
-        return moduleA.localeCompare(moduleB);
-      }
-
-      // Phase comparison
-      const phaseA = colsA[1] || '';
-      const phaseB = colsB[1] || '';
-      if (phaseA !== phaseB) {
-        return phaseA.localeCompare(phaseB);
-      }
-
-      // Sequence comparison
-      const seqA = parseInt(colsA[4] || '0', 10);
-      const seqB = parseInt(colsB[4] || '0', 10);
-      return seqA - seqB;
-    });
+    // Sort by module, then phase. Stable sort preserves authored order within a phase.
+    const decorated = allRows.map((row, index) => ({ row, index, cols: this.parseCSVLine(row) }));
+    decorated.sort((a, b) => {
+      const moduleA = (a.cols[0] || '').toLowerCase();
+      const moduleB = (b.cols[0] || '').toLowerCase();
+      if (moduleA !== moduleB) return moduleA.localeCompare(moduleB);
+
+      const phaseA = a.cols[PHASE_INDEX] || '';
+      const phaseB = b.cols[PHASE_INDEX] || '';
+      if (phaseA !== phaseB) return phaseA.localeCompare(phaseB);
+
+      return a.index - b.index;
+    });
+    const sortedRows = decorated.map((d) => d.row);

     // Write merged catalog
     const outputDir = path.join(bmadDir, '_config');
     await fs.ensureDir(outputDir);
     const outputPath = path.join(outputDir, 'bmad-help.csv');

-    const mergedContent = [headerRow, ...allRows].join('\n');
+    const mergedContent = [headerRow, ...sortedRows].join('\n');
     await fs.writeFile(outputPath, mergedContent, 'utf8');

     // Track the installed file
     this.installedFiles.add(outputPath);

     if (process.env.BMAD_VERBOSE_INSTALL === 'true') {
-      await prompts.log.message(`  Generated bmad-help.csv: ${allRows.length} workflows`);
+      await prompts.log.message(`  Generated bmad-help.csv: ${sortedRows.length} workflows`);
     }
   }
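The decorate–sort–undecorate above in miniature. `Array.prototype.sort` has been spec-stable since ES2019, so carrying the original index as a final tiebreaker makes the authored-order guarantee explicit rather than engine-dependent:

```js
const rows = ['bmm,plan,b', 'bmm,plan,a', 'core,misc,x'];
const decorated = rows.map((row, index) => ({ row, index, cols: row.split(',') }));

decorated.sort(
  (a, b) => a.cols[0].localeCompare(b.cols[0]) || a.cols[1].localeCompare(b.cols[1]) || a.index - b.index,
);

console.log(decorated.map((d) => d.row));
// ['bmm,plan,b', 'bmm,plan,a', 'core,misc,x'] — tied rows keep authored order
```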
@ -1125,12 +1077,30 @@ class Installer {
       let detail = '';
       if (r.moduleCode && r.newVersion) {
         const oldVersion = preVersions.get(r.moduleCode);
-        if (oldVersion && oldVersion === r.newVersion) {
-          detail = ` (v${r.newVersion}, no change)`;
+        // Format a version label for display:
+        // "main" → "main @ <short-sha>" (next channel shows what SHA landed)
+        // "v1.7.0" or "1.7.0" → "v1.7.0" (prefix 'v' when missing)
+        // anything else (legacy strings) → as-is
+        const fmt = (v, sha) => {
+          if (typeof v !== 'string' || !v) return '';
+          if (v === 'main' || v === 'HEAD') return sha ? `main @ ${sha.slice(0, 7)}` : 'main';
+          if (/^v?\d+\.\d+\.\d+/.test(v)) return v.startsWith('v') ? v : `v${v}`;
+          return v;
+        };
+        const newV = fmt(r.newVersion, r.newSha);
+        // 'main'/'HEAD' strings only identify the channel, not the commit, so
+        // we can't assert "no change" without comparing SHAs — and preVersions
+        // doesn't carry the old SHA. Render these as a refresh instead of a
+        // false-negative "no change".
+        const isMainLike = oldVersion === 'main' || oldVersion === 'HEAD';
+        if (oldVersion && oldVersion === r.newVersion && !isMainLike) {
+          detail = ` (${newV}, no change)`;
+        } else if (oldVersion && isMainLike) {
+          detail = ` (${newV}, refreshed)`;
         } else if (oldVersion) {
-          detail = ` (v${oldVersion} → v${r.newVersion})`;
+          detail = ` (${fmt(oldVersion, r.newSha)} → ${newV})`;
         } else {
-          detail = ` (v${r.newVersion}, installed)`;
+          detail = ` (${newV}, installed)`;
         }
       } else if (r.detail) {
         detail = ` (${r.detail})`;
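What the label formatter renders for the three input shapes — a copy of `fmt` from the hunk above, with an invented SHA:

```js
const fmt = (v, sha) => {
  if (typeof v !== 'string' || !v) return '';
  if (v === 'main' || v === 'HEAD') return sha ? `main @ ${sha.slice(0, 7)}` : 'main';
  if (/^v?\d+\.\d+\.\d+/.test(v)) return v.startsWith('v') ? v : `v${v}`;
  return v;
};

console.log(fmt('main', 'abc1234def5678')); // 'main @ abc1234'
console.log(fmt('1.7.0'));                  // 'v1.7.0'
console.log(fmt('local-dev'));              // 'local-dev' — legacy string, as-is
```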
@ -1250,9 +1220,59 @@ class Installer {
       await prompts.log.warn(`Skipping ${skippedModules.length} module(s) - no source available: ${skippedModules.join(', ')}`);
     }

+    // Build channel options from the existing manifest FIRST so the config
+    // collector below (which triggers external-module clones via
+    // findModuleSource) knows each module's recorded channel and doesn't
+    // silently redecide it. Without this, modules previously on 'next' or
+    // 'pinned' would trigger a stable-channel tag lookup at config-collection
+    // time, burning GitHub API quota and potentially failing.
+    const manifestData = await this.manifest.read(bmadDir);
+    const channelOptions = { global: null, nextSet: new Set(), pins: new Map(), warnings: [] };
+    if (manifestData?.modulesDetailed) {
+      const { fetchStableTags, classifyUpgrade, parseGitHubRepo } = require('../modules/channel-resolver');
+      for (const entry of manifestData.modulesDetailed) {
+        if (!entry?.name || !entry?.channel) continue;
+        if (entry.channel === 'pinned' && entry.version) {
+          channelOptions.pins.set(entry.name, entry.version);
+          continue;
+        }
+        if (entry.channel === 'next') {
+          channelOptions.nextSet.add(entry.name);
+          continue;
+        }
+        // Stable: classify the available upgrade. Patches and minors fall
+        // through (stable default picks up the top tag). A major upgrade
+        // requires opt-in, so under quick-update's non-interactive semantics
+        // we pin to the current version to prevent a silent breaking jump.
+        if (entry.channel === 'stable' && entry.version && entry.repoUrl) {
+          const parsed = parseGitHubRepo(entry.repoUrl);
+          if (!parsed) continue;
+          try {
+            const tags = await fetchStableTags(parsed.owner, parsed.repo);
+            if (tags.length === 0) continue;
+            const topTag = tags[0].tag;
+            const cls = classifyUpgrade(entry.version, topTag);
+            if (cls === 'major') {
+              channelOptions.pins.set(entry.name, entry.version);
+              await prompts.log.warn(
+                `${entry.name} ${entry.version} → ${topTag} is a new major release; staying on ${entry.version}. ` +
+                  `Run \`bmad install\` (Modify) with \`--pin ${entry.name}=${topTag}\` to accept.`,
+              );
+            }
+          } catch (error) {
+            // Tag lookup failed (offline, rate-limited). Stay on the current
+            // version rather than guessing — the existing cache is already
+            // at that ref, so re-using it keeps the install stable.
+            channelOptions.pins.set(entry.name, entry.version);
+            await prompts.log.warn(`Could not check ${entry.name} for updates (${error.message}); staying on ${entry.version}.`);
+          }
+        }
+      }
+    }
+
     // Load existing configs and collect new fields (if any)
     await prompts.log.info('Checking for new configuration options...');
-    const quickModules = new OfficialModules();
+    const quickModules = new OfficialModules({ channelOptions });
     await quickModules.loadExistingConfig(projectDir);

     let promptedForNewFields = false;
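`classifyUpgrade` comes from `channel-resolver.js`, which isn't in this diff. Something semver-shaped would satisfy the call site above — the function body here is an assumption:

```js
const semver = require('semver');

function classifyUpgradeSketch(current, candidateTag) {
  const from = semver.coerce(current);
  const to = semver.coerce(candidateTag);
  if (!from || !to || semver.lte(to, from)) return null; // nothing newer available
  return semver.diff(from, to); // 'major' | 'minor' | 'patch' | ...
}

classifyUpgradeSketch('1.7.0', 'v2.0.0'); // 'major' → quick-update pins instead of jumping
classifyUpgradeSketch('1.7.0', 'v1.8.2'); // 'minor' → falls through to the top tag
```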
@ -1287,10 +1307,15 @@ class Installer {
       ides: configuredIdes,
       coreConfig: quickModules.collectedConfig.core,
       moduleConfigs: quickModules.collectedConfig,
+      // Forward `--set` overrides so the post-install patch step
+      // (`applySetOverrides`) runs at the end of quick-update too. The
+      // installer.install path applies them after writeCentralConfig.
+      setOverrides: config.setOverrides || {},
       actionType: 'install',
       _quickUpdate: true,
       _preserveModules: skippedModules,
       _existingModules: installedModules,
+      channelOptions,
     };

     await this.install(installConfig);
@ -0,0 +1,151 @@
+const os = require('node:os');
+const path = require('node:path');
+const semver = require('semver');
+const fs = require('../fs-native');
+const prompts = require('../prompts');
+const { BMAD_FOLDER_NAME } = require('../ide/shared/path-utils');
+const { getInstalledCanonicalIds, isBmadOwnedEntry } = require('../ide/shared/installed-skills');
+
+const MIN_NATIVE_SKILLS_VERSION = '6.1.0';
+
+// Pre-v6.1.0 paths: BMAD used to install commands/workflows/etc in tool-specific dirs.
+// In v6.1.0 BMAD switched to native SKILL.md format.
+const LEGACY_COMMAND_PATHS = [
+  '.agent/workflows',
+  '.augment/commands',
+  '.claude/commands',
+  '.clinerules/workflows',
+  '.codex/prompts',
+  '~/.codex/prompts',
+  '.codebuddy/commands',
+  '.crush/commands',
+  '.cursor/commands',
+  '.gemini/commands',
+  '.github/agents',
+  '.github/prompts',
+  '.iflow/commands',
+  '.kilocode/workflows',
+  '.kiro/steering',
+  '.opencode/agents',
+  '.opencode/commands',
+  '.opencode/agent',
+  '.opencode/command',
+  '.qwen/commands',
+  '.roo/commands',
+  '.rovodev/workflows',
+  '.trae/rules',
+  '.windsurf/workflows',
+];
+
+// Skill paths that moved to the cross-tool .agents/skills/ standard.
+// Users upgrading from a prior install may have stale BMAD skills here that
+// the AI tool will load alongside the new ones, causing duplicates.
+const LEGACY_SKILL_PATHS = [
+  '.augment/skills',
+  '~/.augment/skills',
+  '.codex/skills',
+  '.crush/skills',
+  '.cursor/skills',
+  '~/.cursor/skills',
+  '.gemini/skills',
+  '~/.gemini/skills',
+  '.github/skills',
+  '~/.github/skills',
+  '.kilocode/skills',
+  '.kimi/skills',
+  '~/.kimi/skills',
+  '.opencode/skills',
+  '~/.opencode/skills',
+  '.pi/skills',
+  '~/.pi/skills',
+  '.roo/skills',
+  '~/.roo/skills',
+  '.rovodev/skills',
+  '~/.rovodev/skills',
+  '.windsurf/skills',
+  '~/.windsurf/skills',
+  '~/.codeium/windsurf/skills',
+];
+
+const LEGACY_PATHS = [...LEGACY_COMMAND_PATHS, ...LEGACY_SKILL_PATHS];
+
+function expandPath(p) {
+  if (p === '~') return os.homedir();
+  if (p.startsWith('~/')) return path.join(os.homedir(), p.slice(2));
+  return p;
+}
+
+function resolveLegacyPath(projectRoot, p) {
+  if (path.isAbsolute(p) || p.startsWith('~')) return expandPath(p);
+  return path.join(projectRoot, p);
+}
+
+async function findStaleLegacyDirs(projectRoot) {
+  const bmadDir = path.join(projectRoot, BMAD_FOLDER_NAME);
+  const canonicalIds = await getInstalledCanonicalIds(bmadDir);
+
+  const findings = [];
+  for (const legacyPath of LEGACY_PATHS) {
+    const resolved = resolveLegacyPath(projectRoot, legacyPath);
+    if (!(await fs.pathExists(resolved))) continue;
+    try {
+      const entries = await fs.readdir(resolved);
+      const bmadEntries = entries.filter((e) => isBmadOwnedEntry(e, canonicalIds));
+      if (bmadEntries.length > 0) {
+        findings.push({ path: resolved, displayPath: legacyPath, count: bmadEntries.length, entries: bmadEntries });
+      }
+    } catch {
+      // Unreadable dir — skip
+    }
+  }
+  return findings;
+}
+
+function isPreNativeSkillsVersion(version) {
+  if (!version) return false;
+  const coerced = semver.valid(version) || semver.valid(semver.coerce(version));
+  if (!coerced) return false;
+  return semver.lt(coerced, MIN_NATIVE_SKILLS_VERSION);
+}
+
+async function warnPreNativeSkillsLegacy({ projectRoot, existingVersion } = {}) {
+  const versionTriggered = isPreNativeSkillsVersion(existingVersion);
+  const staleDirs = await findStaleLegacyDirs(projectRoot);
+
+  if (!versionTriggered && staleDirs.length === 0) return;
+
+  if (versionTriggered) {
+    await prompts.log.warn(
+      `Detected previous BMAD install v${existingVersion} (pre-${MIN_NATIVE_SKILLS_VERSION}). ` +
+        `BMAD switched to native skills format in v${MIN_NATIVE_SKILLS_VERSION}; old command/workflow directories from your prior install may still be present.`,
+    );
+  }
+
+  if (staleDirs.length > 0) {
+    await prompts.log.warn(
+      `Found stale BMAD entries in ${staleDirs.length} legacy location(s) that the new installer no longer manages. ` +
+        `Your AI tool may load these alongside the new skills, causing duplicates. Remove them manually:`,
+    );
+    for (const finding of staleDirs) {
+      // Print each entry by exact name. A `bmad*` glob would (a) miss
+      // custom-module skills the canonicalId scan now picks up, and
+      // (b) match bmad-os-* utility skills the user should keep.
+      const entries = finding.entries || [];
+      for (const entry of entries) {
+        await prompts.log.message(`  rm -rf "${path.join(finding.path, entry)}"`);
+      }
+    }
+  } else if (versionTriggered) {
+    await prompts.log.message(
+      '  No stale legacy directories detected, but if your AI tool shows duplicate BMAD commands after install, check for old `bmad-*` entries in tool-specific dirs (e.g. .claude/commands, .cursor/commands).',
+    );
+  }
+}
+
+module.exports = {
+  warnPreNativeSkillsLegacy,
+  findStaleLegacyDirs,
+  isPreNativeSkillsVersion,
+  LEGACY_PATHS,
+  MIN_NATIVE_SKILLS_VERSION,
+};
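Driving the new module directly (the project path below is illustrative):

```js
const { findStaleLegacyDirs, isPreNativeSkillsVersion } = require('./legacy-warnings');

isPreNativeSkillsVersion('6.0.3'); // true  — pre-native-skills install
isPreNativeSkillsVersion('6.1.0'); // false — at or past the cutover
isPreNativeSkillsVersion('oops');  // false — uncoercible strings never trigger

findStaleLegacyDirs('/path/to/project').then((findings) => {
  for (const f of findings) {
    console.log(`${f.displayPath}: ${f.count} stale BMAD entries`);
  }
});
```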
@ -349,7 +349,22 @@ class ManifestGenerator {
         npmPackage: versionInfo.npmPackage,
         repoUrl: versionInfo.repoUrl,
       };
-      if (versionInfo.localPath) moduleEntry.localPath = versionInfo.localPath;
+      // Preserve channel/sha from the resolution (external/community/custom)
+      // or from the existing entry if this is a no-change rewrite.
+      const channel = versionInfo.channel ?? existing?.channel;
+      const sha = versionInfo.sha ?? existing?.sha;
+      if (channel) moduleEntry.channel = channel;
+      if (sha) moduleEntry.sha = sha;
+      if (versionInfo.localPath || existing?.localPath) {
+        moduleEntry.localPath = versionInfo.localPath || existing.localPath;
+      }
+      if (versionInfo.rawSource || existing?.rawSource) {
+        moduleEntry.rawSource = versionInfo.rawSource || existing.rawSource;
+      }
+      const regTag = versionInfo.registryApprovedTag ?? existing?.registryApprovedTag;
+      const regSha = versionInfo.registryApprovedSha ?? existing?.registryApprovedSha;
+      if (regTag) moduleEntry.registryApprovedTag = regTag;
+      if (regSha) moduleEntry.registryApprovedSha = regSha;
       updatedModules.push(moduleEntry);
     }

@ -420,6 +435,9 @@ class ManifestGenerator {
     // this means user-scoped keys (e.g. user_name) could mis-file into the
     // team config, so the operator should notice.
     const scopeByModuleKey = {};
+    // Maps installer moduleName (may be full display name) → module code field
+    // from module.yaml, so TOML sections use [modules.<code>] not [modules.<name>].
+    const codeByModuleName = {};
     for (const moduleName of this.updatedModules) {
       const moduleYamlPath = await resolveInstalledModuleYaml(moduleName);
       if (!moduleYamlPath) {

@ -432,6 +450,7 @@ class ManifestGenerator {
       try {
         const parsed = yaml.parse(await fs.readFile(moduleYamlPath, 'utf8'));
         if (!parsed || typeof parsed !== 'object') continue;
+        if (parsed.code) codeByModuleName[moduleName] = parsed.code;
         scopeByModuleKey[moduleName] = {};
         for (const [key, value] of Object.entries(parsed)) {
           if (value && typeof value === 'object' && 'prompt' in value) {

@ -530,6 +549,9 @@ class ManifestGenerator {
       if (moduleName === 'core') continue;
       const cfg = moduleConfigs[moduleName];
       if (!cfg || Object.keys(cfg).length === 0) continue;
+      // Use the module's code field from module.yaml as the TOML key so the
+      // section is [modules.mdo] not [modules.MDO: Maxio DevOps Operations].
+      const sectionKey = codeByModuleName[moduleName] || moduleName;
       // Only filter out spread-from-core pollution when we actually know
       // this module's prompt schema. For external/marketplace modules whose
       // module.yaml isn't in the src tree, fall through as all-team so we

@ -537,14 +559,14 @@ class ManifestGenerator {
       const haveSchema = Object.keys(scopeByModuleKey[moduleName] || {}).length > 0;
       const { team: modTeam, user: modUser } = partition(moduleName, cfg, haveSchema);
       if (Object.keys(modTeam).length > 0) {
-        teamLines.push(`[modules.${moduleName}]`);
+        teamLines.push(`[modules.${sectionKey}]`);
         for (const [key, value] of Object.entries(modTeam)) {
           teamLines.push(`${key} = ${formatTomlValue(value)}`);
         }
         teamLines.push('');
       }
       if (Object.keys(modUser).length > 0) {
-        userLines.push(`[modules.${moduleName}]`);
+        userLines.push(`[modules.${sectionKey}]`);
         for (const [key, value] of Object.entries(modUser)) {
           userLines.push(`${key} = ${formatTomlValue(value)}`);
         }
@ -1,9 +1,20 @@
|
||||||
const path = require('node:path');
|
const path = require('node:path');
|
||||||
|
const https = require('node:https');
|
||||||
|
const { execFile } = require('node:child_process');
|
||||||
|
const { promisify } = require('node:util');
|
||||||
const fs = require('../fs-native');
|
const fs = require('../fs-native');
|
||||||
const crypto = require('node:crypto');
|
const crypto = require('node:crypto');
|
||||||
const { getProjectRoot } = require('../project-root');
|
const { resolveModuleVersion } = require('../modules/version-resolver');
|
||||||
const prompts = require('../prompts');
|
const prompts = require('../prompts');
|
||||||
|
|
||||||
|
const execFileAsync = promisify(execFile);
|
||||||
|
const NPM_LOOKUP_TIMEOUT_MS = 10_000;
|
||||||
|
const NPM_PACKAGE_NAME_PATTERN = /^(?:@[a-z0-9][a-z0-9._~-]*\/)?[a-z0-9][a-z0-9._~-]*$/;
|
||||||
|
|
||||||
|
function isValidNpmPackageName(packageName) {
|
||||||
|
return typeof packageName === 'string' && NPM_PACKAGE_NAME_PATTERN.test(packageName);
|
||||||
|
}
|
||||||
|
|
||||||
class Manifest {
|
class Manifest {
|
||||||
/**
|
/**
|
||||||
* Create a new manifest
|
* Create a new manifest
|
||||||
|
|
@ -180,7 +191,12 @@ class Manifest {
|
||||||
npmPackage: options.npmPackage || null,
|
npmPackage: options.npmPackage || null,
|
||||||
repoUrl: options.repoUrl || null,
|
repoUrl: options.repoUrl || null,
|
||||||
};
|
};
|
||||||
|
if (options.channel) entry.channel = options.channel;
|
||||||
|
if (options.sha) entry.sha = options.sha;
|
||||||
if (options.localPath) entry.localPath = options.localPath;
|
if (options.localPath) entry.localPath = options.localPath;
|
||||||
|
if (options.rawSource) entry.rawSource = options.rawSource;
|
||||||
|
if (options.registryApprovedTag) entry.registryApprovedTag = options.registryApprovedTag;
|
||||||
|
if (options.registryApprovedSha) entry.registryApprovedSha = options.registryApprovedSha;
|
||||||
manifest.modules.push(entry);
|
manifest.modules.push(entry);
|
||||||
} else {
|
} else {
|
||||||
// Module exists, update its version info
|
// Module exists, update its version info
|
||||||
|
|
@@ -192,6 +208,11 @@ class Manifest {
         npmPackage: options.npmPackage === undefined ? existing.npmPackage : options.npmPackage,
         repoUrl: options.repoUrl === undefined ? existing.repoUrl : options.repoUrl,
         localPath: options.localPath === undefined ? existing.localPath : options.localPath,
+        channel: options.channel === undefined ? existing.channel : options.channel,
+        sha: options.sha === undefined ? existing.sha : options.sha,
+        rawSource: options.rawSource === undefined ? existing.rawSource : options.rawSource,
+        registryApprovedTag: options.registryApprovedTag === undefined ? existing.registryApprovedTag : options.registryApprovedTag,
+        registryApprovedSha: options.registryApprovedSha === undefined ? existing.registryApprovedSha : options.registryApprovedSha,
         lastUpdated: new Date().toISOString(),
       };
     }
@@ -258,13 +279,11 @@ class Manifest {
    * @returns {Object} Version info object with version, source, npmPackage, repoUrl
    */
   async getModuleVersionInfo(moduleName, bmadDir, moduleSourcePath = null) {
-    const yaml = require('yaml');
-
     // Resolve source type first, then read version with the correct path context
     if (['core', 'bmm'].includes(moduleName)) {
-      const version = await this._readMarketplaceVersion(moduleName, moduleSourcePath);
+      const versionInfo = await resolveModuleVersion(moduleName, { moduleSourcePath });
       return {
-        version,
+        version: versionInfo.version,
         source: 'built-in',
         npmPackage: null,
         repoUrl: null,
@@ -277,13 +296,17 @@ class Manifest {
     const moduleInfo = await extMgr.getModuleByCode(moduleName);
 
     if (moduleInfo) {
-      // External module: use moduleSourcePath if provided, otherwise fall back to cache
-      const version = await this._readMarketplaceVersion(moduleName, moduleSourcePath);
+      const externalResolution = extMgr.getResolution(moduleName);
+      const versionInfo = await resolveModuleVersion(moduleName, { moduleSourcePath });
       return {
-        version,
+        // Git tag recorded during install trumps the on-disk package.json
+        // version, so the manifest carries "v1.7.0" instead of "1.7.0".
+        version: externalResolution?.version || versionInfo.version,
         source: 'external',
         npmPackage: moduleInfo.npmPackage || null,
         repoUrl: moduleInfo.url || null,
+        channel: externalResolution?.channel || null,
+        sha: externalResolution?.sha || null,
       };
     }
 
@@ -292,12 +315,20 @@ class Manifest {
     const communityMgr = new CommunityModuleManager();
     const communityInfo = await communityMgr.getModuleByCode(moduleName);
     if (communityInfo) {
-      const communityVersion = await this._readMarketplaceVersion(moduleName, moduleSourcePath);
+      const communityResolution = communityMgr.getResolution(moduleName);
+      const versionInfo = await resolveModuleVersion(moduleName, {
+        moduleSourcePath,
+        fallbackVersion: communityInfo.version,
+      });
       return {
-        version: communityVersion || communityInfo.version,
+        version: communityResolution?.version || versionInfo.version || communityInfo.version,
         source: 'community',
         npmPackage: communityInfo.npmPackage || null,
         repoUrl: communityInfo.url || null,
+        channel: communityResolution?.channel || null,
+        sha: communityResolution?.sha || null,
+        registryApprovedTag: communityResolution?.registryApprovedTag || null,
+        registryApprovedSha: communityResolution?.registryApprovedSha || null,
       };
     }
 
@@ -307,98 +338,57 @@ class Manifest {
     const resolved = customMgr.getResolution(moduleName);
     const customSource = await customMgr.findModuleSourceByCode(moduleName, { bmadDir });
     if (customSource || resolved) {
-      const customVersion = resolved?.version || (await this._readMarketplaceVersion(moduleName, moduleSourcePath));
+      const versionInfo = await resolveModuleVersion(moduleName, {
+        moduleSourcePath: moduleSourcePath || customSource,
+        fallbackVersion: resolved?.version,
+        marketplacePluginNames: resolved?.pluginName ? [resolved.pluginName] : [],
+      });
+      const hasGitClone = !!resolved?.repoUrl;
       return {
-        version: customVersion,
+        // Prefer the git ref we actually cloned over the package.json version.
+        version: resolved?.cloneRef || (hasGitClone ? 'main' : versionInfo.version),
         source: 'custom',
         npmPackage: null,
         repoUrl: resolved?.repoUrl || null,
         localPath: resolved?.localPath || null,
+        channel: hasGitClone ? (resolved?.cloneRef ? 'pinned' : 'next') : null,
+        sha: resolved?.cloneSha || null,
+        rawSource: resolved?.rawInput || null,
       };
     }
 
     // Unknown module
-    const version = await this._readMarketplaceVersion(moduleName, moduleSourcePath);
+    const versionInfo = await resolveModuleVersion(moduleName, { moduleSourcePath });
     return {
-      version,
+      version: versionInfo.version,
       source: 'unknown',
       npmPackage: null,
       repoUrl: null,
     };
   }
 
-  /**
-   * Read version from .claude-plugin/marketplace.json for a module
-   * @param {string} moduleName - Module code
-   * @returns {string|null} Version or null
-   */
-  async _readMarketplaceVersion(moduleName, moduleSourcePath = null) {
-    const os = require('node:os');
-    let marketplacePath;
-
-    if (['core', 'bmm'].includes(moduleName)) {
-      marketplacePath = path.join(getProjectRoot(), '.claude-plugin', 'marketplace.json');
-    } else if (moduleSourcePath) {
-      // Walk up from source path to find marketplace.json
-      let dir = moduleSourcePath;
-      for (let i = 0; i < 5; i++) {
-        const candidate = path.join(dir, '.claude-plugin', 'marketplace.json');
-        if (await fs.pathExists(candidate)) {
-          marketplacePath = candidate;
-          break;
-        }
-        const parent = path.dirname(dir);
-        if (parent === dir) break;
-        dir = parent;
-      }
-    }
-
-    // Fallback to external module cache
-    if (!marketplacePath) {
-      const cacheDir = path.join(os.homedir(), '.bmad', 'cache', 'external-modules', moduleName);
-      marketplacePath = path.join(cacheDir, '.claude-plugin', 'marketplace.json');
-    }
-
-    try {
-      if (await fs.pathExists(marketplacePath)) {
-        const data = JSON.parse(await fs.readFile(marketplacePath, 'utf8'));
-        const plugins = data?.plugins;
-        if (!Array.isArray(plugins) || plugins.length === 0) return null;
-        let best = null;
-        for (const p of plugins) {
-          if (p.version && (!best || p.version > best)) best = p.version;
-        }
-        return best;
-      }
-    } catch {
-      // ignore
-    }
-    return null;
-  }
-
   /**
    * Fetch latest version from npm for a package
    * @param {string} packageName - npm package name
    * @returns {string|null} Latest version or null
    */
   async fetchNpmVersion(packageName) {
-    try {
-      const https = require('node:https');
-      const { execSync } = require('node:child_process');
+    if (!isValidNpmPackageName(packageName)) {
+      return null;
+    }
 
+    try {
       // Try using npm view first (more reliable)
       try {
-        const result = execSync(`npm view ${packageName} version`, {
+        const { stdout } = await execFileAsync('npm', ['view', packageName, 'version'], {
           encoding: 'utf8',
-          stdio: 'pipe',
-          timeout: 10_000,
+          timeout: NPM_LOOKUP_TIMEOUT_MS,
         });
-        return result.trim();
+        return stdout.trim();
       } catch {
         // Fallback to npm registry API
-        return new Promise((resolve, reject) => {
-          https
-            .get(`https://registry.npmjs.org/${packageName}`, (res) => {
+        return new Promise((resolve) => {
+          const request = https.get(`https://registry.npmjs.org/${encodeURIComponent(packageName)}`, (res) => {
             let data = '';
             res.on('data', (chunk) => (data += chunk));
            res.on('end', () => {
@@ -409,8 +399,14 @@ class Manifest {
               resolve(null);
             }
           });
-        })
-          .on('error', () => resolve(null));
+        });
+        request.setTimeout(NPM_LOOKUP_TIMEOUT_MS, () => {
+          request.destroy();
+          resolve(null);
+        });
+
+        request.on('error', () => resolve(null));
       });
     }
   } catch {
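Worth spelling out why the lookup was reworked this way: `execFileAsync('npm', ['view', …])` passes the package name as a separate argv element, so — unlike the old `execSync(`npm view ${packageName} version`)` interpolation — no shell ever parses it, and the name gate above makes injection doubly moot. Both lookup paths now also share the same 10 s budget and resolve `null` rather than hang. A reduced sketch of that shape (hypothetical helper name):

    const { execFile } = require('node:child_process');
    const { promisify } = require('node:util');
    const execFileAsync = promisify(execFile);

    // Hypothetical reduced helper: argv-array spawn, no shell, hard timeout.
    async function npmLatest(name) {
      const { stdout } = await execFileAsync('npm', ['view', name, 'version'], {
        encoding: 'utf8',
        timeout: 10_000, // kill the child instead of hanging the installer
      });
      return stdout.trim(); // e.g. "6.6.0"
    }
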
@@ -424,6 +420,7 @@ class Manifest {
    * @returns {Array} Array of update info objects
    */
   async checkForUpdates(bmadDir) {
+    const semver = require('semver');
     const modules = await this.getAllModuleVersions(bmadDir);
     const updates = [];
 
@@ -437,7 +434,10 @@ class Manifest {
         continue;
       }
 
-      if (module.version !== latestVersion) {
+      const installedVersion = semver.valid(module.version) || semver.valid(semver.coerce(module.version || ''));
+      const availableVersion = semver.valid(latestVersion) || semver.valid(semver.coerce(latestVersion));
+
+      if (installedVersion && availableVersion && semver.gt(availableVersion, installedVersion)) {
         updates.push({
           name: module.name,
           installedVersion: module.version,

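The switch from `!==` to `semver.gt` matters in both directions: plain string inequality flags a downgrade as an "update", and even alphabetical ordering mis-sorts multi-digit components. A quick illustration using the standard `semver` API:

    const semver = require('semver');

    // String inequality would offer 6.4.1 as an "update" over 6.6.0.
    '6.6.0' !== '6.4.1';                    // true — wrong signal
    semver.gt('6.4.1', '6.6.0');            // false — correctly suppressed

    // Lexicographic ordering also breaks on multi-digit parts.
    '6.10.0' > '6.9.0';                     // false as strings
    semver.gt('6.10.0', '6.9.0');           // true

    // coerce() rescues loosely recorded versions like the "v1.7.0" git tags
    // the manifest now carries.
    semver.valid(semver.coerce('v1.7.0')); // '1.7.0'
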
@@ -1,10 +1,129 @@
-const os = require('node:os');
 const path = require('node:path');
 const fs = require('../fs-native');
 const yaml = require('yaml');
 const prompts = require('../prompts');
 const csv = require('csv-parse/sync');
 const { BMAD_FOLDER_NAME } = require('./shared/path-utils');
+const { getInstalledCanonicalIds, isBmadOwnedEntry } = require('./shared/installed-skills');
+
+// Reserved OpenCode slash commands. A skill whose canonicalId collides with
+// one of these is skipped during command-pointer generation so it doesn't
+// shadow a built-in.
+const RESERVED_OPENCODE_COMMANDS = new Set([
+  'review',
+  'commit',
+  'init',
+  'help',
+  'skills',
+  'fast',
+  'compact',
+  'clear',
+  'undo',
+  'redo',
+  'edit',
+  'editor',
+  'exit',
+  'quit',
+  'theme',
+  'config',
+  'model',
+  'session',
+]);
+
+// Wrap a description for safe insertion into single-line YAML frontmatter.
+// Leaves plain values untouched; double-quotes (and escapes) anything that
+// could break YAML parsing or span multiple lines.
+function yamlSafeSingleLine(value) {
+  const collapsed = String(value)
+    .replaceAll(/[\r\n]+/g, ' ')
+    .trim();
+  const needsQuoting = /[:#'"\\]/.test(collapsed) || /^[!&*?|>%@`[{]/.test(collapsed);
+  if (!needsQuoting) return collapsed;
+  const escaped = collapsed.replaceAll('\\', '\\\\').replaceAll('"', String.raw`\"`);
+  return `"${escaped}"`;
+}
+
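Illustrative inputs and the frontmatter-safe outputs the helper would produce (values are hypothetical):

    yamlSafeSingleLine('Run the dev workflow');
    // → Run the dev workflow            (plain value, untouched)
    yamlSafeSingleLine('Plan: analyze, then build');
    // → "Plan: analyze, then build"     (":" would start a YAML mapping)
    yamlSafeSingleLine('Line one\nLine two "quoted"');
    // → "Line one Line two \"quoted\""  (newlines collapsed, quotes escaped)
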
+// Validate that a canonicalId is a safe basename — no path separators, no
+// parent-dir traversal, no leading dots, only the character set we expect.
+// Defense-in-depth: the manifest is trusted today, but the value flows
+// directly into a file path and a malformed entry should not write outside
+// the commands directory.
+function isSafeCanonicalId(value) {
+  return typeof value === 'string' && /^[a-zA-Z0-9][a-zA-Z0-9_.-]*$/.test(value) && !value.includes('..');
+}
+
+// Default body template for command pointer files. Used when a platform's
+// installer config doesn't override `commands_body_template`. Matches
+// OpenCode's native `@skills/<id>` skill-reference syntax.
+const DEFAULT_COMMANDS_BODY_TEMPLATE = '@skills/{canonicalId}';
+
+// Is this skill a persona agent (vs. a workflow/tool/standalone skill)?
+// Used by platforms that surface only persona agents (e.g. Copilot's Custom
+// Agents picker). Signal: the skill's source `customize.toml` has an
+// `[agent]` section. This is the actual configuration source of truth —
+// every BMAD persona is configured via [agent] in its customize.toml,
+// every workflow uses [workflow], every standalone skill has no
+// customize.toml at all. Verified against the full installed manifest:
+// catches exactly the 20 description-confirmed personas across BMM, CIS,
+// GDS, WDS, TEA, and correctly excludes meta-skills like
+// `bmad-agent-builder` (a skill-builder workflow whose canonical id
+// contains `-agent-` but which has no [agent] section because it isn't a
+// persona itself).
+//
+// Reading the source toml — at install time the source skill directory
+// (resolved from manifest record.path) still exists; cleanup runs later
+// in the install flow.
+async function isAgentSkill(record, bmadDir) {
+  if (!record?.path || !bmadDir) return false;
+  const bmadFolderName = path.basename(bmadDir);
+  const bmadPrefix = bmadFolderName + '/';
+  const relativePath = record.path.startsWith(bmadPrefix) ? record.path.slice(bmadPrefix.length) : record.path;
+  const tomlPath = path.join(bmadDir, path.dirname(relativePath), 'customize.toml');
+  if (!(await fs.pathExists(tomlPath))) return false;
+  try {
+    const content = await fs.readFile(tomlPath, 'utf8');
+    return /^\[agent\]/m.test(content);
+  } catch {
+    return false;
+  }
+}
+
+// Resolve placeholders in a body template. Supported placeholders:
+//   {canonicalId}  — the skill's canonical id
+//   {target_dir}   — the platform's skill install directory (e.g. .agents/skills)
+//   {project-root} — left as a literal placeholder for the model/tool to expand
+//                    at runtime; consistent with PR #1769's templates.
+function expandBodyTemplate(template, { canonicalId, targetDir }) {
+  return template.replaceAll('{canonicalId}', canonicalId).replaceAll('{target_dir}', targetDir);
+}
+
+// The exact body the installer would generate for a given description and
+// canonicalId, given the platform's body template. Centralised so both the
+// write and the freshness-check paths agree on the canonical form.
+function buildCommandPointerBody(description, canonicalId, { template, targetDir }) {
+  const bodyText = expandBodyTemplate(template, { canonicalId, targetDir });
+  return `---\ndescription: ${yamlSafeSingleLine(description)}\n---\n\n${bodyText}\n`;
+}
+
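Putting the pieces together: for a hypothetical skill `bmad-dev` with the default template, `buildCommandPointerBody` yields a pointer file like:

    ---
    description: Run the dev workflow
    ---

    @skills/bmad-dev
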
+// Heuristic: does an existing pointer file look like our generator's output
+// (and therefore safe to refresh) versus a user-modified file (which we
+// preserve)? We check the body shape rather than full equality so that
+// description-only edits in the manifest can propagate without trampling
+// hand edits to the body.
+function looksLikeGeneratorOutput(content, canonicalId, { template, targetDir }) {
+  if (typeof content !== 'string') return false;
+  const trimmed = content.trim();
+  const expectedTail = expandBodyTemplate(template, { canonicalId, targetDir }).trim();
+  // Must end with the exact body our generator writes (post-expansion).
+  if (!trimmed.endsWith(expectedTail)) return false;
+  // Must start with frontmatter containing exactly one description: line.
+  const fmMatch = trimmed.match(/^---\n([\S\s]*?)\n---\n/);
+  if (!fmMatch) return false;
+  const fmLines = fmMatch[1].split('\n').filter((l) => l.length > 0);
+  if (fmLines.length !== 1) return false;
+  if (!fmLines[0].startsWith('description:')) return false;
+  return true;
+}
+
 /**
  * Config-driven IDE setup handler
@@ -16,7 +135,7 @@ const { BMAD_FOLDER_NAME } = require('./shared/path-utils');
  * Features:
  * - Config-driven from platform-codes.yaml
  * - Verbatim skill installation from skill-manifest.csv
- * - Legacy directory cleanup and IDE-specific marker removal
+ * - IDE-specific marker removal (copilot-instructions, kilo modes, rovodev prompts)
  */
 class ConfigDrivenIdeSetup {
   constructor(platformCode, platformConfig) {
@@ -44,16 +163,20 @@ class ConfigDrivenIdeSetup {
   async detect(projectDir) {
     if (!this.configDir) return false;
 
-    const dir = path.join(projectDir || process.cwd(), this.configDir);
-    if (await fs.pathExists(dir)) {
+    const root = projectDir || process.cwd();
+    const dir = path.join(root, this.configDir);
+    if (!(await fs.pathExists(dir))) return false;
+
+    let entries;
     try {
-      const entries = await fs.readdir(dir);
-      return entries.some((e) => typeof e === 'string' && e.startsWith('bmad'));
+      entries = await fs.readdir(dir);
     } catch {
       return false;
     }
-    }
-    return false;
+    const bmadDir = await this._findBmadDir(root);
+    const canonicalIds = await getInstalledCanonicalIds(bmadDir);
+    return entries.some((e) => isBmadOwnedEntry(e, canonicalIds));
   }
 
   /**
@@ -92,6 +215,18 @@ class ConfigDrivenIdeSetup {
       return { success: false, reason: 'no-config' };
     }
 
+    // When a peer platform in the same install batch owns this target_dir,
+    // skip the skill write — the peer has already populated it. Command
+    // pointers, however, write to a separate per-IDE directory and must
+    // still be generated for this IDE; they are not deduped across peers.
+    if (options.skipTarget) {
+      const results = { skills: 0, sharedTargetHandledByPeer: true };
+      if (this.installerConfig.commands_target_dir) {
+        results.commands = await this.installCommandPointers(projectDir, bmadDir, this.installerConfig, options);
+      }
+      return { success: true, results };
+    }
+
     if (this.installerConfig.target_dir) {
       return this.installToTarget(projectDir, bmadDir, this.installerConfig, options);
     }
@@ -118,11 +253,157 @@ class ConfigDrivenIdeSetup {
     results.skills = await this.installVerbatimSkills(projectDir, bmadDir, targetPath, config);
     results.skillDirectories = this.skillWriteTracker.size;
 
+    if (config.commands_target_dir) {
+      results.commands = await this.installCommandPointers(projectDir, bmadDir, config, options);
+    }
+
     await this.printSummary(results, target_dir, options);
     this.skillWriteTracker = null;
     return { success: true, results };
   }
 
+  /**
+   * Generate per-skill command pointer files for IDEs that surface commands
+   * separately from skills (e.g. OpenCode's `.opencode/commands/<name>.md`).
+   *
+   * Each pointer is a tiny markdown file whose body is `@skills/<canonicalId>`
+   * so invoking `/<canonicalId>` routes the user straight to the skill instead
+   * of forcing them through a `/skills` menu.
+   *
+   * Skips:
+   * - Names that collide with reserved built-in slash commands.
+   * - canonicalIds that aren't safe basename-only identifiers (defense
+   *   against path traversal even though the manifest is currently trusted).
+   * - Existing files whose body looks user-modified (preserves hand edits);
+   *   pointer files matching the generator pattern get overwritten so that
+   *   description changes in skill-manifest.csv propagate on re-install.
+   *
+   * Per-file write failures are recorded and reported but do not abort the
+   * rest of the install — pointer files are a non-essential adjunct to the
+   * skill copy that already succeeded.
+   *
+   * @param {string} projectDir
+   * @param {string} bmadDir
+   * @param {Object} config - Installer config; reads commands_target_dir.
+   * @param {Object} options - Setup options. forceCommands overwrites existing
+   *   files unconditionally (including hand-modified ones).
+   * @returns {Promise<Object>} { created, updated, skippedExisting, skippedCollision, skippedInvalidId, writeFailures, fallbackDescription }
+   */
+  async installCommandPointers(projectDir, bmadDir, config, options = {}) {
+    const result = {
+      created: 0,
+      updated: 0,
+      skippedExisting: 0,
+      skippedCollision: 0,
+      skippedInvalidId: 0,
+      skippedFiltered: 0,
+      writeFailures: 0,
+      fallbackDescription: 0,
+    };
+
+    const csvPath = path.join(bmadDir, '_config', 'skill-manifest.csv');
+    if (!(await fs.pathExists(csvPath))) return result;
+
+    const commandsPath = path.join(projectDir, config.commands_target_dir);
+    await fs.ensureDir(commandsPath);
+
+    // Per-platform pointer-file shape, all overrideable in platform-codes.yaml.
+    const extension = config.commands_extension || '.md';
+    const template = config.commands_body_template || DEFAULT_COMMANDS_BODY_TEMPLATE;
+    const targetDir = config.target_dir;
+    const filter = config.commands_filter || null;
+
+    const csvContent = await fs.readFile(csvPath, 'utf8');
+    const records = csv.parse(csvContent, { columns: true, skip_empty_lines: true });
+
+    for (const record of records) {
+      const canonicalId = record.canonicalId;
+      if (!canonicalId) continue;
+
+      // Defensive basename validation. canonicalId comes from a trusted
+      // manifest today, but the value flows directly into a file path —
+      // reject anything that could escape commands_target_dir.
+      if (!isSafeCanonicalId(canonicalId)) {
+        result.skippedInvalidId++;
+        continue;
+      }
+
+      // Optional per-platform filter: surfaces that should only show
+      // persona agents (e.g. Copilot's Custom Agents picker) skip
+      // workflow/tool skills here so the picker isn't cluttered with
+      // 90+ unrelated entries.
+      if (filter === 'agents-only' && !(await isAgentSkill(record, bmadDir))) {
+        result.skippedFiltered++;
+        continue;
+      }
+
+      // Reserved-name guard is OpenCode-specific. Other adapters that opt
+      // into commands_target_dir later should declare their own reserved
+      // set rather than inheriting OpenCode's.
+      if (this.name === 'opencode' && RESERVED_OPENCODE_COMMANDS.has(canonicalId)) {
+        result.skippedCollision++;
+        continue;
+      }
+
+      let description = (record.description || '').trim();
+      if (!description) {
+        description = `Run the ${canonicalId} skill`;
+        result.fallbackDescription++;
+      }
+
+      const body = buildCommandPointerBody(description, canonicalId, { template, targetDir });
+      const commandFile = path.join(commandsPath, `${canonicalId}${extension}`);
+
+      // If a pointer file already exists, decide whether to overwrite based
+      // on whether it looks like generator output (description-only diff) or
+      // a user-modified file. forceCommands overrides this protection.
+      if (!options.forceCommands && (await fs.pathExists(commandFile))) {
+        let existing;
+        try {
+          existing = await fs.readFile(commandFile, 'utf8');
+        } catch {
+          // Treat unreadable as user-owned and skip — safer than overwriting.
+          result.skippedExisting++;
+          continue;
+        }
+
+        if (existing === body) {
+          // No-op idempotent re-run.
+          result.skippedExisting++;
+          continue;
+        }
+        if (looksLikeGeneratorOutput(existing, canonicalId, { template, targetDir })) {
+          // Description (or other generated bit) has changed; refresh in place.
+          try {
+            await fs.writeFile(commandFile, body, 'utf8');
+            result.updated++;
+          } catch (error) {
+            result.writeFailures++;
+            if (!options.silent) {
+              await prompts.log.warn(`Failed to update command pointer ${canonicalId}${extension}: ${error.message}`);
+            }
+          }
+          continue;
+        }
+        // Hand-modified pointer — preserve it.
+        result.skippedExisting++;
+        continue;
+      }
+
+      try {
+        await fs.writeFile(commandFile, body, 'utf8');
+        result.created++;
+      } catch (error) {
+        result.writeFailures++;
+        if (!options.silent) {
+          await prompts.log.warn(`Failed to write command pointer ${canonicalId}${extension}: ${error.message}`);
+        }
+      }
+    }
+
+    return result;
+  }
+
   /**
    * Install verbatim native SKILL.md directories from skill-manifest.csv.
    * Copies the entire source directory as-is into the IDE skill directory.
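For orientation, the `skill-manifest.csv` rows driving this loop look roughly like this (column names are the ones the code reads — `canonicalId`, `description`, `path`; values are hypothetical):

    canonicalId,description,path
    bmad-dev,Run the dev workflow,_bmad/bmm/skills/dev/SKILL.md
    bmad-architect,Design the system architecture,_bmad/bmm/skills/architect/SKILL.md
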
@@ -197,6 +478,18 @@ class ConfigDrivenIdeSetup {
     if (count > 0) {
       await prompts.log.success(`${this.name} configured: ${count} skills → ${targetDir}`);
     }
+    const cmd = results.commands;
+    if (cmd && (cmd.created > 0 || cmd.updated > 0) && this.installerConfig?.commands_target_dir) {
+      const total = cmd.created + cmd.updated;
+      const detail = cmd.updated > 0 ? `${cmd.created} new, ${cmd.updated} refreshed` : `${total}`;
+      await prompts.log.success(`${this.name} commands: ${detail} → ${this.installerConfig.commands_target_dir}`);
+      if (cmd.skippedCollision > 0) {
+        await prompts.log.message(`  (${cmd.skippedCollision} skipped — name collides with reserved slash command)`);
+      }
+      if (cmd.writeFailures > 0) {
+        await prompts.log.warn(`  (${cmd.writeFailures} pointer writes failed — see warnings above)`);
+      }
+    }
   }
 
   /**
@@ -222,27 +515,6 @@ class ConfigDrivenIdeSetup {
       removalSet = new Set();
     }
 
-    // Migrate legacy target directories (e.g. .opencode/agent → .opencode/agents)
-    // Legacy dirs are abandoned entirely, so use prefix matching (null removalSet)
-    if (this.installerConfig?.legacy_targets) {
-      const legacyDirsExist = await Promise.all(
-        this.installerConfig.legacy_targets.map((d) =>
-          this.isGlobalPath(d) ? fs.pathExists(d.replace(/^~/, os.homedir())) : fs.pathExists(path.join(projectDir, d)),
-        ),
-      );
-      if (legacyDirsExist.some(Boolean)) {
-        if (!options.silent) await prompts.log.message('  Migrating legacy directories...');
-        for (const legacyDir of this.installerConfig.legacy_targets) {
-          if (this.isGlobalPath(legacyDir)) {
-            await this.warnGlobalLegacy(legacyDir, options);
-          } else {
-            await this.cleanupTarget(projectDir, legacyDir, options, null);
-            await this.removeEmptyParents(projectDir, legacyDir);
-          }
-        }
-      }
-    }
-
     // Strip BMAD markers from copilot-instructions.md if present
     if (this.name === 'github-copilot') {
       await this.cleanupCopilotInstructions(projectDir, options);
@@ -258,47 +530,47 @@ class ConfigDrivenIdeSetup {
       await this.cleanupRovoDevPrompts(projectDir, options);
     }
 
+    // Clean generated command pointer files in commands_target_dir.
+    // Mirrors target_dir cleanup so uninstalls and skill removals don't
+    // leave dangling /<canonicalId> commands pointing at missing skills.
+    // Runs regardless of skipTarget — command pointers live in a per-IDE
+    // directory and are not deduped across peers, so a peer-owned shared
+    // skills directory does not protect this IDE's command pointers from
+    // cleanup. The "currently active" set is passed so install-flow cleanup
+    // (where removalSet contains skills that will be re-added moments later)
+    // doesn't trample hand-edited pointers; install-flow cleanup will only
+    // delete pointers for skills that are not in the new manifest.
+    if (this.installerConfig?.commands_target_dir) {
+      // In the install/update flow (signal: previousSkillIds was passed),
+      // spare pointers whose canonicalId is still in the manifest so hand
+      // edits survive a routine reinstall. In the uninstall flow (no
+      // previousSkillIds — full uninstall or per-IDE removal via
+      // cleanupByList), don't spare anything; the IDE itself is going away,
+      // so its pointers should go with it.
+      const isInstallFlow = options.previousSkillIds && options.previousSkillIds.size > 0;
+      const activeSkillIds = isInstallFlow ? await this._readActiveSkillIds(resolvedBmadDir) : new Set();
+      const extension = this.installerConfig.commands_extension || '.md';
+      await this.cleanupCommandPointers(
+        projectDir,
+        this.installerConfig.commands_target_dir,
+        options,
+        removalSet,
+        activeSkillIds,
+        extension,
+      );
+    }
+
+    // Skip target_dir cleanup when a peer platform owns this directory
+    // (set during dedup'd install or when uninstalling one of several
+    // platforms that share the same target_dir).
+    if (options.skipTarget) return;
+
     // Clean current target directory
     if (this.installerConfig?.target_dir) {
       await this.cleanupTarget(projectDir, this.installerConfig.target_dir, options, removalSet);
     }
   }
 
-  /**
-   * Check if a path is global (starts with ~ or is absolute)
-   * @param {string} p - Path to check
-   * @returns {boolean}
-   */
-  isGlobalPath(p) {
-    return p.startsWith('~') || path.isAbsolute(p);
-  }
-
-  /**
-   * Warn about stale BMAD files in a global legacy directory (never auto-deletes)
-   * @param {string} legacyDir - Legacy directory path (may start with ~)
-   * @param {Object} options - Options (silent, etc.)
-   */
-  async warnGlobalLegacy(legacyDir, options = {}) {
-    try {
-      const expanded = legacyDir.startsWith('~/')
-        ? path.join(os.homedir(), legacyDir.slice(2))
-        : legacyDir === '~'
-          ? os.homedir()
-          : legacyDir;
-
-      if (!(await fs.pathExists(expanded))) return;
-
-      const entries = await fs.readdir(expanded);
-      const bmadFiles = entries.filter((e) => typeof e === 'string' && e.startsWith('bmad'));
-
-      if (bmadFiles.length > 0 && !options.silent) {
-        await prompts.log.warn(`Found ${bmadFiles.length} stale BMAD file(s) in ${expanded}. Remove manually: rm ${expanded}/bmad-*`);
-      }
-    } catch {
-      // Errors reading global paths are silently ignored
-    }
-  }
-
   /**
    * Find the _bmad directory in a project
    * @param {string} projectDir - Project directory
@@ -387,6 +659,97 @@ class ConfigDrivenIdeSetup {
     }
   }
 
+  /**
+   * Cleanup generated command pointer files for entries in removalSet.
+   * Symmetric counterpart to installCommandPointers — removes
+   * `<canonicalId><extension>` files whose canonicalId is in the set. Removes
+   * the commands directory entirely if it ends up empty.
+   * @param {string} projectDir
+   * @param {string} commandsTargetDir - Relative dir (e.g. .opencode/commands)
+   * @param {Object} options
+   * @param {Set<string>} removalSet - canonicalIds whose pointer files to remove
+   * @param {Set<string>} [activeSkillIds] - canonicalIds present in the
+   *   current manifest. Pointers for IDs in this set are spared so an
+   *   install-flow cleanup (where removalSet === previousSkillIds and the
+   *   same skills are about to be re-installed) doesn't wipe hand-edited
+   *   pointer files. Pass an empty set or omit to delete every match in
+   *   removalSet (uninstall flow).
+   * @param {string} [extension] - Pointer file extension (default '.md');
+   *   matches the platform's commands_extension config value so cleanup
+   *   correctly identifies pointer files for IDEs whose convention isn't .md
+   *   (e.g. Copilot's `.agent.md`).
+   */
+  async cleanupCommandPointers(
+    projectDir,
+    commandsTargetDir,
+    options = {},
+    removalSet = new Set(),
+    activeSkillIds = new Set(),
+    extension = '.md',
+  ) {
+    if (!removalSet || removalSet.size === 0) return;
+
+    const commandsPath = path.join(projectDir, commandsTargetDir);
+    if (!(await fs.pathExists(commandsPath))) return;
+
+    let entries;
+    try {
+      entries = await fs.readdir(commandsPath);
+    } catch {
+      return;
+    }
+
+    for (const entry of entries) {
+      if (!entry.endsWith(extension)) continue;
+      const canonicalId = entry.slice(0, -extension.length);
+      if (!removalSet.has(canonicalId)) continue;
+      // Spare pointers for skills that are still in the manifest; the
+      // install pass will refresh them in place if their content has gone
+      // stale, while preserving hand edits.
+      if (activeSkillIds.has(canonicalId)) continue;
+      try {
+        await fs.remove(path.join(commandsPath, entry));
+      } catch {
+        // Skip files we can't remove.
+      }
+    }
+
+    // Remove the commands directory if we emptied it.
+    try {
+      const remaining = await fs.readdir(commandsPath);
+      if (remaining.length === 0) {
+        await fs.remove(commandsPath);
+      }
+    } catch {
+      // Directory may already be gone.
+    }
+  }
+
+  /**
+   * Read the canonicalIds currently present in the skill-manifest.csv.
+   * Used by cleanup to distinguish "re-install of an existing skill"
+   * (preserve pointer) from "skill truly being removed" (delete pointer).
+   * @param {string|null} bmadDir
+   * @returns {Promise<Set<string>>}
+   */
+  async _readActiveSkillIds(bmadDir) {
+    const ids = new Set();
+    if (!bmadDir) return ids;
+    const csvPath = path.join(bmadDir, '_config', 'skill-manifest.csv');
+    if (!(await fs.pathExists(csvPath))) return ids;
+    try {
+      const content = await fs.readFile(csvPath, 'utf8');
+      const records = csv.parse(content, { columns: true, skip_empty_lines: true });
+      for (const record of records) {
+        if (record.canonicalId) ids.add(record.canonicalId);
+      }
+    } catch {
+      // Manifest unreadable — return an empty set so cleanup falls back to
+      // the conservative "delete what removalSet says" behavior.
+    }
+    return ids;
+  }
+
   /**
    * Cleanup a specific target directory.
    * When removalSet is provided, only removes entries in that set.
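A sketch of the two cleanup flows those comments describe, with hypothetical ids on a hypothetical `setup` handler instance:

    // Install flow: removalSet is the previous install's ids, but bmad-dev
    // is still in the new manifest — its pointer (and any hand edits) survive.
    await setup.cleanupCommandPointers(projectDir, '.opencode/commands', options,
      new Set(['bmad-dev', 'bmad-old-skill']),  // removalSet
      new Set(['bmad-dev']),                    // activeSkillIds
      '.md');
    // → only bmad-old-skill.md is deleted.

    // Uninstall flow: empty active set, so every pointer in removalSet goes.
    await setup.cleanupCommandPointers(projectDir, '.opencode/commands', options,
      new Set(['bmad-dev', 'bmad-old-skill']), new Set(), '.md');
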
@@ -426,8 +789,8 @@ class ConfigDrivenIdeSetup {
       // Always preserve bmad-os-* utility skills regardless of cleanup mode
       if (entry.startsWith('bmad-os-')) continue;
 
-      // Surgical removal from set, or legacy prefix matching when set is null
-      const shouldRemove = removalSet ? removalSet.has(entry) : entry.startsWith('bmad');
+      // Surgical removal from set, or fallback to manifest+prefix detection when null
+      const shouldRemove = removalSet ? removalSet.has(entry) : isBmadOwnedEntry(entry, null);
 
       if (shouldRemove) {
         try {

@@ -590,10 +953,9 @@ class ConfigDrivenIdeSetup {
       try {
         if (await fs.pathExists(candidatePath)) {
           const entries = await fs.readdir(candidatePath);
-          const hasBmad = entries.some(
-            (e) => typeof e === 'string' && e.toLowerCase().startsWith('bmad') && !e.toLowerCase().startsWith('bmad-os-'),
-          );
-          if (hasBmad) {
+          const ancestorBmadDir = await this._findBmadDir(current);
+          const canonicalIds = await getInstalledCanonicalIds(ancestorBmadDir);
+          if (entries.some((e) => isBmadOwnedEntry(e, canonicalIds))) {
             return candidatePath;
           }
         }
@@ -605,43 +967,6 @@ class ConfigDrivenIdeSetup {
 
     return null;
   }
 
-  /**
-   * Walk up ancestor directories from relativeDir toward projectDir, removing each if empty
-   * Stops at projectDir boundary — never removes projectDir itself
-   * @param {string} projectDir - Project root (boundary)
-   * @param {string} relativeDir - Relative directory to start from
-   */
-  async removeEmptyParents(projectDir, relativeDir) {
-    const resolvedProject = path.resolve(projectDir);
-    let current = relativeDir;
-    let last = null;
-    while (current && current !== '.' && current !== last) {
-      last = current;
-      const fullPath = path.resolve(projectDir, current);
-      // Boundary guard: never traverse outside projectDir
-      if (!fullPath.startsWith(resolvedProject + path.sep) && fullPath !== resolvedProject) break;
-      try {
-        if (!(await fs.pathExists(fullPath))) {
-          // Dir already gone — advance current; last is reset at top of next iteration
-          current = path.dirname(current);
-          continue;
-        }
-        const remaining = await fs.readdir(fullPath);
-        if (remaining.length > 0) break;
-        await fs.rmdir(fullPath);
-      } catch (error) {
-        // ENOTEMPTY: TOCTOU race (file added between readdir and rmdir) — skip level, continue upward
-        // ENOENT: dir removed by another process between pathExists and rmdir — skip level, continue upward
-        if (error.code === 'ENOTEMPTY' || error.code === 'ENOENT') {
-          current = path.dirname(current);
-          continue;
-        }
-        break; // fatal error (e.g. EACCES) — stop upward walk
-      }
-      current = path.dirname(current);
-    }
-  }
 }
 
 module.exports = { ConfigDrivenIdeSetup };

@@ -160,8 +160,18 @@ class IdeManager {
     let detail = '';
     if (handlerResult && handlerResult.results) {
       const r = handlerResult.results;
-      const count = r.skillDirectories || r.skills || 0;
-      if (count > 0) detail = `${count} skills`;
+      let count = r.skillDirectories || r.skills || 0;
+      // Dedup'd platform: report the count its peer wrote so the user sees
+      // a consistent picture across all platforms sharing the dir.
+      if (count === 0 && r.sharedTargetHandledByPeer && options.sharedSkillCount) {
+        count = options.sharedSkillCount;
+      }
+      const targetDir = handler.installerConfig?.target_dir || null;
+      if (count > 0 && targetDir) {
+        detail = `${count} skills → ${targetDir}`;
+      } else if (count > 0) {
+        detail = `${count} skills`;
+      }
     }
     // Propagate handler's success status (default true for backward compat)
     const success = handlerResult?.success !== false;
@@ -172,6 +182,57 @@ class IdeManager {
     }
   }
 
+  /**
+   * Run setup for multiple IDEs as a single batch.
+   * Dedupes work when several selected platforms share the same target_dir:
+   * the first platform owns the directory write, peers skip it.
+   * @param {Array<string>} ideList - IDE names to set up
+   * @param {string} projectDir
+   * @param {string} bmadDir
+   * @param {Object} [options] - Forwarded to each handler.setup
+   * @returns {Promise<Array>} Per-IDE results
+   */
+  async setupBatch(ideList, projectDir, bmadDir, options = {}) {
+    await this.ensureInitialized();
+    const results = [];
+    // target_dir → { firstIde, skillCount } from the platform that actually wrote it
+    const claimedTargets = new Map();
+
+    for (const ideName of ideList) {
+      const handler = this.handlers.get(ideName.toLowerCase());
+      if (!handler) {
+        results.push(await this.setup(ideName, projectDir, bmadDir, options));
+        continue;
+      }
+
+      const target = handler.installerConfig?.target_dir || null;
+      const claim = target ? claimedTargets.get(target) : null;
+      const skipTarget = !!claim;
+
+      const result = await this.setup(ideName, projectDir, bmadDir, {
+        ...options,
+        skipTarget,
+        sharedWith: claim?.firstIde || null,
+        sharedTarget: target,
+        sharedSkillCount: claim?.skillCount || 0,
+      });
+
+      if (target && !claim) {
+        const writtenCount = result.handlerResult?.results?.skillDirectories || result.handlerResult?.results?.skills || 0;
+        // Only claim the target when the install actually succeeded and wrote skills.
+        // If the first platform fails (ancestor conflict, exception, etc.), leave the
+        // dir unclaimed so the next peer becomes the new first writer instead of
+        // silently skipping into a broken/empty target_dir.
+        if (result.success && writtenCount > 0) {
+          claimedTargets.set(target, { firstIde: ideName, skillCount: writtenCount });
+        }
+      }
+      results.push(result);
+    }
+
+    return results;
+  }
+
   /**
    * Cleanup IDE configurations
    * @param {string} projectDir - Project directory
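A usage sketch of the batch dedup (tool ids taken from this changeset; the shared-dir outcome follows from the claim logic above):

    const manager = new IdeManager();
    // cursor, codex and gemini all target .agents/skills after this change.
    const results = await manager.setupBatch(
      ['cursor', 'codex', 'gemini'], projectDir, bmadDir, options);
    // cursor writes .agents/skills once; codex and gemini run with
    // skipTarget: true, generate only their own per-IDE command pointers,
    // and report the peer's skill count via sharedSkillCount.
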
@@ -198,6 +259,8 @@ class IdeManager {
    * @param {string} projectDir - Project directory
    * @param {Array<string>} ideList - List of IDE names to clean up
    * @param {Object} [options] - Cleanup options passed through to handlers
+   *   options.remainingIdes - IDE names still installed after this cleanup; used
+   *   to skip target_dir wipe when a co-installed platform shares the dir.
    * @returns {Array} Results array
    */
   async cleanupByList(projectDir, ideList, options = {}) {
@@ -211,13 +274,27 @@ class IdeManager {
     // Build lowercase lookup for case-insensitive matching
     const lowercaseHandlers = new Map([...this.handlers.entries()].map(([k, v]) => [k.toLowerCase(), v]));
 
+    // Resolve target_dirs for IDEs that will remain installed after this cleanup
+    const remainingTargets = new Set();
+    if (Array.isArray(options.remainingIdes)) {
+      for (const remaining of options.remainingIdes) {
+        const h = lowercaseHandlers.get(String(remaining).toLowerCase());
+        const t = h?.installerConfig?.target_dir;
+        if (t) remainingTargets.add(t);
+      }
+    }
+
     for (const ideName of ideList) {
       const handler = lowercaseHandlers.get(ideName.toLowerCase());
       if (!handler) continue;
 
+      const target = handler.installerConfig?.target_dir || null;
+      const skipTarget = target && remainingTargets.has(target);
+      const cleanupOptions = skipTarget ? { ...options, skipTarget: true } : options;
+
       try {
-        await handler.cleanup(projectDir, options);
-        results.push({ ide: ideName, success: true });
+        await handler.cleanup(projectDir, cleanupOptions);
+        results.push({ ide: ideName, success: true, skippedTarget: !!skipTarget });
       } catch (error) {
         results.push({ ide: ideName, success: false, error: error.message });
       }

@@ -31,7 +50 @@ function clearCache() {
   _cachedPlatformCodes = null;
 }
 
+/**
+ * Format the installable platform list for human-readable output (used by --list-tools).
+ * Sourced from IdeManager so this view matches what --tools accepts at install time
+ * (suspended platforms excluded).
+ * @returns {Promise<string>} Formatted multi-line string with id, name, target_dir, preferred flag.
+ */
+async function formatPlatformList() {
+  const { IdeManager } = require('./manager');
+  const ideManager = new IdeManager();
+  await ideManager.ensureInitialized();
+
+  const entries = ideManager.getAvailableIdes().map((ide) => {
+    const handler = ideManager.handlers.get(ide.value);
+    return {
+      id: ide.value,
+      name: ide.name,
+      targetDir: handler?.installerConfig?.target_dir || '',
+      preferred: ide.preferred,
+    };
+  });
+
+  const idWidth = Math.max(...entries.map((e) => e.id.length), 'ID'.length);
+  const nameWidth = Math.max(...entries.map((e) => e.name.length), 'Name'.length);
+
+  const pad = (s, w) => s + ' '.repeat(Math.max(0, w - s.length));
+  const lines = [
+    `Supported tool IDs (pass via --tools <id>[,<id>...]):`,
+    '',
+    `  ${pad('ID', idWidth)}  ${pad('Name', nameWidth)}  Target dir`,
+    `  ${pad('-'.repeat(idWidth), idWidth)}  ${pad('-'.repeat(nameWidth), nameWidth)}  ${'-'.repeat(10)}`,
+  ];
+
+  for (const e of entries) {
+    const star = e.preferred ? ' *' : '  ';
+    lines.push(`${star}${pad(e.id, idWidth)}  ${pad(e.name, nameWidth)}  ${e.targetDir}`);
+  }
+
+  lines.push('', '* = recommended / preferred', '', 'Example: bmad-method install --modules bmm --tools claude-code');
+
+  return lines.join('\n');
+}
+
 module.exports = {
   loadPlatformCodes,
   clearCache,
+  formatPlatformList,
 };

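Roughly what `formatPlatformList` renders for `--list-tools`, using a few entries from the platform table below (widths are computed at runtime, so real output aligns on the longest id/name):

    Supported tool IDs (pass via --tools <id>[,<id>...]):

      ID           Name         Target dir
      -----------  -----------  ----------
     *claude-code  Claude Code  .claude/skills
     *codex        Codex        .agents/skills
      gemini       Gemini CLI   .agents/skills

    * = recommended / preferred

    Example: bmad-method install --modules bmm --tools claude-code
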
@ -5,122 +5,218 @@
# preferred: Whether shown as a recommended option on install
# suspended: (optional) Message explaining why install is blocked
# installer:
-#   target_dir: Directory where skill directories are installed
-#   legacy_targets: (optional) Old target dirs to clean up on reinstall
+#   target_dir: Directory where skill directories are installed (project/workspace)
+#   global_target_dir: (optional) User-home directory for global install
#   ancestor_conflict_check: (optional) Refuse install when ancestor dir has BMAD files
+#
+# Multiple platforms may share the same target_dir or global_target_dir — many tools
+# read from the shared `.agents/skills/` and `~/.agents/skills/` cross-tool standard.
+# Paths verified against each tool's primary docs as of 2026-04-25.

platforms:
+  adal:
+    name: "AdaL"
+    preferred: false
+    installer:
+      target_dir: .adal/skills
+      global_target_dir: ~/.adal/skills
+
+  amp:
+    name: "Sourcegraph Amp"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.config/agents/skills
+
  antigravity:
    name: "Google Antigravity"
    preferred: false
    installer:
-      legacy_targets:
-        - .agent/workflows
      target_dir: .agent/skills
+      global_target_dir: ~/.gemini/antigravity/skills

  auggie:
    name: "Auggie"
    preferred: false
    installer:
-      legacy_targets:
-        - .augment/commands
-      target_dir: .augment/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+
+  bob:
+    name: "IBM Bob"
+    preferred: false
+    installer:
+      target_dir: .bob/skills
+      global_target_dir: ~/.bob/skills

  claude-code:
    name: "Claude Code"
    preferred: true
    installer:
-      legacy_targets:
-        - .claude/commands
      target_dir: .claude/skills
+      global_target_dir: ~/.claude/skills

  cline:
    name: "Cline"
    preferred: false
    installer:
-      legacy_targets:
-        - .clinerules/workflows
      target_dir: .cline/skills
+      global_target_dir: ~/.cline/skills

  codex:
    name: "Codex"
-    preferred: false
+    preferred: true
    installer:
-      legacy_targets:
-        - .codex/prompts
-        - ~/.codex/prompts
      target_dir: .agents/skills
+      global_target_dir: ~/.codex/skills

  codebuddy:
    name: "CodeBuddy"
    preferred: false
    installer:
-      legacy_targets:
-        - .codebuddy/commands
      target_dir: .codebuddy/skills
+      global_target_dir: ~/.codebuddy/skills
+
+  command-code:
+    name: "Command Code"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+
+  cortex:
+    name: "Snowflake Cortex Code"
+    preferred: false
+    installer:
+      target_dir: .cortex/skills
+      global_target_dir: ~/.snowflake/cortex/skills

  crush:
    name: "Crush"
    preferred: false
    installer:
-      legacy_targets:
-        - .crush/commands
-      target_dir: .crush/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.config/agents/skills

  cursor:
    name: "Cursor"
    preferred: true
    installer:
-      legacy_targets:
-        - .cursor/commands
-      target_dir: .cursor/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+
+  droid:
+    name: "Factory Droid"
+    preferred: false
+    installer:
+      target_dir: .factory/skills
+      global_target_dir: ~/.factory/skills
+
+  firebender:
+    name: "Firebender"
+    preferred: false
+    installer:
+      target_dir: .firebender/skills
+      global_target_dir: ~/.agents/skills

  gemini:
    name: "Gemini CLI"
    preferred: false
    installer:
-      legacy_targets:
-        - .gemini/commands
-      target_dir: .gemini/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills

  github-copilot:
    name: "GitHub Copilot"
-    preferred: false
+    preferred: true
    installer:
-      legacy_targets:
-        - .github/agents
-        - .github/prompts
-      target_dir: .github/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+      commands_target_dir: .github/agents
+      commands_extension: .agent.md
+      commands_body_template: "LOAD the FULL {project-root}/{target_dir}/{canonicalId}/SKILL.md, READ its entire contents and follow its directions exactly!"
+      # The Custom Agents picker should only show persona agents (not
+      # workflows/tools). Detected by reading each skill's source
+      # `customize.toml` and checking for an `[agent]` section — that's
+      # the actual configuration source of truth: every BMAD persona is
+      # configured under `[agent]`, every workflow under `[workflow]`,
+      # every standalone skill has no customize.toml. This signal is
+      # naming-independent, so personas like `bmad-tea` (which doesn't
+      # follow the `-agent-` convention) are still included, and
+      # meta-skills like `bmad-agent-builder` (which contains `-agent-`
+      # but is a skill-builder workflow, not a persona) are correctly
+      # excluded.
+      commands_filter: agents-only
+
+  goose:
+    name: "Block Goose"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.config/agents/skills

  iflow:
    name: "iFlow"
    preferred: false
    installer:
-      legacy_targets:
-        - .iflow/commands
      target_dir: .iflow/skills
+      global_target_dir: ~/.iflow/skills

  junie:
    name: "Junie"
    preferred: false
    installer:
-      target_dir: .agents/skills
+      target_dir: .junie/skills
+      global_target_dir: ~/.junie/skills

  kilo:
    name: "KiloCoder"
    preferred: false
    installer:
-      legacy_targets:
-        - .kilocode/workflows
-      target_dir: .kilocode/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.kilocode/skills
+
+  kimi-code:
+    name: "Kimi Code"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills

  kiro:
    name: "Kiro"
    preferred: false
    installer:
-      legacy_targets:
-        - .kiro/steering
      target_dir: .kiro/skills
+      global_target_dir: ~/.kiro/skills
+
+  kode:
+    name: "Kode"
+    preferred: false
+    installer:
+      target_dir: .kode/skills
+      global_target_dir: ~/.kode/skills
+
+  mistral-vibe:
+    name: "Mistral Vibe"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.vibe/skills
+
+  mux:
+    name: "Mux"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+
+  neovate:
+    name: "Neovate"
+    preferred: false
+    installer:
+      target_dir: .neovate/skills
+      global_target_dir: ~/.neovate/skills

  ona:
    name: "Ona"
@ -128,65 +224,99 @@ platforms:
    installer:
      target_dir: .ona/skills

+  openclaw:
+    name: "OpenClaw"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+
  opencode:
    name: "OpenCode"
    preferred: false
    installer:
-      legacy_targets:
-        - .opencode/agents
-        - .opencode/commands
-        - .opencode/agent
-        - .opencode/command
-      target_dir: .opencode/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+      commands_target_dir: .opencode/commands
+
+  openhands:
+    name: "OpenHands"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills

  pi:
    name: "Pi"
    preferred: false
    installer:
-      target_dir: .pi/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+
+  pochi:
+    name: "Pochi"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills

  qoder:
    name: "Qoder"
    preferred: false
    installer:
      target_dir: .qoder/skills
+      global_target_dir: ~/.qoder/skills

  qwen:
    name: "QwenCoder"
    preferred: false
    installer:
-      legacy_targets:
-        - .qwen/commands
      target_dir: .qwen/skills
+      global_target_dir: ~/.qwen/skills
+
+  replit:
+    name: "Replit Agent"
+    preferred: false
+    installer:
+      target_dir: .agents/skills

  roo:
    name: "Roo Code"
    preferred: false
    installer:
-      legacy_targets:
-        - .roo/commands
-      target_dir: .roo/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills

  rovo-dev:
    name: "Rovo Dev"
    preferred: false
    installer:
-      legacy_targets:
-        - .rovodev/workflows
-      target_dir: .rovodev/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills

  trae:
    name: "Trae"
    preferred: false
    installer:
-      legacy_targets:
-        - .trae/rules
      target_dir: .trae/skills

+  warp:
+    name: "Warp"
+    preferred: false
+    installer:
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+
  windsurf:
    name: "Windsurf"
    preferred: false
    installer:
-      legacy_targets:
-        - .windsurf/workflows
-      target_dir: .windsurf/skills
+      target_dir: .agents/skills
+      global_target_dir: ~/.agents/skills
+
+  zencoder:
+    name: "Zencoder"
+    preferred: false
+    installer:
+      target_dir: .zencoder/skills
+      global_target_dir: ~/.zencoder/skills
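
As a sketch of how these entries are meant to be consumed (the resolveHome helper and entry shape are assumptions drawn from the schema comments above, not the installer's actual code):

const os = require('node:os');
const path = require('node:path');

const resolveHome = (p) => (p.startsWith('~/') ? path.join(os.homedir(), p.slice(2)) : p);

// Project-scoped install → <projectRoot>/<target_dir>; global install →
// global_target_dir under the user's home, when the platform declares one.
function skillInstallDir(platform, { global = false, projectRoot = process.cwd() } = {}) {
  const inst = platform.installer || {};
  if (global && inst.global_target_dir) return resolveHome(inst.global_target_dir);
  return path.join(projectRoot, inst.target_dir);
}
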
@ -0,0 +1,50 @@
const path = require('node:path');
const fs = require('../../fs-native');
const csv = require('csv-parse/sync');

/**
 * Read the global skill-manifest.csv and return the set of canonicalIds.
 * These define which directory entries in a target_dir are BMAD-owned, regardless
 * of whether they happen to start with "bmad-" (custom modules can ship skills
 * with any prefix, e.g. "fred-cool-skill").
 *
 * @param {string} bmadDir - Path to the _bmad install directory
 * @returns {Promise<Set<string>>} Set of canonicalIds, or empty set if manifest missing
 */
async function getInstalledCanonicalIds(bmadDir) {
  const ids = new Set();
  if (!bmadDir) return ids;

  const csvPath = path.join(bmadDir, '_config', 'skill-manifest.csv');
  if (!(await fs.pathExists(csvPath))) return ids;

  try {
    const content = await fs.readFile(csvPath, 'utf8');
    const records = csv.parse(content, { columns: true, skip_empty_lines: true });
    for (const record of records) {
      if (record.canonicalId) ids.add(record.canonicalId);
    }
  } catch {
    // Unreadable/invalid manifest — treat as no info
  }

  return ids;
}

/**
 * Test whether a directory entry is BMAD-owned.
 * Prefers the manifest's canonicalIds; falls back to the legacy "bmad" prefix
 * when no manifest is available (early install, ancestor lookup with no bmad dir).
 *
 * @param {string} entry - Directory entry name
 * @param {Set<string>|null} canonicalIds - From getInstalledCanonicalIds, or null
 * @returns {boolean}
 */
function isBmadOwnedEntry(entry, canonicalIds) {
  if (!entry || typeof entry !== 'string') return false;
  if (entry.toLowerCase().startsWith('bmad-os-')) return false;
  if (canonicalIds && canonicalIds.size > 0) return canonicalIds.has(entry);
  return entry.toLowerCase().startsWith('bmad');
}

module.exports = { getInstalledCanonicalIds, isBmadOwnedEntry };
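
A hypothetical caller, showing the intended pairing of the two exports (fs here is the same fs-native wrapper required above):

async function listBmadOwned(targetDir, bmadDir) {
  const canonicalIds = await getInstalledCanonicalIds(bmadDir); // empty Set when no manifest
  const entries = await fs.readdir(targetDir);
  // With manifest data: exact canonicalId membership. Without it, the legacy
  // "bmad" prefix fallback applies; either way "bmad-os-" entries are never claimed.
  return entries.filter((entry) => isBmadOwnedEntry(entry, canonicalIds));
}
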
@ -0,0 +1,210 @@
const path = require('node:path');
const fs = require('./fs-native');
const yaml = require('yaml');
const { getProjectRoot, getModulePath, getExternalModuleCachePath } = require('./project-root');

/**
 * Read a module.yaml and return its declared `code:` field, or null if missing/unparseable.
 */
async function readModuleCode(yamlPath) {
  try {
    const parsed = yaml.parse(await fs.readFile(yamlPath, 'utf8'));
    if (parsed && typeof parsed === 'object' && typeof parsed.code === 'string') {
      return parsed.code;
    }
  } catch {
    // fall through
  }
  return null;
}

/**
 * Discover module.yaml files for officials we can read locally:
 * - core, bmm: bundled in src/ (always present)
 * - external officials: only if previously cloned to ~/.bmad/cache/external-modules/
 *
 * Each result's `code` is the `code:` field from the module.yaml when present;
 * that's the value `--set <module>.<key>=<value>` matches against.
 *
 * Community/custom modules are not enumerated; users reference their own
 * module.yaml directly per the design (see issue #1663).
 *
 * @returns {Promise<Array<{code: string, yamlPath: string, source: string}>>}
 */
async function discoverOfficialModuleYamls() {
  const found = [];
  // Dedupe is case-insensitive because module caches occasionally retain a
  // legacy UPPERCASE-named directory alongside the canonical lowercase one
  // (same module, different cache key from an older schema). We pick whichever
  // entry we see first and skip the alternate-case duplicate. NOTE: `--set`
  // matching itself is case-sensitive (it keys on `moduleName` from the install
  // flow's selected list, which is always lowercase short codes), so the
  // surfaced `code` here is what users should type. Don't change to
  // case-sensitive dedupe without revisiting that contract.
  const seenCodes = new Set();

  const addFound = async (yamlPath, source, fallbackCode) => {
    const declaredCode = await readModuleCode(yamlPath);
    const code = declaredCode || fallbackCode;
    if (!code) return;
    const lower = code.toLowerCase();
    if (seenCodes.has(lower)) return;
    seenCodes.add(lower);
    found.push({ code, yamlPath, source });
  };

  // Built-ins.
  for (const code of ['core', 'bmm']) {
    const yamlPath = path.join(getModulePath(code), 'module.yaml');
    if (await fs.pathExists(yamlPath)) {
      // Built-ins use their well-known short codes regardless of what the
      // module.yaml `code:` says, since the install flow keys on these.
      seenCodes.add(code.toLowerCase());
      found.push({ code, yamlPath, source: 'built-in' });
    }
  }

  // Bundled in src/modules/<code>/module.yaml (rare, but supported by getModulePath).
  const srcModulesDir = path.join(getProjectRoot(), 'src', 'modules');
  if (await fs.pathExists(srcModulesDir)) {
    const entries = await fs.readdir(srcModulesDir, { withFileTypes: true });
    for (const entry of entries) {
      if (!entry.isDirectory()) continue;
      const yamlPath = path.join(srcModulesDir, entry.name, 'module.yaml');
      if (await fs.pathExists(yamlPath)) {
        await addFound(yamlPath, 'bundled', entry.name);
      }
    }
  }

  // External cache (~/.bmad/cache/external-modules/<code>/...).
  const cacheRoot = getExternalModuleCachePath('').replace(/\/$/, '');
  if (await fs.pathExists(cacheRoot)) {
    const rawEntries = await fs.readdir(cacheRoot, { withFileTypes: true });
    for (const entry of rawEntries) {
      if (!entry.isDirectory()) continue;
      const candidates = [
        path.join(cacheRoot, entry.name, 'module.yaml'),
        path.join(cacheRoot, entry.name, 'src', 'module.yaml'),
        path.join(cacheRoot, entry.name, 'skills', 'module.yaml'),
      ];
      for (const candidate of candidates) {
        if (await fs.pathExists(candidate)) {
          await addFound(candidate, 'cached', entry.name);
          break;
        }
      }
    }
  }

  return found;
}

function formatPromptText(item) {
  if (Array.isArray(item.prompt)) return item.prompt.join(' ');
  return String(item.prompt || '').trim();
}

function inferType(item) {
  if (item['single-select']) return 'single-select';
  if (item['multi-select']) return 'multi-select';
  if (typeof item.default === 'boolean') return 'boolean';
  if (typeof item.default === 'number') return 'number';
  return 'string';
}

function formatModuleOptions(code, parsed, source) {
  const lines = [];
  const header = source === 'built-in' ? code : `${code} (${source})`;
  lines.push(header + ':');

  let count = 0;
  for (const [key, item] of Object.entries(parsed)) {
    if (!item || typeof item !== 'object' || !('prompt' in item)) continue;
    count++;
    const type = inferType(item);
    const scope = item.scope === 'user' ? ' [user-scope]' : '';
    const defaultStr = item.default === undefined || item.default === null ? '(none)' : String(item.default);
    lines.push(`  ${code}.${key} (${type}${scope}) default: ${defaultStr}`);
    const promptText = formatPromptText(item);
    if (promptText) lines.push(`    ${promptText}`);
    if (Array.isArray(item['single-select'])) {
      const values = item['single-select'].map((v) => (typeof v === 'object' ? v.value : v)).filter((v) => v !== undefined);
      if (values.length > 0) lines.push(`    values: ${values.join(' | ')}`);
    }
    lines.push('');
  }

  if (count === 0) {
    lines.push('  (no configurable options)', '');
  }
  return lines.join('\n');
}

/**
 * Render `--list-options` output.
 *
 * Returns `{ text, ok }` so callers can surface a non-zero exit code on
 * a typo'd module-code lookup. Discovery dedupes case-insensitively, so
 * the lookup is also case-insensitive — typing `--list-options BMM` and
 * `--list-options bmm` both find the bmm built-in.
 *
 * @param {string|null} moduleCode - if non-null, restrict to this module
 * @returns {Promise<{text: string, ok: boolean}>}
 */
async function formatOptionsList(moduleCode) {
  const discovered = await discoverOfficialModuleYamls();
  const needle = moduleCode ? moduleCode.toLowerCase() : null;
  const filtered = needle ? discovered.filter((d) => d.code.toLowerCase() === needle) : discovered;

  if (filtered.length === 0) {
    if (moduleCode) {
      const text = [
        `No locally-known module.yaml for '${moduleCode}'.`,
        '',
        'Built-in modules (core, bmm) are always available. External officials',
        'appear here after they have been installed at least once on this machine',
        '(they are cached under ~/.bmad/cache/external-modules/).',
        '',
        'For community or custom modules, read the module.yaml file in that',
        "module's source repository directly.",
      ].join('\n');
      return { text, ok: false };
    }
    return { text: 'No modules found.', ok: false };
  }

  const sections = [];
  // Track when a module-scoped lookup couldn't actually be rendered (yaml
  // unparseable or empty after parse). The full `--list-options` output is
  // tolerant of one bad entry, but `--list-options <module>` against a single
  // unreadable module should still fail tooling so a CI script catches it.
  let moduleScopedFailure = false;
  sections.push('Available --set keys', 'Format: --set <module>.<key>=<value> (repeatable)', '');
  for (const { code, yamlPath, source } of filtered) {
    let parsed;
    try {
      parsed = yaml.parse(await fs.readFile(yamlPath, 'utf8'));
    } catch {
      sections.push(`${code} (${source}): could not parse module.yaml`, '');
      if (moduleCode) moduleScopedFailure = true;
      continue;
    }
    if (!parsed || typeof parsed !== 'object' || Array.isArray(parsed)) {
      sections.push(`${code} (${source}): module.yaml is not a valid object (got ${Array.isArray(parsed) ? 'array' : typeof parsed})`, '');
      if (moduleCode) moduleScopedFailure = true;
      continue;
    }
    sections.push(formatModuleOptions(code, parsed, source));
  }

  if (!moduleCode) {
    sections.push(
      'Community and custom modules are not listed here — read their module.yaml directly. Unknown keys still persist with a warning.',
    );
  }

  return { text: sections.join('\n'), ok: !moduleScopedFailure };
}

module.exports = { formatOptionsList, discoverOfficialModuleYamls };
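
A sketch of the intended CLI wiring (the handler shape is an assumption; the { text, ok } contract comes from formatOptionsList above):

async function handleListOptions(moduleCode) {
  const { text, ok } = await formatOptionsList(moduleCode || null);
  console.log(text);
  if (!ok) process.exitCode = 1; // typo'd or unreadable module fails CI scripts
}
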
@ -0,0 +1,203 @@
/**
 * Channel plan: the per-module resolution decision applied at install time.
 *
 * A "plan entry" for a module is:
 *   { channel: 'stable'|'next'|'pinned', pin?: string }
 *
 * We build the plan from:
 *   1. CLI flags (--channel / --all-* / --next=CODE / --pin CODE=TAG)
 *   2. Interactive answers (the "all stable?" gate + per-module picker)
 *   3. Registry defaults (default_channel from registry-fallback.yaml / official.yaml)
 *   4. Hardcoded fallback 'stable'
 *
 * Precedence: --pin > --next=CODE > --channel (global) > registry default > 'stable'.
 *
 * This module is pure. No prompts, no git, no filesystem.
 */

const VALID_CHANNELS = new Set(['stable', 'next']);

/**
 * Parse raw commander options into a structured channel options object.
 *
 * @param {Object} options - raw command-line options
 * @returns {{
 *   global: 'stable'|'next'|null,
 *   nextSet: Set<string>,
 *   pins: Map<string, string>,
 *   warnings: string[],
 *   acceptBypass: boolean
 * }}
 */
function parseChannelOptions(options = {}) {
  const warnings = [];

  // Global channel from --channel / --all-stable / --all-next.
  let global = null;
  const aliases = [];
  if (options.channel) aliases.push({ flag: '--channel', value: normalizeChannel(options.channel, warnings, '--channel') });
  if (options.allStable) aliases.push({ flag: '--all-stable', value: 'stable' });
  if (options.allNext) aliases.push({ flag: '--all-next', value: 'next' });

  const distinct = new Set(aliases.map((a) => a.value).filter(Boolean));
  if (distinct.size > 1) {
    warnings.push(
      `Conflicting channel flags: ${aliases
        .filter((a) => a.value)
        .map((a) => a.flag + '=' + a.value)
        .join(', ')}. Using first: ${aliases.find((a) => a.value).flag}.`,
    );
  }
  const firstValid = aliases.find((a) => a.value);
  if (firstValid) global = firstValid.value;

  // --next=CODE (repeatable)
  const nextSet = new Set();
  for (const code of options.next || []) {
    const trimmed = String(code).trim();
    if (!trimmed) continue;
    nextSet.add(trimmed);
  }

  // --pin CODE=TAG (repeatable)
  const pins = new Map();
  for (const spec of options.pin || []) {
    const parsed = parsePinSpec(spec);
    if (!parsed) {
      warnings.push(`Ignoring malformed --pin value '${spec}'. Expected CODE=TAG.`);
      continue;
    }
    if (pins.has(parsed.code)) {
      warnings.push(`--pin specified multiple times for '${parsed.code}'. Using last: ${parsed.tag}.`);
    }
    pins.set(parsed.code, parsed.tag);
  }

  // --yes auto-confirms the community-module curator-bypass prompt so
  // headless installs with --next=/--pin for a community module don't hang.
  const acceptBypass = options.yes === true || options.acceptBypass === true;

  return { global, nextSet, pins, warnings, acceptBypass };
}

function normalizeChannel(raw, warnings, flagName) {
  if (typeof raw !== 'string') return null;
  const lower = raw.trim().toLowerCase();
  if (VALID_CHANNELS.has(lower)) return lower;
  warnings.push(`Ignoring invalid ${flagName} value '${raw}'. Expected one of: stable, next.`);
  return null;
}

function parsePinSpec(spec) {
  if (typeof spec !== 'string') return null;
  const idx = spec.indexOf('=');
  if (idx <= 0 || idx === spec.length - 1) return null;
  const code = spec.slice(0, idx).trim();
  const tag = spec.slice(idx + 1).trim();
  if (!code || !tag) return null;
  return { code, tag };
}

/**
 * Build a per-module plan entry, applying precedence.
 *
 * @param {Object} args
 * @param {string} args.code
 * @param {Object} args.channelOptions - from parseChannelOptions
 * @param {string} [args.registryDefault] - module's default_channel, if any
 * @returns {{channel: 'stable'|'next'|'pinned', pin?: string, source: string}}
 *   source describes where the decision came from, for logging / debugging.
 */
function decideChannelForModule({ code, channelOptions, registryDefault }) {
  const { global, nextSet, pins } = channelOptions || { nextSet: new Set(), pins: new Map() };

  if (pins && pins.has(code)) {
    return { channel: 'pinned', pin: pins.get(code), source: 'flag:--pin' };
  }
  if (nextSet && nextSet.has(code)) {
    return { channel: 'next', source: 'flag:--next' };
  }
  if (global) {
    return { channel: global, source: 'flag:--channel' };
  }
  if (registryDefault && VALID_CHANNELS.has(registryDefault)) {
    return { channel: registryDefault, source: 'registry' };
  }
  return { channel: 'stable', source: 'default' };
}

/**
 * Build a full channel plan map for a set of modules.
 *
 * @param {Object} args
 * @param {Array<{code: string, defaultChannel?: string, builtIn?: boolean}>} args.modules
 *   Only the modules that need a channel entry; callers should filter out
 *   bundled modules (core/bmm) before calling.
 * @param {Object} args.channelOptions - from parseChannelOptions
 * @returns {Map<string, {channel: string, pin?: string, source: string}>}
 */
function buildPlan({ modules, channelOptions }) {
  const plan = new Map();
  for (const mod of modules || []) {
    plan.set(
      mod.code,
      decideChannelForModule({
        code: mod.code,
        channelOptions,
        registryDefault: mod.defaultChannel,
      }),
    );
  }
  return plan;
}

/**
 * Report any --pin CODE=TAG entries that don't correspond to a selected module.
 * These get warned about but don't abort the install.
 */
function orphanPinWarnings(channelOptions, selectedCodes) {
  const warnings = [];
  const selected = new Set(selectedCodes || []);
  for (const code of channelOptions?.pins?.keys() || []) {
    if (!selected.has(code)) {
      warnings.push(`--pin for '${code}' has no effect (module not selected).`);
    }
  }
  for (const code of channelOptions?.nextSet || []) {
    if (!selected.has(code)) {
      warnings.push(`--next for '${code}' has no effect (module not selected).`);
    }
  }
  return warnings;
}

/**
 * Warn when --pin / --next targets a bundled module (core, bmm). Those are
 * shipped inside the installer binary — there's no git clone to override, so
 * the flag has no effect. Users who actually want a prerelease core/bmm
 * should use `npx bmad-method@next install`.
 */
function bundledTargetWarnings(channelOptions, bundledCodes) {
  const warnings = [];
  const bundled = new Set(bundledCodes || []);
  const hint = '(bundled module; use `npx bmad-method@next install` for a prerelease)';
  for (const code of channelOptions?.pins?.keys() || []) {
    if (bundled.has(code)) {
      warnings.push(`--pin for '${code}' has no effect ${hint}.`);
    }
  }
  for (const code of channelOptions?.nextSet || []) {
    if (bundled.has(code)) {
      warnings.push(`--next for '${code}' has no effect ${hint}.`);
    }
  }
  return warnings;
}

module.exports = {
  parseChannelOptions,
  decideChannelForModule,
  buildPlan,
  orphanPinWarnings,
  bundledTargetWarnings,
  parsePinSpec,
};
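
A worked example of the precedence chain, using hypothetical module codes:

const channelOptions = parseChannelOptions({
  channel: 'next',      // global --channel next
  pin: ['cis=v2.1.0'],  // --pin cis=v2.1.0
  next: ['rapid'],      // --next=rapid
});

const plan = buildPlan({
  modules: [{ code: 'cis' }, { code: 'rapid' }, { code: 'other', defaultChannel: 'stable' }],
  channelOptions,
});

// plan.get('cis')   → { channel: 'pinned', pin: 'v2.1.0', source: 'flag:--pin' }   (--pin wins)
// plan.get('rapid') → { channel: 'next', source: 'flag:--next' }
// plan.get('other') → { channel: 'next', source: 'flag:--channel' }   (global beats registry default)
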
@ -0,0 +1,241 @@
const https = require('node:https');
const semver = require('semver');

/**
 * Channel resolver for external and community modules.
 *
 * A "channel" is the resolution strategy that decides which ref of a module
 * to clone when no explicit version is supplied:
 *   - stable: highest pure-semver git tag (excludes -alpha/-beta/-rc)
 *   - next: main branch HEAD
 *   - pinned: an explicit user-supplied tag
 *
 * This module is pure (no prompts, no git, no filesystem). It only talks to
 * the GitHub tags API and performs semver math. Clone logic lives in the
 * module managers that call resolveChannel().
 */

const GITHUB_API_BASE = 'https://api.github.com';
const DEFAULT_TIMEOUT_MS = 10_000;
const USER_AGENT = 'bmad-method-installer';

// Per-process cache: { 'owner/repo' => Array<{tag, version}> sorted highest-first } of pure-semver tags.
const tagCache = new Map();

/**
 * Parse a GitHub repo URL into { owner, repo }. Returns null if the URL is
 * not a GitHub URL the resolver can handle.
 */
function parseGitHubRepo(url) {
  if (!url || typeof url !== 'string') return null;
  const trimmed = url
    .trim()
    .replace(/\.git$/, '')
    .replace(/\/$/, '');

  // https://github.com/owner/repo
  const httpsMatch = trimmed.match(/^https?:\/\/github\.com\/([^/]+)\/([^/]+)(?:\/.*)?$/i);
  if (httpsMatch) return { owner: httpsMatch[1], repo: httpsMatch[2] };

  // git@github.com:owner/repo
  const sshMatch = trimmed.match(/^git@github\.com:([^/]+)\/([^/]+)$/i);
  if (sshMatch) return { owner: sshMatch[1], repo: sshMatch[2] };

  return null;
}

function fetchJson(url, { timeout = DEFAULT_TIMEOUT_MS } = {}) {
  const headers = {
    'User-Agent': USER_AGENT,
    Accept: 'application/vnd.github+json',
    'X-GitHub-Api-Version': '2022-11-28',
  };
  if (process.env.GITHUB_TOKEN) {
    headers.Authorization = `Bearer ${process.env.GITHUB_TOKEN}`;
  }

  return new Promise((resolve, reject) => {
    const req = https.get(url, { headers, timeout }, (res) => {
      let body = '';
      res.on('data', (chunk) => (body += chunk));
      res.on('end', () => {
        if (res.statusCode < 200 || res.statusCode >= 300) {
          const err = new Error(`GitHub API ${res.statusCode} for ${url}: ${body.slice(0, 200)}`);
          err.statusCode = res.statusCode;
          return reject(err);
        }
        try {
          resolve(JSON.parse(body));
        } catch (error) {
          reject(new Error(`Failed to parse GitHub response: ${error.message}`));
        }
      });
    });
    req.on('error', reject);
    req.on('timeout', () => {
      req.destroy();
      reject(new Error(`GitHub API request timed out: ${url}`));
    });
  });
}

/**
 * Strip a leading 'v' and return a valid semver string, or null if the tag
 * is not valid semver or is a prerelease (contains -alpha/-beta/-rc/etc.).
 */
function normalizeStableTag(tagName) {
  if (typeof tagName !== 'string') return null;
  const stripped = tagName.startsWith('v') ? tagName.slice(1) : tagName;
  const valid = semver.valid(stripped);
  if (!valid) return null;
  // Exclude prereleases. semver.prerelease returns null for pure releases.
  if (semver.prerelease(valid)) return null;
  return valid;
}

/**
 * Fetch pure-semver tags (highest first) from a GitHub repo.
 * Cached per-process per owner/repo.
 *
 * @returns {Promise<Array<{tag: string, version: string}>>}
 *   tag is the original ref name (e.g. "v1.7.0"), version is the cleaned
 *   semver (e.g. "1.7.0").
 */
async function fetchStableTags(owner, repo, { timeout } = {}) {
  const cacheKey = `${owner}/${repo}`;
  if (tagCache.has(cacheKey)) return tagCache.get(cacheKey);

  // GitHub returns up to 100 tags per page; one page is plenty for our modules.
  const url = `${GITHUB_API_BASE}/repos/${owner}/${repo}/tags?per_page=100`;
  const raw = await fetchJson(url, { timeout });
  if (!Array.isArray(raw)) {
    throw new TypeError(`Unexpected response from ${url}`);
  }

  const stable = [];
  for (const entry of raw) {
    const version = normalizeStableTag(entry?.name);
    if (version) stable.push({ tag: entry.name, version });
  }
  stable.sort((a, b) => semver.rcompare(a.version, b.version));

  tagCache.set(cacheKey, stable);
  return stable;
}

/**
 * Resolve a channel plan for a single module into a git-clonable ref.
 *
 * @param {Object} args
 * @param {'stable'|'next'|'pinned'} args.channel
 * @param {string} [args.pin] - Required when channel === 'pinned'
 * @param {string} args.repoUrl - Module's git URL (for tag lookup)
 * @returns {Promise<{channel, ref, version}>} where
 *   ref: the git ref to pass to `git clone --branch`, or null for HEAD (next)
 *   version: the resolved version string (tag name for stable/pinned, 'main' for next)
 *
 * Throws on:
 *   - pinned without a pin value
 *   - an unknown channel name
 *
 * Falls back to next-channel semantics and sets resolvedFallback=true when the
 * repo URL is not a parseable GitHub URL or stable resolution turns up no tags.
 */
async function resolveChannel({ channel, pin, repoUrl, timeout }) {
  if (channel === 'pinned') {
    if (!pin) throw new Error('resolveChannel: pinned channel requires a pin value');
    return { channel: 'pinned', ref: pin, version: pin, resolvedFallback: false };
  }

  if (channel === 'next') {
    return { channel: 'next', ref: null, version: 'main', resolvedFallback: false };
  }

  if (channel === 'stable') {
    const parsed = parseGitHubRepo(repoUrl);
    if (!parsed) {
      // No GitHub URL — caller must handle by falling back to next.
      return { channel: 'next', ref: null, version: 'main', resolvedFallback: true, reason: 'not-a-github-url' };
    }

    try {
      const tags = await fetchStableTags(parsed.owner, parsed.repo, { timeout });
      if (tags.length === 0) {
        return { channel: 'next', ref: null, version: 'main', resolvedFallback: true, reason: 'no-stable-tags' };
      }
      const top = tags[0];
      return { channel: 'stable', ref: top.tag, version: top.tag, resolvedFallback: false };
    } catch (error) {
      // Propagate the error; callers decide whether to fall back or abort.
      error.message = `Failed to resolve stable channel for ${parsed.owner}/${parsed.repo}: ${error.message}`;
      throw error;
    }
  }

  throw new Error(`resolveChannel: unknown channel '${channel}'`);
}

/**
 * Verify that a specific tag exists in a GitHub repo. Used to validate
 * --pin values before the user sits through a long clone that then fails.
 */
async function tagExists(owner, repo, tagName, { timeout } = {}) {
  const url = `${GITHUB_API_BASE}/repos/${owner}/${repo}/git/refs/tags/${encodeURIComponent(tagName)}`;
  try {
    await fetchJson(url, { timeout });
    return true;
  } catch (error) {
    if (error.statusCode === 404) return false;
    throw error;
  }
}

/**
 * Classify the semver delta between two versions.
 * - 'none' → same version (or downgrade; treated same)
 * - 'patch' → same major.minor, higher patch
 * - 'minor' → same major, higher minor
 * - 'major' → different major
 * - 'unknown' → either version is not valid semver; caller should treat as major
 */
function classifyUpgrade(currentVersion, newVersion) {
  const current = semver.valid(semver.coerce(currentVersion));
  const next = semver.valid(semver.coerce(newVersion));
  if (!current || !next) return 'unknown';
  if (semver.lte(next, current)) return 'none';
  const diff = semver.diff(current, next);
  if (diff === 'patch') return 'patch';
  if (diff === 'minor' || diff === 'preminor') return 'minor';
  if (diff === 'major' || diff === 'premajor') return 'major';
  // prepatch, prerelease — treat conservatively as minor (prereleases shouldn't
  // normally surface here since stable channel filters them out).
  return 'minor';
}

/**
 * Build the GitHub release notes URL for a resolved tag.
 * Returns null if the repo URL isn't a GitHub URL.
 */
function releaseNotesUrl(repoUrl, tag) {
  const parsed = parseGitHubRepo(repoUrl);
  if (!parsed || !tag) return null;
  return `https://github.com/${parsed.owner}/${parsed.repo}/releases/tag/${encodeURIComponent(tag)}`;
}

/**
 * Test-only: clear the per-process tag cache.
 */
function _clearTagCache() {
  tagCache.clear();
}

module.exports = {
  parseGitHubRepo,
  fetchStableTags,
  resolveChannel,
  tagExists,
  classifyUpgrade,
  releaseNotesUrl,
  normalizeStableTag,
  _clearTagCache,
};
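
An illustrative use of the resolver and the semver helpers; the repo URL and versions are hypothetical:

const resolved = await resolveChannel({
  channel: 'stable',
  repoUrl: 'https://github.com/bmad-code-org/some-module',
});
// e.g. { channel: 'stable', ref: 'v1.7.0', version: 'v1.7.0', resolvedFallback: false }
// or, when the repo has no pure-semver tags:
//      { channel: 'next', ref: null, version: 'main', resolvedFallback: true, reason: 'no-stable-tags' }

classifyUpgrade('1.6.2', 'v1.7.0'); // 'minor' (coerce() strips the leading v)
classifyUpgrade('1.7.0', '1.6.0');  // 'none' (downgrades are treated as no upgrade)
releaseNotesUrl('https://github.com/bmad-code-org/some-module', 'v1.7.0');
// → 'https://github.com/bmad-code-org/some-module/releases/tag/v1.7.0'
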
@ -4,6 +4,8 @@ const path = require('node:path');
const { execSync } = require('node:child_process');
const prompts = require('../prompts');
const { RegistryClient } = require('./registry-client');
+const { decideChannelForModule } = require('./channel-plan');
+const { parseGitHubRepo, tagExists } = require('./channel-resolver');

const MARKETPLACE_OWNER = 'bmad-code-org';
const MARKETPLACE_REPO = 'bmad-plugins-marketplace';
@ -15,13 +17,39 @@ const MARKETPLACE_REF = 'main';
 * Returns empty results when the registry is unreachable.
 * Community modules are pinned to approved SHA when set; uses HEAD otherwise.
 */
+
+function quoteShellRef(ref) {
+  if (typeof ref !== 'string' || !/^[\w.\-+/]+$/.test(ref)) {
+    throw new Error(`Unsafe ref name: ${JSON.stringify(ref)}`);
+  }
+  return `"${ref}"`;
+}
+
class CommunityModuleManager {
+  // moduleCode → { channel, version, sha, registryApprovedTag, registryApprovedSha, repoUrl, bypassedCurator }
+  // Shared across all instances; the manifest writer often uses a fresh instance.
+  static _resolutions = new Map();
+
+  // moduleCode → ResolvedModule (from PluginResolver) when the cloned repo ships
+  // a `.claude-plugin/marketplace.json`. Lets community installs reuse the same
+  // skill-level install pipeline as custom-source installs (installFromResolution).
+  static _pluginResolutions = new Map();
+
  constructor() {
    this._client = new RegistryClient();
    this._cachedIndex = null;
    this._cachedCategories = null;
  }
+
+  /** Get the most recent channel resolution for a community module. */
+  getResolution(moduleCode) {
+    return CommunityModuleManager._resolutions.get(moduleCode) || null;
+  }
+
+  /** Get the marketplace.json-derived plugin resolution for a community module, if any. */
+  getPluginResolution(moduleCode) {
+    return CommunityModuleManager._pluginResolutions.get(moduleCode) || null;
+  }

  // ─── Data Loading ──────────────────────────────────────────────────────────

  /**
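
The quoteShellRef guard's accept/reject behavior, with illustrative values:

quoteShellRef('v1.2.3');          // → '"v1.2.3"'
quoteShellRef('release/2024.1');  // → '"release/2024.1"' (dots, dashes, slashes allowed)
quoteShellRef('v1"; rm -rf ~');   // → throws: Unsafe ref name (quotes and spaces rejected)
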
@ -196,12 +224,49 @@ class CommunityModuleManager {
      return await prompts.spinner();
    };

-    const sha = moduleInfo.approvedSha;
+    // ─── Resolve channel plan ──────────────────────────────────────────────
+    // Default community behavior (stable channel) honors the curator's
+    // approved SHA. --next=CODE and --pin CODE=TAG override the curator; we
+    // warn the user before bypassing the approved version.
+    const planEntry = decideChannelForModule({
+      code: moduleCode,
+      channelOptions: options.channelOptions,
+      registryDefault: 'stable',
+    });
+
+    const approvedSha = moduleInfo.approvedSha;
+    const approvedTag = moduleInfo.approvedTag;
+
+    let bypassedCurator = false;
+    if (planEntry.channel !== 'stable') {
+      bypassedCurator = true;
+      if (!silent) {
+        const approvedLabel = approvedTag || approvedSha || 'curator-approved version';
+        await prompts.log.warn(
+          `WARNING: Installing '${moduleCode}' from ${
+            planEntry.channel === 'pinned' ? `tag ${planEntry.pin}` : 'main HEAD'
+          } bypasses the curator-approved ${approvedLabel}. Proceed only if you trust this source.`,
+        );
+        if (!options.channelOptions?.acceptBypass) {
+          const proceed = await prompts.confirm({
+            message: `Continue installing '${moduleCode}' with curator bypass?`,
+            default: false,
+          });
+          if (!proceed) {
+            throw new Error(`Install of community module '${moduleCode}' cancelled by user.`);
+          }
+        }
+      }
+    }

    let needsDependencyInstall = false;
    let wasNewClone = false;

    if (await fs.pathExists(moduleCacheDir)) {
-      // Already cloned - update to latest HEAD
+      // Already cloned — refresh to the correct ref for the resolved channel.
+      // A pinned install must not reset to origin/HEAD (it would silently drift
+      // to main on every re-install). Stable + approvedSha is handled below
+      // by the curator-SHA checkout logic.
      const fetchSpinner = await createSpinner();
      fetchSpinner.start(`Checking ${moduleInfo.displayName}...`);
      try {
@ -211,10 +276,24 @@ class CommunityModuleManager {
          stdio: ['ignore', 'pipe', 'pipe'],
          env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
        });
-        execSync('git reset --hard origin/HEAD', {
-          cwd: moduleCacheDir,
-          stdio: ['ignore', 'pipe', 'pipe'],
-        });
+        if (planEntry.channel === 'pinned') {
+          // Fetch the pin tag specifically and check it out.
+          execSync(`git fetch --depth 1 origin ${quoteShellRef(planEntry.pin)} --no-tags`, {
+            cwd: moduleCacheDir,
+            stdio: ['ignore', 'pipe', 'pipe'],
+            env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
+          });
+          execSync('git checkout --quiet FETCH_HEAD', {
+            cwd: moduleCacheDir,
+            stdio: ['ignore', 'pipe', 'pipe'],
+          });
+        } else {
+          // stable (approvedSha path re-checks out below) and next: track main.
+          execSync('git reset --hard origin/HEAD', {
+            cwd: moduleCacheDir,
+            stdio: ['ignore', 'pipe', 'pipe'],
+          });
+        }
        const newRef = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
        if (currentRef !== newRef) needsDependencyInstall = true;
        fetchSpinner.stop(`Verified ${moduleInfo.displayName}`);
@ -231,10 +310,17 @@ class CommunityModuleManager {
      const fetchSpinner = await createSpinner();
      fetchSpinner.start(`Fetching ${moduleInfo.displayName}...`);
      try {
-        execSync(`git clone --depth 1 "${moduleInfo.url}" "${moduleCacheDir}"`, {
-          stdio: ['ignore', 'pipe', 'pipe'],
-          env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
-        });
+        if (planEntry.channel === 'pinned') {
+          execSync(`git clone --depth 1 --branch ${quoteShellRef(planEntry.pin)} "${moduleInfo.url}" "${moduleCacheDir}"`, {
+            stdio: ['ignore', 'pipe', 'pipe'],
+            env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
+          });
+        } else {
+          execSync(`git clone --depth 1 "${moduleInfo.url}" "${moduleCacheDir}"`, {
+            stdio: ['ignore', 'pipe', 'pipe'],
+            env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
+          });
+        }
        fetchSpinner.stop(`Fetched ${moduleInfo.displayName}`);
        needsDependencyInstall = true;
      } catch (error) {
@ -243,18 +329,19 @@ class CommunityModuleManager {
      }
    }

-    // If pinned to a specific SHA, check out that exact commit.
-    // Refuse to install if the approved SHA cannot be reached - security requirement.
-    if (sha) {
+    // ─── Check out the resolved ref per channel ──────────────────────────
+    if (planEntry.channel === 'stable' && approvedSha) {
+      // Default path: pin to the curator-approved SHA. Refuse install if the SHA
+      // is unreachable (tag may have been deleted or rewritten) — security requirement.
      const headSha = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
-      if (headSha !== sha) {
+      if (headSha !== approvedSha) {
        try {
-          execSync(`git fetch --depth 1 origin ${sha}`, {
+          execSync(`git fetch --depth 1 origin ${quoteShellRef(approvedSha)}`, {
            cwd: moduleCacheDir,
            stdio: ['ignore', 'pipe', 'pipe'],
            env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
          });
-          execSync(`git checkout ${sha}`, {
+          execSync(`git checkout ${quoteShellRef(approvedSha)}`, {
            cwd: moduleCacheDir,
            stdio: ['ignore', 'pipe', 'pipe'],
          });
@ -262,12 +349,49 @@ class CommunityModuleManager {
        } catch {
          await fs.remove(moduleCacheDir);
          throw new Error(
-            `Community module '${moduleCode}' could not be pinned to its approved commit (${sha}). ` +
-              `Installation refused for security. The module registry entry may need updating.`,
+            `Community module '${moduleCode}' could not be pinned to its approved commit (${approvedSha}). ` +
+              `Installation refused for security. The module registry entry may need updating, ` +
+              `or use --next=${moduleCode} / --pin ${moduleCode}=<tag> to explicitly bypass.`,
          );
        }
      }
-    }
+    } else if (planEntry.channel === 'stable' && !approvedSha) {
+      // Registry data gap: tag or SHA missing. Warn but proceed at HEAD (pre-existing behavior).
+      if (!silent) {
+        await prompts.log.warn(`Community module '${moduleCode}' has no curator-approved SHA in the registry; installing from main HEAD.`);
+      }
+    } else if (planEntry.channel === 'pinned') {
+      // We cloned the tag directly above (via --branch), but ensure HEAD matches.
+      // No additional checkout needed.
+    }
+    // else: 'next' channel — already at origin/HEAD from the fetch/reset above.
+
+    // Record the resolution so the manifest writer can pick up channel/version/sha.
+    const installedSha = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
+    const recordedVersion =
+      planEntry.channel === 'pinned' ? planEntry.pin : planEntry.channel === 'next' ? 'main' : approvedTag || installedSha.slice(0, 7);
+    CommunityModuleManager._resolutions.set(moduleCode, {
+      channel: planEntry.channel,
+      version: recordedVersion,
+      sha: installedSha,
+      registryApprovedTag: approvedTag || null,
+      registryApprovedSha: approvedSha || null,
+      repoUrl: moduleInfo.url,
+      bypassedCurator,
+      planSource: planEntry.source,
+    });
+
+    // If the repo ships a marketplace.json, route through PluginResolver so the
+    // skill-level install pipeline (installFromResolution) handles the copy.
+    // Repos without marketplace.json fall through to the legacy findModuleSource
+    // path unchanged.
+    await this._tryResolveMarketplacePlugin(moduleCacheDir, moduleInfo, {
+      channel: planEntry.channel,
+      version: recordedVersion,
+      sha: installedSha,
+      approvedTag,
+      approvedSha,
+    });

    // Install dependencies if needed
    const packageJsonPath = path.join(moduleCacheDir, 'package.json');
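
A sketch of how a manifest writer is expected to pick up the recorded resolution (the manifest shape here is an assumption; the field names match the _resolutions entry above):

const manifest = { modules: {} };
const res = new CommunityModuleManager().getResolution('some-module'); // static map, so a fresh instance works
if (res) {
  manifest.modules['some-module'] = {
    channel: res.channel,   // 'stable' | 'next' | 'pinned'
    version: res.version,   // approved tag, 'main', or short SHA
    sha: res.sha,
    bypassedCurator: res.bypassedCurator,
  };
}
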
@ -290,6 +414,204 @@ class CommunityModuleManager {
    return moduleCacheDir;
  }
+
+  // ─── Marketplace.json Resolution ──────────────────────────────────────────
+
+  /**
+   * Detect `.claude-plugin/marketplace.json` in a cloned community repo and
+   * route through PluginResolver. When successful, caches the resolution so
+   * OfficialModulesManager.install() can route the copy through
+   * installFromResolution() — the same path used by custom-source installs.
+   *
+   * Silent no-op when marketplace.json is absent or the resolver returns no
+   * matches; the legacy findModuleSource path then handles the install.
+   *
+   * @param {string} repoPath - Absolute path to the cloned repo
+   * @param {Object} moduleInfo - Normalized community module info
+   * @param {Object} resolution - Resolution metadata from cloneModule
+   * @param {string} resolution.channel - Channel ('stable' | 'next' | 'pinned')
+   * @param {string} resolution.version - Recorded version string
+   * @param {string} resolution.sha - Resolved git SHA
+   * @param {string|null} resolution.approvedTag - Registry approved tag
+   * @param {string|null} resolution.approvedSha - Registry approved SHA
+   */
+  async _tryResolveMarketplacePlugin(repoPath, moduleInfo, resolution) {
+    const marketplacePath = path.join(repoPath, '.claude-plugin', 'marketplace.json');
+    if (!(await fs.pathExists(marketplacePath))) return;
+
+    let marketplaceData;
+    try {
+      marketplaceData = JSON.parse(await fs.readFile(marketplacePath, 'utf8'));
+    } catch {
+      // Malformed marketplace.json — fall through to legacy path.
+      return;
+    }
+
+    const plugins = Array.isArray(marketplaceData?.plugins) ? marketplaceData.plugins : [];
+    if (plugins.length === 0) return;
+
+    const selection = this._selectPluginForModule(plugins, moduleInfo);
+    if (!selection) {
+      await this._safeWarn(
+        `Community module '${moduleInfo.code}' ships marketplace.json but no plugin entry matches the registry code. ` +
+          `Falling back to legacy install path.`,
+      );
+      return;
+    }
+
+    if (selection.source === 'single-fallback') {
+      // Single-entry marketplace.json whose plugin name doesn't match the registry
+      // code or the module_definition hint. Most likely correct, but worth surfacing
+      // in case marketplace.json is misconfigured and we'd install the wrong plugin.
+      await this._safeWarn(
+        `Community module '${moduleInfo.code}' picked the only plugin in marketplace.json ('${selection.plugin?.name}') ` +
+          `because no name or module_definition match was found. Verify marketplace.json if the install looks wrong.`,
+      );
+    }
+
+    const { PluginResolver } = require('./plugin-resolver');
+    const resolver = new PluginResolver();
+    let resolved;
+    try {
+      resolved = await resolver.resolve(repoPath, selection.plugin);
+    } catch (error) {
+      // PluginResolver threw (malformed plugin entry, missing files, etc.).
+      // Honor the silent-fallthrough contract — warn and let the legacy
+      // findModuleSource path handle the install.
+      await this._safeWarn(
+        `PluginResolver failed for community module '${moduleInfo.code}': ${error.message}. ` + `Falling back to legacy install path.`,
+      );
+      return;
+    }
+    if (!resolved || resolved.length === 0) return;
+
+    // The registry registers a single code per module. If the resolver returns
+    // multiple modules (Strategy 4: multiple standalone skills), accept only
+    // the entry whose code matches the registry. Other entries are ignored —
+    // they belong to plugins not registered in the community catalog.
+    const matched = resolved.find((mod) => mod.code === moduleInfo.code) || (resolved.length === 1 ? resolved[0] : null);
+    if (!matched) return;
+
+    // Shallow-clone before stamping provenance — the resolver may cache or reuse
+    // its return objects, and we don't want install-specific fields leaking back.
+    const stamped = {
+      ...matched,
+      code: moduleInfo.code,
+      repoUrl: moduleInfo.url,
+      cloneRef: resolution.channel === 'pinned' ? resolution.version : resolution.approvedTag || null,
+      cloneSha: resolution.sha,
+      communitySource: true,
+      communityChannel: resolution.channel,
+      communityVersion: resolution.version,
+      registryApprovedTag: resolution.approvedTag,
+      registryApprovedSha: resolution.approvedSha,
+    };
+
+    CommunityModuleManager._pluginResolutions.set(moduleInfo.code, stamped);
+  }
+
+  /**
+   * Lazy fallback: resolve marketplace.json straight from the on-disk cache
+   * when `_pluginResolutions` is empty (e.g. callers that reach `install()`
+   * without `cloneModule` having populated the cache earlier in this process).
+   *
+   * Reuses an existing channel resolution if present; otherwise synthesizes a
+   * minimal stable-channel stub from the registry entry + the cached repo's
|
||||||
|
* current HEAD. Returns the cached plugin resolution if one is produced,
|
||||||
|
* otherwise null (caller falls back to the legacy path).
|
||||||
|
*
|
||||||
|
* @param {string} moduleCode
|
||||||
|
* @returns {Promise<Object|null>}
|
||||||
|
*/
|
||||||
|
async resolveFromCache(moduleCode) {
|
||||||
|
const existing = this.getPluginResolution(moduleCode);
|
||||||
|
if (existing) return existing;
|
||||||
|
|
||||||
|
const cacheRepoDir = path.join(this.getCacheDir(), moduleCode);
|
||||||
|
const marketplacePath = path.join(cacheRepoDir, '.claude-plugin', 'marketplace.json');
|
||||||
|
if (!(await fs.pathExists(marketplacePath))) return null;
|
||||||
|
|
||||||
|
let moduleInfo;
|
||||||
|
try {
|
||||||
|
moduleInfo = await this.getModuleByCode(moduleCode);
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
if (!moduleInfo) return null;
|
||||||
|
|
||||||
|
let channelResolution = this.getResolution(moduleCode);
|
||||||
|
if (!channelResolution) {
|
||||||
|
let sha = '';
|
||||||
|
try {
|
||||||
|
sha = execSync('git rev-parse HEAD', { cwd: cacheRepoDir, stdio: 'pipe' }).toString().trim();
|
||||||
|
} catch {
|
||||||
|
// Not a git repo or unreadable — give up and let the legacy path run.
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
channelResolution = {
|
||||||
|
channel: 'stable',
|
||||||
|
version: moduleInfo.approvedTag || sha.slice(0, 7),
|
||||||
|
sha,
|
||||||
|
registryApprovedTag: moduleInfo.approvedTag || null,
|
||||||
|
registryApprovedSha: moduleInfo.approvedSha || null,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
await this._tryResolveMarketplacePlugin(cacheRepoDir, moduleInfo, {
|
||||||
|
channel: channelResolution.channel,
|
||||||
|
version: channelResolution.version,
|
||||||
|
sha: channelResolution.sha,
|
||||||
|
approvedTag: channelResolution.registryApprovedTag,
|
||||||
|
approvedSha: channelResolution.registryApprovedSha,
|
||||||
|
});
|
||||||
|
|
||||||
|
return this.getPluginResolution(moduleCode);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Best-effort warning emitter. `prompts.log.warn` may be undefined in some
|
||||||
|
* harnesses and may return a rejected promise — swallow both cases so a
|
||||||
|
* fallthrough warning can never crash the install.
|
||||||
|
*/
|
||||||
|
async _safeWarn(message) {
|
||||||
|
try {
|
||||||
|
const result = prompts.log?.warn?.(message);
|
||||||
|
if (result && typeof result.then === 'function') await result;
|
||||||
|
} catch {
|
||||||
|
/* ignore */
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Pick which plugin entry from marketplace.json represents this community module.
|
||||||
|
* Precedence:
|
||||||
|
* 1. Exact match on `plugin.name === moduleInfo.code`
|
||||||
|
* 2. Trailing directory of `module_definition` matches `plugin.name`
|
||||||
|
* 3. Single plugin in marketplace.json — accepted with a warning so a
|
||||||
|
* mismatched-but-uniquely-named plugin doesn't install silently.
|
||||||
|
* Otherwise null (caller falls back to legacy path).
|
||||||
|
*
|
||||||
|
* @returns {{plugin: Object, source: 'name'|'hint'|'single-fallback'}|null}
|
||||||
|
*/
|
||||||
|
_selectPluginForModule(plugins, moduleInfo) {
|
||||||
|
const byCode = plugins.find((p) => p && p.name === moduleInfo.code);
|
||||||
|
if (byCode) return { plugin: byCode, source: 'name' };
|
||||||
|
|
||||||
|
if (moduleInfo.moduleDefinition) {
|
||||||
|
// module_definition like "src/skills/suno-setup/assets/module.yaml" →
|
||||||
|
// hint segment "suno-setup". Match that against plugin names.
|
||||||
|
const segments = moduleInfo.moduleDefinition.split('/').filter(Boolean);
|
||||||
|
const setupIdx = segments.findIndex((s) => s.endsWith('-setup'));
|
||||||
|
if (setupIdx !== -1) {
|
||||||
|
const hint = segments[setupIdx];
|
||||||
|
const byHint = plugins.find((p) => p && p.name === hint);
|
||||||
|
if (byHint) return { plugin: byHint, source: 'hint' };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (plugins.length === 1) return { plugin: plugins[0], source: 'single-fallback' };
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
// ─── Source Finding ───────────────────────────────────────────────────────
|
// ─── Source Finding ───────────────────────────────────────────────────────
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
||||||
|
|
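Reviewer note: the selection precedence implemented above, reduced to a runnable sketch. The plugins array and module info are hypothetical, and selectPlugin mirrors _selectPluginForModule without being the shipped code.

// Standalone mirror of the precedence: exact name, then module_definition hint,
// then single-entry fallback, else null.
function selectPlugin(plugins, moduleInfo) {
  const byCode = plugins.find((p) => p && p.name === moduleInfo.code);
  if (byCode) return { plugin: byCode, source: 'name' };
  if (moduleInfo.moduleDefinition) {
    const hint = moduleInfo.moduleDefinition.split('/').filter(Boolean).find((s) => s.endsWith('-setup'));
    const byHint = hint && plugins.find((p) => p && p.name === hint);
    if (byHint) return { plugin: byHint, source: 'hint' };
  }
  if (plugins.length === 1) return { plugin: plugins[0], source: 'single-fallback' };
  return null;
}

const plugins = [{ name: 'suno-setup' }, { name: 'other-plugin' }];
console.log(selectPlugin(plugins, { code: 'suno', moduleDefinition: 'src/skills/suno-setup/assets/module.yaml' }).source);
// 'hint' (registry code differs, but the module_definition segment matches)
console.log(selectPlugin(plugins, { code: 'unknown' }));
// null (two candidates, no match: caller falls back to the legacy path)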
@@ -4,6 +4,13 @@ const path = require('node:path');
 const { execSync } = require('node:child_process');
 const prompts = require('../prompts');

+function quoteCustomRef(ref) {
+  if (typeof ref !== 'string' || !/^[\w.\-+/]+$/.test(ref)) {
+    throw new Error(`Unsafe ref name: ${JSON.stringify(ref)}`);
+  }
+  return `"${ref}"`;
+}
+
 /**
  * Manages custom modules installed from user-provided sources.
  * Supports any Git host (GitHub, GitLab, Bitbucket, self-hosted) and local file paths.

@@ -17,8 +24,9 @@ class CustomModuleManager {

   /**
    * Parse a user-provided source input into a structured descriptor.
-   * Accepts local file paths, HTTPS Git URLs, and SSH Git URLs.
-   * For HTTPS URLs with deep paths (e.g., /tree/main/subdir), extracts the subdir.
+   * Accepts local file paths, HTTPS Git URLs, HTTP Git URLs, and SSH Git URLs.
+   * For HTTPS/HTTP URLs with deep paths (e.g., /tree/main/subdir), extracts the subdir.
+   * The original protocol (http or https) is preserved in the returned cloneUrl.
    *
    * @param {string} input - URL or local file path
    * @returns {Object} Parsed source descriptor:

@@ -38,8 +46,8 @@ class CustomModuleManager {
       };
     }

-    const trimmed = input.trim();
-    if (!trimmed) {
+    const trimmedRaw = input.trim();
+    if (!trimmedRaw) {
       return {
         type: null,
         cloneUrl: null,

@@ -52,8 +60,53 @@ class CustomModuleManager {
       };
     }

+    // Extract optional @<tag-or-branch> suffix from the end of the input.
+    // Semver-valid characters: letters, digits, dot, hyphen, underscore, plus, slash.
+    // Raw commit SHAs are NOT supported here — `git clone --branch` can't take
+    // them; use --pin at the module level or check out the SHA manually.
+    // Only strip when the tail looks like a ref, so we don't disturb
+    // URLs without a version spec or the SSH protocol's `git@host:...` prefix.
+    let trimmed = trimmedRaw;
+    let versionSuffix = null;
+    const lastAt = trimmedRaw.lastIndexOf('@');
+    // Skip if @ is part of git@github.com:... (first char cannot be stripped as version)
+    // and skip if @ appears before the path rather than after a ref-shaped tail.
+    if (lastAt > 0) {
+      const candidate = trimmedRaw.slice(lastAt + 1);
+      const before = trimmedRaw.slice(0, lastAt);
+      // candidate must be ref-shaped and must not itself look like a URL / SSH host
+      if (/^[\w.\-+/]+$/.test(candidate) && !candidate.includes(':')) {
+        // Avoid consuming the @ in `git@host:owner/repo` — `before` wouldn't end with a path separator
+        // in that case. Require that the @ comes after the host/path, not inside the auth segment.
+        // Rule: the @ is a version suffix only if `before` looks like a complete URL or local path.
+        const beforeLooksLikeRepo =
+          before.startsWith('/') ||
+          before.startsWith('./') ||
+          before.startsWith('../') ||
+          before.startsWith('~') ||
+          /^https?:\/\//i.test(before) ||
+          /^git@[^:]+:.+/.test(before);
+        if (beforeLooksLikeRepo) {
+          versionSuffix = candidate;
+          trimmed = before;
+        }
+      }
+    }
+
     // Local path detection: starts with /, ./, ../, or ~
     if (trimmed.startsWith('/') || trimmed.startsWith('./') || trimmed.startsWith('../') || trimmed.startsWith('~')) {
+      if (versionSuffix) {
+        return {
+          type: 'local',
+          cloneUrl: null,
+          subdir: null,
+          localPath: null,
+          cacheKey: null,
+          displayName: null,
+          isValid: false,
+          error: 'Local paths do not support @version suffixes',
+        };
+      }
       return this._parseLocalPath(trimmed);
     }

@@ -66,6 +119,8 @@ class CustomModuleManager {
         cloneUrl: trimmed,
         subdir: null,
         localPath: null,
+        version: versionSuffix || null,
+        rawInput: trimmedRaw,
         cacheKey: `${host}/${owner}/${repo}`,
         displayName: `${owner}/${repo}`,
         isValid: true,
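Reviewer note: to make the @version rules above concrete, a minimal re-derivation with expected outcomes. This is illustrative only; the shipped logic is the block above, and splitVersionSuffix is a hypothetical name.

// Minimal re-derivation of the suffix rule.
function splitVersionSuffix(input) {
  const lastAt = input.lastIndexOf('@');
  if (lastAt <= 0) return { base: input, version: null };
  const candidate = input.slice(lastAt + 1);
  const before = input.slice(0, lastAt);
  const refShaped = /^[\w.\-+/]+$/.test(candidate) && !candidate.includes(':');
  const repoShaped =
    /^https?:\/\//i.test(before) || /^git@[^:]+:.+/.test(before) ||
    ['/', './', '../', '~'].some((p) => before.startsWith(p));
  return refShaped && repoShaped ? { base: before, version: candidate } : { base: input, version: null };
}

console.log(splitVersionSuffix('https://github.com/acme/tool@v1.2.0'));
// { base: 'https://github.com/acme/tool', version: 'v1.2.0' }
console.log(splitVersionSuffix('git@github.com:acme/tool.git'));
// { base: 'git@github.com:acme/tool.git', version: null } (the @ is the SSH auth segment)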
@@ -73,41 +128,103 @@ class CustomModuleManager {
       };
     }

-    // HTTPS URL: https://host/owner/repo[/tree/branch/subdir][.git]
-    const httpsMatch = trimmed.match(/^https?:\/\/([^/]+)\/([^/]+)\/([^/.]+?)(?:\.git)?(\/.*)?$/);
-    if (httpsMatch) {
-      const [, host, owner, repo, remainder] = httpsMatch;
-      const cloneUrl = `https://${host}/${owner}/${repo}`;
-      let subdir = null;
+    // HTTPS/HTTP URL: generic handling for any Git host.
+    // We avoid host-specific parsing — `git clone` will accept whatever URL the
+    // user provides. We only need to (a) separate an optional browser-style
+    // subdir suffix from the clone URL, (b) extract any embedded ref
+    // (branch/tag) from deep-path URLs, and (c) derive a cache key / display
+    // name from the path. The original protocol (http or https) is preserved.
+    if (/^https?:\/\//i.test(trimmed)) {
+      let url;
+      try {
+        url = new URL(trimmed);
+      } catch {
+        url = null;
+      }

-      if (remainder) {
-        // Extract subdir from deep path patterns used by various Git hosts
+      if (url && url.host) {
+        const host = url.host;
+        let repoPath = url.pathname.replace(/^\/+/, '').replace(/\/+$/, '');
+        let subdir = null;
+        let urlRef = null; // branch/tag/commit extracted from deep-path URLs
+
+        // Detect browser-style deep-path patterns that embed a ref
+        // (branch/tag/commit) and optional subdirectory. These appear
+        // across many hosts:
+        //   GitHub  /<repo>/tree|blob/<ref>[/<subdir>]
+        //   GitLab  /<repo>/-/tree|blob/<ref>[/<subdir>]
+        //   Gitea   /<repo>/src/<ref>[/<subdir>]
+        //   Gitea   /<repo>/src/(branch|commit|tag)/<ref>[/<subdir>]
+        // Group 1 = repo path prefix, Group 2 = ref, Group 3 = subdir (optional).
         const deepPathPatterns = [
-          /^\/(?:-\/)?tree\/[^/]+\/(.+)$/, // GitHub /tree/branch/path, GitLab /-/tree/branch/path
-          /^\/(?:-\/)?blob\/[^/]+\/(.+)$/, // /blob/branch/path (treat same as tree)
-          /^\/src\/[^/]+\/(.+)$/, // Gitea/Forgejo /src/branch/path
+          /^(.+?)\/(?:-\/)?(?:tree|blob)\/([^/]+)(?:\/(.+))?$/,
+          /^(.+?)\/src\/(?:branch\/|commit\/|tag\/)?([^/]+)(?:\/(.+))?$/,
         ];

         for (const pattern of deepPathPatterns) {
-          const match = remainder.match(pattern);
+          const match = repoPath.match(pattern);
           if (match) {
-            subdir = match[1].replace(/\/$/, ''); // strip trailing slash
+            repoPath = match[1];
+            if (match[2]) urlRef = match[2];
+            if (match[3]) {
+              const cleaned = match[3].replace(/\/+$/, '');
+              if (cleaned) subdir = cleaned;
+            }
             break;
           }
         }
-      }

+        // Some hosts use ?path=/subdir on browse links to point at a file or
+        // directory. Honor it when no deep-path marker matched above.
+        if (!subdir) {
+          const pathParam = url.searchParams.get('path');
+          if (pathParam) {
+            const cleaned = pathParam.replace(/^\/+/, '').replace(/\/+$/, '');
+            if (cleaned) subdir = cleaned;
+          }
+        }
+
+        // Strip a single trailing .git for a stable cacheKey/displayName.
+        const repoPathClean = repoPath.replace(/\.git$/i, '');
+        if (!repoPathClean) {
+          return {
+            type: null,
+            cloneUrl: null,
+            subdir: null,
+            localPath: null,
+            cacheKey: null,
+            displayName: null,
+            isValid: false,
+            error: 'Not a valid Git URL or local path',
+          };
+        }
+
+        const cloneUrl = `${url.protocol}//${host}/${repoPathClean}`;
+        const cacheKey = `${host}/${repoPathClean}`;
+
+        // Display name: prefer "<owner>/<repo>" using the last two meaningful
+        // path segments.
+        const segments = repoPathClean.split('/').filter(Boolean);
+        const repoSeg = segments.at(-1);
+        const ownerSeg = segments.at(-2);
+        const displayName = ownerSeg ? `${ownerSeg}/${repoSeg}` : repoSeg;
+
+        // Precedence: explicit @version suffix > URL /tree/<ref> path segment.
+        const version = versionSuffix || urlRef || null;
+
         return {
           type: 'url',
           cloneUrl,
           subdir,
           localPath: null,
-          cacheKey: `${host}/${owner}/${repo}`,
-          displayName: `${owner}/${repo}`,
+          version,
+          rawInput: trimmedRaw,
+          cacheKey,
+          displayName,
           isValid: true,
           error: null,
         };
       }
+    }

     return {
       type: null,

@@ -239,7 +356,7 @@ class CustomModuleManager {
   /**
    * Clone a custom module repository to cache.
    * Supports any Git host (GitHub, GitLab, Bitbucket, self-hosted, etc.).
-   * @param {string} sourceInput - Git URL (HTTPS or SSH)
+   * @param {string} sourceInput - Git URL (HTTPS, HTTP, or SSH)
    * @param {Object} [options] - Clone options
    * @param {boolean} [options.silent] - Suppress spinner output
    * @param {boolean} [options.skipInstall] - Skip npm install (for browsing before user confirms)

@@ -255,6 +372,10 @@ class CustomModuleManager {
     const silent = options.silent || false;
     const displayName = parsed.displayName;

+    // Pin override: --pin CODE=TAG resolved at module-selection time overrides
+    // any @version suffix present in the URL.
+    const effectiveVersion = options.pinOverride || parsed.version || null;
+
     await fs.ensureDir(path.dirname(repoCacheDir));

     const createSpinner = async () => {

@@ -264,8 +385,23 @@ class CustomModuleManager {
       return await prompts.spinner();
     };

+    // If an existing cache exists but was cloned at a different version, re-clone.
+    // Tracked via .bmad-source.json's recorded version.
     if (await fs.pathExists(repoCacheDir)) {
-      // Update existing clone
+      let cachedVersion = null;
+      try {
+        const existing = await fs.readJson(path.join(repoCacheDir, '.bmad-source.json'));
+        cachedVersion = existing?.version || null;
+      } catch {
+        // no metadata; treat as mismatched to be safe if a version was requested
+      }
+      if ((effectiveVersion || null) !== (cachedVersion || null)) {
+        await fs.remove(repoCacheDir);
+      }
+    }
+
+    if (await fs.pathExists(repoCacheDir)) {
+      // Update existing clone (same version as before)
       const fetchSpinner = await createSpinner();
       fetchSpinner.start(`Updating ${displayName}...`);
       try {

@@ -274,10 +410,25 @@ class CustomModuleManager {
           stdio: ['ignore', 'pipe', 'pipe'],
           env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
         });
+        if (effectiveVersion) {
+          // Fetch the ref as either a tag or a branch — `origin <ref>` works
+          // for both, whereas `origin tag <ref>` fails for branch refs parsed
+          // out of /tree/<branch>/... URLs.
+          execSync(`git fetch --depth 1 origin ${quoteCustomRef(effectiveVersion)} --no-tags`, {
+            cwd: repoCacheDir,
+            stdio: ['ignore', 'pipe', 'pipe'],
+            env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
+          });
+          execSync(`git checkout --quiet FETCH_HEAD`, {
+            cwd: repoCacheDir,
+            stdio: ['ignore', 'pipe', 'pipe'],
+          });
+        } else {
           execSync('git reset --hard origin/HEAD', {
             cwd: repoCacheDir,
             stdio: ['ignore', 'pipe', 'pipe'],
           });
+        }
         fetchSpinner.stop(`Updated ${displayName}`);
       } catch {
         fetchSpinner.error(`Update failed, re-downloading ${displayName}`);

@@ -287,25 +438,44 @@ class CustomModuleManager {

     if (!(await fs.pathExists(repoCacheDir))) {
       const fetchSpinner = await createSpinner();
-      fetchSpinner.start(`Cloning ${displayName}...`);
+      fetchSpinner.start(`Cloning ${displayName}${effectiveVersion ? ` @ ${effectiveVersion}` : ''}...`);
       try {
+        if (effectiveVersion) {
+          execSync(`git clone --depth 1 --branch ${quoteCustomRef(effectiveVersion)} "${parsed.cloneUrl}" "${repoCacheDir}"`, {
+            stdio: ['ignore', 'pipe', 'pipe'],
+            env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
+          });
+        } else {
           execSync(`git clone --depth 1 "${parsed.cloneUrl}" "${repoCacheDir}"`, {
             stdio: ['ignore', 'pipe', 'pipe'],
             env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
           });
+        }
         fetchSpinner.stop(`Cloned ${displayName}`);
       } catch (error_) {
         fetchSpinner.error(`Failed to clone ${displayName}`);
-        throw new Error(`Failed to clone ${parsed.cloneUrl}: ${error_.message}`);
+        const refSuffix = effectiveVersion ? `@${effectiveVersion}` : '';
+        throw new Error(`Failed to clone ${parsed.cloneUrl}${refSuffix}: ${error_.message}`);
       }
     }

+    // Record the resolved SHA for the manifest writer.
+    let resolvedSha = null;
+    try {
+      resolvedSha = execSync('git rev-parse HEAD', { cwd: repoCacheDir, stdio: 'pipe' }).toString().trim();
+    } catch {
+      // swallow — a non-git repo (local path) wouldn't reach here anyway
+    }
+
     // Write source metadata for later URL reconstruction
     const metadataPath = path.join(repoCacheDir, '.bmad-source.json');
     await fs.writeJson(metadataPath, {
       cloneUrl: parsed.cloneUrl,
       cacheKey: parsed.cacheKey,
       displayName: parsed.displayName,
+      version: effectiveVersion || null,
+      rawInput: parsed.rawInput || sourceInput,
+      sha: resolvedSha,
       clonedAt: new Date().toISOString(),
     });

@@ -346,10 +516,26 @@ class CustomModuleManager {
     const resolver = new PluginResolver();
     const resolved = await resolver.resolve(repoPath, plugin);

+    // Read clone metadata (written by cloneRepo) so we can pick up the
+    // resolved git ref + SHA for manifest recording.
+    let cloneMetadata = null;
+    if (sourceUrl) {
+      try {
+        cloneMetadata = await fs.readJson(path.join(repoPath, '.bmad-source.json'));
+      } catch {
+        // no metadata — local-source or legacy cache
+      }
+    }
+
     // Stamp source info onto each resolved module for manifest tracking
     for (const mod of resolved) {
       if (sourceUrl) mod.repoUrl = sourceUrl;
       if (localPath) mod.localPath = localPath;
+      if (cloneMetadata) {
+        mod.cloneRef = cloneMetadata.version || null;
+        mod.cloneSha = cloneMetadata.sha || null;
+        mod.rawInput = cloneMetadata.rawInput || null;
+      }
       CustomModuleManager._resolutionCache.set(mod.code, mod);
     }
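Reviewer note: the two generalized deep-path patterns in parseSource above, applied to sample browse URLs. A sketch using hypothetical repos, not output from the installer itself.

const patterns = [
  /^(.+?)\/(?:-\/)?(?:tree|blob)\/([^/]+)(?:\/(.+))?$/,
  /^(.+?)\/src\/(?:branch\/|commit\/|tag\/)?([^/]+)(?:\/(.+))?$/,
];

for (const sample of [
  'acme/tool/tree/main/packages/cli', // GitHub
  'group/sub/tool/-/tree/v2.1.0',     // GitLab (nested group, tag, no subdir)
  'acme/tool/src/branch/dev/docs',    // Gitea/Forgejo
]) {
  const match = patterns.map((p) => sample.match(p)).find(Boolean);
  console.log(sample, '->', { repoPath: match[1], ref: match[2], subdir: match[3] ?? null });
}
// acme/tool/tree/main/packages/cli -> { repoPath: 'acme/tool', ref: 'main', subdir: 'packages/cli' }
// group/sub/tool/-/tree/v2.1.0     -> { repoPath: 'group/sub/tool', ref: 'v2.1.0', subdir: null }
// acme/tool/src/branch/dev/docs    -> { repoPath: 'acme/tool', ref: 'dev', subdir: 'docs' }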
@@ -5,6 +5,46 @@ const { execSync } = require('node:child_process');
 const yaml = require('yaml');
 const prompts = require('../prompts');
 const { RegistryClient } = require('./registry-client');
+const { resolveChannel, tagExists, parseGitHubRepo } = require('./channel-resolver');
+const { decideChannelForModule } = require('./channel-plan');
+
+const VALID_CHANNELS = new Set(['stable', 'next', 'pinned']);
+
+function normalizeChannelName(raw) {
+  if (typeof raw !== 'string') return null;
+  const lower = raw.trim().toLowerCase();
+  return VALID_CHANNELS.has(lower) ? lower : null;
+}
+
+/**
+ * Conservative quoting for tag names passed to git commands. Tags are
+ * user-typed (--pin) or come from the GitHub API. Only allow the semver
+ * character class we use to tag BMad releases; anything else throws.
+ */
+function quoteShell(ref) {
+  if (typeof ref !== 'string' || !/^[\w.\-+/]+$/.test(ref)) {
+    throw new Error(`Unsafe ref name: ${JSON.stringify(ref)}`);
+  }
+  return `"${ref}"`;
+}
+
+async function readChannelMarker(markerPath) {
+  try {
+    if (!(await fs.pathExists(markerPath))) return null;
+    const content = await fs.readFile(markerPath, 'utf8');
+    return JSON.parse(content);
+  } catch {
+    return null;
+  }
+}
+
+async function writeChannelMarker(markerPath, data) {
+  try {
+    await fs.writeFile(markerPath, JSON.stringify({ ...data, writtenAt: new Date().toISOString() }, null, 2));
+  } catch {
+    // Best-effort: marker is an optimization, not a correctness requirement.
+  }
+}
+
 const MARKETPLACE_OWNER = 'bmad-code-org';
 const MARKETPLACE_REPO = 'bmad-plugins-marketplace';

@@ -19,10 +59,25 @@ const FALLBACK_CONFIG_PATH = path.join(__dirname, 'registry-fallback.yaml');
  * @class ExternalModuleManager
  */
 class ExternalModuleManager {
+  // moduleCode → { channel, version, ref, sha, repoUrl, resolvedFallback }
+  // Populated when cloneExternalModule resolves a channel. Shared across all
+  // instances so the manifest writer (which often instantiates a fresh
+  // ExternalModuleManager) sees resolutions made during install.
+  static _resolutions = new Map();
+
   constructor() {
     this._client = new RegistryClient();
   }

+  /**
+   * Get the most recent channel resolution for a module (if any).
+   * @param {string} moduleCode
+   * @returns {Object|null}
+   */
+  getResolution(moduleCode) {
+    return ExternalModuleManager._resolutions.get(moduleCode) || null;
+  }
+
   /**
    * Load the official modules registry from GitHub, falling back to the
    * bundled YAML file if the fetch fails.

@@ -75,6 +130,7 @@ class ExternalModuleManager {
       defaultSelected: mod.default_selected === true || mod.defaultSelected === true,
       type: mod.type || 'bmad-org',
       npmPackage: mod.npm_package || mod.npmPackage || null,
+      defaultChannel: normalizeChannelName(mod.default_channel || mod.defaultChannel) || 'stable',
       builtIn: mod.built_in === true,
       isExternal: mod.built_in !== true,
     };

@@ -120,10 +176,15 @@ class ExternalModuleManager {
   }

   /**
-   * Clone an external module repository to cache
+   * Clone an external module repository to cache, resolving the requested
+   * channel (stable / next / pinned) to a concrete git ref.
+   *
    * @param {string} moduleCode - Code of the external module
    * @param {Object} options - Clone options
-   * @param {boolean} options.silent - Suppress spinner output
+   * @param {boolean} [options.silent] - Suppress spinner output
+   * @param {Object} [options.channelOptions] - Parsed channel flags. See
+   *   modules/channel-plan.js. When absent, the module installs on its
+   *   registry-declared default channel (typically 'stable').
    * @returns {string} Path to the cloned repository
    */
   async cloneExternalModule(moduleCode, options = {}) {

@@ -161,18 +222,132 @@ class ExternalModuleManager {
       return await prompts.spinner();
     };

-    // Track if we need to install dependencies
+    // ─── Resolve channel plan ─────────────────────────────────────────────
+    // Post-install callers (config generation, directory setup, help catalog
+    // rebuild) invoke findModuleSource/cloneExternalModule without
+    // channelOptions just to locate the module's files. Those calls must not
+    // redecide the channel — the install step already chose one, cloned the
+    // right ref, and recorded a resolution. If we re-resolve without flags,
+    // we'd snap back to stable and overwrite a pinned install.
+    const hasExplicitChannelInput =
+      options.channelOptions &&
+      (options.channelOptions.global ||
+        (options.channelOptions.nextSet && options.channelOptions.nextSet.size > 0) ||
+        (options.channelOptions.pins && options.channelOptions.pins.size > 0));
+    const existingResolution = ExternalModuleManager._resolutions.get(moduleCode);
+    const haveUsableCache = await fs.pathExists(moduleCacheDir);
+
+    if (!hasExplicitChannelInput && existingResolution && haveUsableCache) {
+      // This is a look-up only; the module is already installed at its chosen
+      // ref. Skip cloning and return the cached path unchanged.
+      return moduleCacheDir;
+    }
+
+    const planEntry = decideChannelForModule({
+      code: moduleCode,
+      channelOptions: options.channelOptions,
+      registryDefault: moduleInfo.defaultChannel,
+    });
+
+    // Same-plan short-circuit: a single install calls cloneExternalModule
+    // several times (config collection, directory setup, help-catalog rebuild)
+    // with the same channelOptions. The first call resolves + clones; later
+    // calls with an identical plan and a valid cache should return immediately
+    // instead of re-running resolveChannel() and `git fetch` (slow; can fail
+    // on flaky networks even though the tagCache dedupes the GitHub API hit).
+    if (existingResolution && haveUsableCache && existingResolution.channel === planEntry.channel) {
+      const samePin = planEntry.channel !== 'pinned' || existingResolution.version === planEntry.pin;
+      if (samePin) return moduleCacheDir;
+    }
+
+    let resolved;
+    try {
+      resolved = await resolveChannel({
+        channel: planEntry.channel,
+        pin: planEntry.pin,
+        repoUrl: moduleInfo.url,
+      });
+    } catch (error) {
+      // Tag-API failure (rate limit, transient network). If we already have
+      // a usable cache at a recorded ref, treat this as "couldn't check for
+      // updates" and re-use the cached version silently — that's the right
+      // call for an update/quick-update, since the semantics don't change
+      // and the user isn't worse off than before they ran this command.
+      const cachedMarker = await readChannelMarker(path.join(moduleCacheDir, '.bmad-channel.json'));
+      if (cachedMarker?.channel && (await fs.pathExists(moduleCacheDir))) {
+        if (!silent) {
+          await prompts.log.warn(
+            `Could not check for updates to ${moduleInfo.name} (${error.message}); using cached ${cachedMarker.version || cachedMarker.channel}.`,
+          );
+        }
+        ExternalModuleManager._resolutions.set(moduleCode, {
+          channel: cachedMarker.channel,
+          version: cachedMarker.version || 'main',
+          ref: cachedMarker.version && cachedMarker.version !== 'main' ? cachedMarker.version : null,
+          sha: cachedMarker.sha,
+          repoUrl: moduleInfo.url,
+          resolvedFallback: false,
+          planSource: 'cached',
+        });
+        return moduleCacheDir;
+      }
+      // No cache to fall back on — this is effectively a fresh install with
+      // no offline safety net. Surface a clear error with actionable guidance.
+      const isRateLimited = /rate limit/i.test(error.message);
+      const hint = isRateLimited
+        ? process.env.GITHUB_TOKEN
+          ? 'Your GITHUB_TOKEN may have expired or been rate-limited on its own budget. Try a different token or wait for the reset.'
+          : 'Set a GITHUB_TOKEN env var (any personal access token with public-repo read) to raise the 60-req/hour anonymous limit.'
+        : `Check your network connection, or rerun with \`--next=${moduleCode}\` / \`--pin ${moduleCode}=<tag>\` to skip the tag lookup.`;
+      throw new Error(`Could not resolve stable tag for '${moduleCode}' (${error.message}). ${hint}`);
+    }
+
+    if (resolved.resolvedFallback && !silent) {
+      if (resolved.reason === 'no-stable-tags') {
+        await prompts.log.warn(`No stable releases found for ${moduleInfo.name}; installing from main.`);
+      } else if (resolved.reason === 'not-a-github-url') {
+        await prompts.log.warn(`Cannot determine stable tags for ${moduleInfo.name} (non-GitHub URL); installing from main.`);
+      }
+    }
+
+    // Validate pin before we burn time cloning. Best-effort: skip on non-GitHub URLs.
+    if (planEntry.channel === 'pinned') {
+      const parsed = parseGitHubRepo(moduleInfo.url);
+      if (parsed) {
+        try {
+          const exists = await tagExists(parsed.owner, parsed.repo, planEntry.pin);
+          if (!exists) {
+            throw new Error(`Tag '${planEntry.pin}' not found in ${parsed.owner}/${parsed.repo}.`);
+          }
+        } catch (error) {
+          if (error.message?.includes('not found')) throw error;
+          // Network hiccup on tag verification — let the clone attempt fail clearly.
+        }
+      }
+    }
+
+    // ─── Clone or update cache by resolved channel ────────────────────────
+    const markerPath = path.join(moduleCacheDir, '.bmad-channel.json');
+    const currentMarker = await readChannelMarker(markerPath);
+    const needsChannelReset = currentMarker && currentMarker.channel !== resolved.channel;
+
     let needsDependencyInstall = false;
     let wasNewClone = false;

-    // Check if already cloned
+    if (needsChannelReset && (await fs.pathExists(moduleCacheDir))) {
+      // Channel changed (e.g. user switched stable→next). Blow away and re-clone
+      // to avoid tangling shallow clones of different refs.
+      await fs.remove(moduleCacheDir);
+    }
+
     if (await fs.pathExists(moduleCacheDir)) {
-      // Try to update if it's a git repo
+      // Cache exists on the right channel. Refresh the ref.
       const fetchSpinner = await createSpinner();
       fetchSpinner.start(`Fetching ${moduleInfo.name}...`);
       try {
-        const currentRef = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
-        // Fetch and reset to remote - works better with shallow clones than pull
+        const currentSha = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
+
+        if (resolved.channel === 'next') {
           execSync('git fetch origin --depth 1', {
             cwd: moduleCacheDir,
             stdio: ['ignore', 'pipe', 'pipe'],

@@ -183,16 +358,24 @@ class ExternalModuleManager {
           stdio: ['ignore', 'pipe', 'pipe'],
           env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
         });
-        const newRef = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
-
-        fetchSpinner.stop(`Fetched ${moduleInfo.name}`);
-        // Force dependency install if we got new code
-        if (currentRef !== newRef) {
-          needsDependencyInstall = true;
+        } else {
+          // stable or pinned — fetch the specific tag and check it out.
+          execSync(`git fetch --depth 1 origin tag ${quoteShell(resolved.ref)} --no-tags`, {
+            cwd: moduleCacheDir,
+            stdio: ['ignore', 'pipe', 'pipe'],
+            env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
+          });
+          execSync(`git checkout --quiet FETCH_HEAD`, {
+            cwd: moduleCacheDir,
+            stdio: ['ignore', 'pipe', 'pipe'],
+          });
         }
+
+        const newSha = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
+        fetchSpinner.stop(`Fetched ${moduleInfo.name}`);
+        if (currentSha !== newSha) needsDependencyInstall = true;
       } catch {
         fetchSpinner.error(`Fetch failed, re-downloading ${moduleInfo.name}`);
-        // If update fails, remove and re-clone
         await fs.remove(moduleCacheDir);
         wasNewClone = true;
       }

@@ -200,22 +383,41 @@ class ExternalModuleManager {
       wasNewClone = true;
     }

-    // Clone if not exists or was removed
     if (wasNewClone) {
       const fetchSpinner = await createSpinner();
       fetchSpinner.start(`Fetching ${moduleInfo.name}...`);
       try {
+        if (resolved.channel === 'next') {
           execSync(`git clone --depth 1 "${moduleInfo.url}" "${moduleCacheDir}"`, {
             stdio: ['ignore', 'pipe', 'pipe'],
             env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
           });
+        } else {
+          execSync(`git clone --depth 1 --branch ${quoteShell(resolved.ref)} "${moduleInfo.url}" "${moduleCacheDir}"`, {
+            stdio: ['ignore', 'pipe', 'pipe'],
+            env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
+          });
+        }
         fetchSpinner.stop(`Fetched ${moduleInfo.name}`);
       } catch (error) {
         fetchSpinner.error(`Failed to fetch ${moduleInfo.name}`);
-        throw new Error(`Failed to clone external module '${moduleCode}': ${error.message}`);
+        throw new Error(`Failed to clone external module '${moduleCode}' at ${resolved.version}: ${error.message}`);
       }
     }

+    // Record resolution (channel + tag + SHA) for the manifest writer to pick up.
+    const sha = execSync('git rev-parse HEAD', { cwd: moduleCacheDir, stdio: 'pipe' }).toString().trim();
+    ExternalModuleManager._resolutions.set(moduleCode, {
+      channel: resolved.channel,
+      version: resolved.version,
+      ref: resolved.ref,
+      sha,
+      repoUrl: moduleInfo.url,
+      resolvedFallback: !!resolved.resolvedFallback,
+      planSource: planEntry.source,
+    });
+    await writeChannelMarker(markerPath, { channel: resolved.channel, version: resolved.version, sha });

     // Install dependencies if package.json exists
     const packageJsonPath = path.join(moduleCacheDir, 'package.json');
     const nodeModulesPath = path.join(moduleCacheDir, 'node_modules');
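Reviewer note: the catch branch above reduces to one decision rule. A sketch of that rule in isolation (onResolveFailure is a hypothetical name, not the shipped control flow):

function onResolveFailure(error, cachedMarker, cacheExists) {
  if (cachedMarker?.channel && cacheExists) {
    // An update check failed but a working install exists: warn, keep cache.
    return { action: 'use-cache', version: cachedMarker.version || cachedMarker.channel };
  }
  // Fresh install, no safety net: fail loudly with a hint matched to the
  // failure class (rate limit vs. generic network).
  const hint = /rate limit/i.test(error.message)
    ? 'set GITHUB_TOKEN or wait for the rate-limit reset'
    : 'check the network, or skip the tag lookup with --next / --pin';
  throw new Error(`Could not resolve stable tag (${error.message}); ${hint}`);
}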
@@ -0,0 +1,13 @@
+/**
+ * Canonical schema for per-module `module-help.csv` files.
+ *
+ * Both the merger (`Installer.mergeModuleHelpCatalogs`) and the synthesizer
+ * (`PluginResolver._buildSynthesizedHelpCsv`) emit this exact header. The
+ * merger compares each per-module file's header against this string and
+ * warns on drift, so any rename here must be matched in external module
+ * authors' CSVs (or accepted as a positional fall-through with a warning).
+ */
+const MODULE_HELP_CSV_HEADER =
+  'module,skill,display-name,menu-code,description,action,args,phase,preceded-by,followed-by,required,output-location,outputs';
+
+module.exports = { MODULE_HELP_CSV_HEADER };
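Reviewer note: a minimal sketch of the drift check the merger performs against this constant, assuming a csvText string as input (the real comparison lives in Installer.mergeModuleHelpCatalogs, which is not shown here):

const { MODULE_HELP_CSV_HEADER } = require('./module-help-schema');

// Warn-on-drift check, reduced to its essentials.
function checkHelpCsvHeader(csvText, moduleName) {
  const firstLine = csvText.split('\n', 1)[0].trim();
  if (firstLine !== MODULE_HELP_CSV_HEADER) {
    console.warn(`module-help.csv header drift in '${moduleName}'; falling back to positional columns`);
    return false;
  }
  return true;
}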
@@ -15,6 +15,11 @@ class OfficialModules {
     // Tracked during interactive config collection so {directory_name}
     // placeholder defaults can be resolved in buildQuestion().
     this.currentProjectDir = null;
+    // Install-time channel flag state. Set by Config.build once, then used as
+    // the default for every findModuleSource/cloneExternalModule call so that
+    // pre-install config collection and the install step agree on which ref
+    // to clone.
+    this.channelOptions = options.channelOptions || null;
   }

   /**

@@ -38,7 +43,7 @@ class OfficialModules {
    * @returns {OfficialModules}
    */
   static async build(config, paths) {
-    const instance = new OfficialModules();
+    const instance = new OfficialModules({ channelOptions: config.channelOptions });

     // Pre-collected by UI or quickUpdate — store and load existing for path-change detection
     if (config.moduleConfigs) {

@@ -196,6 +201,12 @@ class OfficialModules {
    * @returns {string|null} Path to the module source or null if not found
    */
   async findModuleSource(moduleCode, options = {}) {
+    // Inherit channelOptions from the install-scoped instance when the caller
+    // didn't pass one explicitly. Keeps pre-install config collection and the
+    // actual install step looking at the same git ref.
+    if (options.channelOptions === undefined && this.channelOptions) {
+      options = { ...options, channelOptions: this.channelOptions };
+    }
     const projectRoot = getProjectRoot();

     // Check for core module (directly under src/core-skills)

@@ -214,13 +225,13 @@ class OfficialModules {
       }
     }

-    // Check external official modules
+    // Check external official modules (pass channelOptions so channel plan applies)
     const externalSource = await this.externalModuleManager.findExternalModuleSource(moduleCode, options);
     if (externalSource) {
       return externalSource;
     }

-    // Check community modules
+    // Check community modules (pass channelOptions for --next/--pin overrides)
     const { CommunityModuleManager } = require('./community-manager');
     const communityMgr = new CommunityModuleManager();
     const communitySource = await communityMgr.findModuleSource(moduleCode, options);

@@ -258,7 +269,25 @@ class OfficialModules {
       return this.installFromResolution(resolved, bmadDir, fileTrackingCallback, options);
     }

-    const sourcePath = await this.findModuleSource(moduleName, { silent: options.silent });
+    // Community modules whose cloned repo ships marketplace.json get the same
+    // skill-level install treatment as custom-source installs. If the in-process
+    // cache wasn't populated (e.g. caller skipped the pre-clone phase), fall
+    // back to resolving directly from `~/.bmad/cache/community-modules/<name>/`
+    // so we don't silently regress to the legacy half-install path.
+    const { CommunityModuleManager } = require('./community-manager');
+    const communityMgr = new CommunityModuleManager();
+    let communityResolved = communityMgr.getPluginResolution(moduleName);
+    if (!communityResolved) {
+      communityResolved = await communityMgr.resolveFromCache(moduleName);
+    }
+    if (communityResolved) {
+      return this.installFromResolution(communityResolved, bmadDir, fileTrackingCallback, options);
+    }
+
+    const sourcePath = await this.findModuleSource(moduleName, {
+      silent: options.silent,
+      channelOptions: options.channelOptions,
+    });
     const targetPath = path.join(bmadDir, moduleName);

     if (!sourcePath) {

@@ -281,11 +310,24 @@ class OfficialModules {
     const manifestObj = new Manifest();
     const versionInfo = await manifestObj.getModuleVersionInfo(moduleName, bmadDir, sourcePath);

+    // Pick up channel resolution recorded by whichever manager did the clone.
+    const externalResolution = this.externalModuleManager.getResolution(moduleName);
+    let communityResolution = null;
+    if (!externalResolution) {
+      const { CommunityModuleManager } = require('./community-manager');
+      communityResolution = new CommunityModuleManager().getResolution(moduleName);
+    }
+    const resolution = externalResolution || communityResolution;
+
     await manifestObj.addModule(bmadDir, moduleName, {
-      version: versionInfo.version,
+      version: resolution?.version || versionInfo.version,
       source: versionInfo.source,
       npmPackage: versionInfo.npmPackage,
       repoUrl: versionInfo.repoUrl,
+      channel: resolution?.channel,
+      sha: resolution?.sha,
+      registryApprovedTag: communityResolution?.registryApprovedTag,
+      registryApprovedSha: communityResolution?.registryApprovedSha,
     });

     return { success: true, module: moduleName, path: targetPath, versionInfo };
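Reviewer note: for orientation, a representative resolution object as recorded by cloneExternalModule, with hypothetical module and SHA values (planSource values are whatever channel-plan.js emits; 'registry-default' here is a guess):

const resolution = {
  channel: 'stable',
  version: 'v2.3.1',
  ref: 'v2.3.1',
  sha: 'a1b2c3d4e5f60718293a4b5c6d7e8f9012345678',
  repoUrl: 'https://github.com/acme/tool',
  resolvedFallback: false,
  planSource: 'registry-default', // hypothetical value
};
// Version precedence in the manifest write above: the recorded resolution wins
// over the source-derived versionInfo, so the manifest records 'v2.3.1'.
console.log(resolution?.version || '0.0.0-from-versionInfo'); // 'v2.3.1'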
@@ -333,18 +375,46 @@ class OfficialModules {
       await this.createModuleDirectories(resolved.code, bmadDir, options);
     }

-    // Update manifest
+    // Update manifest. For community installs we honor the channel resolved by
+    // CommunityModuleManager (stable/next/pinned) and propagate the registry's
+    // approved tag/sha. For custom-source installs we derive channel from the
+    // cloneRef (present → pinned, absent → next; local paths have no channel).
     const { Manifest } = require('../core/manifest');
     const manifestObj = new Manifest();

-    await manifestObj.addModule(bmadDir, resolved.code, {
-      version: resolved.version || null,
-      source: 'custom',
+    const hasGitClone = !!resolved.repoUrl;
+    const isCommunity = resolved.communitySource === true;
+    const manifestEntry = {
+      version: resolved.communityVersion || resolved.cloneRef || (hasGitClone ? 'main' : resolved.version || null),
+      source: isCommunity ? 'community' : 'custom',
       npmPackage: null,
       repoUrl: resolved.repoUrl || null,
-    });
+    };
+    if (isCommunity) {
+      if (resolved.communityChannel) manifestEntry.channel = resolved.communityChannel;
+      if (resolved.cloneSha) manifestEntry.sha = resolved.cloneSha;
+      if (resolved.registryApprovedTag) manifestEntry.registryApprovedTag = resolved.registryApprovedTag;
+      if (resolved.registryApprovedSha) manifestEntry.registryApprovedSha = resolved.registryApprovedSha;
+    } else if (hasGitClone) {
+      manifestEntry.channel = resolved.cloneRef ? 'pinned' : 'next';
+      if (resolved.cloneSha) manifestEntry.sha = resolved.cloneSha;
+      if (resolved.rawInput) manifestEntry.rawSource = resolved.rawInput;
+    }
+    if (resolved.localPath) manifestEntry.localPath = resolved.localPath;
+    await manifestObj.addModule(bmadDir, resolved.code, manifestEntry);

-    return { success: true, module: resolved.code, path: targetPath, versionInfo: { version: resolved.version || '' } };
+    return {
+      success: true,
+      module: resolved.code,
+      path: targetPath,
+      // Mirror the manifestEntry.version precedence above so downstream summary
+      // lines show the same string we just wrote to disk (community installs
+      // use the registry-approved tag via `communityVersion`; custom git-backed
+      // installs show the cloned ref or 'main').
+      versionInfo: {
+        version: resolved.communityVersion || resolved.cloneRef || (hasGitClone ? 'main' : resolved.version || ''),
+      },
+    };
   }

   /**
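Reviewer note: to see how the precedence above plays out, a reduced sketch of the channel/version derivation with hypothetical inputs (deriveManifestEntry is not a shipped helper):

function deriveManifestEntry(resolved) {
  const hasGitClone = !!resolved.repoUrl;
  const isCommunity = resolved.communitySource === true;
  return {
    version: resolved.communityVersion || resolved.cloneRef || (hasGitClone ? 'main' : resolved.version || null),
    source: isCommunity ? 'community' : 'custom',
    channel: isCommunity ? resolved.communityChannel : hasGitClone ? (resolved.cloneRef ? 'pinned' : 'next') : undefined,
  };
}

console.log(deriveManifestEntry({ repoUrl: 'https://github.com/acme/tool', cloneRef: 'v1.2.0' }));
// { version: 'v1.2.0', source: 'custom', channel: 'pinned' }
console.log(deriveManifestEntry({ repoUrl: 'https://github.com/acme/tool' }));
// { version: 'main', source: 'custom', channel: 'next' }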
@ -833,7 +903,10 @@ class OfficialModules {
|
||||||
try {
|
try {
|
||||||
const content = await fs.readFile(moduleConfigPath, 'utf8');
|
const content = await fs.readFile(moduleConfigPath, 'utf8');
|
||||||
const moduleConfig = yaml.parse(content);
|
const moduleConfig = yaml.parse(content);
|
||||||
if (moduleConfig) {
|
// Only keep plain object parses. A corrupt config.yaml that parses
|
||||||
|
// to a scalar or array would crash later code that does `key in cfg`
|
||||||
|
// / `Object.keys(cfg)`; treat it the same as a parse error.
|
||||||
|
if (moduleConfig && typeof moduleConfig === 'object' && !Array.isArray(moduleConfig)) {
|
||||||
this._existingConfig[entry.name] = moduleConfig;
|
this._existingConfig[entry.name] = moduleConfig;
|
||||||
foundAny = true;
|
foundAny = true;
|
||||||
}
|
}
|
||||||
|
|
@ -844,9 +917,58 @@ class OfficialModules {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (foundAny) {
|
||||||
|
await this._hoistCoreKeysFromLegacyModuleConfigs();
|
||||||
|
}
|
||||||
|
|
||||||
return foundAny;
|
return foundAny;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Migrate prior answers when a key has moved from a non-core module to core
|
||||||
|
* (e.g. project_name moving from bmm to core in #2279). Without this, the
|
||||||
|
* partition logic in writeCentralConfig drops the value from the bmm bucket
|
||||||
|
* (because it's now a core key) without re-homing it under [core], so the
|
||||||
|
* user's prior answer silently disappears on the next install/quick-update.
|
||||||
|
*/
|
||||||
|
async _hoistCoreKeysFromLegacyModuleConfigs() {
|
||||||
|
const coreSchemaPath = path.join(getSourcePath(), 'core-skills', 'module.yaml');
|
||||||
|
if (!(await fs.pathExists(coreSchemaPath))) return;
|
||||||
|
|
||||||
|
let coreSchema;
|
||||||
|
try {
|
||||||
|
coreSchema = yaml.parse(await fs.readFile(coreSchemaPath, 'utf8'));
|
||||||
|
} catch {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (!coreSchema || typeof coreSchema !== 'object') return;
|
||||||
|
|
||||||
|
const coreKeys = new Set(
|
||||||
|
Object.entries(coreSchema)
|
||||||
|
.filter(([, v]) => v && typeof v === 'object' && 'prompt' in v)
|
||||||
|
.map(([k]) => k),
|
||||||
|
);
|
||||||
|
if (coreKeys.size === 0) return;
|
||||||
|
|
||||||
|
// Belt-and-suspenders: loadExistingConfig already filters non-object parses,
|
||||||
|
// but anyone calling _hoistCoreKeysFromLegacyModuleConfigs in isolation (or
|
||||||
|
// future code paths populating _existingConfig directly) shouldn't be able
|
||||||
|
// to crash this with a scalar / array.
|
||||||
|
const existingCore = this._existingConfig.core;
|
||||||
|
this._existingConfig.core = existingCore && typeof existingCore === 'object' && !Array.isArray(existingCore) ? existingCore : {};
|
||||||
|
|
||||||
|
for (const [moduleName, cfg] of Object.entries(this._existingConfig)) {
|
||||||
|
if (moduleName === 'core' || !cfg || typeof cfg !== 'object' || Array.isArray(cfg)) continue;
|
||||||
|
for (const key of Object.keys(cfg)) {
|
||||||
|
if (!coreKeys.has(key)) continue;
|
||||||
|
if (!(key in this._existingConfig.core)) {
|
||||||
|
this._existingConfig.core[key] = cfg[key];
|
||||||
|
}
|
||||||
|
delete cfg[key];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Pre-scan module schemas to gather metadata for the configuration gateway prompt.
|
* Pre-scan module schemas to gather metadata for the configuration gateway prompt.
|
||||||
* Returns info about which modules have configurable options.
|
* Returns info about which modules have configurable options.
|
||||||
|
|
|
||||||
|
|
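As a worked illustration of the hoist (the key and module names here are plausible examples, not taken from a real config): given a legacy state where a now-core key was still answered under a module bucket, the pass moves it under core and deletes the stale copy, preferring an existing core answer on conflict.

// Before: project_name was answered under bmm in an older install.
this._existingConfig = {
  core: { user_name: 'Ada' },
  bmm: { project_name: 'acme-app', user_skill_level: 'expert' },
};

// After _hoistCoreKeysFromLegacyModuleConfigs() (assuming project_name is
// declared with a `prompt:` in core's module.yaml):
// {
//   core: { user_name: 'Ada', project_name: 'acme-app' },
//   bmm: { user_skill_level: 'expert' },
// }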
@@ -1,6 +1,7 @@
 const fs = require('../fs-native');
 const path = require('node:path');
 const yaml = require('yaml');
+const { MODULE_HELP_CSV_HEADER } = require('./module-help-schema');

 /**
  * Resolves how to install a plugin from marketplace.json by analyzing

@@ -338,8 +339,7 @@ class PluginResolver {
  * @returns {string} CSV content
  */
 _buildSynthesizedHelpCsv(moduleName, skillInfos) {
-  const header = 'module,skill,display-name,menu-code,description,action,args,phase,after,before,required,output-location,outputs';
-  const rows = [header];
+  const rows = [MODULE_HELP_CSV_HEADER];

   for (const info of skillInfos) {
     const displayName = this._formatDisplayName(info.name || info.dirName);

@@ -1,6 +1,10 @@
 # Fallback module registry — used only when the BMad Marketplace repo
 # (bmad-code-org/bmad-plugins-marketplace) is unreachable.
 # The remote registry/official.yaml is the source of truth.
+#
+# default_channel (optional) — the install channel when the user does not
+# override with --channel/--pin/--next. Valid values: stable | next.
+# Omit to inherit the installer's hardcoded default (stable).

 modules:
   bmad-builder:

@@ -12,6 +16,7 @@ modules:
     defaultSelected: false
     type: bmad-org
     npmPackage: bmad-builder
+    default_channel: stable

   bmad-creative-intelligence-suite:
     url: https://github.com/bmad-code-org/bmad-module-creative-intelligence-suite

@@ -22,6 +27,7 @@ modules:
     defaultSelected: false
     type: bmad-org
     npmPackage: bmad-creative-intelligence-suite
+    default_channel: stable

   bmad-game-dev-studio:
     url: https://github.com/bmad-code-org/bmad-module-game-dev-studio.git

@@ -32,6 +38,7 @@ modules:
     defaultSelected: false
     type: bmad-org
     npmPackage: bmad-game-dev-studio
+    default_channel: stable

   bmad-method-test-architecture-enterprise:
     url: https://github.com/bmad-code-org/bmad-method-test-architecture-enterprise

@@ -42,3 +49,4 @@ modules:
     defaultSelected: false
     type: bmad-org
     npmPackage: bmad-method-test-architecture-enterprise
+    default_channel: stable
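A minimal sketch of how an installer might consume `default_channel`. The helper below is hypothetical; the diff only documents the precedence (flags beat the registry entry, which beats the hardcoded default):

// Hypothetical helper illustrating the documented precedence.
function effectiveChannel({ flagChannel, registryEntry }) {
  if (flagChannel) return flagChannel;                  // --channel/--pin/--next wins
  if (registryEntry && registryEntry.default_channel) {
    return registryEntry.default_channel;               // per-module registry default
  }
  return 'stable';                                      // installer's hardcoded default
}

// effectiveChannel({ flagChannel: null, registryEntry: { default_channel: 'stable' } })  -> 'stable'
// effectiveChannel({ flagChannel: 'next', registryEntry: { default_channel: 'stable' } }) -> 'next'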
@@ -0,0 +1,336 @@
+const path = require('node:path');
+const semver = require('semver');
+const yaml = require('yaml');
+const fs = require('../fs-native');
+const { getExternalModuleCachePath, getModulePath, resolveInstalledModuleYaml } = require('../project-root');
+
+const DEFAULT_PARENT_DEPTH = 8;
+
+/**
+ * Resolve a module version from authoritative on-disk metadata.
+ * Preference order:
+ * 1. package.json nearest the module source/cache root
+ * 2. module.yaml in the module source directory
+ * 3. .claude-plugin/marketplace.json
+ * 4. caller-provided fallback version
+ *
+ * @param {string} moduleName - Module code/name
+ * @param {Object} [options]
+ * @param {string} [options.moduleSourcePath] - Directory containing module.yaml
+ * @param {string} [options.fallbackVersion] - Final fallback when no metadata is found
+ * @param {string[]} [options.marketplacePluginNames] - Preferred marketplace plugin names
+ * @returns {Promise<{version: string|null, source: string|null, path: string|null}>}
+ */
+async function resolveModuleVersion(moduleName, options = {}) {
+  const moduleSourcePath = await normalizeDirectoryPath(options.moduleSourcePath);
+  const packageJsonPath = await findPackageJsonPath(moduleName, moduleSourcePath);
+
+  if (packageJsonPath) {
+    const packageVersion = await readPackageJsonVersion(packageJsonPath);
+    if (packageVersion) {
+      return {
+        version: packageVersion,
+        source: 'package.json',
+        path: packageJsonPath,
+      };
+    }
+  }
+
+  const moduleYamlPath = await findModuleYamlPath(moduleName, moduleSourcePath);
+  if (moduleYamlPath) {
+    const moduleVersion = await readModuleYamlVersion(moduleYamlPath);
+    if (moduleVersion) {
+      return {
+        version: moduleVersion,
+        source: 'module.yaml',
+        path: moduleYamlPath,
+      };
+    }
+  }
+
+  const marketplaceVersion = await findMarketplaceVersion(moduleName, moduleSourcePath, options.marketplacePluginNames || []);
+  if (marketplaceVersion) {
+    return marketplaceVersion;
+  }
+
+  const fallbackVersion = normalizeVersion(options.fallbackVersion);
+  if (fallbackVersion) {
+    return {
+      version: fallbackVersion,
+      source: 'fallback',
+      path: null,
+    };
+  }
+
+  return {
+    version: null,
+    source: null,
+    path: null,
+  };
+}
+
+async function findPackageJsonPath(moduleName, moduleSourcePath) {
+  const roots = await buildSearchRoots(moduleName, moduleSourcePath);
+
+  for (const root of roots) {
+    const packageJsonPath = await findNearestUpwardFile(root.searchDir, 'package.json', { boundaryDir: root.boundaryDir });
+    if (packageJsonPath) {
+      return packageJsonPath;
+    }
+  }
+
+  return null;
+}
+
+async function findModuleYamlPath(moduleName, moduleSourcePath) {
+  if (moduleSourcePath) {
+    const directModuleYamlPath = path.join(moduleSourcePath, 'module.yaml');
+    if (await fs.pathExists(directModuleYamlPath)) {
+      return directModuleYamlPath;
+    }
+  }
+
+  return resolveInstalledModuleYaml(moduleName);
+}
+
+async function findMarketplaceVersion(moduleName, moduleSourcePath, marketplacePluginNames) {
+  const roots = await buildSearchRoots(moduleName, moduleSourcePath);
+
+  for (const root of roots) {
+    const marketplacePath = await findNearestUpwardFile(root.searchDir, path.join('.claude-plugin', 'marketplace.json'), {
+      boundaryDir: root.boundaryDir,
+    });
+    if (!marketplacePath) {
+      continue;
+    }
+
+    const data = await readJsonFile(marketplacePath);
+    if (!data) {
+      continue;
+    }
+
+    const version = extractMarketplaceVersion(data, moduleName, marketplacePluginNames);
+    if (version) {
+      return {
+        version,
+        source: 'marketplace.json',
+        path: marketplacePath,
+      };
+    }
+  }
+
+  return null;
+}
+
+async function buildSearchRoots(moduleName, moduleSourcePath) {
+  const roots = [];
+  const seen = new Set();
+
+  const addRoot = async (candidate) => {
+    const normalized = await normalizeExistingDirectory(candidate);
+    if (!normalized || seen.has(normalized)) {
+      return;
+    }
+
+    seen.add(normalized);
+    roots.push({
+      searchDir: normalized,
+      boundaryDir: await findSearchBoundary(normalized),
+    });
+  };
+
+  await addRoot(moduleSourcePath);
+
+  if (moduleName === 'core' || moduleName === 'bmm') {
+    await addRoot(getModulePath(moduleName));
+  } else {
+    await addRoot(getExternalModuleCachePath(moduleName));
+  }
+
+  return roots;
+}
+
+async function findNearestUpwardFile(startDir, relativeFilePath, options = {}) {
+  const normalizedStartDir = await normalizeExistingDirectory(startDir);
+  if (!normalizedStartDir) {
+    return null;
+  }
+
+  const maxDepth = options.maxDepth ?? DEFAULT_PARENT_DEPTH;
+  const normalizedBoundaryDir = await normalizeDirectoryPath(options.boundaryDir);
+  let currentDir = normalizedStartDir;
+  for (let depth = 0; depth <= maxDepth; depth++) {
+    const candidate = path.join(currentDir, relativeFilePath);
+    if (await fs.pathExists(candidate)) {
+      return candidate;
+    }
+
+    if (normalizedBoundaryDir && currentDir === normalizedBoundaryDir) {
+      break;
+    }
+
+    const parentDir = path.dirname(currentDir);
+    if (parentDir === currentDir) {
+      break;
+    }
+    currentDir = parentDir;
+  }
+
+  return null;
+}
+
+async function findSearchBoundary(startDir) {
+  const normalizedStartDir = await normalizeExistingDirectory(startDir);
+  if (!normalizedStartDir) {
+    return null;
+  }
+
+  let currentDir = normalizedStartDir;
+  for (let depth = 0; depth <= DEFAULT_PARENT_DEPTH; depth++) {
+    if (
+      (await fs.pathExists(path.join(currentDir, 'package.json'))) ||
+      (await fs.pathExists(path.join(currentDir, '.claude-plugin', 'marketplace.json'))) ||
+      (await fs.pathExists(path.join(currentDir, '.git')))
+    ) {
+      return currentDir;
+    }
+
+    const parentDir = path.dirname(currentDir);
+    if (parentDir === currentDir) {
+      break;
+    }
+    currentDir = parentDir;
+  }
+
+  return normalizedStartDir;
+}
+
+async function normalizeDirectoryPath(candidate) {
+  if (!candidate) {
+    return null;
+  }
+
+  const resolvedPath = path.resolve(candidate);
+  try {
+    const stats = await fs.stat(resolvedPath);
+    return stats.isDirectory() ? resolvedPath : path.dirname(resolvedPath);
+  } catch {
+    return resolvedPath;
+  }
+}
+
+async function normalizeExistingDirectory(candidate) {
+  const normalized = await normalizeDirectoryPath(candidate);
+  if (!normalized) {
+    return null;
+  }
+
+  if (!(await fs.pathExists(normalized))) {
+    return null;
+  }
+
+  return normalized;
+}
+
+async function readPackageJsonVersion(packageJsonPath) {
+  const data = await readJsonFile(packageJsonPath);
+  return normalizeVersion(data?.version);
+}
+
+async function readModuleYamlVersion(moduleYamlPath) {
+  try {
+    const content = await fs.readFile(moduleYamlPath, 'utf8');
+    const data = yaml.parse(content);
+    return normalizeVersion(data?.version || data?.module_version || data?.moduleVersion);
+  } catch {
+    return null;
+  }
+}
+
+async function readJsonFile(filePath) {
+  try {
+    const content = await fs.readFile(filePath, 'utf8');
+    return JSON.parse(content);
+  } catch {
+    return null;
+  }
+}
+
+function extractMarketplaceVersion(data, moduleName, marketplacePluginNames = []) {
+  const plugins = Array.isArray(data?.plugins) ? data.plugins : [];
+  if (plugins.length === 0) {
+    return null;
+  }
+
+  const preferredNames = new Set(
+    [moduleName, ...marketplacePluginNames]
+      .filter((value) => typeof value === 'string')
+      .map((value) => value.trim())
+      .filter(Boolean),
+  );
+
+  const exactMatches = [];
+  const fallbackVersions = [];
+
+  for (const plugin of plugins) {
+    const version = normalizeVersion(plugin?.version);
+    if (!version) {
+      continue;
+    }
+
+    fallbackVersions.push(version);
+
+    const pluginNames = [plugin?.name, plugin?.code].filter((value) => typeof value === 'string').map((value) => value.trim());
+    if (pluginNames.some((name) => preferredNames.has(name))) {
+      exactMatches.push(version);
+    }
+  }
+
+  return pickBestVersion(exactMatches.length > 0 ? exactMatches : fallbackVersions);
+}
+
+function pickBestVersion(versions) {
+  const candidates = versions.map(normalizeVersion).filter(Boolean);
+  if (candidates.length === 0) {
+    return null;
+  }
+
+  candidates.sort(compareVersionsDescending);
+  return candidates[0];
+}
+
+function compareVersionsDescending(left, right) {
+  const leftSemver = normalizeSemver(left);
+  const rightSemver = normalizeSemver(right);
+
+  if (leftSemver && rightSemver) {
+    return semver.rcompare(leftSemver, rightSemver);
+  }
+
+  if (leftSemver) {
+    return -1;
+  }
+
+  if (rightSemver) {
+    return 1;
+  }
+
+  return right.localeCompare(left, undefined, { numeric: true, sensitivity: 'base' });
+}
+
+function normalizeSemver(version) {
+  return semver.valid(version) || semver.valid(semver.coerce(version));
+}
+
+function normalizeVersion(version) {
+  if (typeof version !== 'string') {
+    return null;
+  }
+
+  const trimmed = version.trim();
+  return trimmed || null;
+}
+
+module.exports = {
+  resolveModuleVersion,
+};
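A short usage sketch for the resolver above. The call shape matches the new file; the path and returned values are illustrative assumptions, not real output:

const { resolveModuleVersion } = require('./modules/version-resolver');

async function demo() {
  // Falls through package.json, then module.yaml, then marketplace.json, then fallback.
  const info = await resolveModuleVersion('bmad-game-dev-studio', {
    moduleSourcePath: '/home/user/.bmad/cache/external-modules/bmad-game-dev-studio', // hypothetical path
    fallbackVersion: '0.0.0',
  });
  // e.g. { version: '2.1.0', source: 'package.json', path: '.../package.json' }
  console.log(info.version, info.source);
}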
@@ -1,5 +1,6 @@
 const path = require('node:path');
 const os = require('node:os');
+const yaml = require('yaml');
 const fs = require('./fs-native');

 /**

@@ -86,6 +87,11 @@ function getExternalModuleCachePath(moduleName, ...segments) {
 * Built-in modules (core, bmm) live under <src>. External official modules are
 * cloned into ~/.bmad/cache/external-modules/<name>/ with varying internal
 * layouts (some at src/module.yaml, some at skills/module.yaml, some nested).
+ * Url-source custom modules are cloned into ~/.bmad/cache/custom-modules/<host>/<owner>/<repo>/
+ * and are resolved by walking the cache and matching `code` or `name` from the
+ * discovered module.yaml. Local custom-source modules are not cached; their
+ * path is read from the CustomModuleManager resolution cache set during the
+ * same install run.
 * This mirrors the candidate-path search in
 * ExternalModuleManager.findExternalModuleSource but performs no git/network
 * work, which keeps it safe to call during manifest writing.

@@ -97,26 +103,113 @@ async function resolveInstalledModuleYaml(moduleName) {
   const builtIn = path.join(getModulePath(moduleName), 'module.yaml');
   if (await fs.pathExists(builtIn)) return builtIn;

-  const cacheRoot = getExternalModuleCachePath(moduleName);
-  if (!(await fs.pathExists(cacheRoot))) return null;
-  for (const dir of ['skills', 'src']) {
-    const direct = path.join(cacheRoot, dir, 'module.yaml');
-    if (await fs.pathExists(direct)) return direct;
-
-    const dirPath = path.join(cacheRoot, dir);
-    if (await fs.pathExists(dirPath)) {
-      const entries = await fs.readdir(dirPath, { withFileTypes: true });
-      for (const entry of entries) {
-        if (!entry.isDirectory()) continue;
-        const nested = path.join(dirPath, entry.name, 'module.yaml');
-        if (await fs.pathExists(nested)) return nested;
-      }
-    }
-  }
-
-  const atRoot = path.join(cacheRoot, 'module.yaml');
-  if (await fs.pathExists(atRoot)) return atRoot;
+  // Collect every module.yaml under a root using the standard candidate paths.
+  // Url-source repos can host multiple plugins (discovery mode), so we need all
+  // matches, not just the first. Returned in priority order.
+  async function searchRootAll(root) {
+    const results = [];
+    for (const dir of ['skills', 'src']) {
+      const direct = path.join(root, dir, 'module.yaml');
+      if (await fs.pathExists(direct)) results.push(direct);
+
+      const dirPath = path.join(root, dir);
+      if (await fs.pathExists(dirPath)) {
+        const entries = await fs.readdir(dirPath, { withFileTypes: true });
+        for (const entry of entries) {
+          if (!entry.isDirectory()) continue;
+          const nested = path.join(dirPath, entry.name, 'module.yaml');
+          if (await fs.pathExists(nested)) results.push(nested);
+        }
+      }
+    }
+
+    // BMB standard: {setup-skill}/assets/module.yaml (setup skill is any *-setup directory).
+    // Check at the repo root, and also under src/skills/ and skills/ since
+    // marketplace plugins commonly nest skills under src/skills/<name>/.
+    const setupSearchRoots = [root, path.join(root, 'src', 'skills'), path.join(root, 'skills')];
+    for (const setupRoot of setupSearchRoots) {
+      if (!(await fs.pathExists(setupRoot))) continue;
+      const entries = await fs.readdir(setupRoot, { withFileTypes: true });
+      for (const entry of entries) {
+        if (!entry.isDirectory() || !entry.name.endsWith('-setup')) continue;
+        const setupAssets = path.join(setupRoot, entry.name, 'assets', 'module.yaml');
+        if (await fs.pathExists(setupAssets)) results.push(setupAssets);
+      }
+    }
+
+    const atRoot = path.join(root, 'module.yaml');
+    if (await fs.pathExists(atRoot)) results.push(atRoot);
+    return results;
+  }
+
+  // Backwards-compatible single-result variant for the existing external-cache
+  // and resolution-cache fallbacks (one module per root by construction).
+  async function searchRoot(root) {
+    const all = await searchRootAll(root);
+    return all.length > 0 ? all[0] : null;
+  }
+
+  const cacheRoot = getExternalModuleCachePath(moduleName);
+  if (await fs.pathExists(cacheRoot)) {
+    const found = await searchRoot(cacheRoot);
+    if (found) return found;
+  }
+
+  // Community modules are cloned to ~/.bmad/cache/community-modules/<name>/
+  // (parallel to the external-modules cache used above). Search there too so
+  // collectAgentsFromModuleYaml and writeCentralConfig can locate community
+  // module.yaml files regardless of how nested the layout is.
+  const communityCacheRoot = path.join(os.homedir(), '.bmad', 'cache', 'community-modules', moduleName);
+  if (await fs.pathExists(communityCacheRoot)) {
+    const found = await searchRoot(communityCacheRoot);
+    if (found) return found;
+  }
+
+  // Fallback: local custom-source modules store their source path in the
+  // CustomModuleManager resolution cache populated during the same install run.
+  // Match by code OR name since callers may use either form.
+  try {
+    const { CustomModuleManager } = require('./modules/custom-module-manager');
+    for (const [, mod] of CustomModuleManager._resolutionCache) {
+      if ((mod.code === moduleName || mod.name === moduleName) && mod.localPath) {
+        const found = await searchRoot(mod.localPath);
+        if (found) return found;
+      }
+    }
+  } catch {
+    // Resolution cache unavailable — continue
+  }
+
+  // Fallback: url-source custom modules cloned to ~/.bmad/cache/custom-modules/.
+  // Walk every cached repo, enumerate ALL module.yaml files via searchRootAll
+  // (a single repo can host multiple plugins in discovery mode), and match by
+  // the yaml's `code` or `name` field. This works on re-install runs where
+  // _resolutionCache is empty and covers both discovery-mode (with marketplace.json)
+  // and direct-mode modules, since we identify repo roots by .bmad-source.json
+  // (written by cloneRepo) or .claude-plugin/ rather than by marketplace.json.
+  try {
+    const customCacheDir = path.join(os.homedir(), '.bmad', 'cache', 'custom-modules');
+    if (await fs.pathExists(customCacheDir)) {
+      const { CustomModuleManager } = require('./modules/custom-module-manager');
+      const customMgr = new CustomModuleManager();
+      const repoRoots = await customMgr._findCacheRepoRoots(customCacheDir);
+      for (const { repoPath } of repoRoots) {
+        const candidates = await searchRootAll(repoPath);
+        for (const candidate of candidates) {
+          try {
+            const parsed = yaml.parse(await fs.readFile(candidate, 'utf8'));
+            if (parsed && (parsed.code === moduleName || parsed.name === moduleName)) {
+              return candidate;
+            }
+          } catch {
+            // Malformed yaml — skip
+          }
+        }
+      }
+    }
+  } catch {
+    // Custom-modules cache walk failed — continue
+  }

   return null;
 }
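To summarize the lookup order the rewritten resolver now walks (the call is real per the diff; the printed path is a made-up example):

const { resolveInstalledModuleYaml } = require('./project-root');

async function locate() {
  // 1. <src>/<module>/module.yaml                    (built-in core/bmm)
  // 2. ~/.bmad/cache/external-modules/<name>/...     (official externals)
  // 3. ~/.bmad/cache/community-modules/<name>/...    (community clones)
  // 4. CustomModuleManager resolution cache          (local custom sources)
  // 5. ~/.bmad/cache/custom-modules/** walk, matched by module.yaml code/name
  const yamlPath = await resolveInstalledModuleYaml('my-custom-module'); // hypothetical module name
  // e.g. '/home/user/.bmad/cache/custom-modules/github.com/user/repo/src/module.yaml' (hypothetical)
  return yamlPath;
}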
@@ -0,0 +1,330 @@
+// `--set <module>.<key>=<value>` is a post-install patch. The installer runs
+// its normal flow and writes `_bmad/config.toml`, `_bmad/config.user.toml`,
+// and `_bmad/<module>/config.yaml`; afterwards `applySetOverrides` upserts
+// each override into those files.
+//
+// This is intentionally NOT integrated with the prompt/template/schema
+// system. Tradeoffs:
+// - No `result:` template rendering: `--set bmm.project_knowledge=research`
+//   writes "research" verbatim. Pass `--set bmm.project_knowledge='{project-root}/research'`
+//   if you want the rendered form.
+// - Carry-forward across installs is best-effort: declared schema keys
+//   persist via the existingValue path on the next interactive run; values
+//   for keys outside any module's schema may need to be re-passed on each
+//   install (or edited directly in `_bmad/config.toml`).
+// - No "key not in schema" validation: whatever you assert, we write.
+//
+// Names that, when used as object keys, can mutate `Object.prototype` and
+// cascade into every plain-object lookup in the process. The `--set` pipeline
+// assigns into plain `{}` maps keyed by user input, so `--set __proto__.x=1`
+// would otherwise reach `overrides.__proto__[x] = 1` and pollute every plain
+// object. We reject the names at parse time and harden the maps in
+// `parseSetEntries` with `Object.create(null)` for defense-in-depth.
+const PROTOTYPE_POLLUTING_NAMES = new Set(['__proto__', 'prototype', 'constructor']);
+
+const path = require('node:path');
+const fs = require('./fs-native');
+const yaml = require('yaml');
+
+/**
+ * Parse a single `--set <module>.<key>=<value>` entry.
+ * @param {string} entry - raw flag value
+ * @returns {{module: string, key: string, value: string}}
+ * @throws {Error} on malformed input
+ */
+function parseSetEntry(entry) {
+  if (typeof entry !== 'string' || entry.length === 0) {
+    throw new Error('--set: empty entry. Expected <module>.<key>=<value>');
+  }
+  const eq = entry.indexOf('=');
+  if (eq === -1) {
+    throw new Error(`--set "${entry}": missing '='. Expected <module>.<key>=<value>`);
+  }
+  const lhs = entry.slice(0, eq);
+  // Note: only the LHS is trimmed. Values may legitimately contain leading
+  // or trailing whitespace (paths with spaces, quoted strings); module / key
+  // names cannot, so it's safe to be strict on the left.
+  const value = entry.slice(eq + 1);
+  const dot = lhs.indexOf('.');
+  if (dot === -1) {
+    throw new Error(`--set "${entry}": missing '.'. Expected <module>.<key>=<value>`);
+  }
+  const moduleCode = lhs.slice(0, dot).trim();
+  const key = lhs.slice(dot + 1).trim();
+  if (!moduleCode || !key) {
+    throw new Error(`--set "${entry}": empty module or key. Expected <module>.<key>=<value>`);
+  }
+  if (PROTOTYPE_POLLUTING_NAMES.has(moduleCode) || PROTOTYPE_POLLUTING_NAMES.has(key)) {
+    throw new Error(
+      `--set "${entry}": '__proto__', 'prototype', and 'constructor' are reserved and cannot be used as a module or key name.`,
+    );
+  }
+  return { module: moduleCode, key, value };
+}
+
+/**
+ * Parse repeated `--set` entries into a `{ module: { key: value } }` map.
+ * Later entries overwrite earlier ones for the same key. Both the outer
+ * map and the per-module inner maps are `Object.create(null)` so callers
+ * that bypass `parseSetEntry`'s name check still can't pollute prototypes.
+ *
+ * @param {string[]} entries
+ * @returns {Object<string, Object<string, string>>}
+ */
+function parseSetEntries(entries) {
+  const overrides = Object.create(null);
+  if (!Array.isArray(entries)) return overrides;
+  for (const entry of entries) {
+    const { module: moduleCode, key, value } = parseSetEntry(entry);
+    if (!overrides[moduleCode]) overrides[moduleCode] = Object.create(null);
+    overrides[moduleCode][key] = value;
+  }
+  return overrides;
+}
+
+/**
+ * Encode a JS string as a TOML basic string (double-quoted with escapes).
+ * @param {string} value
+ */
+function tomlString(value) {
+  const s = String(value);
+  // Per the TOML spec, basic strings escape `\`, `"`, and control characters.
+  return (
+    '"' +
+    s
+      .replaceAll('\\', '\\\\')
+      .replaceAll('"', String.raw`\"`)
+      .replaceAll('\b', String.raw`\b`)
+      .replaceAll('\f', String.raw`\f`)
+      .replaceAll('\n', String.raw`\n`)
+      .replaceAll('\r', String.raw`\r`)
+      .replaceAll('\t', String.raw`\t`) +
+    '"'
+  );
+}
+
+/**
+ * Section header for a given module code.
+ * - `core` → `[core]`
+ * - `<other>` → `[modules.<other>]`
+ *
+ * Mirrors the layout `manifest-generator.writeCentralConfig` produces.
+ */
+function sectionHeader(moduleCode) {
+  return moduleCode === 'core' ? '[core]' : `[modules.${moduleCode}]`;
+}
+
+/**
+ * Insert or update `key = value` inside a TOML section, returning the new
+ * file content. The format produced by the installer is regular and small
+ * enough that a line scanner is more reliable than pulling in a TOML
+ * round-tripper that would normalize the file's existing whitespace and
+ * comment structure.
+ *
+ * - If `[section]` exists and contains `key`, replace the value on that
+ *   line (preserving any inline comment after the value).
+ * - If `[section]` exists but `key` doesn't, append `key = value` at the
+ *   end of the section (before the next `[...]` header or EOF, skipping
+ *   trailing blank lines so the section stays tidy).
+ * - If `[section]` doesn't exist, append a new section block at EOF.
+ *
+ * @param {string} content existing file content (may be empty)
+ * @param {string} section exact `[section]` header to target
+ * @param {string} key
+ * @param {string} valueToml already TOML-encoded value (e.g. `"foo"`)
+ * @returns {string} new content
+ */
+function upsertTomlKey(content, section, key, valueToml) {
+  const lines = content.split('\n');
+  // Track whether the file already ended with a newline so we can preserve
+  // that. `split('\n')` on `"a\n"` yields `['a', '']`, which gives us the
+  // marker we need.
+  const hadTrailingNewline = lines.length > 0 && lines.at(-1) === '';
+  if (hadTrailingNewline) lines.pop();
+
+  // Locate the target section.
+  const sectionStart = lines.findIndex((line) => line.trim() === section);
+  if (sectionStart === -1) {
+    // Section doesn't exist — append a new block. Pad with a blank line if
+    // the file is non-empty so sections stay visually separated.
+    if (lines.length > 0 && lines.at(-1).trim() !== '') lines.push('');
+    lines.push(section, `${key} = ${valueToml}`);
+    return lines.join('\n') + (hadTrailingNewline ? '\n' : '');
+  }
+
+  // Find the section's end (next `[...]` header or EOF).
+  let sectionEnd = lines.length;
+  for (let i = sectionStart + 1; i < lines.length; i++) {
+    if (/^\s*\[/.test(lines[i])) {
+      sectionEnd = i;
+      break;
+    }
+  }
+
+  // Look for the key inside the section. Match `<key> = ...` allowing
+  // optional leading whitespace; preserve the comment tail (`# ...`) if any.
+  const keyPattern = new RegExp(`^(\\s*)${escapeRegExp(key)}\\s*=\\s*(.*)$`);
+  for (let i = sectionStart + 1; i < sectionEnd; i++) {
+    const match = lines[i].match(keyPattern);
+    if (match) {
+      const indent = match[1];
+      // Preserve trailing comment if present. We split on the first `#` that
+      // is preceded by whitespace — TOML strings can't contain unescaped `#`
+      // in basic-string form so this is safe for the values we emit.
+      const tail = match[2];
+      const commentIdx = tail.search(/\s+#/);
+      const commentSuffix = commentIdx === -1 ? '' : tail.slice(commentIdx);
+      lines[i] = `${indent}${key} = ${valueToml}${commentSuffix}`;
+      return lines.join('\n') + (hadTrailingNewline ? '\n' : '');
+    }
+  }
+
+  // Section exists but key doesn't. Insert before the next section header,
+  // skipping trailing blank lines inside the current section so the new
+  // entry sits with its siblings.
+  let insertAt = sectionEnd;
+  while (insertAt > sectionStart + 1 && lines[insertAt - 1].trim() === '') {
+    insertAt--;
+  }
+  lines.splice(insertAt, 0, `${key} = ${valueToml}`);
+  return lines.join('\n') + (hadTrailingNewline ? '\n' : '');
+}
+
+function escapeRegExp(s) {
+  return s.replaceAll(/[.*+?^${}()|[\]\\]/g, String.raw`\$&`);
+}
+
+/**
+ * Look up `[section] key` in a TOML file. Returns true if the file exists,
+ * the section is present, and `key` is set within it. Used by
+ * `applySetOverrides` to route an override to the file that already owns
+ * the key (so user-scope keys land in `config.user.toml`, team-scope keys
+ * land in `config.toml`).
+ */
+async function tomlHasKey(filePath, section, key) {
+  if (!(await fs.pathExists(filePath))) return false;
+  const content = await fs.readFile(filePath, 'utf8');
+  const lines = content.split('\n');
+  const sectionStart = lines.findIndex((line) => line.trim() === section);
+  if (sectionStart === -1) return false;
+  const keyPattern = new RegExp(`^\\s*${escapeRegExp(key)}\\s*=`);
+  for (let i = sectionStart + 1; i < lines.length; i++) {
+    if (/^\s*\[/.test(lines[i])) return false;
+    if (keyPattern.test(lines[i])) return true;
+  }
+  return false;
+}
+
+/**
+ * Apply parsed `--set` overrides to the central TOML files written by the
+ * installer. Called at the end of an install / quick-update.
+ *
+ * Routing per (module, key):
+ * 1. If `_bmad/config.user.toml` already has `[section] key`, update there
+ *    (user-scope key like `core.user_name`, `bmm.user_skill_level`).
+ * 2. Otherwise update `_bmad/config.toml` (team scope, the default).
+ *
+ * The schema-correct user/team partition lives in `manifest-generator`. We
+ * intentionally don't re-read module schemas here — the only goal is to
+ * match the file the installer just wrote the key to. For brand-new keys
+ * (not in either file yet), team scope is the safe default.
+ *
+ * @param {Object<string, Object<string, string>>} overrides
+ * @param {string} bmadDir absolute path to `_bmad/`
+ * @returns {Promise<Array<{module:string,key:string,scope:'team'|'user',file:string}>>}
+ *   a list of applied entries (for caller logging)
+ */
+async function applySetOverrides(overrides, bmadDir) {
+  const applied = [];
+  if (!overrides || typeof overrides !== 'object') return applied;
+
+  const teamPath = path.join(bmadDir, 'config.toml');
+  const userPath = path.join(bmadDir, 'config.user.toml');
+
+  for (const moduleCode of Object.keys(overrides)) {
+    // Skip overrides for modules not actually installed. The installer writes
+    // `_bmad/<module>/config.yaml` for every installed module (including core),
+    // so its presence is a reliable "is this module here?" signal that works
+    // for both fresh installs and quick-updates without coupling to caller-
+    // supplied module lists.
+    const moduleConfigYaml = path.join(bmadDir, moduleCode, 'config.yaml');
+    if (!(await fs.pathExists(moduleConfigYaml))) {
+      continue;
+    }
+
+    const section = sectionHeader(moduleCode);
+    const moduleOverrides = overrides[moduleCode] || {};
+    for (const key of Object.keys(moduleOverrides)) {
+      const value = moduleOverrides[key];
+      const valueToml = tomlString(value);
+
+      const userOwnsIt = await tomlHasKey(userPath, section, key);
+      const targetPath = userOwnsIt ? userPath : teamPath;
+
+      // The team file always exists post-install; the user file only exists
+      // if the install wrote at least one user-scope key. If we're routing to
+      // it but it doesn't exist yet, create it with a minimal header so it
+      // has the same shape as installer-written user toml.
+      let content = '';
+      if (await fs.pathExists(targetPath)) {
+        content = await fs.readFile(targetPath, 'utf8');
+      } else {
+        content = '# Personal overrides for _bmad/config.toml.\n';
+      }
+
+      const next = upsertTomlKey(content, section, key, valueToml);
+      await fs.writeFile(targetPath, next, 'utf8');
+      applied.push({
+        module: moduleCode,
+        key,
+        scope: userOwnsIt ? 'user' : 'team',
+        file: path.basename(targetPath),
+      });
+    }
+
+    // Also patch the per-module yaml (`_bmad/<module>/config.yaml`). The
+    // installer reads this file as `_existingConfig` on subsequent runs and
+    // surfaces declared values as prompt defaults — under `--yes` those
+    // defaults are accepted, so patching here gives `--set` natural
+    // carry-forward for declared keys without needing schema-strict
+    // partition exemptions in the manifest writer. For undeclared keys the
+    // value lives in the per-module yaml but won't be re-emitted into
+    // config.toml on the next install (the schema-strict partition drops
+    // it); re-pass `--set` if you need it sticky.
+    const moduleYamlPath = path.join(bmadDir, moduleCode, 'config.yaml');
+    if (await fs.pathExists(moduleYamlPath)) {
+      try {
+        const text = await fs.readFile(moduleYamlPath, 'utf8');
+        const parsed = yaml.parse(text);
+        if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
+          // Preserve the installer's banner header (everything up to the
+          // first non-comment line) so `_bmad/<module>/config.yaml` keeps
+          // its provenance comments after we round-trip it.
+          const headerLines = [];
+          for (const line of text.split('\n')) {
+            if (line.startsWith('#') || line.trim() === '') {
+              headerLines.push(line);
+            } else {
+              break;
+            }
+          }
+          for (const key of Object.keys(moduleOverrides)) {
+            parsed[key] = moduleOverrides[key];
+          }
+          const body = yaml.stringify(parsed, { indent: 2, lineWidth: 0, minContentWidth: 0 });
+          const header = headerLines.length > 0 ? headerLines.join('\n') + '\n' : '';
+          await fs.writeFile(moduleYamlPath, header + body, 'utf8');
+        }
+      } catch {
+        // Per-module yaml unparseable — skip silently. The central toml was
+        // already patched above, which is the user-visible state for the
+        // current install. Carry-forward will fail next install but the
+        // current install reflects the override.
+      }
+    }
+  }

+  return applied;
+}
+
+module.exports = { parseSetEntry, parseSetEntries, applySetOverrides, upsertTomlKey, tomlString };
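A quick end-to-end sketch of the `--set` pipeline using the exported helpers (the file contents are invented for the example):

const { parseSetEntries, upsertTomlKey, tomlString } = require('./set-overrides');

const overrides = parseSetEntries(['bmm.project_knowledge=docs/research', 'core.user_name=Ada']);
// -> { bmm: { project_knowledge: 'docs/research' }, core: { user_name: 'Ada' } }

// Upsert into a hypothetical existing config.toml body:
const before = '[core]\nuser_name = "Anon"\n\n[modules.bmm]\n';
const after = upsertTomlKey(before, '[core]', 'user_name', tomlString('Ada'));
// -> '[core]\nuser_name = "Ada"\n\n[modules.bmm]\n'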
@@ -1,50 +1,109 @@
 const path = require('node:path');
 const os = require('node:os');
+const semver = require('semver');
 const fs = require('./fs-native');
+const installerPackageJson = require('../../package.json');
 const { CLIUtils } = require('./cli-utils');
 const { ExternalModuleManager } = require('./modules/external-manager');
-const { getProjectRoot } = require('./project-root');
+const { resolveModuleVersion } = require('./modules/version-resolver');
+const { Manifest } = require('./core/manifest');
+const {
+  parseChannelOptions,
+  buildPlan,
+  decideChannelForModule,
+  orphanPinWarnings,
+  bundledTargetWarnings,
+} = require('./modules/channel-plan');
+const channelResolver = require('./modules/channel-resolver');
 const prompts = require('./prompts');
+const { parseSetEntries } = require('./set-overrides');
+
+const manifest = new Manifest();

 /**
- * Read module version from .claude-plugin/marketplace.json
- * @param {string} moduleCode - Module code (e.g., 'core', 'bmm', 'cis')
- * @returns {string} Version string or empty string
+ * Format a resolved version for display in installer labels.
+ * Semver-like values are normalized to a single leading "v".
+ * @param {string|null|undefined} version
+ * @returns {string}
  */
-async function getMarketplaceVersion(moduleCode) {
-  let marketplacePath;
-  if (moduleCode === 'core' || moduleCode === 'bmm') {
-    marketplacePath = path.join(getProjectRoot(), '.claude-plugin', 'marketplace.json');
-  } else {
-    const cacheDir = path.join(os.homedir(), '.bmad', 'cache', 'external-modules', moduleCode);
-    marketplacePath = path.join(cacheDir, '.claude-plugin', 'marketplace.json');
-  }
-  try {
-    if (await fs.pathExists(marketplacePath)) {
-      const data = JSON.parse(await fs.readFile(marketplacePath, 'utf8'));
-      return _extractMarketplaceVersion(data);
-    }
-  } catch {
-    // ignore
-  }
-  return '';
-}
+function formatDisplayVersion(version) {
+  const trimmed = typeof version === 'string' ? version.trim() : '';
+  if (!trimmed) return '';
+
+  const normalized = semver.valid(semver.coerce(trimmed));
+  if (normalized) {
+    return `v${normalized}`;
+  }
+
+  return trimmed;
+}

 /**
- * Extract the highest version from marketplace.json plugins array.
- * Handles multiple plugins per file safely.
- * @param {Object} data - Parsed marketplace.json
- * @returns {string} Version string or empty string
+ * Build the display label for a module, showing an upgrade arrow when an
+ * installed semver differs from the latest resolvable semver.
+ * @param {string} name
+ * @param {string} latestVersion
+ * @param {string} installedVersion
+ * @returns {string}
  */
-function _extractMarketplaceVersion(data) {
-  const plugins = data?.plugins;
-  if (!Array.isArray(plugins) || plugins.length === 0) return '';
-  // Use the highest version across all plugins in the file
-  let best = '';
-  for (const p of plugins) {
-    if (p.version && (!best || p.version > best)) best = p.version;
-  }
-  return best;
-}
+function buildModuleLabel(name, latestVersion, installedVersion = '') {
+  const latestDisplay = formatDisplayVersion(latestVersion);
+  if (!latestDisplay) return name;
+
+  const installedDisplay = formatDisplayVersion(installedVersion);
+  const latestSemver = semver.valid(semver.coerce(latestVersion || ''));
+  const installedSemver = semver.valid(semver.coerce(installedVersion || ''));
+
+  if (installedDisplay && latestSemver && installedSemver && semver.neq(installedSemver, latestSemver)) {
+    return `${name} (${installedDisplay} → ${latestDisplay})`;
+  }
+
+  return `${name} (${latestDisplay})`;
+}
+
+/**
+ * Resolve the version to show for a module picker entry. External modules use
+ * the same channel/tag resolver as installs; bundled modules fall back to local
+ * source metadata.
+ * @param {string} moduleCode - Module code (e.g., 'core', 'bmm', 'cis')
+ * @param {Object} options
+ * @param {string|null} [options.repoUrl] - Module repository URL for tag resolution
+ * @param {string|null} [options.registryDefault] - Registry default channel
+ * @param {Object|null} [options.channelOptions] - Parsed installer channel options
+ * @returns {Promise<{version: string, lookupAttempted: boolean, lookupSucceeded: boolean}>}
+ */
+async function getModuleVersion(moduleCode, { repoUrl = null, registryDefault = null, channelOptions = null } = {}) {
+  if (repoUrl) {
+    const plan = decideChannelForModule({
+      code: moduleCode,
+      channelOptions,
+      registryDefault,
+    });
+
+    try {
+      const resolved = await channelResolver.resolveChannel({
+        channel: plan.channel,
+        pin: plan.pin,
+        repoUrl,
+      });
+      if (resolved?.version) {
+        return {
+          version: resolved.version,
+          lookupAttempted: plan.channel === 'stable',
+          lookupSucceeded: true,
+        };
+      }
+    } catch {
+      // Fall back to local metadata when tag resolution is unavailable.
+    }
+  }
+
+  const versionInfo = await resolveModuleVersion(moduleCode);
+  return {
+    version: versionInfo.version || '',
+    lookupAttempted: !!repoUrl,
+    lookupSucceeded: false,
+  };
 }
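Example labels the two display helpers above would produce (the inputs are invented):

buildModuleLabel('BMad Game Dev Studio', '2.1.0');          // 'BMad Game Dev Studio (v2.1.0)'
buildModuleLabel('BMad Game Dev Studio', '2.1.0', '2.0.3'); // 'BMad Game Dev Studio (v2.0.3 → v2.1.0)'
buildModuleLabel('Local Module', 'main');                   // 'Local Module (main)'; non-semver passes through
buildModuleLabel('No Version Module', '');                  // 'No Version Module'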
 /**

@@ -64,6 +123,31 @@ class UI {
   const messageLoader = new MessageLoader();
   await messageLoader.displayStartMessage();

+  // Parse channel flags (--channel/--all-*/--next=/--pin) once. Warnings
+  // are surfaced immediately so the user sees them before any git ops run.
+  const channelOptions = parseChannelOptions(options);
+  for (const warning of channelOptions.warnings) {
+    await prompts.log.warn(warning);
+  }
+
+  // When the user launched the installer from a prerelease (npx bmad-method@next),
+  // mirror that intent for external modules: seed the global channel to 'next' so
+  // the module picker's version labels resolve from main HEAD (matching what
+  // actually gets installed) and the interactive channel gate skips — the user
+  // already declared "next" intent by typing @next. Explicit channel flags
+  // override this seed.
+  if (
+    semver.prerelease(installerPackageJson.version) !== null &&
+    !channelOptions.global &&
+    channelOptions.nextSet.size === 0 &&
+    channelOptions.pins.size === 0
+  ) {
+    channelOptions.global = 'next';
+    await prompts.log.info(
+      'Launched from a prerelease — installing all external modules from main HEAD (next channel). Pass --all-stable or --pin to override.',
+    );
+  }
+
   // Get directory from options or prompt
   let confirmedDirectory;
   if (options.directory) {
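The seeding condition relies on semver's prerelease detection; a quick illustration (versions invented):

const semver = require('semver');
semver.prerelease('6.4.1-next.0'); // ['next', 0] (truthy), so the installer seeds channel 'next'
semver.prerelease('6.5.0');        // null, a stable launch, so no seeding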
@ -117,12 +201,15 @@ class UI {
|
||||||
actionType = options.action;
|
actionType = options.action;
|
||||||
await prompts.log.info(`Using action from command-line: ${actionType}`);
|
await prompts.log.info(`Using action from command-line: ${actionType}`);
|
||||||
} else if (options.yes) {
|
} else if (options.yes) {
|
||||||
// Default to quick-update if available, otherwise first available choice
|
// Default to quick-update if available, unless flags that require the
|
||||||
|
// full update path are present (e.g. --custom-source which re-clones
|
||||||
|
// modules at a new version — quick-update skips that entirely).
|
||||||
if (choices.length === 0) {
|
if (choices.length === 0) {
|
||||||
throw new Error('No valid actions available for this installation');
|
throw new Error('No valid actions available for this installation');
|
||||||
}
|
}
|
||||||
const hasQuickUpdate = choices.some((c) => c.value === 'quick-update');
|
const hasQuickUpdate = choices.some((c) => c.value === 'quick-update');
|
||||||
actionType = hasQuickUpdate ? 'quick-update' : choices[0].value;
|
const needsFullUpdate = !!options.customSource;
|
||||||
|
actionType = hasQuickUpdate && !needsFullUpdate ? 'quick-update' : (choices.find((c) => c.value === 'update') || choices[0]).value;
|
||||||
await prompts.log.info(`Non-interactive mode (--yes): defaulting to ${actionType}`);
|
await prompts.log.info(`Non-interactive mode (--yes): defaulting to ${actionType}`);
|
||||||
} else {
|
} else {
|
||||||
actionType = await prompts.select({
|
actionType = await prompts.select({
|
||||||
|
|
@ -145,7 +232,7 @@ class UI {
|
||||||
// Return early with modify configuration
|
// Return early with modify configuration
|
||||||
if (actionType === 'update') {
|
if (actionType === 'update') {
|
||||||
// Get existing installation info
|
// Get existing installation info
|
||||||
const { installedModuleIds } = await this.getExistingInstallation(confirmedDirectory);
|
const { installedModuleIds, installedModuleVersions } = await this.getExistingInstallation(confirmedDirectory);
|
||||||
|
|
||||||
await prompts.log.message(`Found existing modules: ${[...installedModuleIds].join(', ')}`);
|
await prompts.log.message(`Found existing modules: ${[...installedModuleIds].join(', ')}`);
|
||||||
|
|
||||||
|
|
@ -158,8 +245,11 @@ class UI {
|
||||||
.map((m) => m.trim())
|
.map((m) => m.trim())
|
||||||
.filter(Boolean);
|
.filter(Boolean);
|
||||||
await prompts.log.info(`Using modules from command-line: ${selectedModules.join(', ')}`);
|
await prompts.log.info(`Using modules from command-line: ${selectedModules.join(', ')}`);
|
||||||
} else if (options.customSource) {
|
} else if (options.customSource && !options.yes) {
|
||||||
// Custom source without --modules: start with empty list (core added below)
|
// Custom source without --modules or --yes: start with empty list
|
||||||
|
// (only custom source modules + core will be installed).
|
||||||
|
// When --yes is also set, fall through to the --yes branch so all
|
||||||
|
// installed modules are included alongside the custom source modules.
|
||||||
selectedModules = [];
|
selectedModules = [];
|
||||||
} else if (options.yes) {
|
} else if (options.yes) {
|
||||||
selectedModules = await this.getDefaultModules(installedModuleIds);
|
selectedModules = await this.getDefaultModules(installedModuleIds);
|
||||||
|
|
@ -167,7 +257,7 @@ class UI {
|
||||||
`Non-interactive mode (--yes): using default modules (installed + defaults): ${selectedModules.join(', ')}`,
|
`Non-interactive mode (--yes): using default modules (installed + defaults): ${selectedModules.join(', ')}`,
|
||||||
);
|
);
|
||||||
} else {
|
} else {
|
||||||
selectedModules = await this.selectAllModules(installedModuleIds);
|
selectedModules = await this.selectAllModules(installedModuleIds, installedModuleVersions, channelOptions);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Resolve custom sources from --custom-source flag
|
// Resolve custom sources from --custom-source flag
|
||||||
|
|
@@ -183,10 +273,38 @@ class UI {
         selectedModules.unshift('core');
       }

+      // For existing installs, resolve per-module update decisions BEFORE
+      // we clone anything. Reads the existing manifest's recorded channel
+      // per module and prompts the user on available upgrades (patch/minor
+      // default Y, major default N). Legacy entries with no channel are
+      // migrated here too. Mutates channelOptions.pins to lock rejections.
+      await this._resolveUpdateChannels({
+        bmadDir,
+        selectedModules,
+        channelOptions,
+        yes: options.yes || false,
+      });
+
       // Get tool selection
       const toolSelection = await this.promptToolSelection(confirmedDirectory, options);

-      const moduleConfigs = await this.collectModuleConfigs(confirmedDirectory, selectedModules, options);
+      const { moduleConfigs, setOverrides } = await this.collectModuleConfigs(confirmedDirectory, selectedModules, {
+        ...options,
+        channelOptions,
+      });
+
+      // Warn about --pin/--next flags that refer to modules the user didn't
+      // select, or that target bundled modules (core/bmm) where channel
+      // flags don't apply.
+      {
+        const bundledCodes = await this._bundledModuleCodes();
+        for (const warning of [
+          ...orphanPinWarnings(channelOptions, selectedModules),
+          ...bundledTargetWarnings(channelOptions, bundledCodes),
+        ]) {
+          await prompts.log.warn(warning);
+        }
+      }

       return {
         actionType: 'update',
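For orientation while reading these hunks: the `channelOptions` record threaded through the new calls carries per-module channel intent. A minimal sketch of its shape, inferred only from the fields this diff reads and mutates (the constructor itself lives elsewhere in the installer and is an assumption here):

    // Sketch of the channelOptions record as used in this diff; field names
    // come from the call sites above, construction is an assumption.
    function makeChannelOptions() {
      return {
        global: null,       // 'stable' | 'next' | null, from --channel / --all-* flags
        nextSet: new Set(), // module codes forced onto 'next' (--next=CODE)
        pins: new Map(),    // module code -> tag frozen for this run (--pin CODE=TAG)
      };
    }

    // _resolveUpdateChannels records a declined upgrade by pinning the current tag:
    const channelOptions = makeChannelOptions();
    channelOptions.pins.set('bmb', 'v1.6.0'); // 'bmb' is a hypothetical module code
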
@@ -196,13 +314,15 @@ class UI {
         skipIde: toolSelection.skipIde,
         coreConfig: moduleConfigs.core || {},
         moduleConfigs: moduleConfigs,
+        setOverrides,
         skipPrompts: options.yes || false,
+        channelOptions,
       };
      }
    }

    // This section is only for new installations (update returns early above)
-    const { installedModuleIds } = await this.getExistingInstallation(confirmedDirectory);
+    const { installedModuleIds, installedModuleVersions } = await this.getExistingInstallation(confirmedDirectory);

    // Unified module selection - all modules in one grouped multiselect
    let selectedModules;
@@ -221,7 +341,7 @@ class UI {
       selectedModules = await this.getDefaultModules(installedModuleIds);
       await prompts.log.info(`Using default modules (--yes flag): ${selectedModules.join(', ')}`);
     } else {
-      selectedModules = await this.selectAllModules(installedModuleIds);
+      selectedModules = await this.selectAllModules(installedModuleIds, installedModuleVersions, channelOptions);
     }

     // Resolve custom sources from --custom-source flag
@@ -236,8 +356,33 @@ class UI {
     if (!selectedModules.includes('core')) {
       selectedModules.unshift('core');
     }

+    // Interactive channel gate: "Ready to install (all stable)? [Y/n]"
+    // Only shown for fresh installs with no channel flags and an external module
+    // selected. Skipped for prerelease launches because channelOptions.global
+    // was already seeded to 'next' upstream. Non-interactive installs skip this
+    // and fall through to the registry default (stable) or whatever flags were
+    // supplied.
+    await this._interactiveChannelGate({ options, channelOptions, selectedModules });
+
     let toolSelection = await this.promptToolSelection(confirmedDirectory, options);
-    const moduleConfigs = await this.collectModuleConfigs(confirmedDirectory, selectedModules, options);
+    const { moduleConfigs, setOverrides } = await this.collectModuleConfigs(confirmedDirectory, selectedModules, {
+      ...options,
+      channelOptions,
+    });
+
+    // Warn about --pin/--next flags that refer to modules the user didn't
+    // select, or that target bundled modules (core/bmm) where channel
+    // flags don't apply.
+    {
+      const bundledCodes = await this._bundledModuleCodes();
+      for (const warning of [
+        ...orphanPinWarnings(channelOptions, selectedModules),
+        ...bundledTargetWarnings(channelOptions, bundledCodes),
+      ]) {
+        await prompts.log.warn(warning);
+      }
+    }
+
     return {
       actionType: 'install',
@@ -247,7 +392,9 @@ class UI {
       skipIde: toolSelection.skipIde,
       coreConfig: moduleConfigs.core || {},
       moduleConfigs: moduleConfigs,
+      setOverrides,
       skipPrompts: options.yes || false,
+      channelOptions,
     };
   }

@@ -260,6 +407,37 @@ class UI {
    * @param {Object} options - Command-line options
    * @returns {Object} Tool configuration
    */
+  _parseToolsFlag(toolsArg, allKnownValues) {
+    const selectedIdes = toolsArg
+      .split(',')
+      .map((t) => t.trim())
+      .filter(Boolean);
+
+    if (selectedIdes.length === 0) {
+      const err = new Error(
+        '--tools was passed empty. Provide at least one tool ID (e.g. --tools claude-code) or run with --list-tools to see valid IDs.',
+      );
+      err.expected = true;
+      throw err;
+    }
+
+    const unknown = selectedIdes.filter((id) => !allKnownValues.has(id));
+    if (unknown.length > 0) {
+      const err = new Error(
+        [
+          `Unknown tool ID${unknown.length === 1 ? '' : 's'}: ${unknown.join(', ')}`,
+          '',
+          'Run with --list-tools to see all valid IDs.',
+          'Common: claude-code, cursor, copilot, windsurf, cline',
+        ].join('\n'),
+      );
+      err.expected = true;
+      throw err;
+    }
+
+    return selectedIdes;
+  }
+
   async promptToolSelection(projectDir, options = {}) {
     const { ExistingInstall } = require('./core/existing-install');
     const { Installer } = require('./core/installer');
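`_parseToolsFlag` is a plain synchronous parse-and-validate helper, so its contract is easy to pin down. A usage sketch, assuming `allKnownValues` is the Set of registered tool IDs that `promptToolSelection` builds and `ui` is a UI instance:

    // allKnownValues is assumed to be the Set of registered tool IDs.
    const allKnownValues = new Set(['claude-code', 'cursor', 'copilot', 'windsurf', 'cline']);

    ui._parseToolsFlag('claude-code, cursor', allKnownValues);
    // -> ['claude-code', 'cursor']   (trims whitespace around each ID)

    ui._parseToolsFlag('', allKnownValues);
    // throws the "--tools was passed empty" error with err.expected = true

    ui._parseToolsFlag('claude-code,notatool', allKnownValues);
    // throws "Unknown tool ID: notatool" (plus the --list-tools hint)
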
@@ -294,15 +472,10 @@ class UI {
     const allTools = [...preferredIdes, ...otherIdes];

     // Non-interactive: handle --tools and --yes flags before interactive prompt
-    if (options.tools) {
-      if (options.tools.toLowerCase() === 'none') {
-        await prompts.log.info('Skipping tool configuration (--tools none)');
-        return { ides: [], skipIde: true };
-      }
-      const selectedIdes = options.tools
-        .split(',')
-        .map((t) => t.trim())
-        .filter(Boolean);
+    // Use !== undefined so an explicit --tools "" falls through to _parseToolsFlag and
+    // gets a specific "passed empty" error instead of being silently ignored.
+    if (options.tools !== undefined) {
+      const selectedIdes = this._parseToolsFlag(options.tools, allKnownValues);
       await prompts.log.info(`Using tools from command-line: ${selectedIdes.join(', ')}`);
       await this.displaySelectedTools(selectedIdes, preferredIdes, allTools);
       return { ides: selectedIdes, skipIde: false };
@@ -378,21 +551,13 @@ class UI {

     let selectedIdes = [];

-    // Check if tools are provided via command-line
-    if (options.tools) {
-      // Check for explicit "none" value to skip tool installation
-      if (options.tools.toLowerCase() === 'none') {
-        await prompts.log.info('Skipping tool configuration (--tools none)');
-        return { ides: [], skipIde: true };
-      } else {
-        selectedIdes = options.tools
-          .split(',')
-          .map((t) => t.trim())
-          .filter(Boolean);
+    // Check if tools are provided via command-line.
+    // Use !== undefined so an explicit --tools "" still hits _parseToolsFlag's empty-value error.
+    if (options.tools !== undefined) {
+      selectedIdes = this._parseToolsFlag(options.tools, allKnownValues);
       await prompts.log.info(`Using tools from command-line: ${selectedIdes.join(', ')}`);
       await this.displaySelectedTools(selectedIdes, preferredIdes, allTools);
       return { ides: selectedIdes, skipIde: false };
-      }
     } else if (options.yes) {
       // If --yes flag is set, skip tool prompt and use previously configured tools or empty
       if (configuredIdes.length > 0) {
@@ -400,8 +565,18 @@ class UI {
         await this.displaySelectedTools(configuredIdes, preferredIdes, allTools);
         return { ides: configuredIdes, skipIde: false };
       } else {
-        await prompts.log.info('Skipping tool configuration (--yes flag, no previous tools)');
-        return { ides: [], skipIde: true };
+        const err = new Error(
+          [
+            '--tools is required for non-interactive install (--yes / -y) when no tools are previously configured.',
+            '',
+            'Common: claude-code, cursor, copilot, windsurf, cline',
+            'See all supported tools: bmad-method install --list-tools',
+            '',
+            'Example: bmad-method install --modules bmm --tools claude-code -y',
+          ].join('\n'),
+        );
+        err.expected = true;
+        throw err;
       }
     }

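The `err.expected = true` marker on these new errors presumably lets the CLI entry point print the message without a stack trace; a sketch of how a caller might honor it (the catch-site behavior is an assumption, not shown in this diff):

    try {
      await ui.promptToolSelection(projectDir, { yes: true });
    } catch (err) {
      if (err.expected) {
        // User error (e.g. --yes with no --tools and nothing configured):
        // print the guidance, no stack trace.
        console.error(err.message);
        process.exitCode = 1;
      } else {
        throw err; // real bug: keep the stack trace
      }
    }
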
@@ -496,7 +671,7 @@ class UI {
   /**
    * Get existing installation info and installed modules
    * @param {string} directory - Installation directory
-   * @returns {Object} Object with existingInstall, installedModuleIds, and bmadDir
+   * @returns {Object} Object with existingInstall, installedModuleIds, installedModuleVersions, and bmadDir
    */
   async getExistingInstallation(directory) {
     const { ExistingInstall } = require('./core/existing-install');
@@ -505,8 +680,26 @@ class UI {
     const { bmadDir } = await installer.findBmadDir(directory);
     const existingInstall = await ExistingInstall.detect(bmadDir);
     const installedModuleIds = new Set(existingInstall.moduleIds);
+    const installedModuleVersions = new Map();
+    const manifestModules = await manifest.getAllModuleVersions(bmadDir);

-    return { existingInstall, installedModuleIds, bmadDir };
+    for (const module of manifestModules) {
+      if (module?.name && module.version) {
+        installedModuleVersions.set(module.name, module.version);
+      }
+    }
+
+    for (const module of existingInstall.modules) {
+      if (module?.id && module.version && module.version !== 'unknown' && !installedModuleVersions.has(module.id)) {
+        installedModuleVersions.set(module.id, module.version);
+      }
+    }
+
+    if (existingInstall.hasCore && existingInstall.version && !installedModuleVersions.has('core')) {
+      installedModuleVersions.set('core', existingInstall.version);
+    }
+
+    return { existingInstall, installedModuleIds, installedModuleVersions, bmadDir };
   }

   /**
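The three passes above give `installedModuleVersions` a first-writer-wins precedence: manifest entries, then per-module detection, then the legacy top-level core version. The same merge, condensed into one loop (the `'unknown'` guard only matters for the detection source but is applied uniformly here):

    const versions = new Map();
    const sources = [
      manifestModules.map((m) => [m?.name, m?.version]),                  // 1. manifest
      existingInstall.modules.map((m) => [m?.id, m?.version]),            // 2. detection
      existingInstall.hasCore ? [['core', existingInstall.version]] : [], // 3. legacy core
    ];
    for (const pairs of sources) {
      for (const [id, version] of pairs) {
        if (id && version && version !== 'unknown' && !versions.has(id)) {
          versions.set(id, version); // earlier sources win
        }
      }
    }
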
@@ -519,7 +712,34 @@ class UI {
    */
   async collectModuleConfigs(directory, modules, options = {}) {
     const { OfficialModules } = require('./modules/official-modules');
-    const configCollector = new OfficialModules();
+    // Parse --set up front purely to surface user-error before the install
+    // burns time on the network / filesystem. The actual application happens
+    // in installer.install() as a post-write TOML patch — see
+    // `tools/installer/set-overrides.js`. We also warn about overrides
+    // targeting modules the user didn't include, since those will silently
+    // miss the file the patch step looks for.
+    let setOverrides = {};
+    try {
+      setOverrides = parseSetEntries(options.set || []);
+    } catch (error) {
+      // install.js validated already; rethrow as-is for the user.
+      throw error;
+    }
+    // Drop overrides for modules that aren't in the install set so the
+    // post-install patch step doesn't create orphan sections in config.toml
+    // for modules that were never installed.
+    const selectedModuleSet = new Set(['core', ...modules]);
+    for (const moduleCode of Object.keys(setOverrides)) {
+      if (!selectedModuleSet.has(moduleCode)) {
+        await prompts.log.warn(
+          `--set ${moduleCode}.* — module '${moduleCode}' is not in the install set; values will be ignored. Add it to --modules to apply.`,
+        );
+        delete setOverrides[moduleCode];
+      }
+    }
+
+    const configCollector = new OfficialModules({ channelOptions: options.channelOptions });

     // Seed core config from CLI options if provided
     if (options.userName || options.communicationLanguage || options.documentOutputLanguage || options.outputFolder) {
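Judging from how `setOverrides` is keyed by module code above, `parseSetEntries` appears to take `module.key=value` strings and return them grouped per module. A hypothetical illustration of that contract (the real parser is in `tools/installer/set-overrides.js`, and the keys below are made up):

    parseSetEntries(['bmm.output_folder=_docs', 'bmm.user_skill_level=expert', 'cis.language=French']);
    // -> { bmm: { output_folder: '_docs', user_skill_level: 'expert' },
    //      cis: { language: 'French' } }

    // With only core + bmm selected, the filter above warns about the 'cis'
    // group and drops it, so the post-install TOML patch never writes an
    // orphan [cis] section into config.toml.
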
@@ -568,6 +788,9 @@ class UI {
       const defaultUsername = safeUsername.charAt(0).toUpperCase() + safeUsername.slice(1);
       configCollector.collectedConfig.core = {
         user_name: defaultUsername,
+        // {directory_name} default per src/core-skills/module.yaml — matches what the
+        // interactive flow resolves via buildQuestion()'s {directory_name} placeholder.
+        project_name: path.basename(directory),
         communication_language: 'English',
         document_output_language: 'English',
         output_folder: '_bmad-output',
@@ -581,17 +804,19 @@ class UI {
       skipPrompts: options.yes || false,
     });

-    return configCollector.collectedConfig;
+    return { moduleConfigs: configCollector.collectedConfig, setOverrides };
   }

   /**
    * Select all modules across three tiers: official, community, and custom URL.
    * @param {Set} installedModuleIds - Currently installed module IDs
+   * @param {Map<string, string>} installedModuleVersions - Installed module versions from the local manifest
+   * @param {Object|null} channelOptions - Parsed installer channel options
    * @returns {Array} Selected module codes (excluding core)
    */
-  async selectAllModules(installedModuleIds = new Set()) {
+  async selectAllModules(installedModuleIds = new Set(), installedModuleVersions = new Map(), channelOptions = null) {
     // Phase 1: Official modules
-    const officialSelected = await this._selectOfficialModules(installedModuleIds);
+    const officialSelected = await this._selectOfficialModules(installedModuleIds, installedModuleVersions, channelOptions);

     // Determine which installed modules are NOT official (community or custom).
     // These must be preserved even if the user declines to browse community/custom.
@@ -627,9 +852,11 @@ class UI {
    * Select official modules using autocompleteMultiselect.
    * Extracted from the original selectAllModules - unchanged behavior.
    * @param {Set} installedModuleIds - Currently installed module IDs
+   * @param {Map<string, string>} installedModuleVersions - Installed module versions from the local manifest
+   * @param {Object|null} channelOptions - Parsed installer channel options
    * @returns {Array} Selected official module codes
    */
-  async _selectOfficialModules(installedModuleIds = new Set()) {
+  async _selectOfficialModules(installedModuleIds = new Set(), installedModuleVersions = new Map(), channelOptions = null) {
     // Built-in modules (core, bmm) come from local source, not the registry
     const { OfficialModules } = require('./modules/official-modules');
     const builtInModules = (await new OfficialModules().listAvailable()).modules || [];
@@ -642,15 +869,18 @@ class UI {
     const initialValues = [];
     const lockedValues = ['core'];

-    const buildModuleEntry = async (code, name, description, isDefault) => {
+    const buildModuleEntry = async (code, name, description, isDefault, repoUrl = null, registryDefault = null) => {
       const isInstalled = installedModuleIds.has(code);
-      const version = await getMarketplaceVersion(code);
-      const label = version ? `${name} (v${version})` : name;
+      const installedVersion = installedModuleVersions.get(code) || '';
+      const versionState = await getModuleVersion(code, { repoUrl, registryDefault, channelOptions });
+      const label = buildModuleLabel(name, versionState.version, installedVersion);
       return {
         label,
         value: code,
         hint: description,
         selected: isInstalled || isDefault,
+        lookupAttempted: versionState.lookupAttempted,
+        lookupSucceeded: versionState.lookupSucceeded,
       };
     };

@@ -667,12 +897,38 @@ class UI {
     }

     // Add external registry modules (skip built-in duplicates)
-    for (const mod of registryModules) {
-      if (mod.builtIn || builtInCodes.has(mod.code)) continue;
-      const entry = await buildModuleEntry(mod.code, mod.name, mod.description, mod.defaultSelected);
+    const externalRegistryModules = registryModules.filter((mod) => !mod.builtIn && !builtInCodes.has(mod.code));
+    let externalRegistryEntries = [];
+    if (externalRegistryModules.length > 0) {
+      const spinner = await prompts.spinner();
+      spinner.start('Checking latest module versions...');
+
+      externalRegistryEntries = await Promise.all(
+        externalRegistryModules.map(async (mod) => ({
+          code: mod.code,
+          entry: await buildModuleEntry(
+            mod.code,
+            mod.name,
+            mod.description,
+            mod.defaultSelected,
+            mod.url || null,
+            mod.defaultChannel || null,
+          ),
+        })),
+      );
+
+      spinner.stop('Checked latest module versions.');
+
+      const attemptedLookups = externalRegistryEntries.filter(({ entry }) => entry.lookupAttempted).length;
+      const successfulLookups = externalRegistryEntries.filter(({ entry }) => entry.lookupSucceeded).length;
+      if (attemptedLookups > 0 && successfulLookups === 0) {
+        await prompts.log.warn('Could not check latest module versions; showing cached/local versions.');
+      }
+    }
+    for (const { code, entry } of externalRegistryEntries) {
       allOptions.push({ label: entry.label, value: entry.value, hint: entry.hint });
       if (entry.selected) {
-        initialValues.push(mod.code);
+        initialValues.push(code);
       }
     }

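The `lookupAttempted` / `lookupSucceeded` bookkeeping lets the warning distinguish "nothing was checked" from "everything failed". `buildModuleLabel` itself is outside this diff; a plausible sketch of what it combines, under the assumption that it surfaces the installed version when it lags the latest one:

    // Assumed shape of buildModuleLabel; the real helper is not in this diff.
    function buildModuleLabel(name, latestVersion, installedVersion) {
      if (!latestVersion) return name; // lookup failed: no version suffix
      if (installedVersion && installedVersion !== latestVersion) {
        return `${name} (v${installedVersion} -> v${latestVersion} available)`;
      }
      return `${name} (v${latestVersion})`;
    }
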
@@ -1594,6 +1850,351 @@ class UI {
     });
     await prompts.log.message('Selected tools:\n' + toolLines.join('\n'));
   }
+
+  /**
+   * Return the set of module codes the registry marks as built-in (core, bmm).
+   * These ship with the installer binary and have no per-module channel.
+   */
+  async _bundledModuleCodes() {
+    const externalManager = new ExternalModuleManager();
+    try {
+      const modules = await externalManager.listAvailable();
+      return modules.filter((m) => m.builtIn).map((m) => m.code);
+    } catch {
+      // Registry unreachable — fall back to the known bundled codes.
+      return ['core', 'bmm'];
+    }
+  }
+
+  /**
+   * Fast-path channel gate: confirm "all stable" or open the per-module picker.
+   *
+   * Skipped when:
+   * - running non-interactively (--yes)
+   * - the user already passed channel flags (--channel / --pin / --next), OR
+   *   the installer was launched from a prerelease (which seeds
+   *   channelOptions.global = 'next' upstream in promptInstall)
+   * - no externals/community modules are selected
+   *
+   * Mutates channelOptions.pins and channelOptions.nextSet to reflect picker choices.
+   */
+  async _interactiveChannelGate({ options, channelOptions, selectedModules }) {
+    if (options.yes) return;
+    // If the user already declared their channel intent via flags, trust them
+    // and skip the gate.
+    const haveFlagIntent = channelOptions.global || channelOptions.nextSet.size > 0 || channelOptions.pins.size > 0;
+    if (haveFlagIntent) return;
+
+    // Figure out which selected modules actually get a channel (externals +
+    // community modules). Bundled core/bmm and custom modules skip the picker.
+    const externalManager = new ExternalModuleManager();
+    const externals = await externalManager.listAvailable();
+    const externalByCode = new Map(externals.map((m) => [m.code, m]));
+
+    const { CommunityModuleManager } = require('./modules/community-manager');
+    const communityMgr = new CommunityModuleManager();
+    const community = await communityMgr.listAll();
+    const communityByCode = new Map(community.map((m) => [m.code, m]));
+
+    const channelSelectable = selectedModules.filter((code) => {
+      const info = externalByCode.get(code) || communityByCode.get(code);
+      return info && !info.builtIn;
+    });
+    if (channelSelectable.length === 0) return;
+
+    const fastPath = await prompts.confirm({
+      message: `Ready to install (all stable)? Pick "n" to customize channels or pin versions.`,
+      default: true,
+    });
+    if (fastPath) return; // stable for all, registry default applies
+
+    // Customize path: per-module picker.
+    const { fetchStableTags, parseGitHubRepo } = require('./modules/channel-resolver');
+
+    for (const code of channelSelectable) {
+      const info = externalByCode.get(code) || communityByCode.get(code);
+      const repoUrl = info.url;
+
+      // Try to pre-resolve the top stable tag so we can surface it in the picker.
+      let stableLabel = 'stable (released version)';
+      try {
+        const parsed = repoUrl ? parseGitHubRepo(repoUrl) : null;
+        if (parsed) {
+          const tags = await fetchStableTags(parsed.owner, parsed.repo);
+          if (tags.length > 0) {
+            stableLabel = `stable ${tags[0].tag} (released version)`;
+          }
+        }
+      } catch {
+        // fall through with the generic label
+      }
+
+      const choice = await prompts.select({
+        message: `${code}: choose a channel`,
+        choices: [
+          { name: stableLabel, value: 'stable' },
+          { name: 'next (main HEAD \u2014 current development)', value: 'next' },
+          { name: 'pin (specific version)', value: 'pin' },
+        ],
+        default: 'stable',
+      });
+
+      if (choice === 'next') {
+        channelOptions.nextSet.add(code);
+      } else if (choice === 'pin') {
+        const pinValue = await prompts.text({
+          message: `Enter a version tag for '${code}' (e.g. v1.6.0):`,
+          validate: (value) => {
+            if (!value || !/^[\w.\-+/]+$/.test(String(value).trim())) {
+              return 'Must be a non-empty tag name (letters, digits, dots, hyphens).';
+            }
+          },
+        });
+        channelOptions.pins.set(code, String(pinValue).trim());
+      }
+      // 'stable' is the default; nothing to record.
+    }
+  }
+
+  /**
+   * Resolve channel decisions for an update over an existing install.
+   *
+   * For each selected external/community module:
+   * - Read the recorded channel from the existing manifest.
+   * - On `stable`: query tags; if a newer stable exists, classify the diff
+   *   and prompt. Patch/minor default Y; major defaults N. `--yes` accepts
+   *   defaults (patches/minors) but NOT majors — a major under --yes stays
+   *   frozen unless the user also passes `--pin CODE=NEW_TAG`.
+   * - On `next`: no prompt (pull HEAD).
+   * - On `pinned`: no prompt (stays pinned).
+   * - No channel recorded and `version: null`: one-time migration prompt
+   *   ("Switch to stable / Keep on next").
+   *
+   * Decisions that freeze the current version are applied by adding a pin to
+   * `channelOptions.pins` so downstream clone logic honors them.
+   */
+  async _resolveUpdateChannels({ bmadDir, selectedModules, channelOptions, yes }) {
+    const { Manifest } = require('./core/manifest');
+    const manifestObj = new Manifest();
+    const manifest = await manifestObj.read(bmadDir);
+    const existingByName = new Map();
+    for (const m of manifest?.modulesDetailed || []) {
+      if (m?.name) existingByName.set(m.name, m);
+    }
+    if (existingByName.size === 0) return;
+
+    const externalManager = new ExternalModuleManager();
+    const externals = await externalManager.listAvailable();
+    const externalByCode = new Map(externals.map((m) => [m.code, m]));
+
+    const { CommunityModuleManager } = require('./modules/community-manager');
+    const communityMgr = new CommunityModuleManager();
+    const community = await communityMgr.listAll();
+    const communityByCode = new Map(community.map((m) => [m.code, m]));
+
+    const { fetchStableTags, classifyUpgrade, releaseNotesUrl } = require('./modules/channel-resolver');
+    const { parseGitHubRepo } = require('./modules/channel-resolver');
+
+    // Interactive-only: offer a one-time gate to review / switch channels for
+    // selected modules that are already installed. Default N so normal Modify
+    // flows (add/remove modules) aren't interrupted.
+    let reviewChannels = false;
+    if (!yes) {
+      const existingWithChannel = selectedModules.filter((code) => {
+        const prev = existingByName.get(code);
+        if (!prev) return false;
+        const info = externalByCode.get(code) || communityByCode.get(code);
+        return info && !info.builtIn;
+      });
+      if (existingWithChannel.length > 0) {
+        reviewChannels = await prompts.confirm({
+          message: 'Review channel assignments (stable / next / pin) for your existing modules?',
+          default: false,
+        });
+      }
+    }
+
+    for (const code of selectedModules) {
+      const prev = existingByName.get(code);
+      if (!prev) continue;
+
+      const info = externalByCode.get(code) || communityByCode.get(code);
+      if (!info) continue;
+      // Bundled modules (core/bmm) ship with the installer binary itself —
+      // their version is stapled to the CLI version, not a git tag. Skip
+      // tag-API lookups for them; the "upgrade" mechanism is `npx bmad@X install`.
+      if (info.builtIn) continue;
+
+      const repoUrl = info.url;
+      const parsed = repoUrl ? parseGitHubRepo(repoUrl) : null;
+
+      // Legacy migration: manifest carries no channel and a null/empty
+      // version. Offer the one-time pick between stable and next.
+      const recordedChannel = prev.channel || null;
+      const needsMigration = !recordedChannel && (prev.version == null || prev.version === '');
+      if (needsMigration) {
+        if (yes) {
+          // Conservative headless default: stable.
+          continue;
+        }
+        const chosen = await prompts.select({
+          message: `${code}: your existing install tracks the main branch. Switch to stable releases (recommended for production), or keep on main?`,
+          choices: [
+            { name: 'Switch to stable', value: 'stable' },
+            { name: 'Keep on main (next)', value: 'next' },
+          ],
+          default: 'stable',
+        });
+        if (chosen === 'next') channelOptions.nextSet.add(code);
+        continue;
+      }
+
+      // Optional channel-switch offer. Fires only when the user opted in via
+      // the gate above. 'keep' falls through to the existing per-channel
+      // logic (which runs upgrade classification for stable). Any switch
+      // records the new intent into channelOptions and skips upgrade prompts.
+      if (reviewChannels && recordedChannel) {
+        const switchChoices = [
+          {
+            name: `Keep on '${recordedChannel}'${prev.version ? ` @ ${prev.version}` : ''}`,
+            value: 'keep',
+          },
+        ];
+        if (recordedChannel !== 'stable') {
+          switchChoices.push({ name: 'Switch to stable (released version)', value: 'stable' });
+        }
+        if (recordedChannel !== 'next') {
+          switchChoices.push({ name: 'Switch to next (main HEAD)', value: 'next' });
+        }
+        switchChoices.push({ name: 'Pin to a specific version tag', value: 'pin' });
+
+        const choice = await prompts.select({
+          message: `${code} channel:`,
+          choices: switchChoices,
+          default: 'keep',
+        });
+
+        if (choice === 'next') {
+          channelOptions.nextSet.add(code);
+          continue;
+        }
+        if (choice === 'pin') {
+          const pinValue = await prompts.text({
+            message: `Enter a version tag for '${code}' (e.g. v1.6.0):`,
+            validate: (value) => {
+              if (!value || !/^[\w.\-+/]+$/.test(String(value).trim())) {
+                return 'Must be a non-empty tag name (letters, digits, dots, hyphens).';
+              }
+            },
+          });
+          channelOptions.pins.set(code, String(pinValue).trim());
+          continue;
+        }
+        if (choice === 'stable') {
+          // Switch to stable: install at the top stable tag without an
+          // upgrade-classification prompt (the user explicitly opted in).
+          // Also warm the tag cache here so the actual clone step doesn't
+          // need a second GitHub API call (can hit rate limits).
+          if (parsed) {
+            try {
+              await fetchStableTags(parsed.owner, parsed.repo);
+            } catch {
+              // best effort; clone step will surface any failure
+            }
+          }
+          continue;
+        }
+        // 'keep' → fall through with recordedChannel below.
+      }
+
+      if (recordedChannel === 'pinned' || recordedChannel === 'next') {
+        // Respect any explicit channel intent the user already expressed via
+        // CLI flags (--channel / --all-* / --next=CODE / --pin CODE=TAG) or
+        // via the interactive review gate above. Only auto-re-assert the
+        // recorded channel when the user hasn't opted into anything else —
+        // otherwise --all-stable (or a review "switch to stable") would be
+        // silently clobbered by the prior channel.
+        const alreadyDecided = channelOptions.global || channelOptions.nextSet.has(code) || channelOptions.pins.has(code);
+        if (!alreadyDecided) {
+          if (recordedChannel === 'pinned' && prev.version) {
+            channelOptions.pins.set(code, prev.version);
+          } else if (recordedChannel === 'next') {
+            channelOptions.nextSet.add(code);
+          }
+        }
+        continue;
+      }
+
+      // Stable channel: check for a newer released tag.
+      if (!parsed) continue;
+      // Respect explicit CLI intent (--pin / --next=CODE / --all-*) and any
+      // choice the user already made in the earlier review gate. Without this
+      // guard the upgrade classifier below would unconditionally call
+      // `channelOptions.pins.set(code, prev.version)` on decline/major-refuse/
+      // fetch-error, silently clobbering the user's override.
+      const alreadyDecided = channelOptions.global || channelOptions.nextSet.has(code) || channelOptions.pins.has(code);
+      if (alreadyDecided) continue;
+      let tags;
+      try {
+        tags = await fetchStableTags(parsed.owner, parsed.repo);
+      } catch (error) {
+        await prompts.log.warn(`Could not check for updates on ${code} (${error.message}). Leaving at ${prev.version}.`);
+        if (prev.version) channelOptions.pins.set(code, prev.version);
+        continue;
+      }
+      if (!tags || tags.length === 0) continue;
+      const topTag = tags[0].tag; // e.g. "v1.7.0"
+      const currentTag = prev.version || '';
+      const diffClass = classifyUpgrade(currentTag, topTag);
+
+      if (diffClass === 'none') continue; // already at or above top tag
+
+      const notes = releaseNotesUrl(repoUrl, topTag);
+      let accept;
+      if (diffClass === 'major') {
+        if (yes) {
+          // Major under --yes is refused by design.
+          await prompts.log.warn(
+            `${code} ${currentTag} → ${topTag} is a new major release; staying on ${currentTag}. ` +
+              `To accept, rerun with --pin ${code}=${topTag}.`,
+          );
+          channelOptions.pins.set(code, currentTag);
+          continue;
+        }
+        accept = await prompts.confirm({
+          message:
+            `${code} ${topTag} available — new major release (may change behavior).` +
+            (notes ? ` Release notes: ${notes}.` : '') +
+            ' Upgrade?',
+          default: false,
+        });
+      } else if (diffClass === 'minor') {
+        if (yes) {
+          accept = true;
+        } else {
+          accept = await prompts.confirm({
+            message: `${code} ${topTag} available (new features).` + (notes ? ` Release notes: ${notes}.` : '') + ' Upgrade?',
+            default: true,
+          });
+        }
+      } else {
+        // patch
+        if (yes) {
+          accept = true;
+        } else {
+          accept = await prompts.confirm({
+            message: `${code} ${topTag} available. Upgrade?`,
+            default: true,
+          });
+        }
+      }
+
+      if (!accept && currentTag) {
+        // Freeze the current version by pinning it for this run.
+        channelOptions.pins.set(code, currentTag);
+      }
+    }
+  }
 }

 module.exports = { UI };
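`classifyUpgrade` is consumed above as a pure semver comparison returning 'major' | 'minor' | 'patch' | 'none'; a minimal sketch consistent with those call sites (the real implementation lives in `./modules/channel-resolver` and may handle prerelease suffixes and malformed tags differently):

    function classifyUpgrade(currentTag, topTag) {
      const parse = (t) => String(t).replace(/^v/, '').split('.').map(Number);
      const [curMaj = 0, curMin = 0, curPat = 0] = parse(currentTag);
      const [topMaj = 0, topMin = 0, topPat = 0] = parse(topTag);
      if (topMaj > curMaj) return 'major';
      if (topMaj === curMaj && topMin > curMin) return 'minor';
      if (topMaj === curMaj && topMin === curMin && topPat > curPat) return 'patch';
      return 'none'; // already at or above the top tag
    }

    classifyUpgrade('v1.6.0', 'v2.0.0'); // 'major' (default N; refused under --yes)
    classifyUpgrade('v1.6.0', 'v1.7.0'); // 'minor' (default Y)
    classifyUpgrade('v1.6.0', 'v1.6.2'); // 'patch' (default Y)
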
@@ -1,169 +0,0 @@
-# BMAD Platform Codes Configuration
-# Central configuration for all platform/IDE codes used in the BMAD system
-#
-# This file defines the standardized platform codes that are used throughout
-# the installation system to identify different platforms (IDEs, tools, etc.)
-#
-# Format:
-#   code: Platform identifier used internally
-#   name: Display name shown to users
-#   preferred: Whether this platform is shown as a recommended option on install
-#   category: Type of platform (ide, tool, service, etc.)
-
-platforms:
-  # Recommended Platforms
-  claude-code:
-    name: "Claude Code"
-    preferred: true
-    category: cli
-    description: "Anthropic's official CLI for Claude"
-
-  cursor:
-    name: "Cursor"
-    preferred: true
-    category: ide
-    description: "AI-first code editor"
-
-  # Other IDEs and Tools
-  cline:
-    name: "Cline"
-    preferred: false
-    category: ide
-    description: "AI coding assistant"
-
-  opencode:
-    name: "OpenCode"
-    preferred: false
-    category: ide
-    description: "OpenCode terminal coding assistant"
-
-  codebuddy:
-    name: "CodeBuddy"
-    preferred: false
-    category: ide
-    description: "Tencent Cloud Code Assistant - AI-powered coding companion"
-
-  auggie:
-    name: "Auggie"
-    preferred: false
-    category: cli
-    description: "AI development tool"
-
-  roo:
-    name: "Roo Code"
-    preferred: false
-    category: ide
-    description: "Enhanced Cline fork"
-
-  rovo-dev:
-    name: "Rovo Dev"
-    preferred: false
-    category: ide
-    description: "Atlassian's Rovo development environment"
-
-  kiro:
-    name: "Kiro"
-    preferred: false
-    category: ide
-    description: "Amazon's AI-powered IDE"
-
-  github-copilot:
-    name: "GitHub Copilot"
-    preferred: false
-    category: ide
-    description: "GitHub's AI pair programmer"
-
-  codex:
-    name: "Codex"
-    preferred: false
-    category: cli
-    description: "OpenAI Codex integration"
-
-  qwen:
-    name: "QwenCoder"
-    preferred: false
-    category: ide
-    description: "Qwen AI coding assistant"
-
-  gemini:
-    name: "Gemini CLI"
-    preferred: false
-    category: cli
-    description: "Google's CLI for Gemini"
-
-  iflow:
-    name: "iFlow"
-    preferred: false
-    category: ide
-    description: "AI workflow automation"
-
-  kilo:
-    name: "KiloCoder"
-    preferred: false
-    category: ide
-    description: "AI coding platform"
-
-  crush:
-    name: "Crush"
-    preferred: false
-    category: ide
-    description: "AI development assistant"
-
-  antigravity:
-    name: "Google Antigravity"
-    preferred: false
-    category: ide
-    description: "Google's AI development environment"
-
-  trae:
-    name: "Trae"
-    preferred: false
-    category: ide
-    description: "AI coding tool"
-
-  windsurf:
-    name: "Windsurf"
-    preferred: false
-    category: ide
-    description: "AI-powered IDE with cascade flows"
-
-  junie:
-    name: "Junie"
-    preferred: false
-    category: cli
-    description: "AI coding agent by JetBrains"
-
-  ona:
-    name: "Ona"
-    preferred: false
-    category: ide
-    description: "Ona AI development environment"
-
-# Platform categories
-categories:
-  ide:
-    name: "Integrated Development Environment"
-    description: "Full-featured code editors with AI assistance"
-
-  cli:
-    name: "Command Line Interface"
-    description: "Terminal-based tools"
-
-  tool:
-    name: "Development Tool"
-    description: "Standalone development utilities"
-
-  service:
-    name: "Cloud Service"
-    description: "Cloud-based development platforms"
-
-  extension:
-    name: "Editor Extension"
-    description: "Plugins for existing editors"
-
-# Naming conventions and rules
-conventions:
-  code_format: "lowercase-kebab-case"
-  name_format: "Title Case"
-  max_code_length: 20
-  allowed_characters: "a-z0-9-"

@@ -129,13 +129,45 @@ export default defineConfig({
           // TEA docs moved to standalone module site; keep BMM sidebar focused.
           {
             label: 'BMad Ecosystem',
+            translations: { 'vi-VN': 'Hệ sinh thái BMad', 'zh-CN': 'BMad 生态系统', 'fr-FR': 'Écosystème BMad', 'cs-CZ': 'Ekosystém BMad' },
             collapsed: false,
             items: [
-              { label: 'BMad Builder', link: 'https://bmad-builder-docs.bmad-method.org/', attrs: { target: '_blank' } },
-              { label: 'Creative Intelligence Suite', link: 'https://cis-docs.bmad-method.org/', attrs: { target: '_blank' } },
-              { label: 'Game Dev Studio', link: 'https://game-dev-studio-docs.bmad-method.org/', attrs: { target: '_blank' } },
+              {
+                label: 'BMad Builder',
+                translations: { 'vi-VN': 'BMad Builder', 'zh-CN': 'BMad 构建器', 'fr-FR': 'BMad Builder', 'cs-CZ': 'BMad Builder' },
+                link: 'https://bmad-builder-docs.bmad-method.org/',
+                attrs: { target: '_blank' },
+              },
+              {
+                label: 'Creative Intelligence Suite',
+                translations: {
+                  'vi-VN': 'Bộ công cụ Trí tuệ Sáng tạo',
+                  'zh-CN': '创意智能套件',
+                  'fr-FR': "Suite d'Intelligence Créative",
+                  'cs-CZ': 'Sada kreativní inteligence',
+                },
+                link: 'https://cis-docs.bmad-method.org/',
+                attrs: { target: '_blank' },
+              },
+              {
+                label: 'Game Dev Studio',
+                translations: {
+                  'vi-VN': 'Xưởng phát triển Game',
+                  'zh-CN': '游戏开发工作室',
+                  'fr-FR': 'Studio de Développement de Jeux',
+                  'cs-CZ': 'Herní vývojové studio',
+                },
+                link: 'https://game-dev-studio-docs.bmad-method.org/',
+                attrs: { target: '_blank' },
+              },
               {
                 label: 'Test Architect (TEA)',
+                translations: {
+                  'vi-VN': 'Kiến trúc sư Kiểm thử (TEA)',
+                  'zh-CN': '测试架构师 (TEA)',
+                  'fr-FR': 'Architecte de Tests (TEA)',
+                  'cs-CZ': 'Testovací architekt (TEA)',
+                },
                 link: 'https://bmad-code-org.github.io/bmad-method-test-architecture-enterprise/',
                 attrs: { target: '_blank' },
               },