Merge remote-tracking branch 'upstream/main'
Commit c6a4a592c0
@@ -0,0 +1,15 @@
#!/bin/bash
# Discord notification helper functions

# Escape markdown special chars and @mentions for safe Discord display
# Bracket expression: ] must be first, then other chars. In POSIX bracket expr, \ is literal.
esc() { sed -e 's/[][\*_()~`>]/\\&/g' -e 's/@/@ /g'; }

# Truncate to $1 chars (or 80 if wall-of-text with <3 spaces)
trunc() {
  local max=$1
  local txt=$(tr '\n\r' ' ' | cut -c1-"$max")
  local spaces=$(printf '%s' "$txt" | tr -cd ' ' | wc -c)
  [ "$spaces" -lt 3 ] && [ ${#txt} -gt 80 ] && txt=$(printf '%s' "$txt" | cut -c1-80)
  printf '%s' "$txt"
}
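The helpers are easiest to verify locally before wiring them into a workflow. A minimal sanity check, assuming the file above is saved as `.github/scripts/discord-helpers.sh`:

```bash
#!/bin/bash
# Source the helpers and exercise them with sample input.
source .github/scripts/discord-helpers.sh

# esc() backslash-escapes Discord markdown and breaks @mentions with a space.
printf '%s' 'hello *world* @everyone [link](x)' | esc
# -> hello \*world\* @ everyone \[link\]\(x\)

# trunc() flattens newlines and cuts to N chars; wall-of-text input
# (fewer than 3 spaces) is additionally capped at 80 characters.
printf 'line one\nline two with some detail' | trunc 15
# -> line one line t
```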
@@ -1,16 +1,286 @@
 name: Discord Notification
 
-"on": [pull_request, release, create, delete, issue_comment, pull_request_review, pull_request_review_comment]
+on:
+  pull_request:
+    types: [opened, closed, reopened, ready_for_review]
+  release:
+    types: [published]
+  create:
+  delete:
+  issue_comment:
+    types: [created]
+  pull_request_review:
+    types: [submitted]
+  pull_request_review_comment:
+    types: [created]
+  issues:
+    types: [opened, closed, reopened]
+
+env:
+  MAX_TITLE: 100
+  MAX_BODY: 250
+
 jobs:
-  notify:
+  pull_request:
+    if: github.event_name == 'pull_request'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.repository.default_branch }}
+          sparse-checkout: .github/scripts
+          sparse-checkout-cone-mode: false
+      - name: Notify Discord
+        env:
+          WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
+          ACTION: ${{ github.event.action }}
+          MERGED: ${{ github.event.pull_request.merged }}
+          PR_NUM: ${{ github.event.pull_request.number }}
+          PR_URL: ${{ github.event.pull_request.html_url }}
+          PR_TITLE: ${{ github.event.pull_request.title }}
+          PR_USER: ${{ github.event.pull_request.user.login }}
+          PR_BODY: ${{ github.event.pull_request.body }}
+        run: |
+          set -o pipefail
+          source .github/scripts/discord-helpers.sh
+          [ -z "$WEBHOOK" ] && exit 0
+
+          if [ "$ACTION" = "opened" ]; then ICON="🔀"; LABEL="New PR"
+          elif [ "$ACTION" = "closed" ] && [ "$MERGED" = "true" ]; then ICON="🎉"; LABEL="Merged"
+          elif [ "$ACTION" = "closed" ]; then ICON="❌"; LABEL="Closed"
+          elif [ "$ACTION" = "reopened" ]; then ICON="🔄"; LABEL="Reopened"
+          else ICON="📋"; LABEL="Ready"; fi
+
+          TITLE=$(printf '%s' "$PR_TITLE" | trunc $MAX_TITLE | esc)
+          [ ${#PR_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
+          BODY=$(printf '%s' "$PR_BODY" | trunc $MAX_BODY | esc)
+          [ -n "$PR_BODY" ] && [ ${#PR_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
+          [ -n "$BODY" ] && BODY=" · $BODY"
+          USER=$(printf '%s' "$PR_USER" | esc)
+
+          MSG="$ICON **[$LABEL #$PR_NUM: $TITLE](<$PR_URL>)**"$'\n'"by @$USER$BODY"
+          jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
+
+  issues:
+    if: github.event_name == 'issues'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.repository.default_branch }}
+          sparse-checkout: .github/scripts
+          sparse-checkout-cone-mode: false
+      - name: Notify Discord
+        env:
+          WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
+          ACTION: ${{ github.event.action }}
+          ISSUE_NUM: ${{ github.event.issue.number }}
+          ISSUE_URL: ${{ github.event.issue.html_url }}
+          ISSUE_TITLE: ${{ github.event.issue.title }}
+          ISSUE_USER: ${{ github.event.issue.user.login }}
+          ISSUE_BODY: ${{ github.event.issue.body }}
+          ACTOR: ${{ github.actor }}
+        run: |
+          set -o pipefail
+          source .github/scripts/discord-helpers.sh
+          [ -z "$WEBHOOK" ] && exit 0
+
+          if [ "$ACTION" = "opened" ]; then ICON="🐛"; LABEL="New Issue"; USER="$ISSUE_USER"
+          elif [ "$ACTION" = "closed" ]; then ICON="✅"; LABEL="Closed"; USER="$ACTOR"
+          else ICON="🔄"; LABEL="Reopened"; USER="$ACTOR"; fi
+
+          TITLE=$(printf '%s' "$ISSUE_TITLE" | trunc $MAX_TITLE | esc)
+          [ ${#ISSUE_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
+          BODY=$(printf '%s' "$ISSUE_BODY" | trunc $MAX_BODY | esc)
+          [ -n "$ISSUE_BODY" ] && [ ${#ISSUE_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
+          [ -n "$BODY" ] && BODY=" · $BODY"
+          USER=$(printf '%s' "$USER" | esc)
+
+          MSG="$ICON **[$LABEL #$ISSUE_NUM: $TITLE](<$ISSUE_URL>)**"$'\n'"by @$USER$BODY"
+          jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
+
+  issue_comment:
+    if: github.event_name == 'issue_comment'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.repository.default_branch }}
+          sparse-checkout: .github/scripts
+          sparse-checkout-cone-mode: false
+      - name: Notify Discord
+        env:
+          WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
+          IS_PR: ${{ github.event.issue.pull_request && 'true' || 'false' }}
+          ISSUE_NUM: ${{ github.event.issue.number }}
+          ISSUE_TITLE: ${{ github.event.issue.title }}
+          COMMENT_URL: ${{ github.event.comment.html_url }}
+          COMMENT_USER: ${{ github.event.comment.user.login }}
+          COMMENT_BODY: ${{ github.event.comment.body }}
+        run: |
+          set -o pipefail
+          source .github/scripts/discord-helpers.sh
+          [ -z "$WEBHOOK" ] && exit 0
+
+          [ "$IS_PR" = "true" ] && TYPE="PR" || TYPE="Issue"
+
+          TITLE=$(printf '%s' "$ISSUE_TITLE" | trunc $MAX_TITLE | esc)
+          [ ${#ISSUE_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
+          BODY=$(printf '%s' "$COMMENT_BODY" | trunc $MAX_BODY | esc)
+          [ ${#COMMENT_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
+          USER=$(printf '%s' "$COMMENT_USER" | esc)
+
+          MSG="💬 **[Comment on $TYPE #$ISSUE_NUM: $TITLE](<$COMMENT_URL>)**"$'\n'"@$USER: $BODY"
+          jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
+
+  pull_request_review:
+    if: github.event_name == 'pull_request_review'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.repository.default_branch }}
+          sparse-checkout: .github/scripts
+          sparse-checkout-cone-mode: false
+      - name: Notify Discord
+        env:
+          WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
+          STATE: ${{ github.event.review.state }}
+          PR_NUM: ${{ github.event.pull_request.number }}
+          PR_TITLE: ${{ github.event.pull_request.title }}
+          REVIEW_URL: ${{ github.event.review.html_url }}
+          REVIEW_USER: ${{ github.event.review.user.login }}
+          REVIEW_BODY: ${{ github.event.review.body }}
+        run: |
+          set -o pipefail
+          source .github/scripts/discord-helpers.sh
+          [ -z "$WEBHOOK" ] && exit 0
+
+          if [ "$STATE" = "approved" ]; then ICON="✅"; LABEL="Approved"
+          elif [ "$STATE" = "changes_requested" ]; then ICON="🔧"; LABEL="Changes Requested"
+          else ICON="👀"; LABEL="Reviewed"; fi
+
+          TITLE=$(printf '%s' "$PR_TITLE" | trunc $MAX_TITLE | esc)
+          [ ${#PR_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
+          BODY=$(printf '%s' "$REVIEW_BODY" | trunc $MAX_BODY | esc)
+          [ -n "$REVIEW_BODY" ] && [ ${#REVIEW_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
+          [ -n "$BODY" ] && BODY=": $BODY"
+          USER=$(printf '%s' "$REVIEW_USER" | esc)
+
+          MSG="$ICON **[$LABEL PR #$PR_NUM: $TITLE](<$REVIEW_URL>)**"$'\n'"@$USER$BODY"
+          jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
+
+  pull_request_review_comment:
+    if: github.event_name == 'pull_request_review_comment'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.repository.default_branch }}
+          sparse-checkout: .github/scripts
+          sparse-checkout-cone-mode: false
+      - name: Notify Discord
+        env:
+          WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
+          PR_NUM: ${{ github.event.pull_request.number }}
+          PR_TITLE: ${{ github.event.pull_request.title }}
+          COMMENT_URL: ${{ github.event.comment.html_url }}
+          COMMENT_USER: ${{ github.event.comment.user.login }}
+          COMMENT_BODY: ${{ github.event.comment.body }}
+        run: |
+          set -o pipefail
+          source .github/scripts/discord-helpers.sh
+          [ -z "$WEBHOOK" ] && exit 0
+
+          TITLE=$(printf '%s' "$PR_TITLE" | trunc $MAX_TITLE | esc)
+          [ ${#PR_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
+          BODY=$(printf '%s' "$COMMENT_BODY" | trunc $MAX_BODY | esc)
+          [ ${#COMMENT_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
+          USER=$(printf '%s' "$COMMENT_USER" | esc)
+
+          MSG="💭 **[Review Comment PR #$PR_NUM: $TITLE](<$COMMENT_URL>)**"$'\n'"@$USER: $BODY"
+          jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
+
+  release:
+    if: github.event_name == 'release'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.repository.default_branch }}
+          sparse-checkout: .github/scripts
+          sparse-checkout-cone-mode: false
+      - name: Notify Discord
+        env:
+          WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
+          TAG: ${{ github.event.release.tag_name }}
+          NAME: ${{ github.event.release.name }}
+          URL: ${{ github.event.release.html_url }}
+          RELEASE_BODY: ${{ github.event.release.body }}
+        run: |
+          set -o pipefail
+          source .github/scripts/discord-helpers.sh
+          [ -z "$WEBHOOK" ] && exit 0
+
+          REL_NAME=$(printf '%s' "$NAME" | trunc $MAX_TITLE | esc)
+          [ ${#NAME} -gt $MAX_TITLE ] && REL_NAME="${REL_NAME}..."
+          BODY=$(printf '%s' "$RELEASE_BODY" | trunc $MAX_BODY | esc)
+          [ -n "$RELEASE_BODY" ] && [ ${#RELEASE_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
+          [ -n "$BODY" ] && BODY=" · $BODY"
+          TAG_ESC=$(printf '%s' "$TAG" | esc)
+
+          MSG="🚀 **[Release $TAG_ESC: $REL_NAME](<$URL>)**"$'\n'"$BODY"
+          jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
+
+  create:
+    if: github.event_name == 'create'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.repository.default_branch }}
+          sparse-checkout: .github/scripts
+          sparse-checkout-cone-mode: false
+      - name: Notify Discord
+        env:
+          WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
+          REF_TYPE: ${{ github.event.ref_type }}
+          REF: ${{ github.event.ref }}
+          ACTOR: ${{ github.actor }}
+          REPO_URL: ${{ github.event.repository.html_url }}
+        run: |
+          set -o pipefail
+          source .github/scripts/discord-helpers.sh
+          [ -z "$WEBHOOK" ] && exit 0
+
+          [ "$REF_TYPE" = "branch" ] && ICON="🌿" || ICON="🏷️"
+          REF_TRUNC=$(printf '%s' "$REF" | trunc $MAX_TITLE)
+          [ ${#REF} -gt $MAX_TITLE ] && REF_TRUNC="${REF_TRUNC}..."
+          REF_ESC=$(printf '%s' "$REF_TRUNC" | esc)
+          REF_URL=$(jq -rn --arg ref "$REF" '$ref | @uri')
+          ACTOR_ESC=$(printf '%s' "$ACTOR" | esc)
+          MSG="$ICON **${REF_TYPE^} created: [$REF_ESC](<$REPO_URL/tree/$REF_URL>)** by @$ACTOR_ESC"
+          jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
+
+  delete:
+    if: github.event_name == 'delete'
     runs-on: ubuntu-latest
     steps:
       - name: Notify Discord
-        uses: sarisia/actions-status-discord@v1
-        if: always()
-        with:
-          webhook: ${{ secrets.DISCORD_WEBHOOK }}
-          status: ${{ job.status }}
-          title: "Triggered by ${{ github.event_name }}"
-          color: 0x5865F2
+        env:
+          WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
+          REF_TYPE: ${{ github.event.ref_type }}
+          REF: ${{ github.event.ref }}
+          ACTOR: ${{ github.actor }}
+        run: |
+          set -o pipefail
+          [ -z "$WEBHOOK" ] && exit 0
+          esc() { sed -e 's/[][\*_()~`>]/\\&/g' -e 's/@/@ /g'; }
+          trunc() { tr '\n\r' ' ' | cut -c1-"$1"; }
+
+          REF_TRUNC=$(printf '%s' "$REF" | trunc 100)
+          [ ${#REF} -gt 100 ] && REF_TRUNC="${REF_TRUNC}..."
+          REF_ESC=$(printf '%s' "$REF_TRUNC" | esc)
+          ACTOR_ESC=$(printf '%s' "$ACTOR" | esc)
+          MSG="🗑️ **${REF_TYPE^} deleted: $REF_ESC** by @$ACTOR_ESC"
+          jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
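Every job above ends with the same delivery pattern: build a Markdown message, let `jq` produce the JSON payload so quoting and newlines are handled safely, then POST it to the webhook with `curl`. A minimal local sketch of that pattern follows; the webhook URL and sample values are placeholders, not real endpoints or data:

```bash
#!/bin/bash
set -o pipefail
source .github/scripts/discord-helpers.sh

WEBHOOK="https://discord.com/api/webhooks/EXAMPLE/TOKEN"   # placeholder, supply your own
PR_NUM=42                                                  # sample values for illustration
PR_URL="https://github.com/example/repo/pull/42"
TITLE=$(printf '%s' 'Add *fancy* feature' | trunc 100 | esc)
USER=$(printf '%s' 'octocat' | esc)

# <...> around the URL suppresses Discord's link preview; $'\n' is a literal newline.
MSG="🔀 **[New PR #$PR_NUM: $TITLE](<$PR_URL>)**"$'\n'"by @$USER"

# jq -n --arg builds {"content": "..."} with proper JSON escaping; curl posts it.
jq -n --arg content "$MSG" '{content: $content}' |
  curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
```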
@@ -70,4 +70,6 @@ z*/
 .codex
 .github/chatmodes
 .agent
 .agentvibes/
+.kiro/
+.roo
@@ -1,6 +1,9 @@
 # Test fixtures with intentionally broken/malformed files
 test/fixtures/**
 
+# Contributor Covenant (external standard)
+CODE_OF_CONDUCT.md
+
 # BMAD runtime folders (user-specific, not in repo)
 .bmad/
 .bmad*/
@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
the official BMAD Discord server (https://discord.com/invite/gk8jAdXWmj) - DM a moderator or flag a post.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
@@ -1,5 +1,5 @@
 ---
-name: Brainstorming Session
+name: brainstorming-session
 description: Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods
 context_file: '' # Optional context file path for project-specific guidance
 ---
@@ -1,5 +1,5 @@
 ---
-name: Party Mode
+name: party-mode
 description: Orchestrates group discussions between all installed BMAD agents, enabling natural multi-agent conversations
 ---
@@ -1,5 +1,5 @@
 ---
-name: Create Agent
+name: create-agent
 description: Interactive workflow to build BMAD Core compliant agents with optional brainstorming, persona development, and command structure
 web_bundle: true
 ---
@@ -1,5 +1,5 @@
 ---
-name: Create Workflow
+name: create-workflow
 description: Create structured standalone workflows using markdown-based step architecture
 web_bundle: true
 ---
@@ -1,5 +1,5 @@
 ---
-name: Edit Agent
+name: edit-agent
 description: Edit existing BMAD agents while following all best practices and conventions
 web_bundle: false
 ---
@@ -1,5 +1,5 @@
 ---
-name: Edit Workflow
+name: edit-workflow
 description: Intelligent workflow editor that helps modify existing workflows while following best practices
 web_bundle: true
 ---
@@ -1,5 +1,5 @@
 ---
-name: Workflow Compliance Check
+name: workflow-compliance-check
 description: Systematic validation of workflows against BMAD standards with adversarial analysis and detailed reporting
 web_bundle: false
 ---
@@ -108,7 +108,8 @@ Stories move through these states in the sprint status file:
 
 **As Needed:**
 
-- Run `workflow-status` anytime to check progress
+- Run `sprint-status` anytime in Phase 4 to inspect sprint-status.yaml and get the next implementation command
+- Run `workflow-status` for cross-phase routing and project-level paths
 - Run `correct-course` if significant changes needed
 
 ---
@@ -155,7 +156,7 @@ PRD (PM) → Architecture (Architect)
 ## Troubleshooting
 
 **Q: Which workflow should I run next?**
-A: Run `workflow-status` - it reads the sprint status file and tells you exactly what to do.
+A: Run `workflow-status` - it reads the sprint status file and tells you exactly what to do. During implementation (Phase 4) run `sprint-status` (fast check against sprint-status.yaml).
 
 **Q: Story needs significant changes mid-implementation?**
 A: Run `correct-course` to analyze impact and route appropriately.
@@ -1,5 +1,5 @@
 ---
-name: Product Brief Workflow
+name: create-product-brief
 description: Create comprehensive product briefs through collaborative step-by-step discovery as creative Business Analyst working with the user as peers.
 web_bundle: true
 ---
@@ -1,6 +1,7 @@
 ---
-name: Research Workflow
+name: research
 description: Conduct comprehensive research across multiple domains using current web data and verified sources - Market, Technical, Domain and other research types.
+web_bundle: true
 ---
 
 # Research Workflow
@@ -1,3 +1,9 @@
+---
+name: create-ux-design
+description: Work with a peer UX Design expert to plan your applications UX patterns, look and feel.
+web_bundle: true
+---
+
 # Create UX Design Workflow
 
 **Goal:** Create comprehensive UX design specifications through collaborative visual exploration and informed decision-making where you act as a UX facilitator working with a product stakeholder.
@@ -1,7 +1,7 @@
 ---
-name: PRD Workflow
+name: create-prd
 description: Creates a comprehensive PRDs through collaborative step-by-step discovery between two product managers working as peers.
-main_config: `{project-root}/{bmad_folder}/bmm/config.yaml`
+main_config: '{project-root}/{bmad_folder}/bmm/config.yaml'
 web_bundle: true
 ---
@@ -1,6 +1,7 @@
 ---
-name: Architecture Workflow
+name: create-architecture
 description: Collaborative architectural decision facilitation for AI-agent consistency. Replaces template-driven architecture with intelligent, adaptive conversation that produces a decision-focused architecture document optimized for preventing agent conflicts.
+web_bundle: true
 ---
 
 # Architecture Workflow
@@ -1,5 +1,5 @@
 ---
-name: 'Create Epics and Stories'
+name: create-epics-stories
 description: 'Transform PRD requirements and Architecture decisions into comprehensive stories organized by user value. This workflow requires completed PRD + Architecture documents (UX recommended if UI exists) and breaks down requirements into implementation-ready epics and user stories that incorporate all available technical and design context. Creates detailed, actionable stories with complete acceptance criteria for development teams.'
 web_bundle: true
 ---
@@ -1,5 +1,5 @@
 ---
-name: 'Implementation Readiness'
+name: check-implementation-readiness
 description: 'Critical validation workflow that assesses PRD, Architecture, and Epics & Stories for completeness and alignment before implementation. Uses adversarial review approach to find gaps and issues.'
 web_bundle: false
 ---
@@ -0,0 +1,174 @@
# Sprint Status - Multi-Mode Service

<critical>The workflow execution engine is governed by: {project-root}/{bmad_folder}/core/tasks/workflow.xml</critical>
<critical>You MUST have already loaded and processed: {project-root}/{bmad_folder}/bmm/workflows/4-implementation/sprint-status/workflow.yaml</critical>
<critical>Modes: interactive (default), validate, data</critical>
<critical>⚠️ ABSOLUTELY NO TIME ESTIMATES. Do NOT mention hours, days, weeks, or timelines.</critical>

<workflow>

<step n="0" goal="Determine execution mode">
<action>Set mode = {{mode}} if provided by caller; otherwise mode = "interactive"</action>

<check if="mode == data">
<action>Jump to Step 20</action>
</check>

<check if="mode == validate">
<action>Jump to Step 30</action>
</check>

<check if="mode == interactive">
<action>Continue to Step 1</action>
</check>
</step>

<step n="1" goal="Locate sprint status file">
<action>Try {sprint_status_file}</action>
<check if="file not found">
<output>❌ sprint-status.yaml not found.
Run `/bmad:bmm:workflows:sprint-planning` to generate it, then rerun sprint-status.</output>
<action>Exit workflow</action>
</check>
<action>Continue to Step 2</action>
</step>

<step n="2" goal="Read and parse sprint-status.yaml">
<action>Read the FULL file: {sprint_status_file}</action>
<action>Parse fields: generated, project, project_key, tracking_system, story_location</action>
<action>Parse development_status map. Classify keys:</action>
- Epics: keys starting with "epic-" (and not ending with "-retrospective")
- Retrospectives: keys ending with "-retrospective"
- Stories: everything else (e.g., 1-2-login-form)
<action>Count story statuses: backlog, drafted, ready-for-dev, in-progress, review, done</action>
<action>Count epic statuses: backlog, contexted</action>
<action>Detect risks:</action>
- Stories in review but no reviewer assigned context → suggest `/bmad:bmm:workflows:code-review`
- Stories in in-progress with no ready-for-dev items behind them → keep focus on the active story
- All epics backlog/contexted but no stories drafted → prompt to run `/bmad:bmm:workflows:create-story`
</step>

<step n="3" goal="Select next action recommendation">
<action>Pick the next recommended workflow using priority:</action>
1. If any story status == in-progress → recommend `dev-story` for the first in-progress story
2. Else if any story status == review → recommend `code-review` for the first review story
3. Else if any story status == ready-for-dev → recommend `dev-story`
4. Else if any story status == drafted → recommend `story-ready`
5. Else if any story status == backlog → recommend `create-story`
6. Else if any epic status == backlog → recommend `epic-tech-context`
7. Else if retrospectives are optional → recommend `retrospective`
8. Else → All implementation items done; suggest `workflow-status` to plan next phase
<action>Store selected recommendation as: next_story_id, next_workflow_id, next_agent (SM/DEV as appropriate)</action>
</step>

<step n="4" goal="Display summary">
<output>
## 📊 Sprint Status

- Project: {{project}} ({{project_key}})
- Tracking: {{tracking_system}}
- Status file: {sprint_status_file}

**Stories:** backlog {{count_backlog}}, drafted {{count_drafted}}, ready-for-dev {{count_ready}}, in-progress {{count_in_progress}}, review {{count_review}}, done {{count_done}}

**Epics:** backlog {{epic_backlog}}, contexted {{epic_contexted}}

**Next Recommendation:** /bmad:bmm:workflows:{{next_workflow_id}} ({{next_story_id}})

{{#if risks}}
**Risks:**
{{#each risks}}
- {{this}}
{{/each}}
{{/if}}

{{#if by_epic}}
**Per Epic:**
{{#each by_epic}}
- {{epic_id}}: context={{context_status}}, stories → backlog {{backlog}}, drafted {{drafted}}, ready {{ready_for_dev}}, in-progress {{in_progress}}, review {{review}}, done {{done}}
{{/each}}
{{/if}}
</output>
</step>

<step n="5" goal="Offer actions">
<ask>Pick an option:
1) Run recommended workflow now
2) Show all stories grouped by status
3) Show raw sprint-status.yaml
4) Exit
Choice:</ask>

<check if="choice == 1">
<output>Run `/bmad:bmm:workflows:{{next_workflow_id}}`.
If the command targets a story, set `story_key={{next_story_id}}` when prompted.</output>
</check>

<check if="choice == 2">
<output>
### Stories by Status
- In Progress: {{stories_in_progress}}
- Review: {{stories_in_review}}
- Ready for Dev: {{stories_ready_for_dev}}
- Drafted: {{stories_drafted}}
- Backlog: {{stories_backlog}}
- Done: {{stories_done}}
</output>
</check>

<check if="choice == 3">
<action>Display the full contents of {sprint_status_file}</action>
</check>

<check if="choice == 4">
<action>Exit workflow</action>
</check>
</step>

<!-- ========================= -->
<!-- Data mode for other flows -->
<!-- ========================= -->

<step n="20" goal="Data mode output">
<action>Load and parse {sprint_status_file} same as Step 2</action>
<action>Compute recommendation same as Step 3</action>
<template-output>next_workflow_id = {{next_workflow_id}}</template-output>
<template-output>next_story_id = {{next_story_id}}</template-output>
<template-output>count_backlog = {{count_backlog}}</template-output>
<template-output>count_drafted = {{count_drafted}}</template-output>
<template-output>count_ready = {{count_ready}}</template-output>
<template-output>count_in_progress = {{count_in_progress}}</template-output>
<template-output>count_review = {{count_review}}</template-output>
<template-output>count_done = {{count_done}}</template-output>
<template-output>epic_backlog = {{epic_backlog}}</template-output>
<template-output>epic_contexted = {{epic_contexted}}</template-output>
<template-output>warnings = {{risks}}</template-output>
<action>Return to caller</action>
</step>

<!-- ========================= -->
<!-- Validate mode -->
<!-- ========================= -->

<step n="30" goal="Validate sprint-status file">
<action>Check that {sprint_status_file} exists</action>
<check if="missing">
<template-output>is_valid = false</template-output>
<template-output>error = "sprint-status.yaml missing"</template-output>
<template-output>suggestion = "Run sprint-planning to create it"</template-output>
<action>Return</action>
</check>
<action>Read file and verify it has a development_status section with at least one entry</action>
<check if="validation fails">
<template-output>is_valid = false</template-output>
<template-output>error = "development_status missing or empty"</template-output>
<template-output>suggestion = "Re-run sprint-planning or repair the file manually"</template-output>
<action>Return</action>
</check>
<template-output>is_valid = true</template-output>
<template-output>message = "sprint-status.yaml present and parsable"</template-output>
</step>

</workflow>
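Step 2's tallies can be roughly approximated from the command line for a quick spot check. The sketch below assumes development_status is a flat `key: status` map in sprint-status.yaml, which may not hold for every generated file, and it is not part of the workflow itself:

```bash
#!/bin/bash
# Rough approximation of Step 2's classification and counts, assuming
# indented "<key>: <status>" entries under development_status.
file="${1:-sprint-status.yaml}"

awk '
  /^development_status:/ { in_block = 1; next }
  in_block && /^[^[:space:]]/ { in_block = 0 }          # left the indented block
  in_block && /^[[:space:]]+[^#[:space:]].*:/ {
    key = $1; sub(/:$/, "", key)
    status = $2
    if (key ~ /-retrospective$/)      retro[status]++   # retrospectives
    else if (key ~ /^epic-/)          epic[status]++    # epics
    else                              story[status]++   # stories
  }
  END {
    for (s in story) printf "story %-14s %d\n", s, story[s]
    for (s in epic)  printf "epic  %-14s %d\n", s, epic[s]
  }
' "$file"
```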
@@ -0,0 +1,35 @@
# Sprint Status - Implementation Tracker
name: sprint-status
description: "Summarize sprint-status.yaml, surface risks, and route to the right implementation workflow."
author: "BMad"

# Critical variables from config
config_source: "{project-root}/{bmad_folder}/bmm/config.yaml"
output_folder: "{config_source}:output_folder"
user_name: "{config_source}:user_name"
communication_language: "{config_source}:communication_language"
document_output_language: "{config_source}:document_output_language"
date: system-generated
sprint_artifacts: "{config_source}:sprint_artifacts"

# Workflow components
installed_path: "{project-root}/{bmad_folder}/bmm/workflows/4-implementation/sprint-status"
instructions: "{installed_path}/instructions.md"

# Inputs
variables:
  sprint_status_file: "{sprint_artifacts}/sprint-status.yaml || {output_folder}/sprint-status.yaml"
  tracking_system: "file-system"

# Smart input file references
input_file_patterns:
  sprint_status:
    description: "Sprint status file generated by sprint-planning"
    whole: "{sprint_artifacts}/sprint-status.yaml || {output_folder}/sprint-status.yaml"
    load_strategy: "FULL_LOAD"

# Standalone so IDE commands get generated
standalone: true

# No web bundle needed
web_bundle: false
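The `sprint_status_file` value uses the `||` notation, which appears to mean a fallback list where the first existing path wins; the actual resolution is handled by the workflow engine, not shown here. A shell sketch of that intended behavior, with illustrative paths only:

```bash
#!/bin/bash
# Resolve an "A || B" fallback to the first path that exists. Paths are examples.
resolve_first() {
  local candidate
  for candidate in "$@"; do
    [ -f "$candidate" ] && { printf '%s\n' "$candidate"; return 0; }
  done
  return 1
}

sprint_status_file=$(resolve_first \
  "docs/sprint-artifacts/sprint-status.yaml" \
  "docs/output/sprint-status.yaml") || {
  echo "sprint-status.yaml not found - run sprint-planning first" >&2
  exit 1
}
echo "Using: $sprint_status_file"
```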
@@ -32,18 +32,115 @@
 </check>
 
 <check if="Mode B">
-<ask>**[t] Plan first** - Create tech-spec then implement
+
+<!-- Escalation Threshold: Lightweight check - should we invoke scale-adaptive? -->
+
+<action>Evaluate escalation threshold against user input (minimal tokens, no file loading):
+
+**Triggers escalation** (if 2+ signals present):
+
+- Multiple components mentioned (e.g., dashboard + api + database)
+- System-level language (e.g., platform, integration, architecture)
+- Uncertainty about approach (e.g., "how should I", "best way to")
+- Multi-layer scope (e.g., UI + backend + data together)
+- Extended timeframe (e.g., "this week", "over the next few days")
+
+**Reduces signal:**
+
+- Simplicity markers (e.g., "just", "quickly", "fix", "bug", "typo", "simple", "basic", "minor")
+- Single file/component focus
+- Confident, specific request
+
+Use holistic judgment, not mechanical keyword matching.</action>
+
+<!-- No Escalation: Simple request, offer existing choice -->
+<check if="escalation threshold NOT triggered">
+<ask>**[t] Plan first** - Create tech-spec then implement
 **[e] Execute directly** - Start now</ask>
 
 <check if="t">
 <action>Load and execute {create_tech_spec_workflow}</action>
 <action>Continue to implementation after spec complete</action>
+</check>
+
+<check if="e">
+<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
+<goto>step_2</goto>
+</check>
+
 </check>
 
-<check if="e">
-<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
-<goto>step_2</goto>
+<!-- Escalation Triggered: Load scale-adaptive and evaluate level -->
+<check if="escalation threshold triggered">
+<action>Load {project_levels} and evaluate user input against detection_hints.keywords</action>
+<action>Determine level (0-4) using scale-adaptive definitions</action>
+
+<!-- Level 0: Scale-adaptive confirms simple, fall back to standard choice -->
+<check if="level 0">
+<ask>**[t] Plan first** - Create tech-spec then implement
+
+**[e] Execute directly** - Start now</ask>
+
+<check if="t">
+<action>Load and execute {create_tech_spec_workflow}</action>
+<action>Continue to implementation after spec complete</action>
+</check>
+
+<check if="e">
+<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
+<goto>step_2</goto>
+</check>
+</check>
+
+<check if="level 1 or 2 or couldn't determine level">
+<ask>This looks like a focused feature with multiple components.
+
+**[t] Create tech-spec first** (recommended)
+**[w] Seems bigger than quick-dev** — see what BMad Method recommends (workflow-init)
+**[e] Execute directly**</ask>
+
+<check if="t">
+<action>Load and execute {create_tech_spec_workflow}</action>
+<action>Continue to implementation after spec complete</action>
+</check>
+
+<check if="w">
+<action>Load and execute {workflow_init}</action>
+<action>EXIT quick-dev - user has been routed to BMad Method</action>
+</check>
+
+<check if="e">
+<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
+<goto>step_2</goto>
+</check>
+</check>
+
+<!-- Level 3+: BMad Method territory, recommend workflow-init -->
+<check if="level 3 or higher">
+<ask>This sounds like platform/system work.
+
+**[w] Start BMad Method** (recommended) (workflow-init)
+**[t] Create tech-spec** (lighter planning)
+**[e] Execute directly** - feeling lucky</ask>
+
+<check if="w">
+<action>Load and execute {workflow_init}</action>
+<action>EXIT quick-dev - user has been routed to BMad Method</action>
+</check>
+
+<check if="t">
+<action>Load and execute {create_tech_spec_workflow}</action>
+<action>Continue to implementation after spec complete</action>
+</check>
+
+<check if="e">
+<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
+<goto>step_2</goto>
+</check>
+</check>
+
 </check>
 
 </check>
 
 </step>
@@ -25,5 +25,9 @@ create_tech_spec_workflow: "{project-root}/{bmad_folder}/bmm/workflows/bmad-quic
 party_mode_exec: "{project-root}/{bmad_folder}/core/workflows/party-mode/workflow.md"
 advanced_elicitation: "{project-root}/{bmad_folder}/core/tasks/advanced-elicitation.xml"
 
+# Routing resources (lazy-loaded)
+project_levels: "{project-root}/{bmad_folder}/bmm/workflows/workflow-status/project-levels.yaml"
+workflow_init: "{project-root}/{bmad_folder}/bmm/workflows/workflow-status/init/workflow.yaml"
+
 standalone: true
 web_bundle: false
@@ -1,5 +1,5 @@
 ---
-name: Generate Project Context
+name: generate-project-context
 description: Creates a concise project_context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
 ---
@@ -105,7 +105,8 @@ class ManifestGenerator {
   }
 
   /**
-   * Recursively find and parse workflow.yaml files
+   * Recursively find and parse workflow.yaml and workflow.md files
    */
   async getWorkflowsFromPath(basePath, moduleName) {
     const workflows = [];
@@ -126,11 +126,23 @@ class ManifestGenerator {
         // Recurse into subdirectories
         const newRelativePath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
         await findWorkflows(fullPath, newRelativePath);
-      } else if (entry.name === 'workflow.yaml') {
-        // Parse workflow file
+      } else if (entry.name === 'workflow.yaml' || entry.name === 'workflow.md') {
+        // Parse workflow file (both YAML and MD formats)
         try {
           const content = await fs.readFile(fullPath, 'utf8');
-          const workflow = yaml.load(content);
+
+          let workflow;
+          if (entry.name === 'workflow.yaml') {
+            // Parse YAML workflow
+            workflow = yaml.load(content);
+          } else {
+            // Parse MD workflow with YAML frontmatter
+            const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);
+            if (!frontmatterMatch) {
+              continue; // Skip MD files without frontmatter
+            }
+            workflow = yaml.load(frontmatterMatch[1]);
+          }
 
           // Skip template workflows (those with placeholder values)
           if (workflow.name && workflow.name.includes('{') && workflow.name.includes('}')) {
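The new `workflow.md` branch reads YAML frontmatter by matching everything between the first pair of `---` lines. A rough shell equivalent is handy for eyeballing a file from the terminal; this is an editorial sketch, not the installer's code path:

```bash
#!/bin/bash
# Print the YAML frontmatter of a workflow.md: the text between the opening
# '---' on line 1 and the next '---', mirroring the /^---\n([\s\S]*?)\n---/ match.
file="${1:-workflow.md}"
awk '
  NR == 1 && $0 != "---" { exit 1 }   # no frontmatter: bail, like the JS "continue"
  NR > 1 && $0 == "---"  { exit }     # closing fence: stop
  NR > 1                 { print }    # frontmatter body
' "$file"
```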
@@ -141,18 +153,15 @@ class ManifestGenerator {
           // Build relative path for installation
           const installPath =
             moduleName === 'core'
-              ? `${this.bmadFolderName}/core/workflows/${relativePath}/workflow.yaml`
-              : `${this.bmadFolderName}/${moduleName}/workflows/${relativePath}/workflow.yaml`;
+              ? `${this.bmadFolderName}/core/workflows/${relativePath}/${entry.name}`
+              : `${this.bmadFolderName}/${moduleName}/workflows/${relativePath}/${entry.name}`;
 
-          // Check for standalone property (default: false)
-          const standalone = workflow.standalone === true;
-
+          // ALL workflows now generate commands - no standalone property needed
           workflows.push({
             name: workflow.name,
             description: workflow.description.replaceAll('"', '""'), // Escape quotes for CSV
             module: moduleName,
             path: installPath,
-            standalone: standalone,
           });
 
           // Add to files list
@@ -541,12 +550,12 @@ class ManifestGenerator {
   async writeWorkflowManifest(cfgDir) {
     const csvPath = path.join(cfgDir, 'workflow-manifest.csv');
 
-    // Create CSV header with standalone column
-    let csv = 'name,description,module,path,standalone\n';
+    // Create CSV header - removed standalone column as ALL workflows now generate commands
+    let csv = 'name,description,module,path\n';
 
-    // Add all workflows
+    // Add all workflows - no standalone property needed anymore
     for (const workflow of this.workflows) {
-      csv += `"${workflow.name}","${workflow.description}","${workflow.module}","${workflow.path}","${workflow.standalone}"\n`;
+      csv += `"${workflow.name}","${workflow.description}","${workflow.module}","${workflow.path}"\n`;
     }
 
     await fs.writeFile(csvPath, csv);
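The manifest writer escapes embedded double quotes by doubling them, which is the standard CSV quoting rule. A small shell illustration of that rule with made-up field values, separate from the installer's own code:

```bash
#!/bin/bash
# CSV-quote a field the same way writeWorkflowManifest does: double any
# embedded double quotes, then wrap the whole field in quotes.
csv_field() { printf '"%s"' "$(printf '%s' "$1" | sed 's/"/""/g')"; }

printf '%s,%s,%s,%s\n' \
  "$(csv_field 'create-prd')" \
  "$(csv_field 'Creates a "comprehensive" PRD')" \
  "$(csv_field 'bmm')" \
  "$(csv_field 'bmad/bmm/workflows/create-prd/workflow.yaml')"
# -> "create-prd","Creates a ""comprehensive"" PRD","bmm","bmad/bmm/workflows/create-prd/workflow.yaml"
```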
@@ -3,6 +3,7 @@ const fs = require('fs-extra');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');
 
 /**
  * Auggie CLI setup handler
@@ -33,10 +34,23 @@ class AuggieSetup extends BaseIdeSetup {
     const agentGen = new AgentCommandGenerator(this.bmadFolderName);
     const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
 
-    // Get tasks, tools, and workflows (standalone only)
+    // Get tasks, tools, and workflows (ALL workflows now generate commands)
     const tasks = await this.getTasks(bmadDir, true);
     const tools = await this.getTools(bmadDir, true);
-    const workflows = await this.getWorkflows(bmadDir, true);
+
+    // Get ALL workflows using the new workflow command generator
+    const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+    const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
+
+    // Convert workflow artifacts to expected format
+    const workflows = workflowArtifacts
+      .filter((artifact) => artifact.type === 'workflow-command')
+      .map((artifact) => ({
+        module: artifact.module,
+        name: path.basename(artifact.relativePath, '.md'),
+        path: artifact.sourcePath,
+        content: artifact.content,
+      }));
 
     const bmadCommandsDir = path.join(location, 'bmad');
     const agentsDir = path.join(bmadCommandsDir, 'agents');
@@ -73,13 +87,11 @@ class AuggieSetup extends BaseIdeSetup {
       await this.writeFile(targetPath, commandContent);
     }
 
-    // Install workflows
+    // Install workflows (already generated commands)
     for (const workflow of workflows) {
-      const content = await this.readFile(workflow.path);
-      const commandContent = this.createWorkflowCommand(workflow, content);
+      // Use the pre-generated workflow command content
       const targetPath = path.join(workflowsDir, `${workflow.module}-${workflow.name}.md`);
-      await this.writeFile(targetPath, commandContent);
+      await this.writeFile(targetPath, workflow.content);
     }
 
     const totalInstalled = agentArtifacts.length + tasks.length + tools.length + workflows.length;
@@ -3,6 +3,7 @@ const fs = require('fs-extra');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');
 
 /**
  * Crush IDE setup handler
@@ -34,10 +35,23 @@ class CrushSetup extends BaseIdeSetup {
     const agentGen = new AgentCommandGenerator(this.bmadFolderName);
     const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
 
-    // Get tasks, tools, and workflows (standalone only)
+    // Get tasks, tools, and workflows (ALL workflows now generate commands)
     const tasks = await this.getTasks(bmadDir, true);
     const tools = await this.getTools(bmadDir, true);
-    const workflows = await this.getWorkflows(bmadDir, true);
+
+    // Get ALL workflows using the new workflow command generator
+    const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+    const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
+
+    // Convert workflow artifacts to expected format for organizeByModule
+    const workflows = workflowArtifacts
+      .filter((artifact) => artifact.type === 'workflow-command')
+      .map((artifact) => ({
+        module: artifact.module,
+        name: path.basename(artifact.relativePath, '.md'),
+        path: artifact.sourcePath,
+        content: artifact.content,
+      }));
 
     // Organize by module
     const agentCount = await this.organizeByModule(commandsDir, agentArtifacts, tasks, tools, workflows, projectDir);
@ -113,13 +127,12 @@ class CrushSetup extends BaseIdeSetup {
|
||||||
toolCount++;
|
toolCount++;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy module-specific workflows
|
// Copy module-specific workflow commands (already generated)
|
||||||
const moduleWorkflows = workflows.filter((w) => w.module === module);
|
const moduleWorkflows = workflows.filter((w) => w.module === module);
|
||||||
for (const workflow of moduleWorkflows) {
|
for (const workflow of moduleWorkflows) {
|
||||||
const content = await this.readFile(workflow.path);
|
// Use the pre-generated workflow command content
|
||||||
const commandContent = this.createWorkflowCommand(workflow, content);
|
|
||||||
const targetPath = path.join(moduleWorkflowsDir, `${workflow.name}.md`);
|
const targetPath = path.join(moduleWorkflowsDir, `${workflow.name}.md`);
|
||||||
await this.writeFile(targetPath, commandContent);
|
await this.writeFile(targetPath, workflow.content);
|
||||||
workflowCount++;
|
workflowCount++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -2,6 +2,7 @@ const path = require('node:path');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');

 /**
 * Cursor IDE setup handler
@@ -53,10 +54,22 @@ class CursorSetup extends BaseIdeSetup {
 // Convert artifacts to agent format for index creation
 const agents = agentArtifacts.map((a) => ({ module: a.module, name: a.name }));

-// Get tasks, tools, and workflows (standalone only)
+// Get tasks, tools, and workflows (ALL workflows now generate commands)
 const tasks = await this.getTasks(bmadDir, true);
 const tools = await this.getTools(bmadDir, true);
-const workflows = await this.getWorkflows(bmadDir, true);
+// Get ALL workflows using the new workflow command generator
+const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
+
+// Convert artifacts to workflow objects for directory creation
+const workflows = workflowArtifacts
+.filter((artifact) => artifact.type === 'workflow-command')
+.map((artifact) => ({
+module: artifact.module,
+name: path.basename(artifact.relativePath, '.md'),
+path: artifact.sourcePath,
+}));

 // Create directories for each module
 const modules = new Set();
@@ -113,18 +126,21 @@ class CursorSetup extends BaseIdeSetup {
 toolCount++;
 }

-// Process and copy workflows
+// Process and copy workflow commands (generated, not raw workflows)
 let workflowCount = 0;
-for (const workflow of workflows) {
-const content = await this.readAndProcess(workflow.path, {
-module: workflow.module,
-name: workflow.name,
-});
+for (const artifact of workflowArtifacts) {
+if (artifact.type === 'workflow-command') {
+// Add MDC metadata header to workflow command
+const content = this.wrapLauncherWithMDC(artifact.content, {
+module: artifact.module,
+name: path.basename(artifact.relativePath, '.md'),
+});

-const targetPath = path.join(bmadRulesDir, workflow.module, 'workflows', `${workflow.name}.mdc`);
+const targetPath = path.join(bmadRulesDir, artifact.module, 'workflows', `${path.basename(artifact.relativePath, '.md')}.mdc`);

 await this.writeFile(targetPath, content);
 workflowCount++;
+}
 }

 // Create BMAD index file (but NOT .cursorrules - user manages that)
@@ -4,6 +4,7 @@ const yaml = require('js-yaml');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');

 /**
 * Gemini CLI setup handler
@@ -68,9 +69,13 @@ class GeminiSetup extends BaseIdeSetup {
 const agentGen = new AgentCommandGenerator(this.bmadFolderName);
 const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);

-// Get tasks
+// Get tasks and workflows (ALL workflows now generate commands)
 const tasks = await this.getTasks(bmadDir);
+
+// Get ALL workflows using the new workflow command generator
+const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);

 // Install agents as TOML files with bmad- prefix (flat structure)
 let agentCount = 0;
 for (const artifact of agentArtifacts) {
@@ -98,17 +103,37 @@ class GeminiSetup extends BaseIdeSetup {
 console.log(chalk.green(` ✓ Added task: /bmad:tasks:${task.module}:${task.name}`));
 }

+// Install workflows as TOML files with bmad- prefix (flat structure)
+let workflowCount = 0;
+for (const artifact of workflowArtifacts) {
+if (artifact.type === 'workflow-command') {
+// Create TOML wrapper around workflow command content
+const tomlContent = await this.createWorkflowToml(artifact);
+
+// Flat structure: bmad-workflow-{module}-{name}.toml
+const workflowName = path.basename(artifact.relativePath, '.md');
+const tomlPath = path.join(commandsDir, `bmad-workflow-${artifact.module}-${workflowName}.toml`);
+await this.writeFile(tomlPath, tomlContent);
+workflowCount++;
+
+console.log(chalk.green(` ✓ Added workflow: /bmad:workflows:${artifact.module}:${workflowName}`));
+}
+}

 console.log(chalk.green(`✓ ${this.name} configured:`));
 console.log(chalk.dim(` - ${agentCount} agents configured`));
 console.log(chalk.dim(` - ${taskCount} tasks configured`));
+console.log(chalk.dim(` - ${workflowCount} workflows configured`));
 console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));
 console.log(chalk.dim(` - Agent activation: /bmad:agents:{agent-name}`));
 console.log(chalk.dim(` - Task activation: /bmad:tasks:{task-name}`));
+console.log(chalk.dim(` - Workflow activation: /bmad:workflows:{workflow-name}`));

 return {
 success: true,
 agents: agentCount,
 tasks: taskCount,
+workflows: workflowCount,
 };
 }
@@ -179,6 +204,27 @@ ${contentWithoutFrontmatter}
 return tomlContent;
 }

+/**
+* Create workflow TOML content from artifact
+*/
+async createWorkflowToml(artifact) {
+// Extract description from artifact content
+const descriptionMatch = artifact.content.match(/description:\s*"([^"]+)"/);
+const description = descriptionMatch
+? descriptionMatch[1]
+: `BMAD ${artifact.module.toUpperCase()} Workflow: ${path.basename(artifact.relativePath, '.md')}`;
+
+// Strip frontmatter from command content
+const frontmatterRegex = /^---\s*\n[\s\S]*?\n---\s*\n/;
+const contentWithoutFrontmatter = artifact.content.replace(frontmatterRegex, '').trim();
+
+return `description = "${description}"
+prompt = """
+${contentWithoutFrontmatter}
+"""
+`;
+}
+
 /**
 * Cleanup Gemini configuration - surgically remove only BMAD files
 */
@@ -3,6 +3,7 @@ const fs = require('fs-extra');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');

 /**
 * iFlow CLI setup handler
@@ -29,9 +30,11 @@ class IFlowSetup extends BaseIdeSetup {
 const commandsDir = path.join(iflowDir, this.commandsDir, 'bmad');
 const agentsDir = path.join(commandsDir, 'agents');
 const tasksDir = path.join(commandsDir, 'tasks');
+const workflowsDir = path.join(commandsDir, 'workflows');

 await this.ensureDir(agentsDir);
 await this.ensureDir(tasksDir);
+await this.ensureDir(workflowsDir);

 // Generate agent launchers
 const agentGen = new AgentCommandGenerator(this.bmadFolderName);
@@ -47,9 +50,13 @@ class IFlowSetup extends BaseIdeSetup {
 agentCount++;
 }

-// Get tasks
+// Get tasks and workflows (ALL workflows now generate commands)
 const tasks = await this.getTasks(bmadDir);
+
+// Get ALL workflows using the new workflow command generator
+const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);

 // Setup tasks as commands
 let taskCount = 0;
 for (const task of tasks) {
@@ -61,15 +68,27 @@ class IFlowSetup extends BaseIdeSetup {
 taskCount++;
 }

+// Setup workflows as commands (already generated)
+let workflowCount = 0;
+for (const artifact of workflowArtifacts) {
+if (artifact.type === 'workflow-command') {
+const targetPath = path.join(workflowsDir, `${artifact.module}-${path.basename(artifact.relativePath, '.md')}.md`);
+await this.writeFile(targetPath, artifact.content);
+workflowCount++;
+}
+}
+
 console.log(chalk.green(`✓ ${this.name} configured:`));
 console.log(chalk.dim(` - ${agentCount} agent commands created`));
 console.log(chalk.dim(` - ${taskCount} task commands created`));
+console.log(chalk.dim(` - ${workflowCount} workflow commands created`));
 console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));

 return {
 success: true,
 agents: agentCount,
 tasks: taskCount,
+workflows: workflowCount,
 };
 }
@@ -0,0 +1,327 @@
+const path = require('node:path');
+const { BaseIdeSetup } = require('./_base-ide');
+const chalk = require('chalk');
+const fs = require('fs-extra');
+const yaml = require('js-yaml');
+
+/**
+ * Kiro CLI setup handler for BMad Method
+ */
+class KiroCliSetup extends BaseIdeSetup {
+constructor() {
+super('kiro-cli', 'Kiro CLI', false);
+this.configDir = '.kiro';
+this.agentsDir = 'agents';
+}
+
+/**
+ * Cleanup old BMAD installation before reinstalling
+ * @param {string} projectDir - Project directory
+ */
+async cleanup(projectDir) {
+const bmadAgentsDir = path.join(projectDir, this.configDir, this.agentsDir);
+
+if (await fs.pathExists(bmadAgentsDir)) {
+// Remove existing BMad agents
+const files = await fs.readdir(bmadAgentsDir);
+for (const file of files) {
+if (file.startsWith('bmad-') || file.includes('bmad')) {
+await fs.remove(path.join(bmadAgentsDir, file));
+}
+}
+console.log(chalk.dim(` Cleaned old BMAD agents from ${this.name}`));
+}
+}
+
+/**
+ * Setup Kiro CLI configuration with BMad agents
+ * @param {string} projectDir - Project directory
+ * @param {string} bmadDir - BMAD installation directory
+ * @param {Object} options - Setup options
+ */
+async setup(projectDir, bmadDir, options = {}) {
+console.log(chalk.cyan(`Setting up ${this.name}...`));
+
+await this.cleanup(projectDir);
+
+const kiroDir = path.join(projectDir, this.configDir);
+const agentsDir = path.join(kiroDir, this.agentsDir);
+
+await this.ensureDir(agentsDir);
+
+// Create BMad agents from source YAML files
+await this.createBmadAgentsFromSource(agentsDir, projectDir);
+
+console.log(chalk.green(`✓ ${this.name} configured with BMad agents`));
+}
+
+/**
+ * Create BMad agent definitions from source YAML files
+ * @param {string} agentsDir - Agents directory
+ * @param {string} projectDir - Project directory
+ */
+async createBmadAgentsFromSource(agentsDir, projectDir) {
+const sourceDir = path.join(__dirname, '../../../../../src/modules');
+
+// Find all agent YAML files
+const agentFiles = await this.findAgentFiles(sourceDir);
+
+for (const agentFile of agentFiles) {
+try {
+await this.processAgentFile(agentFile, agentsDir, projectDir);
+} catch (error) {
+console.warn(chalk.yellow(`⚠️ Failed to process ${agentFile}: ${error.message}`));
+}
+}
+}
+
+/**
+ * Find all agent YAML files in modules and core
+ * @param {string} sourceDir - Source modules directory
+ * @returns {Array} Array of agent file paths
+ */
+async findAgentFiles(sourceDir) {
+const agentFiles = [];
+
+// Check core agents
+const coreAgentsDir = path.join(__dirname, '../../../../../src/core/agents');
+if (await fs.pathExists(coreAgentsDir)) {
+const files = await fs.readdir(coreAgentsDir);
+
+for (const file of files) {
+if (file.endsWith('.agent.yaml')) {
+agentFiles.push(path.join(coreAgentsDir, file));
+}
+}
+}
+
+// Check module agents
+if (!(await fs.pathExists(sourceDir))) {
+return agentFiles;
+}
+
+const modules = await fs.readdir(sourceDir);
+
+for (const module of modules) {
+const moduleAgentsDir = path.join(sourceDir, module, 'agents');
+
+if (await fs.pathExists(moduleAgentsDir)) {
+const files = await fs.readdir(moduleAgentsDir);
+
+for (const file of files) {
+if (file.endsWith('.agent.yaml')) {
+agentFiles.push(path.join(moduleAgentsDir, file));
+}
+}
+}
+}
+
+return agentFiles;
+}
+
+/**
+ * Validate BMad Core compliance
+ * @param {Object} agentData - Agent YAML data
+ * @returns {boolean} True if compliant
+ */
+validateBmadCompliance(agentData) {
+const requiredFields = ['agent.metadata.id', 'agent.persona.role', 'agent.persona.principles'];
+
+for (const field of requiredFields) {
+const keys = field.split('.');
+let current = agentData;
+
+for (const key of keys) {
+if (!current || !current[key]) {
+return false;
+}
+current = current[key];
+}
+}
+
+return true;
+}
+
+/**
+ * Process individual agent YAML file
+ * @param {string} agentFile - Path to agent YAML file
+ * @param {string} agentsDir - Target agents directory
+ * @param {string} projectDir - Project directory
+ */
+async processAgentFile(agentFile, agentsDir, projectDir) {
+const yamlContent = await fs.readFile(agentFile, 'utf8');
+const agentData = yaml.load(yamlContent);
+
+if (!this.validateBmadCompliance(agentData)) {
+return;
+}
+
+// Extract module from file path
+const normalizedPath = path.normalize(agentFile);
+const pathParts = normalizedPath.split(path.sep);
+const basename = path.basename(agentFile, '.agent.yaml');
+
+// Find the module name from path
+let moduleName = 'unknown';
+if (pathParts.includes('src')) {
+const srcIndex = pathParts.indexOf('src');
+if (srcIndex + 3 < pathParts.length) {
+const folderAfterSrc = pathParts[srcIndex + 1];
+// Handle both src/core/agents and src/modules/[module]/agents patterns
+if (folderAfterSrc === 'core') {
+moduleName = 'core';
+} else if (folderAfterSrc === 'modules') {
+moduleName = pathParts[srcIndex + 2]; // The actual module name
+}
+}
+}
+
+// Extract the agent name from the ID path in YAML if available
+let agentBaseName = basename;
+if (agentData.agent && agentData.agent.metadata && agentData.agent.metadata.id) {
+const idPath = agentData.agent.metadata.id;
+agentBaseName = path.basename(idPath, '.md');
+}
+
+const agentName = `bmad-${moduleName}-${agentBaseName}`;
+const sanitizedAgentName = this.sanitizeAgentName(agentName);
+
+// Create JSON definition
+await this.createAgentDefinitionFromYaml(agentsDir, sanitizedAgentName, agentData);
+
+// Create prompt file
+await this.createAgentPromptFromYaml(agentsDir, sanitizedAgentName, agentData, projectDir);
+}
+
+/**
+ * Sanitize agent name for file naming
+ * @param {string} name - Agent name
+ * @returns {string} Sanitized name
+ */
+sanitizeAgentName(name) {
+return name
+.toLowerCase()
+.replaceAll(/\s+/g, '-')
+.replaceAll(/[^a-z0-9-]/g, '');
+}
+
+/**
+ * Create agent JSON definition from YAML data
+ * @param {string} agentsDir - Agents directory
+ * @param {string} agentName - Agent name (role-based)
+ * @param {Object} agentData - Agent YAML data
+ */
+async createAgentDefinitionFromYaml(agentsDir, agentName, agentData) {
+const personName = agentData.agent.metadata.name;
+const role = agentData.agent.persona.role;
+
+const agentConfig = {
+name: agentName,
+description: `${personName} - ${role}`,
+prompt: `file://./${agentName}-prompt.md`,
+tools: ['*'],
+mcpServers: {},
+useLegacyMcpJson: true,
+resources: [],
+};
+
+const agentPath = path.join(agentsDir, `${agentName}.json`);
+await fs.writeJson(agentPath, agentConfig, { spaces: 2 });
+}
+
+/**
+ * Create agent prompt from YAML data
+ * @param {string} agentsDir - Agents directory
+ * @param {string} agentName - Agent name (role-based)
+ * @param {Object} agentData - Agent YAML data
+ * @param {string} projectDir - Project directory
+ */
+async createAgentPromptFromYaml(agentsDir, agentName, agentData, projectDir) {
+const promptPath = path.join(agentsDir, `${agentName}-prompt.md`);
+
+// Generate prompt from YAML data
+const prompt = this.generatePromptFromYaml(agentData);
+await fs.writeFile(promptPath, prompt);
+}
+
+/**
+ * Generate prompt content from YAML data
+ * @param {Object} agentData - Agent YAML data
+ * @returns {string} Generated prompt
+ */
+generatePromptFromYaml(agentData) {
+const agent = agentData.agent;
+const name = agent.metadata.name;
+const icon = agent.metadata.icon || '🤖';
+const role = agent.persona.role;
+const identity = agent.persona.identity;
+const style = agent.persona.communication_style;
+const principles = agent.persona.principles;
+
+let prompt = `# ${name} ${icon}\n\n`;
+prompt += `## Role\n${role}\n\n`;
+
+if (identity) {
+prompt += `## Identity\n${identity}\n\n`;
+}
+
+if (style) {
+prompt += `## Communication Style\n${style}\n\n`;
+}
+
+if (principles) {
+prompt += `## Principles\n`;
+if (typeof principles === 'string') {
+// Handle multi-line string principles
+prompt += principles + '\n\n';
+} else if (Array.isArray(principles)) {
+// Handle array principles
+for (const principle of principles) {
+prompt += `- ${principle}\n`;
+}
+prompt += '\n';
+}
+}
+
+// Add menu items if available
+if (agent.menu && agent.menu.length > 0) {
+prompt += `## Available Workflows\n`;
+for (let i = 0; i < agent.menu.length; i++) {
+const item = agent.menu[i];
+prompt += `${i + 1}. **${item.trigger}**: ${item.description}\n`;
+}
+prompt += '\n';
+}
+
+prompt += `## Instructions\nYou are ${name}, part of the BMad Method. Follow your role and principles while assisting users with their development needs.\n`;
+
+return prompt;
+}
+
+/**
+ * Check if Kiro CLI is available
+ * @returns {Promise<boolean>} True if available
+ */
+async isAvailable() {
+try {
+const { execSync } = require('node:child_process');
+execSync('kiro-cli --version', { stdio: 'ignore' });
+return true;
+} catch {
+return false;
+}
+}
+
+/**
+ * Get installation instructions
+ * @returns {string} Installation instructions
+ */
+getInstallInstructions() {
+return `Install Kiro CLI:
+curl -fsSL https://github.com/aws/kiro-cli/releases/latest/download/install.sh | bash
+
+Or visit: https://github.com/aws/kiro-cli`;
+}
+}
+
+module.exports = { KiroCliSetup };
@@ -47,7 +47,7 @@ class OpenCodeSetup extends BaseIdeSetup {
 agentCount++;
 }

-// Install workflow commands with flat naming: bmad-workflow-{module}-{name}.md
+// Install workflow commands with flat naming: bmad-{module}-{workflow-name}
 const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
 const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);

@@ -55,10 +55,10 @@ class OpenCodeSetup extends BaseIdeSetup {
 for (const artifact of workflowArtifacts) {
 if (artifact.type === 'workflow-command') {
 const commandContent = artifact.content;
-// Flat structure: bmad-workflow-{module}-{name}.md
+// Flat structure: bmad-{module}-{workflow-name}.md
 // artifact.relativePath is like: bmm/workflows/plan-project.md
 const workflowName = path.basename(artifact.relativePath, '.md');
-const targetPath = path.join(commandsBaseDir, `bmad-workflow-${artifact.module}-${workflowName}.md`);
+const targetPath = path.join(commandsBaseDir, `bmad-${artifact.module}-${workflowName}.md`);
 await this.writeFile(targetPath, commandContent);
 workflowCommandCount++;
 }
@ -5,34 +5,13 @@ const { AgentCommandGenerator } = require('./shared/agent-command-generator');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Roo IDE setup handler
|
* Roo IDE setup handler
|
||||||
* Creates custom modes in .roomodes file
|
* Creates custom commands in .roo/commands directory
|
||||||
*/
|
*/
|
||||||
class RooSetup extends BaseIdeSetup {
|
class RooSetup extends BaseIdeSetup {
|
||||||
constructor() {
|
constructor() {
|
||||||
super('roo', 'Roo Code');
|
super('roo', 'Roo Code');
|
||||||
this.configFile = '.roomodes';
|
this.configDir = '.roo';
|
||||||
this.defaultPermissions = {
|
this.commandsDir = 'commands';
|
||||||
dev: {
|
|
||||||
description: 'Development files',
|
|
||||||
fileRegex: String.raw`.*\.(js|jsx|ts|tsx|py|java|cpp|c|h|cs|go|rs|php|rb|swift)$`,
|
|
||||||
},
|
|
||||||
config: {
|
|
||||||
description: 'Configuration files',
|
|
||||||
fileRegex: String.raw`.*\.(json|yaml|yml|toml|xml|ini|env|config)$`,
|
|
||||||
},
|
|
||||||
docs: {
|
|
||||||
description: 'Documentation files',
|
|
||||||
fileRegex: String.raw`.*\.(md|mdx|rst|txt|doc|docx)$`,
|
|
||||||
},
|
|
||||||
styles: {
|
|
||||||
description: 'Style and design files',
|
|
||||||
fileRegex: String.raw`.*\.(css|scss|sass|less|stylus)$`,
|
|
||||||
},
|
|
||||||
all: {
|
|
||||||
description: 'All files',
|
|
||||||
fileRegex: '.*',
|
|
||||||
},
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -44,94 +23,96 @@ class RooSetup extends BaseIdeSetup {
|
||||||
async setup(projectDir, bmadDir, options = {}) {
|
async setup(projectDir, bmadDir, options = {}) {
|
||||||
console.log(chalk.cyan(`Setting up ${this.name}...`));
|
console.log(chalk.cyan(`Setting up ${this.name}...`));
|
||||||
|
|
||||||
// Check for existing .roomodes file
|
// Create .roo/commands directory
|
||||||
const roomodesPath = path.join(projectDir, this.configFile);
|
const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||||
let existingModes = [];
|
await this.ensureDir(rooCommandsDir);
|
||||||
let existingContent = '';
|
|
||||||
|
|
||||||
if (await this.pathExists(roomodesPath)) {
|
// Generate agent launchers
|
||||||
existingContent = await this.readFile(roomodesPath);
|
|
||||||
// Parse existing modes to avoid duplicates
|
|
||||||
const modeMatches = existingContent.matchAll(/- slug: ([\w-]+)/g);
|
|
||||||
for (const match of modeMatches) {
|
|
||||||
existingModes.push(match[1]);
|
|
||||||
}
|
|
||||||
console.log(chalk.yellow(`Found existing .roomodes file with ${existingModes.length} modes`));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate agent launchers (though Roo will reference the actual .bmad agents)
|
|
||||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||||
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||||
|
|
||||||
// Always use 'all' permissions - users can customize in .roomodes file
|
|
||||||
const permissionChoice = 'all';
|
|
||||||
|
|
||||||
// Create modes content
|
|
||||||
let newModesContent = '';
|
|
||||||
let addedCount = 0;
|
let addedCount = 0;
|
||||||
let skippedCount = 0;
|
let skippedCount = 0;
|
||||||
|
|
||||||
for (const artifact of agentArtifacts) {
|
for (const artifact of agentArtifacts) {
|
||||||
const slug = `bmad-${artifact.module}-${artifact.name}`;
|
const commandName = `bmad-${artifact.module}-agent-${artifact.name}`;
|
||||||
|
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
||||||
|
|
||||||
// Skip if already exists
|
// Skip if already exists
|
||||||
if (existingModes.includes(slug)) {
|
if (await this.pathExists(commandPath)) {
|
||||||
console.log(chalk.dim(` Skipping ${slug} - already exists`));
|
console.log(chalk.dim(` Skipping ${commandName} - already exists`));
|
||||||
skippedCount++;
|
skippedCount++;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read the actual agent file from .bmad for metadata extraction
|
// Read the actual agent file from .bmad for metadata extraction (installed agents are .md files)
|
||||||
const agentPath = path.join(bmadDir, artifact.module, 'agents', `${artifact.name}.md`);
|
const agentPath = path.join(bmadDir, artifact.module, 'agents', `${artifact.name}.md`);
|
||||||
const content = await this.readFile(agentPath);
|
const content = await this.readFile(agentPath);
|
||||||
|
|
||||||
// Create mode entry that references the actual .bmad agent
|
// Create command file that references the actual .bmad agent
|
||||||
const modeEntry = await this.createModeEntry(
|
await this.createCommandFile({ module: artifact.module, name: artifact.name, path: agentPath }, content, commandPath, projectDir);
|
||||||
{ module: artifact.module, name: artifact.name, path: agentPath },
|
|
||||||
content,
|
|
||||||
permissionChoice,
|
|
||||||
projectDir,
|
|
||||||
);
|
|
||||||
|
|
||||||
newModesContent += modeEntry;
|
|
||||||
addedCount++;
|
addedCount++;
|
||||||
console.log(chalk.green(` ✓ Added mode: ${slug}`));
|
console.log(chalk.green(` ✓ Added command: ${commandName}`));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build final content
|
|
||||||
let finalContent = '';
|
|
||||||
if (existingContent) {
|
|
||||||
// Append to existing content
|
|
||||||
finalContent = existingContent.trim() + '\n' + newModesContent;
|
|
||||||
} else {
|
|
||||||
// Create new .roomodes file
|
|
||||||
finalContent = 'customModes:\n' + newModesContent;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write .roomodes file
|
|
||||||
await this.writeFile(roomodesPath, finalContent);
|
|
||||||
|
|
||||||
console.log(chalk.green(`✓ ${this.name} configured:`));
|
console.log(chalk.green(`✓ ${this.name} configured:`));
|
||||||
console.log(chalk.dim(` - ${addedCount} modes added`));
|
console.log(chalk.dim(` - ${addedCount} commands added`));
|
||||||
if (skippedCount > 0) {
|
if (skippedCount > 0) {
|
||||||
console.log(chalk.dim(` - ${skippedCount} modes skipped (already exist)`));
|
console.log(chalk.dim(` - ${skippedCount} commands skipped (already exist)`));
|
||||||
}
|
}
|
||||||
console.log(chalk.dim(` - Configuration file: ${this.configFile}`));
|
console.log(chalk.dim(` - Commands directory: ${this.configDir}/${this.commandsDir}/bmad/`));
|
||||||
console.log(chalk.dim(` - Permission level: all (unrestricted)`));
|
console.log(chalk.dim(` Commands will be available when you open this project in Roo Code`));
|
||||||
console.log(chalk.yellow(`\n 💡 Tip: Edit ${this.configFile} to customize file permissions per agent`));
|
|
||||||
console.log(chalk.dim(` Modes will be available when you open this project in Roo Code`));
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
success: true,
|
success: true,
|
||||||
modes: addedCount,
|
commands: addedCount,
|
||||||
skipped: skippedCount,
|
skipped: skippedCount,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a mode entry for an agent
|
* Create a unified command file for agents
|
||||||
|
* @param {string} commandPath - Path where to write the command file
|
||||||
|
* @param {Object} options - Command options
|
||||||
|
* @param {string} options.name - Display name for the command
|
||||||
|
* @param {string} options.description - Description for the command
|
||||||
|
* @param {string} options.agentPath - Path to the agent file (relative to project root)
|
||||||
|
* @param {string} [options.icon] - Icon emoji (defaults to 🤖)
|
||||||
|
* @param {string} [options.extraContent] - Additional content to include before activation
|
||||||
*/
|
*/
|
||||||
async createModeEntry(agent, content, permissionChoice, projectDir) {
|
async createAgentCommandFile(commandPath, options) {
|
||||||
|
const { name, description, agentPath, icon = '🤖', extraContent = '' } = options;
|
||||||
|
|
||||||
|
// Build command content with YAML frontmatter
|
||||||
|
let commandContent = `---\n`;
|
||||||
|
commandContent += `name: '${icon} ${name}'\n`;
|
||||||
|
commandContent += `description: '${description}'\n`;
|
||||||
|
commandContent += `---\n\n`;
|
||||||
|
|
||||||
|
commandContent += `You must fully embody this agent's persona and follow all activation instructions exactly as specified. NEVER break character until given an exit command.\n\n`;
|
||||||
|
|
||||||
|
// Add any extra content (e.g., warnings for custom agents)
|
||||||
|
if (extraContent) {
|
||||||
|
commandContent += `${extraContent}\n\n`;
|
||||||
|
}
|
||||||
|
|
||||||
|
commandContent += `<agent-activation CRITICAL="TRUE">\n`;
|
||||||
|
commandContent += `1. LOAD the FULL agent file from @${agentPath}\n`;
|
||||||
|
commandContent += `2. READ its entire contents - this contains the complete agent persona, menu, and instructions\n`;
|
||||||
|
commandContent += `3. Execute ALL activation steps exactly as written in the agent file\n`;
|
||||||
|
commandContent += `4. Follow the agent's persona and menu system precisely\n`;
|
||||||
|
commandContent += `5. Stay in character throughout the session\n`;
|
||||||
|
commandContent += `</agent-activation>\n`;
|
||||||
|
|
||||||
|
// Write command file
|
||||||
|
await this.writeFile(commandPath, commandContent);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a command file for an agent
|
||||||
|
*/
|
||||||
|
async createCommandFile(agent, content, commandPath, projectDir) {
|
||||||
// Extract metadata from agent content
|
// Extract metadata from agent content
|
||||||
const titleMatch = content.match(/title="([^"]+)"/);
|
const titleMatch = content.match(/title="([^"]+)"/);
|
||||||
const title = titleMatch ? titleMatch[1] : this.formatTitle(agent.name);
|
const title = titleMatch ? titleMatch[1] : this.formatTitle(agent.name);
|
||||||
|
|
@ -142,66 +123,16 @@ class RooSetup extends BaseIdeSetup {
|
||||||
const whenToUseMatch = content.match(/whenToUse="([^"]+)"/);
|
const whenToUseMatch = content.match(/whenToUse="([^"]+)"/);
|
||||||
const whenToUse = whenToUseMatch ? whenToUseMatch[1] : `Use for ${title} tasks`;
|
const whenToUse = whenToUseMatch ? whenToUseMatch[1] : `Use for ${title} tasks`;
|
||||||
|
|
||||||
// Get the activation header from central template
|
|
||||||
const activationHeader = await this.getAgentCommandHeader();
|
|
||||||
|
|
||||||
const roleDefinitionMatch = content.match(/roleDefinition="([^"]+)"/);
|
|
||||||
const roleDefinition = roleDefinitionMatch
|
|
||||||
? roleDefinitionMatch[1]
|
|
||||||
: `You are a ${title} specializing in ${title.toLowerCase()} tasks and responsibilities.`;
|
|
||||||
|
|
||||||
// Get relative path
|
// Get relative path
|
||||||
const relativePath = path.relative(projectDir, agent.path).replaceAll('\\', '/');
|
const relativePath = path.relative(projectDir, agent.path).replaceAll('\\', '/');
|
||||||
|
|
||||||
// Determine permissions
|
// Use unified method
|
||||||
const permissions = this.getPermissionsForAgent(agent, permissionChoice);
|
await this.createAgentCommandFile(commandPath, {
|
||||||
|
name: title,
|
||||||
// Build mode entry
|
description: whenToUse,
|
||||||
const slug = `bmad-${agent.module}-${agent.name}`;
|
agentPath: relativePath,
|
||||||
let modeEntry = ` - slug: ${slug}\n`;
|
icon: icon,
|
||||||
modeEntry += ` name: '${icon} ${title}'\n`;
|
});
|
||||||
|
|
||||||
if (permissions && permissions.description) {
|
|
||||||
modeEntry += ` description: '${permissions.description}'\n`;
|
|
||||||
}
|
|
||||||
|
|
||||||
modeEntry += ` roleDefinition: ${roleDefinition}\n`;
|
|
||||||
modeEntry += ` whenToUse: ${whenToUse}\n`;
|
|
||||||
modeEntry += ` customInstructions: ${activationHeader} Read the full YAML from ${relativePath} start activation to alter your state of being follow startup section instructions stay in this being until told to exit this mode\n`;
|
|
||||||
modeEntry += ` groups:\n`;
|
|
||||||
modeEntry += ` - read\n`;
|
|
||||||
|
|
||||||
if (permissions && permissions.fileRegex) {
|
|
||||||
modeEntry += ` - - edit\n`;
|
|
||||||
modeEntry += ` - fileRegex: ${permissions.fileRegex}\n`;
|
|
||||||
modeEntry += ` description: ${permissions.description}\n`;
|
|
||||||
} else {
|
|
||||||
modeEntry += ` - edit\n`;
|
|
||||||
}
|
|
||||||
|
|
||||||
return modeEntry;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get permissions configuration for an agent
|
|
||||||
*/
|
|
||||||
getPermissionsForAgent(agent, permissionChoice) {
|
|
||||||
if (permissionChoice === 'custom') {
|
|
||||||
// Custom logic based on agent name/module
|
|
||||||
if (agent.name.includes('dev') || agent.name.includes('code')) {
|
|
||||||
return this.defaultPermissions.dev;
|
|
||||||
} else if (agent.name.includes('doc') || agent.name.includes('write')) {
|
|
||||||
return this.defaultPermissions.docs;
|
|
||||||
} else if (agent.name.includes('config') || agent.name.includes('setup')) {
|
|
||||||
return this.defaultPermissions.config;
|
|
||||||
} else if (agent.name.includes('style') || agent.name.includes('css')) {
|
|
||||||
return this.defaultPermissions.styles;
|
|
||||||
}
|
|
||||||
// Default to all for custom agents
|
|
||||||
return this.defaultPermissions.all;
|
|
||||||
}
|
|
||||||
|
|
||||||
return this.defaultPermissions[permissionChoice] || null;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
@ -219,8 +150,26 @@ class RooSetup extends BaseIdeSetup {
|
||||||
*/
|
*/
|
||||||
async cleanup(projectDir) {
|
async cleanup(projectDir) {
|
||||||
const fs = require('fs-extra');
|
const fs = require('fs-extra');
|
||||||
const roomodesPath = path.join(projectDir, this.configFile);
|
const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||||
|
|
||||||
|
if (await fs.pathExists(rooCommandsDir)) {
|
||||||
|
const files = await fs.readdir(rooCommandsDir);
|
||||||
|
let removedCount = 0;
|
||||||
|
|
||||||
|
for (const file of files) {
|
||||||
|
if (file.startsWith('bmad-') && file.endsWith('.md')) {
|
||||||
|
await fs.remove(path.join(rooCommandsDir, file));
|
||||||
|
removedCount++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (removedCount > 0) {
|
||||||
|
console.log(chalk.dim(`Removed ${removedCount} BMAD commands from .roo/commands/`));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also clean up old .roomodes file if it exists
|
||||||
|
const roomodesPath = path.join(projectDir, '.roomodes');
|
||||||
if (await fs.pathExists(roomodesPath)) {
|
if (await fs.pathExists(roomodesPath)) {
|
||||||
const content = await fs.readFile(roomodesPath, 'utf8');
|
const content = await fs.readFile(roomodesPath, 'utf8');
|
||||||
|
|
||||||
|
|
@ -245,7 +194,9 @@ class RooSetup extends BaseIdeSetup {
|
||||||
|
|
||||||
// Write back filtered content
|
// Write back filtered content
|
||||||
await fs.writeFile(roomodesPath, filteredLines.join('\n'));
|
await fs.writeFile(roomodesPath, filteredLines.join('\n'));
|
||||||
console.log(chalk.dim(`Removed ${removedCount} BMAD modes from .roomodes`));
|
if (removedCount > 0) {
|
||||||
|
console.log(chalk.dim(`Removed ${removedCount} BMAD modes from legacy .roomodes file`));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -254,68 +205,53 @@ class RooSetup extends BaseIdeSetup {
|
||||||
* @param {string} projectDir - Project directory
|
* @param {string} projectDir - Project directory
|
||||||
* @param {string} agentName - Agent name (e.g., "fred-commit-poet")
|
* @param {string} agentName - Agent name (e.g., "fred-commit-poet")
|
||||||
* @param {string} agentPath - Path to compiled agent (relative to project root)
|
* @param {string} agentPath - Path to compiled agent (relative to project root)
|
||||||
* @param {Object} metadata - Agent metadata
|
* @param {Object} metadata - Agent metadata (unused, kept for compatibility)
|
||||||
* @returns {Object} Installation result
|
* @returns {Object} Installation result
|
||||||
*/
|
*/
|
||||||
async installCustomAgentLauncher(projectDir, agentName, agentPath, metadata) {
|
async installCustomAgentLauncher(projectDir, agentName, agentPath, metadata) {
|
||||||
const roomodesPath = path.join(projectDir, this.configFile);
|
const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||||
let existingContent = '';
|
await this.ensureDir(rooCommandsDir);
|
||||||
|
|
||||||
// Read existing .roomodes file
|
const commandName = `bmad-custom-agent-${agentName.toLowerCase()}`;
|
||||||
if (await this.pathExists(roomodesPath)) {
|
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
||||||
existingContent = await this.readFile(roomodesPath);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create custom agent mode entry
|
// Check if command already exists
|
||||||
const slug = `bmad-custom-${agentName.toLowerCase()}`;
|
if (await this.pathExists(commandPath)) {
|
||||||
const modeEntry = ` - slug: ${slug}
|
|
||||||
name: 'BMAD Custom: ${agentName}'
|
|
||||||
description: |
|
|
||||||
Custom BMAD agent: ${agentName}
|
|
||||||
|
|
||||||
**⚠️ IMPORTANT**: Run @${agentPath} first to load the complete agent!
|
|
||||||
|
|
||||||
This is a launcher for the custom BMAD agent "${agentName}". The agent will follow the persona and instructions from the main agent file.
|
|
||||||
prompt: |
|
|
||||||
@${agentPath}
|
|
||||||
always: false
|
|
||||||
permissions: all
|
|
||||||
`;
|
|
||||||
|
|
||||||
// Check if mode already exists
|
|
||||||
if (existingContent.includes(slug)) {
|
|
||||||
return {
|
return {
|
||||||
ide: 'roo',
|
ide: 'roo',
|
||||||
path: this.configFile,
|
path: path.join(this.configDir, this.commandsDir, `${commandName}.md`),
|
||||||
command: agentName,
|
command: commandName,
|
||||||
type: 'custom-agent-launcher',
|
type: 'custom-agent-launcher',
|
||||||
alreadyExists: true,
|
alreadyExists: true,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build final content
|
// Read the custom agent file to extract metadata (same as regular agents)
|
||||||
let finalContent = '';
|
const fullAgentPath = path.join(projectDir, agentPath);
|
||||||
if (existingContent) {
|
const content = await this.readFile(fullAgentPath);
|
||||||
// Find customModes section or add it
|
|
||||||
if (existingContent.includes('customModes:')) {
|
|
||||||
// Append to existing customModes
|
|
||||||
finalContent = existingContent + modeEntry;
|
|
||||||
} else {
|
|
||||||
// Add customModes section
|
|
||||||
finalContent = existingContent.trim() + '\n\ncustomModes:\n' + modeEntry;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Create new .roomodes file with customModes
|
|
||||||
finalContent = 'customModes:\n' + modeEntry;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write .roomodes file
|
// Extract metadata from agent content
|
||||||
await this.writeFile(roomodesPath, finalContent);
|
const titleMatch = content.match(/title="([^"]+)"/);
|
||||||
|
const title = titleMatch ? titleMatch[1] : this.formatTitle(agentName);
|
||||||
|
|
||||||
|
const iconMatch = content.match(/icon="([^"]+)"/);
|
||||||
|
const icon = iconMatch ? iconMatch[1] : '🤖';
|
||||||
|
|
||||||
|
const whenToUseMatch = content.match(/whenToUse="([^"]+)"/);
|
||||||
|
const whenToUse = whenToUseMatch ? whenToUseMatch[1] : `Use for ${title} tasks`;
|
||||||
|
|
||||||
|
// Use unified method without extra content (clean)
|
||||||
|
await this.createAgentCommandFile(commandPath, {
|
||||||
|
name: title,
|
||||||
|
description: whenToUse,
|
||||||
|
agentPath: agentPath,
|
||||||
|
icon: icon,
|
||||||
|
});
|
||||||
|
|
||||||
return {
|
return {
|
||||||
ide: 'roo',
|
ide: 'roo',
|
||||||
path: this.configFile,
|
path: path.join(this.configDir, this.commandsDir, `${commandName}.md`),
|
||||||
command: slug,
|
command: commandName,
|
||||||
type: 'custom-agent-launcher',
|
type: 'custom-agent-launcher',
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -90,6 +90,11 @@ async function getAgentsFromDir(dirPath, moduleName) {
 continue;
 }
+
+// Skip README files and other non-agent files
+if (file.toLowerCase() === 'readme.md' || file.toLowerCase().startsWith('readme-')) {
+continue;
+}

 if (file.includes('.customize.')) {
 continue;
 }
@@ -101,6 +106,11 @@ async function getAgentsFromDir(dirPath, moduleName) {
 continue;
 }
+
+// Only include files that have agent-specific content (compiled agents have <agent> tag)
+if (!content.includes('<agent')) {
+continue;
+}

 agents.push({
 path: filePath,
 name: file.replace('.md', ''),
@@ -25,16 +25,16 @@ class WorkflowCommandGenerator {
 return { generated: 0 };
 }

-// Filter to only standalone workflows
-const standaloneWorkflows = workflows.filter((w) => w.standalone === 'true' || w.standalone === true);
+// ALL workflows now generate commands - no standalone filtering
+const allWorkflows = workflows;

 // Base commands directory
 const baseCommandsDir = path.join(projectDir, '.claude', 'commands', 'bmad');

 let generatedCount = 0;

-// Generate a command file for each standalone workflow, organized by module
-for (const workflow of standaloneWorkflows) {
+// Generate a command file for each workflow, organized by module
+for (const workflow of allWorkflows) {
 const moduleWorkflowsDir = path.join(baseCommandsDir, workflow.module, 'workflows');
 await fs.ensureDir(moduleWorkflowsDir);

@@ -46,7 +46,7 @@ class WorkflowCommandGenerator {
 }

 // Also create a workflow launcher README in each module
-const groupedWorkflows = this.groupWorkflowsByModule(standaloneWorkflows);
+const groupedWorkflows = this.groupWorkflowsByModule(allWorkflows);
 await this.createModuleWorkflowLaunchers(baseCommandsDir, groupedWorkflows);

 return { generated: generatedCount };
@@ -59,12 +59,12 @@ class WorkflowCommandGenerator {
 return { artifacts: [], counts: { commands: 0, launchers: 0 } };
 }

-// Filter to only standalone workflows
-const standaloneWorkflows = workflows.filter((w) => w.standalone === 'true' || w.standalone === true);
+// ALL workflows now generate commands - no standalone filtering
+const allWorkflows = workflows;

 const artifacts = [];

-for (const workflow of standaloneWorkflows) {
+for (const workflow of allWorkflows) {
 const commandContent = await this.generateCommandContent(workflow, bmadDir);
 artifacts.push({
 type: 'workflow-command',
@@ -75,7 +75,7 @@ class WorkflowCommandGenerator {
 });
 }

-const groupedWorkflows = this.groupWorkflowsByModule(standaloneWorkflows);
+const groupedWorkflows = this.groupWorkflowsByModule(allWorkflows);
 for (const [module, launcherContent] of Object.entries(this.buildModuleWorkflowLaunchers(groupedWorkflows))) {
 artifacts.push({
 type: 'workflow-launcher',
@@ -89,7 +89,7 @@ class WorkflowCommandGenerator {
 return {
 artifacts,
 counts: {
-commands: standaloneWorkflows.length,
+commands: allWorkflows.length,
 launchers: Object.keys(groupedWorkflows).length,
 },
 };
@@ -99,8 +99,13 @@ class WorkflowCommandGenerator {
 * Generate command content for a workflow
 */
 async generateCommandContent(workflow, bmadDir) {
-// Load the template
-const template = await fs.readFile(this.templatePath, 'utf8');
+// Determine template based on workflow file type
+const isMarkdownWorkflow = workflow.path.endsWith('workflow.md');
+const templateName = isMarkdownWorkflow ? 'workflow-commander.md' : 'workflow-command-template.md';
+const templatePath = path.join(path.dirname(this.templatePath), templateName);
+
+// Load the appropriate template
+const template = await fs.readFile(templatePath, 'utf8');

 // Convert source path to installed path
 // From: /Users/.../src/modules/bmm/workflows/.../workflow.yaml
@@ -130,9 +135,7 @@ class WorkflowCommandGenerator {
 .replaceAll('{{workflow_path}}', workflowPath)
 .replaceAll('{{core_workflow_path}}', coreWorkflowPath)
 .replaceAll('{bmad_folder}', this.bmadFolderName)
-.replaceAll('{*bmad_folder*}', '{bmad_folder}')
-.replaceAll('{{interactive}}', workflow.interactive)
-.replaceAll('{{author}}', workflow.author || 'BMAD');
+.replaceAll('{*bmad_folder*}', '{bmad_folder}');
 }

 /**
@@ -0,0 +1,5 @@
+---
+description: '{{description}}'
+---
+
+IT IS CRITICAL THAT YOU FOLLOW THIS COMMAND: LOAD the FULL @{{workflow_path}}, READ its entire contents and follow its directions exactly!