Merge branch 'main' into main
This commit is contained in:
commit
26a3aba0ee
|
|
@ -0,0 +1,15 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# Discord notification helper functions
|
||||||
|
|
||||||
|
# Escape markdown special chars and @mentions for safe Discord display
|
||||||
|
# Bracket expression: ] must be first, then other chars. In POSIX bracket expr, \ is literal.
|
||||||
|
esc() { sed -e 's/[][\*_()~`>]/\\&/g' -e 's/@/@ /g'; }
|
||||||
|
|
||||||
|
# Truncate to $1 chars (or 80 if wall-of-text with <3 spaces)
|
||||||
|
trunc() {
|
||||||
|
local max=$1
|
||||||
|
local txt=$(tr '\n\r' ' ' | cut -c1-"$max")
|
||||||
|
local spaces=$(printf '%s' "$txt" | tr -cd ' ' | wc -c)
|
||||||
|
[ "$spaces" -lt 3 ] && [ ${#txt} -gt 80 ] && txt=$(printf '%s' "$txt" | cut -c1-80)
|
||||||
|
printf '%s' "$txt"
|
||||||
|
}
|
||||||
|
|
@ -1,16 +1,286 @@
|
||||||
name: Discord Notification
|
name: Discord Notification
|
||||||
|
|
||||||
"on": [pull_request, release, create, delete, issue_comment, pull_request_review, pull_request_review_comment]
|
on:
|
||||||
|
pull_request:
|
||||||
|
types: [opened, closed, reopened, ready_for_review]
|
||||||
|
release:
|
||||||
|
types: [published]
|
||||||
|
create:
|
||||||
|
delete:
|
||||||
|
issue_comment:
|
||||||
|
types: [created]
|
||||||
|
pull_request_review:
|
||||||
|
types: [submitted]
|
||||||
|
pull_request_review_comment:
|
||||||
|
types: [created]
|
||||||
|
issues:
|
||||||
|
types: [opened, closed, reopened]
|
||||||
|
|
||||||
|
env:
|
||||||
|
MAX_TITLE: 100
|
||||||
|
MAX_BODY: 250
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
notify:
|
pull_request:
|
||||||
|
if: github.event_name == 'pull_request'
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.repository.default_branch }}
|
||||||
|
sparse-checkout: .github/scripts
|
||||||
|
sparse-checkout-cone-mode: false
|
||||||
|
- name: Notify Discord
|
||||||
|
env:
|
||||||
|
WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
ACTION: ${{ github.event.action }}
|
||||||
|
MERGED: ${{ github.event.pull_request.merged }}
|
||||||
|
PR_NUM: ${{ github.event.pull_request.number }}
|
||||||
|
PR_URL: ${{ github.event.pull_request.html_url }}
|
||||||
|
PR_TITLE: ${{ github.event.pull_request.title }}
|
||||||
|
PR_USER: ${{ github.event.pull_request.user.login }}
|
||||||
|
PR_BODY: ${{ github.event.pull_request.body }}
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
source .github/scripts/discord-helpers.sh
|
||||||
|
[ -z "$WEBHOOK" ] && exit 0
|
||||||
|
|
||||||
|
if [ "$ACTION" = "opened" ]; then ICON="🔀"; LABEL="New PR"
|
||||||
|
elif [ "$ACTION" = "closed" ] && [ "$MERGED" = "true" ]; then ICON="🎉"; LABEL="Merged"
|
||||||
|
elif [ "$ACTION" = "closed" ]; then ICON="❌"; LABEL="Closed"
|
||||||
|
elif [ "$ACTION" = "reopened" ]; then ICON="🔄"; LABEL="Reopened"
|
||||||
|
else ICON="📋"; LABEL="Ready"; fi
|
||||||
|
|
||||||
|
TITLE=$(printf '%s' "$PR_TITLE" | trunc $MAX_TITLE | esc)
|
||||||
|
[ ${#PR_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
|
||||||
|
BODY=$(printf '%s' "$PR_BODY" | trunc $MAX_BODY | esc)
|
||||||
|
[ -n "$PR_BODY" ] && [ ${#PR_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
|
||||||
|
[ -n "$BODY" ] && BODY=" · $BODY"
|
||||||
|
USER=$(printf '%s' "$PR_USER" | esc)
|
||||||
|
|
||||||
|
MSG="$ICON **[$LABEL #$PR_NUM: $TITLE](<$PR_URL>)**"$'\n'"by @$USER$BODY"
|
||||||
|
jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
|
||||||
|
|
||||||
|
issues:
|
||||||
|
if: github.event_name == 'issues'
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.repository.default_branch }}
|
||||||
|
sparse-checkout: .github/scripts
|
||||||
|
sparse-checkout-cone-mode: false
|
||||||
|
- name: Notify Discord
|
||||||
|
env:
|
||||||
|
WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
ACTION: ${{ github.event.action }}
|
||||||
|
ISSUE_NUM: ${{ github.event.issue.number }}
|
||||||
|
ISSUE_URL: ${{ github.event.issue.html_url }}
|
||||||
|
ISSUE_TITLE: ${{ github.event.issue.title }}
|
||||||
|
ISSUE_USER: ${{ github.event.issue.user.login }}
|
||||||
|
ISSUE_BODY: ${{ github.event.issue.body }}
|
||||||
|
ACTOR: ${{ github.actor }}
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
source .github/scripts/discord-helpers.sh
|
||||||
|
[ -z "$WEBHOOK" ] && exit 0
|
||||||
|
|
||||||
|
if [ "$ACTION" = "opened" ]; then ICON="🐛"; LABEL="New Issue"; USER="$ISSUE_USER"
|
||||||
|
elif [ "$ACTION" = "closed" ]; then ICON="✅"; LABEL="Closed"; USER="$ACTOR"
|
||||||
|
else ICON="🔄"; LABEL="Reopened"; USER="$ACTOR"; fi
|
||||||
|
|
||||||
|
TITLE=$(printf '%s' "$ISSUE_TITLE" | trunc $MAX_TITLE | esc)
|
||||||
|
[ ${#ISSUE_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
|
||||||
|
BODY=$(printf '%s' "$ISSUE_BODY" | trunc $MAX_BODY | esc)
|
||||||
|
[ -n "$ISSUE_BODY" ] && [ ${#ISSUE_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
|
||||||
|
[ -n "$BODY" ] && BODY=" · $BODY"
|
||||||
|
USER=$(printf '%s' "$USER" | esc)
|
||||||
|
|
||||||
|
MSG="$ICON **[$LABEL #$ISSUE_NUM: $TITLE](<$ISSUE_URL>)**"$'\n'"by @$USER$BODY"
|
||||||
|
jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
|
||||||
|
|
||||||
|
issue_comment:
|
||||||
|
if: github.event_name == 'issue_comment'
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.repository.default_branch }}
|
||||||
|
sparse-checkout: .github/scripts
|
||||||
|
sparse-checkout-cone-mode: false
|
||||||
|
- name: Notify Discord
|
||||||
|
env:
|
||||||
|
WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
IS_PR: ${{ github.event.issue.pull_request && 'true' || 'false' }}
|
||||||
|
ISSUE_NUM: ${{ github.event.issue.number }}
|
||||||
|
ISSUE_TITLE: ${{ github.event.issue.title }}
|
||||||
|
COMMENT_URL: ${{ github.event.comment.html_url }}
|
||||||
|
COMMENT_USER: ${{ github.event.comment.user.login }}
|
||||||
|
COMMENT_BODY: ${{ github.event.comment.body }}
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
source .github/scripts/discord-helpers.sh
|
||||||
|
[ -z "$WEBHOOK" ] && exit 0
|
||||||
|
|
||||||
|
[ "$IS_PR" = "true" ] && TYPE="PR" || TYPE="Issue"
|
||||||
|
|
||||||
|
TITLE=$(printf '%s' "$ISSUE_TITLE" | trunc $MAX_TITLE | esc)
|
||||||
|
[ ${#ISSUE_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
|
||||||
|
BODY=$(printf '%s' "$COMMENT_BODY" | trunc $MAX_BODY | esc)
|
||||||
|
[ ${#COMMENT_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
|
||||||
|
USER=$(printf '%s' "$COMMENT_USER" | esc)
|
||||||
|
|
||||||
|
MSG="💬 **[Comment on $TYPE #$ISSUE_NUM: $TITLE](<$COMMENT_URL>)**"$'\n'"@$USER: $BODY"
|
||||||
|
jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
|
||||||
|
|
||||||
|
pull_request_review:
|
||||||
|
if: github.event_name == 'pull_request_review'
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.repository.default_branch }}
|
||||||
|
sparse-checkout: .github/scripts
|
||||||
|
sparse-checkout-cone-mode: false
|
||||||
|
- name: Notify Discord
|
||||||
|
env:
|
||||||
|
WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
STATE: ${{ github.event.review.state }}
|
||||||
|
PR_NUM: ${{ github.event.pull_request.number }}
|
||||||
|
PR_TITLE: ${{ github.event.pull_request.title }}
|
||||||
|
REVIEW_URL: ${{ github.event.review.html_url }}
|
||||||
|
REVIEW_USER: ${{ github.event.review.user.login }}
|
||||||
|
REVIEW_BODY: ${{ github.event.review.body }}
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
source .github/scripts/discord-helpers.sh
|
||||||
|
[ -z "$WEBHOOK" ] && exit 0
|
||||||
|
|
||||||
|
if [ "$STATE" = "approved" ]; then ICON="✅"; LABEL="Approved"
|
||||||
|
elif [ "$STATE" = "changes_requested" ]; then ICON="🔧"; LABEL="Changes Requested"
|
||||||
|
else ICON="👀"; LABEL="Reviewed"; fi
|
||||||
|
|
||||||
|
TITLE=$(printf '%s' "$PR_TITLE" | trunc $MAX_TITLE | esc)
|
||||||
|
[ ${#PR_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
|
||||||
|
BODY=$(printf '%s' "$REVIEW_BODY" | trunc $MAX_BODY | esc)
|
||||||
|
[ -n "$REVIEW_BODY" ] && [ ${#REVIEW_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
|
||||||
|
[ -n "$BODY" ] && BODY=": $BODY"
|
||||||
|
USER=$(printf '%s' "$REVIEW_USER" | esc)
|
||||||
|
|
||||||
|
MSG="$ICON **[$LABEL PR #$PR_NUM: $TITLE](<$REVIEW_URL>)**"$'\n'"@$USER$BODY"
|
||||||
|
jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
|
||||||
|
|
||||||
|
pull_request_review_comment:
|
||||||
|
if: github.event_name == 'pull_request_review_comment'
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.repository.default_branch }}
|
||||||
|
sparse-checkout: .github/scripts
|
||||||
|
sparse-checkout-cone-mode: false
|
||||||
|
- name: Notify Discord
|
||||||
|
env:
|
||||||
|
WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
PR_NUM: ${{ github.event.pull_request.number }}
|
||||||
|
PR_TITLE: ${{ github.event.pull_request.title }}
|
||||||
|
COMMENT_URL: ${{ github.event.comment.html_url }}
|
||||||
|
COMMENT_USER: ${{ github.event.comment.user.login }}
|
||||||
|
COMMENT_BODY: ${{ github.event.comment.body }}
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
source .github/scripts/discord-helpers.sh
|
||||||
|
[ -z "$WEBHOOK" ] && exit 0
|
||||||
|
|
||||||
|
TITLE=$(printf '%s' "$PR_TITLE" | trunc $MAX_TITLE | esc)
|
||||||
|
[ ${#PR_TITLE} -gt $MAX_TITLE ] && TITLE="${TITLE}..."
|
||||||
|
BODY=$(printf '%s' "$COMMENT_BODY" | trunc $MAX_BODY | esc)
|
||||||
|
[ ${#COMMENT_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
|
||||||
|
USER=$(printf '%s' "$COMMENT_USER" | esc)
|
||||||
|
|
||||||
|
MSG="💭 **[Review Comment PR #$PR_NUM: $TITLE](<$COMMENT_URL>)**"$'\n'"@$USER: $BODY"
|
||||||
|
jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
|
||||||
|
|
||||||
|
release:
|
||||||
|
if: github.event_name == 'release'
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.repository.default_branch }}
|
||||||
|
sparse-checkout: .github/scripts
|
||||||
|
sparse-checkout-cone-mode: false
|
||||||
|
- name: Notify Discord
|
||||||
|
env:
|
||||||
|
WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
TAG: ${{ github.event.release.tag_name }}
|
||||||
|
NAME: ${{ github.event.release.name }}
|
||||||
|
URL: ${{ github.event.release.html_url }}
|
||||||
|
RELEASE_BODY: ${{ github.event.release.body }}
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
source .github/scripts/discord-helpers.sh
|
||||||
|
[ -z "$WEBHOOK" ] && exit 0
|
||||||
|
|
||||||
|
REL_NAME=$(printf '%s' "$NAME" | trunc $MAX_TITLE | esc)
|
||||||
|
[ ${#NAME} -gt $MAX_TITLE ] && REL_NAME="${REL_NAME}..."
|
||||||
|
BODY=$(printf '%s' "$RELEASE_BODY" | trunc $MAX_BODY | esc)
|
||||||
|
[ -n "$RELEASE_BODY" ] && [ ${#RELEASE_BODY} -gt $MAX_BODY ] && BODY="${BODY}..."
|
||||||
|
[ -n "$BODY" ] && BODY=" · $BODY"
|
||||||
|
TAG_ESC=$(printf '%s' "$TAG" | esc)
|
||||||
|
|
||||||
|
MSG="🚀 **[Release $TAG_ESC: $REL_NAME](<$URL>)**"$'\n'"$BODY"
|
||||||
|
jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
|
||||||
|
|
||||||
|
create:
|
||||||
|
if: github.event_name == 'create'
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.repository.default_branch }}
|
||||||
|
sparse-checkout: .github/scripts
|
||||||
|
sparse-checkout-cone-mode: false
|
||||||
|
- name: Notify Discord
|
||||||
|
env:
|
||||||
|
WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
REF_TYPE: ${{ github.event.ref_type }}
|
||||||
|
REF: ${{ github.event.ref }}
|
||||||
|
ACTOR: ${{ github.actor }}
|
||||||
|
REPO_URL: ${{ github.event.repository.html_url }}
|
||||||
|
run: |
|
||||||
|
set -o pipefail
|
||||||
|
source .github/scripts/discord-helpers.sh
|
||||||
|
[ -z "$WEBHOOK" ] && exit 0
|
||||||
|
|
||||||
|
[ "$REF_TYPE" = "branch" ] && ICON="🌿" || ICON="🏷️"
|
||||||
|
REF_TRUNC=$(printf '%s' "$REF" | trunc $MAX_TITLE)
|
||||||
|
[ ${#REF} -gt $MAX_TITLE ] && REF_TRUNC="${REF_TRUNC}..."
|
||||||
|
REF_ESC=$(printf '%s' "$REF_TRUNC" | esc)
|
||||||
|
REF_URL=$(jq -rn --arg ref "$REF" '$ref | @uri')
|
||||||
|
ACTOR_ESC=$(printf '%s' "$ACTOR" | esc)
|
||||||
|
MSG="$ICON **${REF_TYPE^} created: [$REF_ESC](<$REPO_URL/tree/$REF_URL>)** by @$ACTOR_ESC"
|
||||||
|
jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
|
||||||
|
|
||||||
|
delete:
|
||||||
|
if: github.event_name == 'delete'
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Notify Discord
|
- name: Notify Discord
|
||||||
uses: sarisia/actions-status-discord@v1
|
env:
|
||||||
if: always()
|
WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
with:
|
REF_TYPE: ${{ github.event.ref_type }}
|
||||||
webhook: ${{ secrets.DISCORD_WEBHOOK }}
|
REF: ${{ github.event.ref }}
|
||||||
status: ${{ job.status }}
|
ACTOR: ${{ github.actor }}
|
||||||
title: "Triggered by ${{ github.event_name }}"
|
run: |
|
||||||
color: 0x5865F2
|
set -o pipefail
|
||||||
|
[ -z "$WEBHOOK" ] && exit 0
|
||||||
|
esc() { sed -e 's/[][\*_()~`>]/\\&/g' -e 's/@/@ /g'; }
|
||||||
|
trunc() { tr '\n\r' ' ' | cut -c1-"$1"; }
|
||||||
|
|
||||||
|
REF_TRUNC=$(printf '%s' "$REF" | trunc 100)
|
||||||
|
[ ${#REF} -gt 100 ] && REF_TRUNC="${REF_TRUNC}..."
|
||||||
|
REF_ESC=$(printf '%s' "$REF_TRUNC" | esc)
|
||||||
|
ACTOR_ESC=$(printf '%s' "$ACTOR" | esc)
|
||||||
|
MSG="🗑️ **${REF_TYPE^} deleted: $REF_ESC** by @$ACTOR_ESC"
|
||||||
|
jq -n --arg content "$MSG" '{content: $content}' | curl -sf --retry 2 -X POST "$WEBHOOK" -H "Content-Type: application/json" -d @-
|
||||||
|
|
|
||||||
|
|
@ -70,4 +70,6 @@ z*/
|
||||||
.codex
|
.codex
|
||||||
.github/chatmodes
|
.github/chatmodes
|
||||||
.agent
|
.agent
|
||||||
.agentvibes/
|
.agentvibes/
|
||||||
|
.kiro/
|
||||||
|
.roo
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: Brainstorming Session
|
name: brainstorming-session
|
||||||
description: Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods
|
description: Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods
|
||||||
context_file: '' # Optional context file path for project-specific guidance
|
context_file: '' # Optional context file path for project-specific guidance
|
||||||
---
|
---
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: Party Mode
|
name: party-mode
|
||||||
description: Orchestrates group discussions between all installed BMAD agents, enabling natural multi-agent conversations
|
description: Orchestrates group discussions between all installed BMAD agents, enabling natural multi-agent conversations
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: Create Agent
|
name: create-agent
|
||||||
description: Interactive workflow to build BMAD Core compliant agents with optional brainstorming, persona development, and command structure
|
description: Interactive workflow to build BMAD Core compliant agents with optional brainstorming, persona development, and command structure
|
||||||
web_bundle: true
|
web_bundle: true
|
||||||
---
|
---
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: Create Workflow
|
name: create-workflow
|
||||||
description: Create structured standalone workflows using markdown-based step architecture
|
description: Create structured standalone workflows using markdown-based step architecture
|
||||||
web_bundle: true
|
web_bundle: true
|
||||||
---
|
---
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: Edit Agent
|
name: edit-agent
|
||||||
description: Edit existing BMAD agents while following all best practices and conventions
|
description: Edit existing BMAD agents while following all best practices and conventions
|
||||||
web_bundle: false
|
web_bundle: false
|
||||||
---
|
---
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: Edit Workflow
|
name: edit-workflow
|
||||||
description: Intelligent workflow editor that helps modify existing workflows while following best practices
|
description: Intelligent workflow editor that helps modify existing workflows while following best practices
|
||||||
web_bundle: true
|
web_bundle: true
|
||||||
---
|
---
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: Workflow Compliance Check
|
name: workflow-compliance-check
|
||||||
description: Systematic validation of workflows against BMAD standards with adversarial analysis and detailed reporting
|
description: Systematic validation of workflows against BMAD standards with adversarial analysis and detailed reporting
|
||||||
web_bundle: false
|
web_bundle: false
|
||||||
---
|
---
|
||||||
|
|
|
||||||
|
|
@ -76,8 +76,7 @@ The BMad Method Module (BMM) provides a comprehensive team of specialized AI age
|
||||||
- `create-prd` - Create PRD for Level 2-4 projects (creates FRs/NFRs only)
|
- `create-prd` - Create PRD for Level 2-4 projects (creates FRs/NFRs only)
|
||||||
- `tech-spec` - Quick spec for Level 0-1 projects
|
- `tech-spec` - Quick spec for Level 0-1 projects
|
||||||
- `create-epics-and-stories` - Break PRD into implementable pieces (runs AFTER architecture)
|
- `create-epics-and-stories` - Break PRD into implementable pieces (runs AFTER architecture)
|
||||||
- `validate-prd` - Validate PRD completeness
|
- `implementation-readiness` - Validate PRD + Architecture + Epics + UX (optional)
|
||||||
- `validate-tech-spec` - Validate Technical Specification
|
|
||||||
- `correct-course` - Handle mid-project changes
|
- `correct-course` - Handle mid-project changes
|
||||||
- `workflow-init` - Initialize workflow tracking
|
- `workflow-init` - Initialize workflow tracking
|
||||||
|
|
||||||
|
|
@ -146,7 +145,7 @@ The BMad Method Module (BMM) provides a comprehensive team of specialized AI age
|
||||||
- `workflow-status` - Check what to do next
|
- `workflow-status` - Check what to do next
|
||||||
- `create-architecture` - Produce a Scale Adaptive Architecture
|
- `create-architecture` - Produce a Scale Adaptive Architecture
|
||||||
- `validate-architecture` - Validate architecture document
|
- `validate-architecture` - Validate architecture document
|
||||||
- `implementation-readiness` - Validate readiness for Phase 4
|
- `implementation-readiness` - Validate PRD + Architecture + Epics + UX (optional)
|
||||||
|
|
||||||
**Communication Style:** Comprehensive yet pragmatic. Uses architectural metaphors. Balances technical depth with accessibility. Connects decisions to business value.
|
**Communication Style:** Comprehensive yet pragmatic. Uses architectural metaphors. Balances technical depth with accessibility. Connects decisions to business value.
|
||||||
|
|
||||||
|
|
@ -642,13 +641,12 @@ Some workflows are available to multiple agents:
|
||||||
|
|
||||||
Many workflows have optional validation workflows that perform independent review:
|
Many workflows have optional validation workflows that perform independent review:
|
||||||
|
|
||||||
| Validation | Agent | Validates |
|
| Validation | Agent | Validates |
|
||||||
| ----------------------- | ----------- | -------------------------------- |
|
| -------------------------- | ----------- | ------------------------------------------ |
|
||||||
| `validate-prd` | PM | PRD completeness (FRs/NFRs only) |
|
| `implementation-readiness` | Architect | PRD + Architecture + Epics + UX (optional) |
|
||||||
| `validate-tech-spec` | PM | Technical specification quality |
|
| `validate-architecture` | Architect | Architecture document |
|
||||||
| `validate-architecture` | Architect | Architecture document |
|
| `validate-design` | UX Designer | UX specification and artifacts |
|
||||||
| `validate-design` | UX Designer | UX specification and artifacts |
|
| `validate-create-story` | SM | Story draft |
|
||||||
| `validate-create-story` | SM | Story draft |
|
|
||||||
|
|
||||||
**When to use validation:**
|
**When to use validation:**
|
||||||
|
|
||||||
|
|
@ -945,9 +943,8 @@ Agent analyzes project state → recommends next workflow
|
||||||
|
|
||||||
```
|
```
|
||||||
Each phase has validation gates:
|
Each phase has validation gates:
|
||||||
- Phase 2 to 3: validate-prd, validate-tech-spec
|
- Phase 3 to 4: implementation-readiness (validates PRD + Architecture + Epics + UX (optional))
|
||||||
- Phase 3 to 4: implementation-readiness
|
Run validation before advancing to implementation
|
||||||
Run validation before advancing
|
|
||||||
```
|
```
|
||||||
|
|
||||||
**Course correction:**
|
**Course correction:**
|
||||||
|
|
|
||||||
|
|
@ -147,7 +147,7 @@ If status file exists, use workflow-status. If not, use workflow-init.
|
||||||
|
|
||||||
### Q: How do I know when Phase 3 is complete and I can start Phase 4?
|
### Q: How do I know when Phase 3 is complete and I can start Phase 4?
|
||||||
|
|
||||||
**A:** For Level 3-4, run the implementation-readiness workflow. It validates that PRD (FRs/NFRs), architecture, epics+stories, and UX (if applicable) are cohesive before implementation. Pass the gate check = ready for Phase 4.
|
**A:** For Level 3-4, run the implementation-readiness workflow. It validates PRD + Architecture + Epics + UX (optional) are aligned before implementation. Pass the gate check = ready for Phase 4.
|
||||||
|
|
||||||
### Q: Can I run workflows in parallel or do they have to be sequential?
|
### Q: Can I run workflows in parallel or do they have to be sequential?
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -246,7 +246,7 @@ Workflow that initializes Phase 4 implementation by creating sprint-status.yaml,
|
||||||
|
|
||||||
### Gate Check
|
### Gate Check
|
||||||
|
|
||||||
Validation workflow (implementation-readiness) run before Phase 4 to ensure PRD, architecture, and UX documents are cohesive with no gaps or contradictions. Required for BMad Method and Enterprise Method tracks.
|
Validation workflow (implementation-readiness) run before Phase 4 to ensure PRD + Architecture + Epics + UX (optional) are aligned with no gaps or contradictions. Required for BMad Method and Enterprise Method tracks.
|
||||||
|
|
||||||
### DoD (Definition of Done)
|
### DoD (Definition of Done)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,37 @@
|
||||||
|
# Workflow Diagram Maintenance
|
||||||
|
|
||||||
|
## Regenerating SVG from Excalidraw
|
||||||
|
|
||||||
|
When you edit `workflow-method-greenfield.excalidraw`, regenerate the SVG:
|
||||||
|
|
||||||
|
1. Open https://excalidraw.com/
|
||||||
|
2. Load the `.excalidraw` file
|
||||||
|
3. Click menu (☰) → Export image → SVG
|
||||||
|
4. **Set "Scale" to 1x** (default is 2x)
|
||||||
|
5. Click "Export"
|
||||||
|
6. Save as `workflow-method-greenfield.svg`
|
||||||
|
7. **Validate the changes** (see below)
|
||||||
|
8. Commit both files together
|
||||||
|
|
||||||
|
**Important:**
|
||||||
|
|
||||||
|
- Always use **1x scale** to maintain consistent dimensions
|
||||||
|
- Automated export tools (`excalidraw-to-svg`) are broken - use manual export only
|
||||||
|
|
||||||
|
## Visual Validation
|
||||||
|
|
||||||
|
After regenerating the SVG, validate that it renders correctly:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./tools/validate-svg-changes.sh src/modules/bmm/docs/images/workflow-method-greenfield.svg
|
||||||
|
```
|
||||||
|
|
||||||
|
This script:
|
||||||
|
|
||||||
|
- Checks for required dependencies (Playwright, ImageMagick)
|
||||||
|
- Installs Playwright locally if needed (no package.json pollution)
|
||||||
|
- Renders old vs new SVG using browser-accurate rendering
|
||||||
|
- Compares pixel-by-pixel and generates a diff image
|
||||||
|
- Outputs a prompt for AI visual analysis (paste into Gemini/Claude)
|
||||||
|
|
||||||
|
**Threshold**: <0.01% difference is acceptable (anti-aliasing variations)
|
||||||
|
|
@ -1036,10 +1036,6 @@
|
||||||
"type": "arrow",
|
"type": "arrow",
|
||||||
"id": "arrow-discovery-no"
|
"id": "arrow-discovery-no"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"type": "arrow",
|
|
||||||
"id": "arrow-prd-validate"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"id": "arrow-phase1-to-phase2",
|
"id": "arrow-phase1-to-phase2",
|
||||||
"type": "arrow"
|
"type": "arrow"
|
||||||
|
|
@ -1055,17 +1051,21 @@
|
||||||
{
|
{
|
||||||
"id": "arrow-has-ui-no",
|
"id": "arrow-has-ui-no",
|
||||||
"type": "arrow"
|
"type": "arrow"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "arrow-prd-hasui",
|
||||||
|
"type": "arrow"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"locked": false,
|
"locked": false,
|
||||||
"version": 107,
|
"version": 108,
|
||||||
"versionNonce": 930129274,
|
"versionNonce": 930129275,
|
||||||
"index": "aN",
|
"index": "aN",
|
||||||
"isDeleted": false,
|
"isDeleted": false,
|
||||||
"strokeStyle": "solid",
|
"strokeStyle": "solid",
|
||||||
"seed": 1,
|
"seed": 1,
|
||||||
"frameId": null,
|
"frameId": null,
|
||||||
"updated": 1764191563350,
|
"updated": 1764952855000,
|
||||||
"link": null
|
"link": null
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -1107,197 +1107,6 @@
|
||||||
"autoResize": true,
|
"autoResize": true,
|
||||||
"lineHeight": 1.25
|
"lineHeight": 1.25
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"id": "arrow-prd-validate",
|
|
||||||
"type": "arrow",
|
|
||||||
"x": 439.4640518625828,
|
|
||||||
"y": 331.0450590268819,
|
|
||||||
"width": 0.17283039375342923,
|
|
||||||
"height": 28.50332681186643,
|
|
||||||
"angle": 0,
|
|
||||||
"strokeColor": "#1976d2",
|
|
||||||
"backgroundColor": "transparent",
|
|
||||||
"fillStyle": "solid",
|
|
||||||
"strokeWidth": 2,
|
|
||||||
"roughness": 0,
|
|
||||||
"opacity": 100,
|
|
||||||
"groupIds": [],
|
|
||||||
"startBinding": {
|
|
||||||
"elementId": "proc-prd",
|
|
||||||
"focus": 0,
|
|
||||||
"gap": 1
|
|
||||||
},
|
|
||||||
"endBinding": {
|
|
||||||
"elementId": "proc-validate-prd",
|
|
||||||
"focus": 0,
|
|
||||||
"gap": 1
|
|
||||||
},
|
|
||||||
"points": [
|
|
||||||
[
|
|
||||||
0,
|
|
||||||
0
|
|
||||||
],
|
|
||||||
[
|
|
||||||
0.17283039375342923,
|
|
||||||
28.50332681186643
|
|
||||||
]
|
|
||||||
],
|
|
||||||
"lastCommittedPoint": null,
|
|
||||||
"version": 102,
|
|
||||||
"versionNonce": 1274591910,
|
|
||||||
"index": "aP",
|
|
||||||
"isDeleted": false,
|
|
||||||
"strokeStyle": "solid",
|
|
||||||
"seed": 1,
|
|
||||||
"frameId": null,
|
|
||||||
"roundness": null,
|
|
||||||
"boundElements": [],
|
|
||||||
"updated": 1764191023838,
|
|
||||||
"link": null,
|
|
||||||
"locked": false,
|
|
||||||
"startArrowhead": null,
|
|
||||||
"endArrowhead": "arrow"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "proc-validate-prd",
|
|
||||||
"type": "rectangle",
|
|
||||||
"x": 360,
|
|
||||||
"y": 360,
|
|
||||||
"width": 160,
|
|
||||||
"height": 80,
|
|
||||||
"angle": 0,
|
|
||||||
"strokeColor": "#43a047",
|
|
||||||
"backgroundColor": "#c8e6c9",
|
|
||||||
"fillStyle": "solid",
|
|
||||||
"strokeWidth": 2,
|
|
||||||
"roughness": 0,
|
|
||||||
"opacity": 100,
|
|
||||||
"roundness": {
|
|
||||||
"type": 3,
|
|
||||||
"value": 8
|
|
||||||
},
|
|
||||||
"groupIds": [
|
|
||||||
"proc-validate-prd-group"
|
|
||||||
],
|
|
||||||
"boundElements": [
|
|
||||||
{
|
|
||||||
"type": "text",
|
|
||||||
"id": "proc-validate-prd-text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "arrow",
|
|
||||||
"id": "arrow-prd-validate"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "arrow",
|
|
||||||
"id": "arrow-validate-prd-hasui"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "jv0rnlK2D9JKIGTO7pUtT",
|
|
||||||
"type": "arrow"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"locked": false,
|
|
||||||
"version": 3,
|
|
||||||
"versionNonce": 894806650,
|
|
||||||
"index": "aQ",
|
|
||||||
"isDeleted": false,
|
|
||||||
"strokeStyle": "solid",
|
|
||||||
"seed": 1,
|
|
||||||
"frameId": null,
|
|
||||||
"updated": 1764191341774,
|
|
||||||
"link": null
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "proc-validate-prd-text",
|
|
||||||
"type": "text",
|
|
||||||
"x": 370,
|
|
||||||
"y": 375,
|
|
||||||
"width": 140,
|
|
||||||
"height": 50,
|
|
||||||
"angle": 0,
|
|
||||||
"strokeColor": "#1e1e1e",
|
|
||||||
"backgroundColor": "transparent",
|
|
||||||
"fillStyle": "solid",
|
|
||||||
"strokeWidth": 2,
|
|
||||||
"roughness": 0,
|
|
||||||
"opacity": 100,
|
|
||||||
"groupIds": [
|
|
||||||
"proc-validate-prd-group"
|
|
||||||
],
|
|
||||||
"fontSize": 14,
|
|
||||||
"fontFamily": 1,
|
|
||||||
"text": "Validate PRD\n<<optional>>",
|
|
||||||
"textAlign": "center",
|
|
||||||
"verticalAlign": "middle",
|
|
||||||
"containerId": "proc-validate-prd",
|
|
||||||
"locked": false,
|
|
||||||
"version": 2,
|
|
||||||
"versionNonce": 944332155,
|
|
||||||
"index": "aR",
|
|
||||||
"isDeleted": false,
|
|
||||||
"strokeStyle": "solid",
|
|
||||||
"seed": 1,
|
|
||||||
"frameId": null,
|
|
||||||
"roundness": null,
|
|
||||||
"boundElements": [],
|
|
||||||
"updated": 1763522171080,
|
|
||||||
"link": null,
|
|
||||||
"originalText": "Validate PRD\n<<optional>>",
|
|
||||||
"autoResize": true,
|
|
||||||
"lineHeight": 1.7857142857142858
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "arrow-validate-prd-hasui",
|
|
||||||
"type": "arrow",
|
|
||||||
"x": 440,
|
|
||||||
"y": 440,
|
|
||||||
"width": 0,
|
|
||||||
"height": 30,
|
|
||||||
"angle": 0,
|
|
||||||
"strokeColor": "#1976d2",
|
|
||||||
"backgroundColor": "transparent",
|
|
||||||
"fillStyle": "solid",
|
|
||||||
"strokeWidth": 2,
|
|
||||||
"roughness": 0,
|
|
||||||
"opacity": 100,
|
|
||||||
"groupIds": [],
|
|
||||||
"startBinding": {
|
|
||||||
"elementId": "proc-validate-prd",
|
|
||||||
"focus": 0,
|
|
||||||
"gap": 1
|
|
||||||
},
|
|
||||||
"endBinding": {
|
|
||||||
"elementId": "decision-has-ui",
|
|
||||||
"focus": 0,
|
|
||||||
"gap": 1
|
|
||||||
},
|
|
||||||
"points": [
|
|
||||||
[
|
|
||||||
0,
|
|
||||||
0
|
|
||||||
],
|
|
||||||
[
|
|
||||||
0,
|
|
||||||
30
|
|
||||||
]
|
|
||||||
],
|
|
||||||
"lastCommittedPoint": null,
|
|
||||||
"version": 2,
|
|
||||||
"versionNonce": 1369541557,
|
|
||||||
"index": "aS",
|
|
||||||
"isDeleted": false,
|
|
||||||
"strokeStyle": "solid",
|
|
||||||
"seed": 1,
|
|
||||||
"frameId": null,
|
|
||||||
"roundness": null,
|
|
||||||
"boundElements": [],
|
|
||||||
"updated": 1763522171080,
|
|
||||||
"link": null,
|
|
||||||
"locked": false,
|
|
||||||
"startArrowhead": null,
|
|
||||||
"endArrowhead": "arrow"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"id": "decision-has-ui",
|
"id": "decision-has-ui",
|
||||||
"type": "diamond",
|
"type": "diamond",
|
||||||
|
|
@ -1322,7 +1131,7 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"type": "arrow",
|
"type": "arrow",
|
||||||
"id": "arrow-validate-prd-hasui"
|
"id": "arrow-prd-hasui"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"type": "arrow",
|
"type": "arrow",
|
||||||
|
|
@ -1334,15 +1143,15 @@
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"locked": false,
|
"locked": false,
|
||||||
"version": 2,
|
"version": 3,
|
||||||
"versionNonce": 1003877915,
|
"versionNonce": 1003877916,
|
||||||
"index": "aT",
|
"index": "aT",
|
||||||
"isDeleted": false,
|
"isDeleted": false,
|
||||||
"strokeStyle": "solid",
|
"strokeStyle": "solid",
|
||||||
"seed": 1,
|
"seed": 1,
|
||||||
"frameId": null,
|
"frameId": null,
|
||||||
"roundness": null,
|
"roundness": null,
|
||||||
"updated": 1763522171080,
|
"updated": 1764952855000,
|
||||||
"link": null
|
"link": null
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -5162,6 +4971,57 @@
|
||||||
"startArrowhead": null,
|
"startArrowhead": null,
|
||||||
"endArrowhead": "arrow",
|
"endArrowhead": "arrow",
|
||||||
"elbowed": false
|
"elbowed": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "arrow-prd-hasui",
|
||||||
|
"type": "arrow",
|
||||||
|
"x": 440,
|
||||||
|
"y": 330,
|
||||||
|
"width": 0,
|
||||||
|
"height": 140,
|
||||||
|
"angle": 0,
|
||||||
|
"strokeColor": "#1976d2",
|
||||||
|
"backgroundColor": "transparent",
|
||||||
|
"fillStyle": "solid",
|
||||||
|
"strokeWidth": 2,
|
||||||
|
"roughness": 0,
|
||||||
|
"opacity": 100,
|
||||||
|
"groupIds": [],
|
||||||
|
"startBinding": {
|
||||||
|
"elementId": "proc-prd",
|
||||||
|
"focus": 0,
|
||||||
|
"gap": 1
|
||||||
|
},
|
||||||
|
"endBinding": {
|
||||||
|
"elementId": "decision-has-ui",
|
||||||
|
"focus": 0,
|
||||||
|
"gap": 1
|
||||||
|
},
|
||||||
|
"points": [
|
||||||
|
[
|
||||||
|
0,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
[
|
||||||
|
0,
|
||||||
|
140
|
||||||
|
]
|
||||||
|
],
|
||||||
|
"lastCommittedPoint": null,
|
||||||
|
"version": 1,
|
||||||
|
"versionNonce": 1,
|
||||||
|
"index": "b1J",
|
||||||
|
"isDeleted": false,
|
||||||
|
"strokeStyle": "solid",
|
||||||
|
"seed": 1,
|
||||||
|
"frameId": null,
|
||||||
|
"roundness": null,
|
||||||
|
"boundElements": [],
|
||||||
|
"updated": 1764952855000,
|
||||||
|
"link": null,
|
||||||
|
"locked": false,
|
||||||
|
"startArrowhead": null,
|
||||||
|
"endArrowhead": "arrow"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"appState": {
|
"appState": {
|
||||||
|
|
|
||||||
File diff suppressed because one or more lines are too long
|
Before Width: | Height: | Size: 88 KiB After Width: | Height: | Size: 87 KiB |
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: Product Brief Workflow
|
name: create-product-brief
|
||||||
description: Create comprehensive product briefs through collaborative step-by-step discovery as creative Business Analyst working with the user as peers.
|
description: Create comprehensive product briefs through collaborative step-by-step discovery as creative Business Analyst working with the user as peers.
|
||||||
web_bundle: true
|
web_bundle: true
|
||||||
---
|
---
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
name: Research Workflow
|
name: research
|
||||||
description: Conduct comprehensive research across multiple domains using current web data and verified sources - Market, Technical, Domain and other research types.
|
description: Conduct comprehensive research across multiple domains using current web data and verified sources - Market, Technical, Domain and other research types.
|
||||||
|
web_bundle: true
|
||||||
---
|
---
|
||||||
|
|
||||||
# Research Workflow
|
# Research Workflow
|
||||||
|
|
|
||||||
|
|
@ -1,3 +1,9 @@
|
||||||
|
---
|
||||||
|
name: create-ux-design
|
||||||
|
description: Work with a peer UX Design expert to plan your applications UX patterns, look and feel.
|
||||||
|
web_bundle: true
|
||||||
|
---
|
||||||
|
|
||||||
# Create UX Design Workflow
|
# Create UX Design Workflow
|
||||||
|
|
||||||
**Goal:** Create comprehensive UX design specifications through collaborative visual exploration and informed decision-making where you act as a UX facilitator working with a product stakeholder.
|
**Goal:** Create comprehensive UX design specifications through collaborative visual exploration and informed decision-making where you act as a UX facilitator working with a product stakeholder.
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
---
|
---
|
||||||
name: PRD Workflow
|
name: create-prd
|
||||||
description: Creates a comprehensive PRDs through collaborative step-by-step discovery between two product managers working as peers.
|
description: Creates a comprehensive PRDs through collaborative step-by-step discovery between two product managers working as peers.
|
||||||
main_config: `{project-root}/{bmad_folder}/bmm/config.yaml`
|
main_config: '{project-root}/{bmad_folder}/bmm/config.yaml'
|
||||||
web_bundle: true
|
web_bundle: true
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
name: Architecture Workflow
|
name: create-architecture
|
||||||
description: Collaborative architectural decision facilitation for AI-agent consistency. Replaces template-driven architecture with intelligent, adaptive conversation that produces a decision-focused architecture document optimized for preventing agent conflicts.
|
description: Collaborative architectural decision facilitation for AI-agent consistency. Replaces template-driven architecture with intelligent, adaptive conversation that produces a decision-focused architecture document optimized for preventing agent conflicts.
|
||||||
|
web_bundle: true
|
||||||
---
|
---
|
||||||
|
|
||||||
# Architecture Workflow
|
# Architecture Workflow
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: 'Create Epics and Stories'
|
name: create-epics-stories
|
||||||
description: 'Transform PRD requirements and Architecture decisions into comprehensive stories organized by user value. This workflow requires completed PRD + Architecture documents (UX recommended if UI exists) and breaks down requirements into implementation-ready epics and user stories that incorporate all available technical and design context. Creates detailed, actionable stories with complete acceptance criteria for development teams.'
|
description: 'Transform PRD requirements and Architecture decisions into comprehensive stories organized by user value. This workflow requires completed PRD + Architecture documents (UX recommended if UI exists) and breaks down requirements into implementation-ready epics and user stories that incorporate all available technical and design context. Creates detailed, actionable stories with complete acceptance criteria for development teams.'
|
||||||
web_bundle: true
|
web_bundle: true
|
||||||
---
|
---
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: 'Implementation Readiness'
|
name: check-implementation-readiness
|
||||||
description: 'Critical validation workflow that assesses PRD, Architecture, and Epics & Stories for completeness and alignment before implementation. Uses adversarial review approach to find gaps and issues.'
|
description: 'Critical validation workflow that assesses PRD, Architecture, and Epics & Stories for completeness and alignment before implementation. Uses adversarial review approach to find gaps and issues.'
|
||||||
web_bundle: false
|
web_bundle: false
|
||||||
---
|
---
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
# Senior Developer Review - Validation Checklist
|
# Senior Developer Review - Validation Checklist
|
||||||
|
|
||||||
- [ ] Story file loaded from `{{story_path}}`
|
- [ ] Story file loaded from `{{story_path}}`
|
||||||
- [ ] Story Status verified as one of: {{allow_status_values}}
|
- [ ] Story Status verified as reviewable (review)
|
||||||
- [ ] Epic and Story IDs resolved ({{epic_num}}.{{story_num}})
|
- [ ] Epic and Story IDs resolved ({{epic_num}}.{{story_num}})
|
||||||
- [ ] Story Context located or warning recorded
|
- [ ] Story Context located or warning recorded
|
||||||
- [ ] Epic Tech Spec located or warning recorded
|
- [ ] Epic Tech Spec located or warning recorded
|
||||||
|
|
@ -17,6 +17,7 @@
|
||||||
- [ ] Review notes appended under "Senior Developer Review (AI)"
|
- [ ] Review notes appended under "Senior Developer Review (AI)"
|
||||||
- [ ] Change Log updated with review entry
|
- [ ] Change Log updated with review entry
|
||||||
- [ ] Status updated according to settings (if enabled)
|
- [ ] Status updated according to settings (if enabled)
|
||||||
|
- [ ] Sprint status synced (if sprint tracking enabled)
|
||||||
- [ ] Story saved successfully
|
- [ ] Story saved successfully
|
||||||
|
|
||||||
_Reviewer: {{user_name}} on {{date}}_
|
_Reviewer: {{user_name}} on {{date}}_
|
||||||
|
|
@ -16,6 +16,7 @@
|
||||||
<step n="1" goal="Load story and discover changes">
|
<step n="1" goal="Load story and discover changes">
|
||||||
<action>Use provided {{story_path}} or ask user which story file to review</action>
|
<action>Use provided {{story_path}} or ask user which story file to review</action>
|
||||||
<action>Read COMPLETE story file</action>
|
<action>Read COMPLETE story file</action>
|
||||||
|
<action>Set {{story_key}} = extracted key from filename (e.g., "1-2-user-authentication.md" → "1-2-user-authentication") or story metadata</action>
|
||||||
<action>Parse sections: Story, Acceptance Criteria, Tasks/Subtasks, Dev Agent Record → File List, Change Log</action>
|
<action>Parse sections: Story, Acceptance Criteria, Tasks/Subtasks, Dev Agent Record → File List, Change Log</action>
|
||||||
|
|
||||||
<!-- Discover actual changes via git -->
|
<!-- Discover actual changes via git -->
|
||||||
|
|
@ -106,6 +107,8 @@
|
||||||
|
|
||||||
<step n="4" goal="Present findings and fix them">
|
<step n="4" goal="Present findings and fix them">
|
||||||
<action>Categorize findings: HIGH (must fix), MEDIUM (should fix), LOW (nice to fix)</action>
|
<action>Categorize findings: HIGH (must fix), MEDIUM (should fix), LOW (nice to fix)</action>
|
||||||
|
<action>Set {{fixed_count}} = 0</action>
|
||||||
|
<action>Set {{action_count}} = 0</action>
|
||||||
|
|
||||||
<output>**🔥 CODE REVIEW FINDINGS, {user_name}!**
|
<output>**🔥 CODE REVIEW FINDINGS, {user_name}!**
|
||||||
|
|
||||||
|
|
@ -145,11 +148,15 @@
|
||||||
<action>Add/update tests as needed</action>
|
<action>Add/update tests as needed</action>
|
||||||
<action>Update File List in story if files changed</action>
|
<action>Update File List in story if files changed</action>
|
||||||
<action>Update story Dev Agent Record with fixes applied</action>
|
<action>Update story Dev Agent Record with fixes applied</action>
|
||||||
|
<action>Set {{fixed_count}} = number of HIGH and MEDIUM issues fixed</action>
|
||||||
|
<action>Set {{action_count}} = 0</action>
|
||||||
</check>
|
</check>
|
||||||
|
|
||||||
<check if="user chooses 2">
|
<check if="user chooses 2">
|
||||||
<action>Add "Review Follow-ups (AI)" subsection to Tasks/Subtasks</action>
|
<action>Add "Review Follow-ups (AI)" subsection to Tasks/Subtasks</action>
|
||||||
<action>For each issue: `- [ ] [AI-Review][Severity] Description [file:line]`</action>
|
<action>For each issue: `- [ ] [AI-Review][Severity] Description [file:line]`</action>
|
||||||
|
<action>Set {{action_count}} = number of action items created</action>
|
||||||
|
<action>Set {{fixed_count}} = 0</action>
|
||||||
</check>
|
</check>
|
||||||
|
|
||||||
<check if="user chooses 3">
|
<check if="user chooses 3">
|
||||||
|
|
@ -158,11 +165,52 @@
|
||||||
</check>
|
</check>
|
||||||
</step>
|
</step>
|
||||||
|
|
||||||
<step n="5" goal="Update story status">
|
<step n="5" goal="Update story status and sync sprint tracking">
|
||||||
<action>If all HIGH issues fixed and ACs implemented → Update story Status to "done"</action>
|
<!-- Determine new status based on review outcome -->
|
||||||
<action>If issues remain → Update story Status to "in-progress"</action>
|
<check if="all HIGH and MEDIUM issues fixed AND all ACs implemented">
|
||||||
|
<action>Set {{new_status}} = "done"</action>
|
||||||
|
<action>Update story Status field to "done"</action>
|
||||||
|
</check>
|
||||||
|
<check if="HIGH or MEDIUM issues remain OR ACs not fully implemented">
|
||||||
|
<action>Set {{new_status}} = "in-progress"</action>
|
||||||
|
<action>Update story Status field to "in-progress"</action>
|
||||||
|
</check>
|
||||||
<action>Save story file</action>
|
<action>Save story file</action>
|
||||||
|
|
||||||
|
<!-- Determine sprint tracking status -->
|
||||||
|
<check if="{sprint_status} file exists">
|
||||||
|
<action>Set {{current_sprint_status}} = "enabled"</action>
|
||||||
|
</check>
|
||||||
|
<check if="{sprint_status} file does NOT exist">
|
||||||
|
<action>Set {{current_sprint_status}} = "no-sprint-tracking"</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<!-- Sync sprint-status.yaml when story status changes (only if sprint tracking enabled) -->
|
||||||
|
<check if="{{current_sprint_status}} != 'no-sprint-tracking'">
|
||||||
|
<action>Load the FULL file: {sprint_status}</action>
|
||||||
|
<action>Find development_status key matching {{story_key}}</action>
|
||||||
|
|
||||||
|
<check if="{{new_status}} == 'done'">
|
||||||
|
<action>Update development_status[{{story_key}}] = "done"</action>
|
||||||
|
<action>Save file, preserving ALL comments and structure</action>
|
||||||
|
<output>✅ Sprint status synced: {{story_key}} → done</output>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="{{new_status}} == 'in-progress'">
|
||||||
|
<action>Update development_status[{{story_key}}] = "in-progress"</action>
|
||||||
|
<action>Save file, preserving ALL comments and structure</action>
|
||||||
|
<output>🔄 Sprint status synced: {{story_key}} → in-progress</output>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="story key not found in sprint status">
|
||||||
|
<output>⚠️ Story file updated, but sprint-status sync failed: {{story_key}} not found in sprint-status.yaml</output>
|
||||||
|
</check>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="{{current_sprint_status}} == 'no-sprint-tracking'">
|
||||||
|
<output>ℹ️ Story status updated (no sprint tracking configured)</output>
|
||||||
|
</check>
|
||||||
|
|
||||||
<output>**✅ Review Complete!**
|
<output>**✅ Review Complete!**
|
||||||
|
|
||||||
**Story Status:** {{new_status}}
|
**Story Status:** {{new_status}}
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,6 @@
|
||||||
# Review Story Workflow
|
# Review Story Workflow
|
||||||
name: code-review
|
name: code-review
|
||||||
description: "Perform an ADVERSARIAL Senior Developer code review that finds 3-10 specific problems in every story. Challenges everything: code quality, test coverage, architecture compliance, security, performance. NEVER accepts 'looks good' - must find minimum issues and can auto-fix with user approval."
|
description: "Perform an ADVERSARIAL Senior Developer code review that finds 3-10 specific problems in every story. Challenges everything: code quality, test coverage, architecture compliance, security, performance. NEVER accepts `looks good` - must find minimum issues and can auto-fix with user approval."
|
||||||
author: "BMad"
|
author: "BMad"
|
||||||
|
|
||||||
# Critical variables from config
|
# Critical variables from config
|
||||||
|
|
|
||||||
|
|
@ -32,18 +32,115 @@
|
||||||
</check>
|
</check>
|
||||||
|
|
||||||
<check if="Mode B">
|
<check if="Mode B">
|
||||||
<ask>**[t] Plan first** - Create tech-spec then implement
|
|
||||||
|
<!-- Escalation Threshold: Lightweight check - should we invoke scale-adaptive? -->
|
||||||
|
|
||||||
|
<action>Evaluate escalation threshold against user input (minimal tokens, no file loading):
|
||||||
|
|
||||||
|
**Triggers escalation** (if 2+ signals present):
|
||||||
|
|
||||||
|
- Multiple components mentioned (e.g., dashboard + api + database)
|
||||||
|
- System-level language (e.g., platform, integration, architecture)
|
||||||
|
- Uncertainty about approach (e.g., "how should I", "best way to")
|
||||||
|
- Multi-layer scope (e.g., UI + backend + data together)
|
||||||
|
- Extended timeframe (e.g., "this week", "over the next few days")
|
||||||
|
|
||||||
|
**Reduces signal:**
|
||||||
|
|
||||||
|
- Simplicity markers (e.g., "just", "quickly", "fix", "bug", "typo", "simple", "basic", "minor")
|
||||||
|
- Single file/component focus
|
||||||
|
- Confident, specific request
|
||||||
|
|
||||||
|
Use holistic judgment, not mechanical keyword matching.</action>
|
||||||
|
|
||||||
|
<!-- No Escalation: Simple request, offer existing choice -->
|
||||||
|
<check if="escalation threshold NOT triggered">
|
||||||
|
<ask>**[t] Plan first** - Create tech-spec then implement
|
||||||
**[e] Execute directly** - Start now</ask>
|
**[e] Execute directly** - Start now</ask>
|
||||||
|
|
||||||
<check if="t">
|
<check if="t">
|
||||||
<action>Load and execute {create_tech_spec_workflow}</action>
|
<action>Load and execute {create_tech_spec_workflow}</action>
|
||||||
<action>Continue to implementation after spec complete</action>
|
<action>Continue to implementation after spec complete</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="e">
|
||||||
|
<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
|
||||||
|
<goto>step_2</goto>
|
||||||
|
</check>
|
||||||
|
|
||||||
</check>
|
</check>
|
||||||
|
|
||||||
<check if="e">
|
<!-- Escalation Triggered: Load scale-adaptive and evaluate level -->
|
||||||
<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
|
<check if="escalation threshold triggered">
|
||||||
<goto>step_2</goto>
|
<action>Load {project_levels} and evaluate user input against detection_hints.keywords</action>
|
||||||
|
<action>Determine level (0-4) using scale-adaptive definitions</action>
|
||||||
|
|
||||||
|
<!-- Level 0: Scale-adaptive confirms simple, fall back to standard choice -->
|
||||||
|
<check if="level 0">
|
||||||
|
<ask>**[t] Plan first** - Create tech-spec then implement
|
||||||
|
|
||||||
|
**[e] Execute directly** - Start now</ask>
|
||||||
|
|
||||||
|
<check if="t">
|
||||||
|
<action>Load and execute {create_tech_spec_workflow}</action>
|
||||||
|
<action>Continue to implementation after spec complete</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="e">
|
||||||
|
<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
|
||||||
|
<goto>step_2</goto>
|
||||||
|
</check>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="level 1 or 2 or couldn't determine level">
|
||||||
|
<ask>This looks like a focused feature with multiple components.
|
||||||
|
|
||||||
|
**[t] Create tech-spec first** (recommended)
|
||||||
|
**[w] Seems bigger than quick-dev** — see what BMad Method recommends (workflow-init)
|
||||||
|
**[e] Execute directly**</ask>
|
||||||
|
|
||||||
|
<check if="t">
|
||||||
|
<action>Load and execute {create_tech_spec_workflow}</action>
|
||||||
|
<action>Continue to implementation after spec complete</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="w">
|
||||||
|
<action>Load and execute {workflow_init}</action>
|
||||||
|
<action>EXIT quick-dev - user has been routed to BMad Method</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="e">
|
||||||
|
<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
|
||||||
|
<goto>step_2</goto>
|
||||||
|
</check>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<!-- Level 3+: BMad Method territory, recommend workflow-init -->
|
||||||
|
<check if="level 3 or higher">
|
||||||
|
<ask>This sounds like platform/system work.
|
||||||
|
|
||||||
|
**[w] Start BMad Method** (recommended) (workflow-init)
|
||||||
|
**[t] Create tech-spec** (lighter planning)
|
||||||
|
**[e] Execute directly** - feeling lucky</ask>
|
||||||
|
|
||||||
|
<check if="w">
|
||||||
|
<action>Load and execute {workflow_init}</action>
|
||||||
|
<action>EXIT quick-dev - user has been routed to BMad Method</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="t">
|
||||||
|
<action>Load and execute {create_tech_spec_workflow}</action>
|
||||||
|
<action>Continue to implementation after spec complete</action>
|
||||||
|
</check>
|
||||||
|
|
||||||
|
<check if="e">
|
||||||
|
<ask>Any additional guidance before I begin? (patterns, files, constraints) Or "go" to start.</ask>
|
||||||
|
<goto>step_2</goto>
|
||||||
|
</check>
|
||||||
|
</check>
|
||||||
|
|
||||||
</check>
|
</check>
|
||||||
|
|
||||||
</check>
|
</check>
|
||||||
|
|
||||||
</step>
|
</step>
|
||||||
|
|
|
||||||
|
|
@ -25,5 +25,9 @@ create_tech_spec_workflow: "{project-root}/{bmad_folder}/bmm/workflows/bmad-quic
|
||||||
party_mode_exec: "{project-root}/{bmad_folder}/core/workflows/party-mode/workflow.md"
|
party_mode_exec: "{project-root}/{bmad_folder}/core/workflows/party-mode/workflow.md"
|
||||||
advanced_elicitation: "{project-root}/{bmad_folder}/core/tasks/advanced-elicitation.xml"
|
advanced_elicitation: "{project-root}/{bmad_folder}/core/tasks/advanced-elicitation.xml"
|
||||||
|
|
||||||
|
# Routing resources (lazy-loaded)
|
||||||
|
project_levels: "{project-root}/{bmad_folder}/bmm/workflows/workflow-status/project-levels.yaml"
|
||||||
|
workflow_init: "{project-root}/{bmad_folder}/bmm/workflows/workflow-status/init/workflow.yaml"
|
||||||
|
|
||||||
standalone: true
|
standalone: true
|
||||||
web_bundle: false
|
web_bundle: false
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
name: Generate Project Context
|
name: generate-project-context
|
||||||
description: Creates a concise project_context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
|
description: Creates a concise project_context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -27,10 +27,21 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Determine Node version
|
||||||
|
id: node-version
|
||||||
|
run: |
|
||||||
|
if [ -f .nvmrc ]; then
|
||||||
|
echo "value=$(cat .nvmrc)" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "Using Node from .nvmrc"
|
||||||
|
else
|
||||||
|
echo "value=24" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "Using default Node 24 (current LTS)"
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Setup Node.js
|
- name: Setup Node.js
|
||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version-file: ".nvmrc"
|
node-version: ${{ steps.node-version.outputs.value }}
|
||||||
cache: "npm"
|
cache: "npm"
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
|
|
@ -54,10 +65,21 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Determine Node version
|
||||||
|
id: node-version
|
||||||
|
run: |
|
||||||
|
if [ -f .nvmrc ]; then
|
||||||
|
echo "value=$(cat .nvmrc)" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "Using Node from .nvmrc"
|
||||||
|
else
|
||||||
|
echo "value=22" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "Using default Node 22 (current LTS)"
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Setup Node.js
|
- name: Setup Node.js
|
||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version-file: ".nvmrc"
|
node-version: ${{ steps.node-version.outputs.value }}
|
||||||
cache: "npm"
|
cache: "npm"
|
||||||
|
|
||||||
- name: Cache Playwright browsers
|
- name: Cache Playwright browsers
|
||||||
|
|
@ -99,10 +121,21 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Determine Node version
|
||||||
|
id: node-version
|
||||||
|
run: |
|
||||||
|
if [ -f .nvmrc ]; then
|
||||||
|
echo "value=$(cat .nvmrc)" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "Using Node from .nvmrc"
|
||||||
|
else
|
||||||
|
echo "value=22" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "Using default Node 22 (current LTS)"
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Setup Node.js
|
- name: Setup Node.js
|
||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version-file: ".nvmrc"
|
node-version: ${{ steps.node-version.outputs.value }}
|
||||||
cache: "npm"
|
cache: "npm"
|
||||||
|
|
||||||
- name: Cache Playwright browsers
|
- name: Cache Playwright browsers
|
||||||
|
|
|
||||||
|
|
@ -15,6 +15,8 @@ variables:
|
||||||
npm_config_cache: "$CI_PROJECT_DIR/.npm"
|
npm_config_cache: "$CI_PROJECT_DIR/.npm"
|
||||||
# Playwright browser cache
|
# Playwright browser cache
|
||||||
PLAYWRIGHT_BROWSERS_PATH: "$CI_PROJECT_DIR/.cache/ms-playwright"
|
PLAYWRIGHT_BROWSERS_PATH: "$CI_PROJECT_DIR/.cache/ms-playwright"
|
||||||
|
# Default Node version when .nvmrc is missing
|
||||||
|
DEFAULT_NODE_VERSION: "24"
|
||||||
|
|
||||||
# Caching configuration
|
# Caching configuration
|
||||||
cache:
|
cache:
|
||||||
|
|
@@ -29,19 +31,32 @@ cache:
 # Lint stage - Code quality checks
 lint:
   stage: lint
-  image: node:20
-  script:
+  image: node:$DEFAULT_NODE_VERSION
+  before_script:
+    - |
+      NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "$DEFAULT_NODE_VERSION")
+      echo "Using Node $NODE_VERSION"
+      npm install -g n
+      n "$NODE_VERSION"
+      node -v
     - npm ci
+  script:
     - npm run lint
   timeout: 5 minutes

 # Test stage - Parallel execution with sharding
 .test-template: &test-template
   stage: test
-  image: node:20
+  image: node:$DEFAULT_NODE_VERSION
   needs:
     - lint
   before_script:
+    - |
+      NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "$DEFAULT_NODE_VERSION")
+      echo "Using Node $NODE_VERSION"
+      npm install -g n
+      n "$NODE_VERSION"
+      node -v
     - npm ci
     - npx playwright install --with-deps chromium
   artifacts:
@@ -75,7 +90,7 @@ test:shard-4:
 # Burn-in stage - Flaky test detection
 burn-in:
   stage: burn-in
-  image: node:20
+  image: node:$DEFAULT_NODE_VERSION
   needs:
     - test:shard-1
     - test:shard-2
@@ -86,6 +101,12 @@ burn-in:
     - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
     - if: '$CI_PIPELINE_SOURCE == "schedule"'
   before_script:
+    - |
+      NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "$DEFAULT_NODE_VERSION")
+      echo "Using Node $NODE_VERSION"
+      npm install -g n
+      n "$NODE_VERSION"
+      node -v
     - npm ci
     - npx playwright install --with-deps chromium
   script:
@@ -61,8 +61,8 @@ Scaffolds a production-ready CI/CD quality pipeline with test execution, burn-in
    - Ask user if unable to auto-detect

 5. **Read Environment Configuration**
-   - Check for `.nvmrc` to determine Node version
-   - Default to Node 20 LTS if not found
+   - Use `.nvmrc` for Node version if present
+   - If missing, default to a current LTS (Node 24) or newer instead of a fixed old version
    - Read `package.json` to identify dependencies (affects caching strategy)

 **Halt Condition:** If preflight checks fail, stop immediately and report which requirement failed.
@@ -56,11 +56,6 @@ phases:
     output: "Enterprise PRD with compliance requirements"
     note: "Must address existing system constraints and migration strategy"

-  - id: "validate-prd"
-    recommended: true
-    agent: "pm"
-    command: "validate-prd"
-
   - id: "create-ux-design"
     recommended: true
     agent: "ux-designer"
@@ -114,7 +109,7 @@ phases:
     required: true
     agent: "architect"
     command: "implementation-readiness"
-    note: "Critical gate - validates all planning + Epics before touching production system"
+    note: "Validates PRD + Architecture + Epics + UX (optional)"

   - phase: 3
     name: "Implementation"
@@ -44,11 +44,6 @@ phases:
     output: "Comprehensive Product Requirements Document"
     note: "Enterprise-level requirements with compliance considerations"

-  - id: "validate-prd"
-    recommended: true
-    agent: "pm"
-    command: "validate-prd"
-
   - id: "create-ux-design"
     recommended: true
     agent: "ux-designer"
@@ -102,7 +97,7 @@ phases:
     required: true
     agent: "architect"
     command: "implementation-readiness"
-    note: "Validates all planning artifacts + Epics + testability align before implementation"
+    note: "Validates PRD + Architecture + Epics + UX (optional)"

   - phase: 3
     name: "Implementation"
@@ -55,11 +55,6 @@ phases:
     output: "PRD focused on new features/changes"
     note: "Must consider existing system constraints"

-  - id: "validate-prd"
-    optional: true
-    agent: "pm"
-    command: "validate-prd"
-
   - id: "create-ux-design"
     conditional: "if_has_ui"
     agent: "ux-designer"
@@ -98,7 +93,7 @@ phases:
     required: true
     agent: "architect"
     command: "implementation-readiness"
-    note: "Validates PRD + UX + Architecture + Epics cohesion before implementation"
+    note: "Validates PRD + Architecture + Epics + UX (optional)"

   - phase: 3
     name: "Implementation"
@@ -43,12 +43,6 @@ phases:
     command: "prd"
     output: "Product Requirements Document with FRs and NFRs"

-  - id: "validate-prd"
-    optional: true
-    agent: "pm"
-    command: "validate-prd"
-    note: "Quality check for PRD completeness"
-
   - id: "create-ux-design"
     conditional: "if_has_ui"
     agent: "ux-designer"
@@ -89,7 +83,7 @@ phases:
     required: true
     agent: "architect"
     command: "implementation-readiness"
-    note: "Validates PRD + UX + Architecture + Epics + Testability cohesion before implementation"
+    note: "Validates PRD + Architecture + Epics + UX (optional)"

   - phase: 3
     name: "Implementation"
@@ -51,6 +51,7 @@ class Installer {
     this.configCollector = new ConfigCollector();
     this.ideConfigManager = new IdeConfigManager();
     this.installedFiles = []; // Track all installed files
+    this.ttsInjectedFiles = []; // Track files with TTS injection applied
   }

   /**
@@ -146,8 +147,8 @@ class Installer {
       content = content.replaceAll('{*bmad_folder*}', '{bmad_folder}');
     }

-    // Process AgentVibes injection points
-    content = this.processTTSInjectionPoints(content);
+    // Process AgentVibes injection points (pass targetPath for tracking)
+    content = this.processTTSInjectionPoints(content, targetPath);

     // Write to target with replaced content
     await fs.ensureDir(path.dirname(targetPath));
@@ -226,10 +227,14 @@ class Installer {
   * - src/modules/bmm/agents/*.md (rules sections)
   * - TTS Hook: .claude/hooks/bmad-speak.sh (in AgentVibes repo)
   */
-  processTTSInjectionPoints(content) {
+  processTTSInjectionPoints(content, targetPath = null) {
     // Check if AgentVibes is enabled (set during installation configuration)
     const enableAgentVibes = this.enableAgentVibes || false;

+    // Check if content contains any TTS injection markers
+    const hasPartyMode = content.includes('<!-- TTS_INJECTION:party-mode -->');
+    const hasAgentTTS = content.includes('<!-- TTS_INJECTION:agent-tts -->');
+
     if (enableAgentVibes) {
       // Replace party-mode injection marker with actual TTS call
       // Use single quotes to prevent shell expansion of special chars like !
@@ -253,6 +258,12 @@ If AgentVibes party mode is enabled, immediately trigger TTS with agent's voice:
 IMPORTANT: Use single quotes as shown - do NOT escape special characters like ! or $ inside single quotes
 Run in background (&) to avoid blocking`,
       );
+
+      // Track files that had TTS injection applied
+      if (targetPath && (hasPartyMode || hasAgentTTS)) {
+        const injectionType = hasPartyMode ? 'party-mode' : 'agent-tts';
+        this.ttsInjectedFiles.push({ path: targetPath, type: injectionType });
+      }
     } else {
       // Strip injection markers cleanly when AgentVibes is disabled
       content = content.replaceAll(/<!-- TTS_INJECTION:party-mode -->\n?/g, '');
@@ -1021,6 +1032,8 @@ If AgentVibes party mode is enabled, immediately trigger TTS with agent's voice:
       modules: config.modules,
       ides: config.ides,
       customFiles: customFiles.length > 0 ? customFiles : undefined,
+      ttsInjectedFiles: this.enableAgentVibes && this.ttsInjectedFiles.length > 0 ? this.ttsInjectedFiles : undefined,
+      agentVibesEnabled: this.enableAgentVibes || false,
     });

     // Offer cleanup for legacy files (only for updates, not fresh installs, and only if not skipped)
@@ -1526,13 +1539,16 @@ If AgentVibes party mode is enabled, immediately trigger TTS with agent's voice:

     // Build YAML + customize to .md
     const customizeExists = await fs.pathExists(customizePath);
-    const xmlContent = await this.xmlHandler.buildFromYaml(yamlPath, customizeExists ? customizePath : null, {
+    let xmlContent = await this.xmlHandler.buildFromYaml(yamlPath, customizeExists ? customizePath : null, {
       includeMetadata: true,
     });

     // DO NOT replace {project-root} - LLMs understand this placeholder at runtime
     // const processedContent = xmlContent.replaceAll('{project-root}', projectDir);

+    // Process TTS injection points (pass targetPath for tracking)
+    xmlContent = this.processTTSInjectionPoints(xmlContent, mdPath);
+
     // Write the built .md file to bmad/{module}/agents/ with POSIX-compliant final newline
     const content = xmlContent.endsWith('\n') ? xmlContent : xmlContent + '\n';
     await fs.writeFile(mdPath, content, 'utf8');
@@ -1628,13 +1644,16 @@ If AgentVibes party mode is enabled, immediately trigger TTS with agent's voice:
     }

     // Build YAML to XML .md
-    const xmlContent = await this.xmlHandler.buildFromYaml(sourceYamlPath, customizeExists ? customizePath : null, {
+    let xmlContent = await this.xmlHandler.buildFromYaml(sourceYamlPath, customizeExists ? customizePath : null, {
       includeMetadata: true,
     });

     // DO NOT replace {project-root} - LLMs understand this placeholder at runtime
     // const processedContent = xmlContent.replaceAll('{project-root}', projectDir);

+    // Process TTS injection points (pass targetPath for tracking)
+    xmlContent = this.processTTSInjectionPoints(xmlContent, targetMdPath);
+
     // Write the built .md file with POSIX-compliant final newline
     const content = xmlContent.endsWith('\n') ? xmlContent : xmlContent + '\n';
     await fs.writeFile(targetMdPath, content, 'utf8');
@@ -1722,13 +1741,16 @@ If AgentVibes party mode is enabled, immediately trigger TTS with agent's voice:
     }

     // Build YAML + customize to .md
-    const xmlContent = await this.xmlHandler.buildFromYaml(sourceYamlPath, customizeExists ? customizePath : null, {
+    let xmlContent = await this.xmlHandler.buildFromYaml(sourceYamlPath, customizeExists ? customizePath : null, {
       includeMetadata: true,
     });

     // DO NOT replace {project-root} - LLMs understand this placeholder at runtime
     // const processedContent = xmlContent.replaceAll('{project-root}', projectDir);

+    // Process TTS injection points (pass targetPath for tracking)
+    xmlContent = this.processTTSInjectionPoints(xmlContent, targetMdPath);
+
     // Write the rebuilt .md file with POSIX-compliant final newline
     const content = xmlContent.endsWith('\n') ? xmlContent : xmlContent + '\n';
     await fs.writeFile(targetMdPath, content, 'utf8');
@@ -105,7 +105,7 @@ class ManifestGenerator {
   }

   /**
-   * Recursively find and parse workflow.yaml files
+   * Recursively find and parse workflow.yaml and workflow.md files
   */
   async getWorkflowsFromPath(basePath, moduleName) {
     const workflows = [];
@@ -126,11 +126,23 @@ class ManifestGenerator {
         // Recurse into subdirectories
         const newRelativePath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
         await findWorkflows(fullPath, newRelativePath);
-      } else if (entry.name === 'workflow.yaml') {
-        // Parse workflow file
+      } else if (entry.name === 'workflow.yaml' || entry.name === 'workflow.md') {
+        // Parse workflow file (both YAML and MD formats)
         try {
           const content = await fs.readFile(fullPath, 'utf8');
-          const workflow = yaml.load(content);
+
+          let workflow;
+          if (entry.name === 'workflow.yaml') {
+            // Parse YAML workflow
+            workflow = yaml.load(content);
+          } else {
+            // Parse MD workflow with YAML frontmatter
+            const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);
+            if (!frontmatterMatch) {
+              continue; // Skip MD files without frontmatter
+            }
+            workflow = yaml.load(frontmatterMatch[1]);
+          }

           // Skip template workflows (those with placeholder values)
           if (workflow.name && workflow.name.includes('{') && workflow.name.includes('}')) {
@@ -141,18 +153,15 @@ class ManifestGenerator {
           // Build relative path for installation
           const installPath =
             moduleName === 'core'
-              ? `${this.bmadFolderName}/core/workflows/${relativePath}/workflow.yaml`
-              : `${this.bmadFolderName}/${moduleName}/workflows/${relativePath}/workflow.yaml`;
+              ? `${this.bmadFolderName}/core/workflows/${relativePath}/${entry.name}`
+              : `${this.bmadFolderName}/${moduleName}/workflows/${relativePath}/${entry.name}`;

-          // Check for standalone property (default: false)
-          const standalone = workflow.standalone === true;
-
+          // ALL workflows now generate commands - no standalone property needed
           workflows.push({
             name: workflow.name,
             description: workflow.description.replaceAll('"', '""'), // Escape quotes for CSV
             module: moduleName,
             path: installPath,
-            standalone: standalone,
           });

           // Add to files list
@@ -541,12 +550,12 @@ class ManifestGenerator {
   async writeWorkflowManifest(cfgDir) {
     const csvPath = path.join(cfgDir, 'workflow-manifest.csv');

-    // Create CSV header with standalone column
-    let csv = 'name,description,module,path,standalone\n';
+    // Create CSV header - removed standalone column as ALL workflows now generate commands
+    let csv = 'name,description,module,path\n';

-    // Add all workflows
+    // Add all workflows - no standalone property needed anymore
     for (const workflow of this.workflows) {
-      csv += `"${workflow.name}","${workflow.description}","${workflow.module}","${workflow.path}","${workflow.standalone}"\n`;
+      csv += `"${workflow.name}","${workflow.description}","${workflow.module}","${workflow.path}"\n`;
     }

     await fs.writeFile(csvPath, csv);
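For context on the manifest change above, here is a minimal sketch of reading the new four-column workflow-manifest.csv back in; the helper name and file path are illustrative, not part of the commit, and the split is deliberately naive (escaped "" quotes inside descriptions are not unescaped).

    // sketch: consume bmad/_cfg/workflow-manifest.csv written by writeWorkflowManifest()
    const fs = require('fs-extra');

    async function readWorkflowManifest(csvPath) {
      const text = await fs.readFile(csvPath, 'utf8');
      const [header, ...rows] = text.trim().split('\n');
      // header is now: name,description,module,path (the standalone column is gone)
      return rows.map((row) => {
        const [name, description, module, filePath] = row
          .split('","')
          .map((cell) => cell.replaceAll('"', ''));
        return { name, description, module, path: filePath };
      });
    }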
@@ -3,6 +3,7 @@ const fs = require('fs-extra');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');

 /**
  * Auggie CLI setup handler
@@ -33,10 +34,23 @@ class AuggieSetup extends BaseIdeSetup {
     const agentGen = new AgentCommandGenerator(this.bmadFolderName);
     const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);

-    // Get tasks, tools, and workflows (standalone only)
+    // Get tasks, tools, and workflows (ALL workflows now generate commands)
     const tasks = await this.getTasks(bmadDir, true);
     const tools = await this.getTools(bmadDir, true);
-    const workflows = await this.getWorkflows(bmadDir, true);
+
+    // Get ALL workflows using the new workflow command generator
+    const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+    const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
+
+    // Convert workflow artifacts to expected format
+    const workflows = workflowArtifacts
+      .filter((artifact) => artifact.type === 'workflow-command')
+      .map((artifact) => ({
+        module: artifact.module,
+        name: path.basename(artifact.relativePath, '.md'),
+        path: artifact.sourcePath,
+        content: artifact.content,
+      }));

     const bmadCommandsDir = path.join(location, 'bmad');
     const agentsDir = path.join(bmadCommandsDir, 'agents');
@@ -73,13 +87,11 @@ class AuggieSetup extends BaseIdeSetup {
       await this.writeFile(targetPath, commandContent);
     }

-    // Install workflows
+    // Install workflows (already generated commands)
     for (const workflow of workflows) {
-      const content = await this.readFile(workflow.path);
-      const commandContent = this.createWorkflowCommand(workflow, content);
-
+      // Use the pre-generated workflow command content
       const targetPath = path.join(workflowsDir, `${workflow.module}-${workflow.name}.md`);
-      await this.writeFile(targetPath, commandContent);
+      await this.writeFile(targetPath, workflow.content);
     }

     const totalInstalled = agentArtifacts.length + tasks.length + tools.length + workflows.length;
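As a point of reference, the objects the Auggie install loop consumes are the ones produced by the .map() conversion earlier in this file; a hedged sketch with made-up values only:

    // sketch: one converted workflow artifact as the install loop above sees it
    const workflow = {
      module: 'bmm', // hypothetical module name
      name: 'plan-project', // basename of artifact.relativePath without .md
      path: '/abs/path/to/source/plan-project/workflow.md', // artifact.sourcePath (illustrative)
      content: '# pre-generated workflow command markdown ...',
    };
    // the loop writes workflow.content verbatim to <location>/bmad/workflows/bmm-plan-project.md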
@@ -3,6 +3,7 @@ const fs = require('fs-extra');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');

 /**
  * Crush IDE setup handler
@@ -34,10 +35,23 @@ class CrushSetup extends BaseIdeSetup {
     const agentGen = new AgentCommandGenerator(this.bmadFolderName);
     const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);

-    // Get tasks, tools, and workflows (standalone only)
+    // Get tasks, tools, and workflows (ALL workflows now generate commands)
     const tasks = await this.getTasks(bmadDir, true);
     const tools = await this.getTools(bmadDir, true);
-    const workflows = await this.getWorkflows(bmadDir, true);
+
+    // Get ALL workflows using the new workflow command generator
+    const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+    const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
+
+    // Convert workflow artifacts to expected format for organizeByModule
+    const workflows = workflowArtifacts
+      .filter((artifact) => artifact.type === 'workflow-command')
+      .map((artifact) => ({
+        module: artifact.module,
+        name: path.basename(artifact.relativePath, '.md'),
+        path: artifact.sourcePath,
+        content: artifact.content,
+      }));

     // Organize by module
     const agentCount = await this.organizeByModule(commandsDir, agentArtifacts, tasks, tools, workflows, projectDir);
@@ -113,13 +127,12 @@ class CrushSetup extends BaseIdeSetup {
         toolCount++;
       }

-      // Copy module-specific workflows
+      // Copy module-specific workflow commands (already generated)
       const moduleWorkflows = workflows.filter((w) => w.module === module);
       for (const workflow of moduleWorkflows) {
-        const content = await this.readFile(workflow.path);
-        const commandContent = this.createWorkflowCommand(workflow, content);
+        // Use the pre-generated workflow command content
         const targetPath = path.join(moduleWorkflowsDir, `${workflow.name}.md`);
-        await this.writeFile(targetPath, commandContent);
+        await this.writeFile(targetPath, workflow.content);
         workflowCount++;
       }
     }
@@ -2,6 +2,7 @@ const path = require('node:path');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');

 /**
  * Cursor IDE setup handler
@@ -53,10 +54,22 @@ class CursorSetup extends BaseIdeSetup {
     // Convert artifacts to agent format for index creation
     const agents = agentArtifacts.map((a) => ({ module: a.module, name: a.name }));

-    // Get tasks, tools, and workflows (standalone only)
+    // Get tasks, tools, and workflows (ALL workflows now generate commands)
     const tasks = await this.getTasks(bmadDir, true);
     const tools = await this.getTools(bmadDir, true);
-    const workflows = await this.getWorkflows(bmadDir, true);
+
+    // Get ALL workflows using the new workflow command generator
+    const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+    const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
+
+    // Convert artifacts to workflow objects for directory creation
+    const workflows = workflowArtifacts
+      .filter((artifact) => artifact.type === 'workflow-command')
+      .map((artifact) => ({
+        module: artifact.module,
+        name: path.basename(artifact.relativePath, '.md'),
+        path: artifact.sourcePath,
+      }));

     // Create directories for each module
     const modules = new Set();
@@ -113,18 +126,21 @@ class CursorSetup extends BaseIdeSetup {
       toolCount++;
     }

-    // Process and copy workflows
+    // Process and copy workflow commands (generated, not raw workflows)
     let workflowCount = 0;
-    for (const workflow of workflows) {
-      const content = await this.readAndProcess(workflow.path, {
-        module: workflow.module,
-        name: workflow.name,
-      });
+    for (const artifact of workflowArtifacts) {
+      if (artifact.type === 'workflow-command') {
+        // Add MDC metadata header to workflow command
+        const content = this.wrapLauncherWithMDC(artifact.content, {
+          module: artifact.module,
+          name: path.basename(artifact.relativePath, '.md'),
+        });

-      const targetPath = path.join(bmadRulesDir, workflow.module, 'workflows', `${workflow.name}.mdc`);
+        const targetPath = path.join(bmadRulesDir, artifact.module, 'workflows', `${path.basename(artifact.relativePath, '.md')}.mdc`);

         await this.writeFile(targetPath, content);
         workflowCount++;
+      }
     }

     // Create BMAD index file (but NOT .cursorrules - user manages that)
@@ -4,6 +4,7 @@ const yaml = require('js-yaml');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');

 /**
  * Gemini CLI setup handler
@@ -68,9 +69,13 @@ class GeminiSetup extends BaseIdeSetup {
     const agentGen = new AgentCommandGenerator(this.bmadFolderName);
     const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);

-    // Get tasks
+    // Get tasks and workflows (ALL workflows now generate commands)
     const tasks = await this.getTasks(bmadDir);

+    // Get ALL workflows using the new workflow command generator
+    const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+    const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
+
     // Install agents as TOML files with bmad- prefix (flat structure)
     let agentCount = 0;
     for (const artifact of agentArtifacts) {
@@ -98,17 +103,37 @@ class GeminiSetup extends BaseIdeSetup {
       console.log(chalk.green(` ✓ Added task: /bmad:tasks:${task.module}:${task.name}`));
     }

+    // Install workflows as TOML files with bmad- prefix (flat structure)
+    let workflowCount = 0;
+    for (const artifact of workflowArtifacts) {
+      if (artifact.type === 'workflow-command') {
+        // Create TOML wrapper around workflow command content
+        const tomlContent = await this.createWorkflowToml(artifact);
+
+        // Flat structure: bmad-workflow-{module}-{name}.toml
+        const workflowName = path.basename(artifact.relativePath, '.md');
+        const tomlPath = path.join(commandsDir, `bmad-workflow-${artifact.module}-${workflowName}.toml`);
+        await this.writeFile(tomlPath, tomlContent);
+        workflowCount++;
+
+        console.log(chalk.green(` ✓ Added workflow: /bmad:workflows:${artifact.module}:${workflowName}`));
+      }
+    }
+
     console.log(chalk.green(`✓ ${this.name} configured:`));
     console.log(chalk.dim(` - ${agentCount} agents configured`));
     console.log(chalk.dim(` - ${taskCount} tasks configured`));
+    console.log(chalk.dim(` - ${workflowCount} workflows configured`));
     console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));
     console.log(chalk.dim(` - Agent activation: /bmad:agents:{agent-name}`));
     console.log(chalk.dim(` - Task activation: /bmad:tasks:{task-name}`));
+    console.log(chalk.dim(` - Workflow activation: /bmad:workflows:{workflow-name}`));

     return {
       success: true,
       agents: agentCount,
       tasks: taskCount,
+      workflows: workflowCount,
     };
   }

@@ -179,6 +204,27 @@ ${contentWithoutFrontmatter}
     return tomlContent;
   }

+  /**
+   * Create workflow TOML content from artifact
+   */
+  async createWorkflowToml(artifact) {
+    // Extract description from artifact content
+    const descriptionMatch = artifact.content.match(/description:\s*"([^"]+)"/);
+    const description = descriptionMatch
+      ? descriptionMatch[1]
+      : `BMAD ${artifact.module.toUpperCase()} Workflow: ${path.basename(artifact.relativePath, '.md')}`;
+
+    // Strip frontmatter from command content
+    const frontmatterRegex = /^---\s*\n[\s\S]*?\n---\s*\n/;
+    const contentWithoutFrontmatter = artifact.content.replace(frontmatterRegex, '').trim();
+
+    return `description = "${description}"
+prompt = """
+${contentWithoutFrontmatter}
+"""
+`;
+  }
+
   /**
   * Cleanup Gemini configuration - surgically remove only BMAD files
   */
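To make the TOML wrapper above concrete, a minimal sketch of calling createWorkflowToml on a hypothetical artifact; the module name, path, and content are invented for illustration and gemini stands for an instance of GeminiSetup.

    // sketch only - runs inside an async function; values are made up
    const artifact = {
      module: 'bmm',
      relativePath: 'bmm/workflows/plan-project.md',
      content: '---\ndescription: "Plan a project"\n---\nRun the plan-project workflow...',
    };
    const toml = await gemini.createWorkflowToml(artifact);
    // toml is roughly:
    // description = "Plan a project"
    // prompt = """
    // Run the plan-project workflow...
    // """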
@@ -3,6 +3,7 @@ const fs = require('fs-extra');
 const { BaseIdeSetup } = require('./_base-ide');
 const chalk = require('chalk');
 const { AgentCommandGenerator } = require('./shared/agent-command-generator');
+const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');

 /**
  * iFlow CLI setup handler
@@ -29,9 +30,11 @@ class IFlowSetup extends BaseIdeSetup {
     const commandsDir = path.join(iflowDir, this.commandsDir, 'bmad');
     const agentsDir = path.join(commandsDir, 'agents');
     const tasksDir = path.join(commandsDir, 'tasks');
+    const workflowsDir = path.join(commandsDir, 'workflows');

     await this.ensureDir(agentsDir);
     await this.ensureDir(tasksDir);
+    await this.ensureDir(workflowsDir);

     // Generate agent launchers
     const agentGen = new AgentCommandGenerator(this.bmadFolderName);
@@ -47,9 +50,13 @@ class IFlowSetup extends BaseIdeSetup {
       agentCount++;
     }

-    // Get tasks
+    // Get tasks and workflows (ALL workflows now generate commands)
     const tasks = await this.getTasks(bmadDir);

+    // Get ALL workflows using the new workflow command generator
+    const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
+    const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
+
     // Setup tasks as commands
     let taskCount = 0;
     for (const task of tasks) {
@@ -61,15 +68,27 @@ class IFlowSetup extends BaseIdeSetup {
       taskCount++;
     }

+    // Setup workflows as commands (already generated)
+    let workflowCount = 0;
+    for (const artifact of workflowArtifacts) {
+      if (artifact.type === 'workflow-command') {
+        const targetPath = path.join(workflowsDir, `${artifact.module}-${path.basename(artifact.relativePath, '.md')}.md`);
+        await this.writeFile(targetPath, artifact.content);
+        workflowCount++;
+      }
+    }
+
     console.log(chalk.green(`✓ ${this.name} configured:`));
     console.log(chalk.dim(` - ${agentCount} agent commands created`));
     console.log(chalk.dim(` - ${taskCount} task commands created`));
+    console.log(chalk.dim(` - ${workflowCount} workflow commands created`));
     console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));

     return {
       success: true,
       agents: agentCount,
       tasks: taskCount,
+      workflows: workflowCount,
     };
   }

@@ -0,0 +1,327 @@
+const path = require('node:path');
+const { BaseIdeSetup } = require('./_base-ide');
+const chalk = require('chalk');
+const fs = require('fs-extra');
+const yaml = require('js-yaml');
+
+/**
+ * Kiro CLI setup handler for BMad Method
+ */
+class KiroCliSetup extends BaseIdeSetup {
+  constructor() {
+    super('kiro-cli', 'Kiro CLI', false);
+    this.configDir = '.kiro';
+    this.agentsDir = 'agents';
+  }
+
+  /**
+   * Cleanup old BMAD installation before reinstalling
+   * @param {string} projectDir - Project directory
+   */
+  async cleanup(projectDir) {
+    const bmadAgentsDir = path.join(projectDir, this.configDir, this.agentsDir);
+
+    if (await fs.pathExists(bmadAgentsDir)) {
+      // Remove existing BMad agents
+      const files = await fs.readdir(bmadAgentsDir);
+      for (const file of files) {
+        if (file.startsWith('bmad-') || file.includes('bmad')) {
+          await fs.remove(path.join(bmadAgentsDir, file));
+        }
+      }
+      console.log(chalk.dim(` Cleaned old BMAD agents from ${this.name}`));
+    }
+  }
+
+  /**
+   * Setup Kiro CLI configuration with BMad agents
+   * @param {string} projectDir - Project directory
+   * @param {string} bmadDir - BMAD installation directory
+   * @param {Object} options - Setup options
+   */
+  async setup(projectDir, bmadDir, options = {}) {
+    console.log(chalk.cyan(`Setting up ${this.name}...`));
+
+    await this.cleanup(projectDir);
+
+    const kiroDir = path.join(projectDir, this.configDir);
+    const agentsDir = path.join(kiroDir, this.agentsDir);
+
+    await this.ensureDir(agentsDir);
+
+    // Create BMad agents from source YAML files
+    await this.createBmadAgentsFromSource(agentsDir, projectDir);
+
+    console.log(chalk.green(`✓ ${this.name} configured with BMad agents`));
+  }
+
+  /**
+   * Create BMad agent definitions from source YAML files
+   * @param {string} agentsDir - Agents directory
+   * @param {string} projectDir - Project directory
+   */
+  async createBmadAgentsFromSource(agentsDir, projectDir) {
+    const sourceDir = path.join(__dirname, '../../../../../src/modules');
+
+    // Find all agent YAML files
+    const agentFiles = await this.findAgentFiles(sourceDir);
+
+    for (const agentFile of agentFiles) {
+      try {
+        await this.processAgentFile(agentFile, agentsDir, projectDir);
+      } catch (error) {
+        console.warn(chalk.yellow(`⚠️ Failed to process ${agentFile}: ${error.message}`));
+      }
+    }
+  }
+
+  /**
+   * Find all agent YAML files in modules and core
+   * @param {string} sourceDir - Source modules directory
+   * @returns {Array} Array of agent file paths
+   */
+  async findAgentFiles(sourceDir) {
+    const agentFiles = [];
+
+    // Check core agents
+    const coreAgentsDir = path.join(__dirname, '../../../../../src/core/agents');
+    if (await fs.pathExists(coreAgentsDir)) {
+      const files = await fs.readdir(coreAgentsDir);
+
+      for (const file of files) {
+        if (file.endsWith('.agent.yaml')) {
+          agentFiles.push(path.join(coreAgentsDir, file));
+        }
+      }
+    }
+
+    // Check module agents
+    if (!(await fs.pathExists(sourceDir))) {
+      return agentFiles;
+    }
+
+    const modules = await fs.readdir(sourceDir);
+
+    for (const module of modules) {
+      const moduleAgentsDir = path.join(sourceDir, module, 'agents');
+
+      if (await fs.pathExists(moduleAgentsDir)) {
+        const files = await fs.readdir(moduleAgentsDir);
+
+        for (const file of files) {
+          if (file.endsWith('.agent.yaml')) {
+            agentFiles.push(path.join(moduleAgentsDir, file));
+          }
+        }
+      }
+    }
+
+    return agentFiles;
+  }
+
+  /**
+   * Validate BMad Core compliance
+   * @param {Object} agentData - Agent YAML data
+   * @returns {boolean} True if compliant
+   */
+  validateBmadCompliance(agentData) {
+    const requiredFields = ['agent.metadata.id', 'agent.persona.role', 'agent.persona.principles'];
+
+    for (const field of requiredFields) {
+      const keys = field.split('.');
+      let current = agentData;
+
+      for (const key of keys) {
+        if (!current || !current[key]) {
+          return false;
+        }
+        current = current[key];
+      }
+    }
+
+    return true;
+  }
+
+  /**
+   * Process individual agent YAML file
+   * @param {string} agentFile - Path to agent YAML file
+   * @param {string} agentsDir - Target agents directory
+   * @param {string} projectDir - Project directory
+   */
+  async processAgentFile(agentFile, agentsDir, projectDir) {
+    const yamlContent = await fs.readFile(agentFile, 'utf8');
+    const agentData = yaml.load(yamlContent);
+
+    if (!this.validateBmadCompliance(agentData)) {
+      return;
+    }
+
+    // Extract module from file path
+    const normalizedPath = path.normalize(agentFile);
+    const pathParts = normalizedPath.split(path.sep);
+    const basename = path.basename(agentFile, '.agent.yaml');
+
+    // Find the module name from path
+    let moduleName = 'unknown';
+    if (pathParts.includes('src')) {
+      const srcIndex = pathParts.indexOf('src');
+      if (srcIndex + 3 < pathParts.length) {
+        const folderAfterSrc = pathParts[srcIndex + 1];
+        // Handle both src/core/agents and src/modules/[module]/agents patterns
+        if (folderAfterSrc === 'core') {
+          moduleName = 'core';
+        } else if (folderAfterSrc === 'modules') {
+          moduleName = pathParts[srcIndex + 2]; // The actual module name
+        }
+      }
+    }
+
+    // Extract the agent name from the ID path in YAML if available
+    let agentBaseName = basename;
+    if (agentData.agent && agentData.agent.metadata && agentData.agent.metadata.id) {
+      const idPath = agentData.agent.metadata.id;
+      agentBaseName = path.basename(idPath, '.md');
+    }
+
+    const agentName = `bmad-${moduleName}-${agentBaseName}`;
+    const sanitizedAgentName = this.sanitizeAgentName(agentName);
+
+    // Create JSON definition
+    await this.createAgentDefinitionFromYaml(agentsDir, sanitizedAgentName, agentData);
+
+    // Create prompt file
+    await this.createAgentPromptFromYaml(agentsDir, sanitizedAgentName, agentData, projectDir);
+  }
+
+  /**
+   * Sanitize agent name for file naming
+   * @param {string} name - Agent name
+   * @returns {string} Sanitized name
+   */
+  sanitizeAgentName(name) {
+    return name
+      .toLowerCase()
+      .replaceAll(/\s+/g, '-')
+      .replaceAll(/[^a-z0-9-]/g, '');
+  }
+
+  /**
+   * Create agent JSON definition from YAML data
+   * @param {string} agentsDir - Agents directory
+   * @param {string} agentName - Agent name (role-based)
+   * @param {Object} agentData - Agent YAML data
+   */
+  async createAgentDefinitionFromYaml(agentsDir, agentName, agentData) {
+    const personName = agentData.agent.metadata.name;
+    const role = agentData.agent.persona.role;
+
+    const agentConfig = {
+      name: agentName,
+      description: `${personName} - ${role}`,
+      prompt: `file://./${agentName}-prompt.md`,
+      tools: ['*'],
+      mcpServers: {},
+      useLegacyMcpJson: true,
+      resources: [],
+    };
+
+    const agentPath = path.join(agentsDir, `${agentName}.json`);
+    await fs.writeJson(agentPath, agentConfig, { spaces: 2 });
+  }
+
+  /**
+   * Create agent prompt from YAML data
+   * @param {string} agentsDir - Agents directory
+   * @param {string} agentName - Agent name (role-based)
+   * @param {Object} agentData - Agent YAML data
+   * @param {string} projectDir - Project directory
+   */
+  async createAgentPromptFromYaml(agentsDir, agentName, agentData, projectDir) {
+    const promptPath = path.join(agentsDir, `${agentName}-prompt.md`);
+
+    // Generate prompt from YAML data
+    const prompt = this.generatePromptFromYaml(agentData);
+    await fs.writeFile(promptPath, prompt);
+  }
+
+  /**
+   * Generate prompt content from YAML data
+   * @param {Object} agentData - Agent YAML data
+   * @returns {string} Generated prompt
+   */
+  generatePromptFromYaml(agentData) {
+    const agent = agentData.agent;
+    const name = agent.metadata.name;
+    const icon = agent.metadata.icon || '🤖';
+    const role = agent.persona.role;
+    const identity = agent.persona.identity;
+    const style = agent.persona.communication_style;
+    const principles = agent.persona.principles;
+
+    let prompt = `# ${name} ${icon}\n\n`;
+    prompt += `## Role\n${role}\n\n`;
+
+    if (identity) {
+      prompt += `## Identity\n${identity}\n\n`;
+    }
+
+    if (style) {
+      prompt += `## Communication Style\n${style}\n\n`;
+    }
+
+    if (principles) {
+      prompt += `## Principles\n`;
+      if (typeof principles === 'string') {
+        // Handle multi-line string principles
+        prompt += principles + '\n\n';
+      } else if (Array.isArray(principles)) {
+        // Handle array principles
+        for (const principle of principles) {
+          prompt += `- ${principle}\n`;
+        }
+        prompt += '\n';
+      }
+    }
+
+    // Add menu items if available
+    if (agent.menu && agent.menu.length > 0) {
+      prompt += `## Available Workflows\n`;
+      for (let i = 0; i < agent.menu.length; i++) {
+        const item = agent.menu[i];
+        prompt += `${i + 1}. **${item.trigger}**: ${item.description}\n`;
+      }
+      prompt += '\n';
+    }
+
+    prompt += `## Instructions\nYou are ${name}, part of the BMad Method. Follow your role and principles while assisting users with their development needs.\n`;
+
+    return prompt;
+  }
+
+  /**
+   * Check if Kiro CLI is available
+   * @returns {Promise<boolean>} True if available
+   */
+  async isAvailable() {
+    try {
+      const { execSync } = require('node:child_process');
+      execSync('kiro-cli --version', { stdio: 'ignore' });
+      return true;
+    } catch {
+      return false;
+    }
+  }
+
+  /**
+   * Get installation instructions
+   * @returns {string} Installation instructions
+   */
+  getInstallInstructions() {
+    return `Install Kiro CLI:
+curl -fsSL https://github.com/aws/kiro-cli/releases/latest/download/install.sh | bash
+
+Or visit: https://github.com/aws/kiro-cli`;
+  }
+}
+
+module.exports = { KiroCliSetup };
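To make the compliance gate in the new Kiro handler concrete, a minimal sketch of feeding it a hypothetical agent object; every field value here is invented for illustration and is not taken from the repo.

    // sketch only - a minimal agent shape that passes validateBmadCompliance()
    const kiro = new KiroCliSetup();
    const agentData = {
      agent: {
        metadata: { id: 'agents/example.md', name: 'Example', icon: '🧪' },
        persona: { role: 'Example role', principles: ['Keep it simple'] },
      },
    };
    console.log(kiro.validateBmadCompliance(agentData)); // true
    console.log(kiro.generatePromptFromYaml(agentData)); // "# Example 🧪" markdown prompt with Role and Principles sections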
@@ -47,7 +47,7 @@ class OpenCodeSetup extends BaseIdeSetup {
       agentCount++;
     }

-    // Install workflow commands with flat naming: bmad-workflow-{module}-{name}.md
+    // Install workflow commands with flat naming: bmad-{module}-{workflow-name}
     const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
     const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
@@ -55,10 +55,10 @@ class OpenCodeSetup extends BaseIdeSetup {
     for (const artifact of workflowArtifacts) {
       if (artifact.type === 'workflow-command') {
         const commandContent = artifact.content;
-        // Flat structure: bmad-workflow-{module}-{name}.md
+        // Flat structure: bmad-{module}-{workflow-name}.md
         // artifact.relativePath is like: bmm/workflows/plan-project.md
         const workflowName = path.basename(artifact.relativePath, '.md');
-        const targetPath = path.join(commandsBaseDir, `bmad-workflow-${artifact.module}-${workflowName}.md`);
+        const targetPath = path.join(commandsBaseDir, `bmad-${artifact.module}-${workflowName}.md`);
         await this.writeFile(targetPath, commandContent);
         workflowCommandCount++;
       }
@@ -5,34 +5,13 @@ const { AgentCommandGenerator } = require('./shared/agent-command-generator');

 /**
  * Roo IDE setup handler
- * Creates custom modes in .roomodes file
+ * Creates custom commands in .roo/commands directory
  */
 class RooSetup extends BaseIdeSetup {
   constructor() {
     super('roo', 'Roo Code');
-    this.configFile = '.roomodes';
-    this.defaultPermissions = {
-      dev: {
-        description: 'Development files',
-        fileRegex: String.raw`.*\.(js|jsx|ts|tsx|py|java|cpp|c|h|cs|go|rs|php|rb|swift)$`,
-      },
-      config: {
-        description: 'Configuration files',
-        fileRegex: String.raw`.*\.(json|yaml|yml|toml|xml|ini|env|config)$`,
-      },
-      docs: {
-        description: 'Documentation files',
-        fileRegex: String.raw`.*\.(md|mdx|rst|txt|doc|docx)$`,
-      },
-      styles: {
-        description: 'Style and design files',
-        fileRegex: String.raw`.*\.(css|scss|sass|less|stylus)$`,
-      },
-      all: {
-        description: 'All files',
-        fileRegex: '.*',
-      },
-    };
+    this.configDir = '.roo';
+    this.commandsDir = 'commands';
   }

   /**
@@ -44,94 +23,96 @@ class RooSetup extends BaseIdeSetup {
async setup(projectDir, bmadDir, options = {}) {
console.log(chalk.cyan(`Setting up ${this.name}...`));

-// Check for existing .roomodes file
+// Create .roo/commands directory
-const roomodesPath = path.join(projectDir, this.configFile);
+const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);
-let existingModes = [];
+await this.ensureDir(rooCommandsDir);
-let existingContent = '';

-if (await this.pathExists(roomodesPath)) {
+// Generate agent launchers
-existingContent = await this.readFile(roomodesPath);
-// Parse existing modes to avoid duplicates
-const modeMatches = existingContent.matchAll(/- slug: ([\w-]+)/g);
-for (const match of modeMatches) {
-existingModes.push(match[1]);
-}
-console.log(chalk.yellow(`Found existing .roomodes file with ${existingModes.length} modes`));
-}

-// Generate agent launchers (though Roo will reference the actual .bmad agents)
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);

-// Always use 'all' permissions - users can customize in .roomodes file
-const permissionChoice = 'all';

-// Create modes content
-let newModesContent = '';
let addedCount = 0;
let skippedCount = 0;

for (const artifact of agentArtifacts) {
-const slug = `bmad-${artifact.module}-${artifact.name}`;
+const commandName = `bmad-${artifact.module}-agent-${artifact.name}`;
+const commandPath = path.join(rooCommandsDir, `${commandName}.md`);

// Skip if already exists
-if (existingModes.includes(slug)) {
+if (await this.pathExists(commandPath)) {
-console.log(chalk.dim(` Skipping ${slug} - already exists`));
+console.log(chalk.dim(` Skipping ${commandName} - already exists`));
skippedCount++;
continue;
}

-// Read the actual agent file from .bmad for metadata extraction
+// Read the actual agent file from .bmad for metadata extraction (installed agents are .md files)
const agentPath = path.join(bmadDir, artifact.module, 'agents', `${artifact.name}.md`);
const content = await this.readFile(agentPath);

-// Create mode entry that references the actual .bmad agent
+// Create command file that references the actual .bmad agent
-const modeEntry = await this.createModeEntry(
+await this.createCommandFile({ module: artifact.module, name: artifact.name, path: agentPath }, content, commandPath, projectDir);
-{ module: artifact.module, name: artifact.name, path: agentPath },
-content,
-permissionChoice,
-projectDir,
-);

-newModesContent += modeEntry;
addedCount++;
-console.log(chalk.green(` ✓ Added mode: ${slug}`));
+console.log(chalk.green(` ✓ Added command: ${commandName}`));
}

-// Build final content
-let finalContent = '';
-if (existingContent) {
-// Append to existing content
-finalContent = existingContent.trim() + '\n' + newModesContent;
-} else {
-// Create new .roomodes file
-finalContent = 'customModes:\n' + newModesContent;
-}

-// Write .roomodes file
-await this.writeFile(roomodesPath, finalContent);

console.log(chalk.green(`✓ ${this.name} configured:`));
-console.log(chalk.dim(` - ${addedCount} modes added`));
+console.log(chalk.dim(` - ${addedCount} commands added`));
if (skippedCount > 0) {
-console.log(chalk.dim(` - ${skippedCount} modes skipped (already exist)`));
+console.log(chalk.dim(` - ${skippedCount} commands skipped (already exist)`));
}
-console.log(chalk.dim(` - Configuration file: ${this.configFile}`));
+console.log(chalk.dim(` - Commands directory: ${this.configDir}/${this.commandsDir}/bmad/`));
-console.log(chalk.dim(` - Permission level: all (unrestricted)`));
+console.log(chalk.dim(` Commands will be available when you open this project in Roo Code`));
-console.log(chalk.yellow(`\n 💡 Tip: Edit ${this.configFile} to customize file permissions per agent`));
-console.log(chalk.dim(` Modes will be available when you open this project in Roo Code`));

return {
success: true,
-modes: addedCount,
+commands: addedCount,
skipped: skippedCount,
};
}

/**
-* Create a mode entry for an agent
+* Create a unified command file for agents
+* @param {string} commandPath - Path where to write the command file
+* @param {Object} options - Command options
+* @param {string} options.name - Display name for the command
+* @param {string} options.description - Description for the command
+* @param {string} options.agentPath - Path to the agent file (relative to project root)
+* @param {string} [options.icon] - Icon emoji (defaults to 🤖)
+* @param {string} [options.extraContent] - Additional content to include before activation
*/
-async createModeEntry(agent, content, permissionChoice, projectDir) {
+async createAgentCommandFile(commandPath, options) {
+const { name, description, agentPath, icon = '🤖', extraContent = '' } = options;

+// Build command content with YAML frontmatter
+let commandContent = `---\n`;
+commandContent += `name: '${icon} ${name}'\n`;
+commandContent += `description: '${description}'\n`;
+commandContent += `---\n\n`;

+commandContent += `You must fully embody this agent's persona and follow all activation instructions exactly as specified. NEVER break character until given an exit command.\n\n`;

+// Add any extra content (e.g., warnings for custom agents)
+if (extraContent) {
+commandContent += `${extraContent}\n\n`;
+}

+commandContent += `<agent-activation CRITICAL="TRUE">\n`;
+commandContent += `1. LOAD the FULL agent file from @${agentPath}\n`;
+commandContent += `2. READ its entire contents - this contains the complete agent persona, menu, and instructions\n`;
+commandContent += `3. Execute ALL activation steps exactly as written in the agent file\n`;
+commandContent += `4. Follow the agent's persona and menu system precisely\n`;
+commandContent += `5. Stay in character throughout the session\n`;
+commandContent += `</agent-activation>\n`;

+// Write command file
+await this.writeFile(commandPath, commandContent);
+}

+/**
+* Create a command file for an agent
+*/
+async createCommandFile(agent, content, commandPath, projectDir) {
// Extract metadata from agent content
const titleMatch = content.match(/title="([^"]+)"/);
const title = titleMatch ? titleMatch[1] : this.formatTitle(agent.name);
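As a rough illustration of what `createAgentCommandFile` writes, the sketch below calls it with invented values (the file path, agent name, and agent location are hypothetical); the resulting file begins with the YAML frontmatter and ends with the `<agent-activation>` block assembled above:

```js
// Hypothetical usage - `setup` stands in for a RooSetup instance
await setup.createAgentCommandFile('.roo/commands/bmad-bmm-agent-pm.md', {
  name: 'Product Manager',             // hypothetical display name
  description: 'Use for PM tasks',     // hypothetical whenToUse text
  agentPath: '.bmad/bmm/agents/pm.md', // hypothetical relative agent path
});
// Resulting file (abridged):
// ---
// name: '🤖 Product Manager'
// description: 'Use for PM tasks'
// ---
// ...persona instruction line...
// <agent-activation CRITICAL="TRUE">
// 1. LOAD the FULL agent file from @.bmad/bmm/agents/pm.md
// ...
// </agent-activation>
```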
@@ -142,66 +123,16 @@ class RooSetup extends BaseIdeSetup {
const whenToUseMatch = content.match(/whenToUse="([^"]+)"/);
const whenToUse = whenToUseMatch ? whenToUseMatch[1] : `Use for ${title} tasks`;

-// Get the activation header from central template
-const activationHeader = await this.getAgentCommandHeader();

-const roleDefinitionMatch = content.match(/roleDefinition="([^"]+)"/);
-const roleDefinition = roleDefinitionMatch
-? roleDefinitionMatch[1]
-: `You are a ${title} specializing in ${title.toLowerCase()} tasks and responsibilities.`;

// Get relative path
const relativePath = path.relative(projectDir, agent.path).replaceAll('\\', '/');

-// Determine permissions
+// Use unified method
-const permissions = this.getPermissionsForAgent(agent, permissionChoice);
+await this.createAgentCommandFile(commandPath, {
+name: title,
-// Build mode entry
+description: whenToUse,
-const slug = `bmad-${agent.module}-${agent.name}`;
+agentPath: relativePath,
-let modeEntry = ` - slug: ${slug}\n`;
+icon: icon,
-modeEntry += ` name: '${icon} ${title}'\n`;
+});

-if (permissions && permissions.description) {
-modeEntry += ` description: '${permissions.description}'\n`;
-}

-modeEntry += ` roleDefinition: ${roleDefinition}\n`;
-modeEntry += ` whenToUse: ${whenToUse}\n`;
-modeEntry += ` customInstructions: ${activationHeader} Read the full YAML from ${relativePath} start activation to alter your state of being follow startup section instructions stay in this being until told to exit this mode\n`;
-modeEntry += ` groups:\n`;
-modeEntry += ` - read\n`;

-if (permissions && permissions.fileRegex) {
-modeEntry += ` - - edit\n`;
-modeEntry += ` - fileRegex: ${permissions.fileRegex}\n`;
-modeEntry += ` description: ${permissions.description}\n`;
-} else {
-modeEntry += ` - edit\n`;
-}

-return modeEntry;
-}

-/**
-* Get permissions configuration for an agent
-*/
-getPermissionsForAgent(agent, permissionChoice) {
-if (permissionChoice === 'custom') {
-// Custom logic based on agent name/module
-if (agent.name.includes('dev') || agent.name.includes('code')) {
-return this.defaultPermissions.dev;
-} else if (agent.name.includes('doc') || agent.name.includes('write')) {
-return this.defaultPermissions.docs;
-} else if (agent.name.includes('config') || agent.name.includes('setup')) {
-return this.defaultPermissions.config;
-} else if (agent.name.includes('style') || agent.name.includes('css')) {
-return this.defaultPermissions.styles;
-}
-// Default to all for custom agents
-return this.defaultPermissions.all;
-}

-return this.defaultPermissions[permissionChoice] || null;
}

/**
@@ -219,8 +150,26 @@ class RooSetup extends BaseIdeSetup {
*/
async cleanup(projectDir) {
const fs = require('fs-extra');
-const roomodesPath = path.join(projectDir, this.configFile);
+const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);

+if (await fs.pathExists(rooCommandsDir)) {
+const files = await fs.readdir(rooCommandsDir);
+let removedCount = 0;

+for (const file of files) {
+if (file.startsWith('bmad-') && file.endsWith('.md')) {
+await fs.remove(path.join(rooCommandsDir, file));
+removedCount++;
+}
+}

+if (removedCount > 0) {
+console.log(chalk.dim(`Removed ${removedCount} BMAD commands from .roo/commands/`));
+}
+}

+// Also clean up old .roomodes file if it exists
+const roomodesPath = path.join(projectDir, '.roomodes');
if (await fs.pathExists(roomodesPath)) {
const content = await fs.readFile(roomodesPath, 'utf8');
@@ -245,7 +194,9 @@ class RooSetup extends BaseIdeSetup {

// Write back filtered content
await fs.writeFile(roomodesPath, filteredLines.join('\n'));
-console.log(chalk.dim(`Removed ${removedCount} BMAD modes from .roomodes`));
+if (removedCount > 0) {
+console.log(chalk.dim(`Removed ${removedCount} BMAD modes from legacy .roomodes file`));
+}
}
}
@@ -254,68 +205,53 @@ class RooSetup extends BaseIdeSetup {
* @param {string} projectDir - Project directory
* @param {string} agentName - Agent name (e.g., "fred-commit-poet")
* @param {string} agentPath - Path to compiled agent (relative to project root)
-* @param {Object} metadata - Agent metadata
+* @param {Object} metadata - Agent metadata (unused, kept for compatibility)
* @returns {Object} Installation result
*/
async installCustomAgentLauncher(projectDir, agentName, agentPath, metadata) {
-const roomodesPath = path.join(projectDir, this.configFile);
+const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);
-let existingContent = '';
+await this.ensureDir(rooCommandsDir);

-// Read existing .roomodes file
+const commandName = `bmad-custom-agent-${agentName.toLowerCase()}`;
-if (await this.pathExists(roomodesPath)) {
+const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
-existingContent = await this.readFile(roomodesPath);
-}

-// Create custom agent mode entry
+// Check if command already exists
-const slug = `bmad-custom-${agentName.toLowerCase()}`;
+if (await this.pathExists(commandPath)) {
-const modeEntry = ` - slug: ${slug}
-name: 'BMAD Custom: ${agentName}'
-description: |
-Custom BMAD agent: ${agentName}

-**⚠️ IMPORTANT**: Run @${agentPath} first to load the complete agent!

-This is a launcher for the custom BMAD agent "${agentName}". The agent will follow the persona and instructions from the main agent file.
-prompt: |
-@${agentPath}
-always: false
-permissions: all
-`;

-// Check if mode already exists
-if (existingContent.includes(slug)) {
return {
ide: 'roo',
-path: this.configFile,
+path: path.join(this.configDir, this.commandsDir, `${commandName}.md`),
-command: agentName,
+command: commandName,
type: 'custom-agent-launcher',
alreadyExists: true,
};
}

-// Build final content
+// Read the custom agent file to extract metadata (same as regular agents)
-let finalContent = '';
+const fullAgentPath = path.join(projectDir, agentPath);
-if (existingContent) {
+const content = await this.readFile(fullAgentPath);
-// Find customModes section or add it
-if (existingContent.includes('customModes:')) {
-// Append to existing customModes
-finalContent = existingContent + modeEntry;
-} else {
-// Add customModes section
-finalContent = existingContent.trim() + '\n\ncustomModes:\n' + modeEntry;
-}
-} else {
-// Create new .roomodes file with customModes
-finalContent = 'customModes:\n' + modeEntry;
-}

-// Write .roomodes file
+// Extract metadata from agent content
-await this.writeFile(roomodesPath, finalContent);
+const titleMatch = content.match(/title="([^"]+)"/);
+const title = titleMatch ? titleMatch[1] : this.formatTitle(agentName);

+const iconMatch = content.match(/icon="([^"]+)"/);
+const icon = iconMatch ? iconMatch[1] : '🤖';

+const whenToUseMatch = content.match(/whenToUse="([^"]+)"/);
+const whenToUse = whenToUseMatch ? whenToUseMatch[1] : `Use for ${title} tasks`;

+// Use unified method without extra content (clean)
+await this.createAgentCommandFile(commandPath, {
+name: title,
+description: whenToUse,
+agentPath: agentPath,
+icon: icon,
+});

return {
ide: 'roo',
-path: this.configFile,
+path: path.join(this.configDir, this.commandsDir, `${commandName}.md`),
-command: slug,
+command: commandName,
type: 'custom-agent-launcher',
};
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Skip README files and other non-agent files
|
||||||
|
if (file.toLowerCase() === 'readme.md' || file.toLowerCase().startsWith('readme-')) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
if (file.includes('.customize.')) {
|
if (file.includes('.customize.')) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
@ -101,6 +106,11 @@ async function getAgentsFromDir(dirPath, moduleName) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Only include files that have agent-specific content (compiled agents have <agent> tag)
|
||||||
|
if (!content.includes('<agent')) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
agents.push({
|
agents.push({
|
||||||
path: filePath,
|
path: filePath,
|
||||||
name: file.replace('.md', ''),
|
name: file.replace('.md', ''),
|
||||||
|
|
|
||||||
|
|
@ -25,16 +25,16 @@ class WorkflowCommandGenerator {
|
||||||
return { generated: 0 };
|
return { generated: 0 };
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter to only standalone workflows
|
// ALL workflows now generate commands - no standalone filtering
|
||||||
const standaloneWorkflows = workflows.filter((w) => w.standalone === 'true' || w.standalone === true);
|
const allWorkflows = workflows;
|
||||||
|
|
||||||
// Base commands directory
|
// Base commands directory
|
||||||
const baseCommandsDir = path.join(projectDir, '.claude', 'commands', 'bmad');
|
const baseCommandsDir = path.join(projectDir, '.claude', 'commands', 'bmad');
|
||||||
|
|
||||||
let generatedCount = 0;
|
let generatedCount = 0;
|
||||||
|
|
||||||
// Generate a command file for each standalone workflow, organized by module
|
// Generate a command file for each workflow, organized by module
|
||||||
for (const workflow of standaloneWorkflows) {
|
for (const workflow of allWorkflows) {
|
||||||
const moduleWorkflowsDir = path.join(baseCommandsDir, workflow.module, 'workflows');
|
const moduleWorkflowsDir = path.join(baseCommandsDir, workflow.module, 'workflows');
|
||||||
await fs.ensureDir(moduleWorkflowsDir);
|
await fs.ensureDir(moduleWorkflowsDir);
|
||||||
|
|
||||||
|
|
@ -46,7 +46,7 @@ class WorkflowCommandGenerator {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Also create a workflow launcher README in each module
|
// Also create a workflow launcher README in each module
|
||||||
const groupedWorkflows = this.groupWorkflowsByModule(standaloneWorkflows);
|
const groupedWorkflows = this.groupWorkflowsByModule(allWorkflows);
|
||||||
await this.createModuleWorkflowLaunchers(baseCommandsDir, groupedWorkflows);
|
await this.createModuleWorkflowLaunchers(baseCommandsDir, groupedWorkflows);
|
||||||
|
|
||||||
return { generated: generatedCount };
|
return { generated: generatedCount };
|
||||||
|
|
@ -59,12 +59,12 @@ class WorkflowCommandGenerator {
|
||||||
return { artifacts: [], counts: { commands: 0, launchers: 0 } };
|
return { artifacts: [], counts: { commands: 0, launchers: 0 } };
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter to only standalone workflows
|
// ALL workflows now generate commands - no standalone filtering
|
||||||
const standaloneWorkflows = workflows.filter((w) => w.standalone === 'true' || w.standalone === true);
|
const allWorkflows = workflows;
|
||||||
|
|
||||||
const artifacts = [];
|
const artifacts = [];
|
||||||
|
|
||||||
for (const workflow of standaloneWorkflows) {
|
for (const workflow of allWorkflows) {
|
||||||
const commandContent = await this.generateCommandContent(workflow, bmadDir);
|
const commandContent = await this.generateCommandContent(workflow, bmadDir);
|
||||||
artifacts.push({
|
artifacts.push({
|
||||||
type: 'workflow-command',
|
type: 'workflow-command',
|
||||||
|
|
@ -75,7 +75,7 @@ class WorkflowCommandGenerator {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
const groupedWorkflows = this.groupWorkflowsByModule(standaloneWorkflows);
|
const groupedWorkflows = this.groupWorkflowsByModule(allWorkflows);
|
||||||
for (const [module, launcherContent] of Object.entries(this.buildModuleWorkflowLaunchers(groupedWorkflows))) {
|
for (const [module, launcherContent] of Object.entries(this.buildModuleWorkflowLaunchers(groupedWorkflows))) {
|
||||||
artifacts.push({
|
artifacts.push({
|
||||||
type: 'workflow-launcher',
|
type: 'workflow-launcher',
|
||||||
|
|
@ -89,7 +89,7 @@ class WorkflowCommandGenerator {
|
||||||
return {
|
return {
|
||||||
artifacts,
|
artifacts,
|
||||||
counts: {
|
counts: {
|
||||||
commands: standaloneWorkflows.length,
|
commands: allWorkflows.length,
|
||||||
launchers: Object.keys(groupedWorkflows).length,
|
launchers: Object.keys(groupedWorkflows).length,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
@ -99,8 +99,13 @@ class WorkflowCommandGenerator {
|
||||||
* Generate command content for a workflow
|
* Generate command content for a workflow
|
||||||
*/
|
*/
|
||||||
async generateCommandContent(workflow, bmadDir) {
|
async generateCommandContent(workflow, bmadDir) {
|
||||||
// Load the template
|
// Determine template based on workflow file type
|
||||||
const template = await fs.readFile(this.templatePath, 'utf8');
|
const isMarkdownWorkflow = workflow.path.endsWith('workflow.md');
|
||||||
|
const templateName = isMarkdownWorkflow ? 'workflow-commander.md' : 'workflow-command-template.md';
|
||||||
|
const templatePath = path.join(path.dirname(this.templatePath), templateName);
|
||||||
|
|
||||||
|
// Load the appropriate template
|
||||||
|
const template = await fs.readFile(templatePath, 'utf8');
|
||||||
|
|
||||||
// Convert source path to installed path
|
// Convert source path to installed path
|
||||||
// From: /Users/.../src/modules/bmm/workflows/.../workflow.yaml
|
// From: /Users/.../src/modules/bmm/workflows/.../workflow.yaml
|
||||||
|
|
@ -127,9 +132,7 @@ class WorkflowCommandGenerator {
|
||||||
.replaceAll('{{description}}', workflow.description)
|
.replaceAll('{{description}}', workflow.description)
|
||||||
.replaceAll('{{workflow_path}}', workflowPath)
|
.replaceAll('{{workflow_path}}', workflowPath)
|
||||||
.replaceAll('{bmad_folder}', this.bmadFolderName)
|
.replaceAll('{bmad_folder}', this.bmadFolderName)
|
||||||
.replaceAll('{*bmad_folder*}', '{bmad_folder}')
|
.replaceAll('{*bmad_folder*}', '{bmad_folder}');
|
||||||
.replaceAll('{{interactive}}', workflow.interactive)
|
|
||||||
.replaceAll('{{author}}', workflow.author || 'BMAD');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,5 @@
|
||||||
|
---
|
||||||
|
description: '{{description}}'
|
||||||
|
---
|
||||||
|
|
||||||
|
IT IS CRITICAL THAT YOU FOLLOW THIS COMMAND: LOAD the FULL @{{workflow_path}}, READ its entire contents and follow its directions exactly!
|
||||||
|
|
@ -482,10 +482,39 @@ function compileAgent(yamlContent, answers = {}, agentName = '', targetPath = ''
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process TTS injection markers in content
|
||||||
|
* @param {string} content - Content to process
|
||||||
|
* @param {boolean} enableAgentVibes - Whether AgentVibes is enabled
|
||||||
|
* @returns {Object} { content: string, hadInjection: boolean }
|
||||||
|
*/
|
||||||
|
function processTTSInjectionPoints(content, enableAgentVibes) {
|
||||||
|
const hasAgentTTS = content.includes('<!-- TTS_INJECTION:agent-tts -->');
|
||||||
|
|
||||||
|
if (enableAgentVibes && hasAgentTTS) {
|
||||||
|
// Replace agent-tts injection marker with TTS rule
|
||||||
|
content = content.replaceAll(
|
||||||
|
'<!-- TTS_INJECTION:agent-tts -->',
|
||||||
|
`- When responding to user messages, speak your responses using TTS:
|
||||||
|
Call: \`.claude/hooks/bmad-speak.sh '{agent-id}' '{response-text}'\` after each response
|
||||||
|
Replace {agent-id} with YOUR agent ID from <agent id="..."> tag at top of this file
|
||||||
|
Replace {response-text} with the text you just output to the user
|
||||||
|
IMPORTANT: Use single quotes as shown - do NOT escape special characters like ! or $ inside single quotes
|
||||||
|
Run in background (&) to avoid blocking`,
|
||||||
|
);
|
||||||
|
return { content, hadInjection: true };
|
||||||
|
} else if (!enableAgentVibes && hasAgentTTS) {
|
||||||
|
// Strip injection markers when disabled
|
||||||
|
content = content.replaceAll(/<!-- TTS_INJECTION:agent-tts -->\n?/g, '');
|
||||||
|
}
|
||||||
|
|
||||||
|
return { content, hadInjection: false };
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Compile agent file to .md
|
* Compile agent file to .md
|
||||||
* @param {string} yamlPath - Path to agent YAML file
|
* @param {string} yamlPath - Path to agent YAML file
|
||||||
* @param {Object} options - { answers: {}, outputPath: string }
|
* @param {Object} options - { answers: {}, outputPath: string, enableAgentVibes: boolean }
|
||||||
* @returns {Object} Compilation result
|
* @returns {Object} Compilation result
|
||||||
*/
|
*/
|
||||||
function compileAgentFile(yamlPath, options = {}) {
|
function compileAgentFile(yamlPath, options = {}) {
|
||||||
|
|
@ -501,13 +530,24 @@ function compileAgentFile(yamlPath, options = {}) {
|
||||||
outputPath = path.join(dir, `${basename}.md`);
|
outputPath = path.join(dir, `${basename}.md`);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Process TTS injection points if enableAgentVibes option is provided
|
||||||
|
let xml = result.xml;
|
||||||
|
let ttsInjected = false;
|
||||||
|
if (options.enableAgentVibes !== undefined) {
|
||||||
|
const ttsResult = processTTSInjectionPoints(xml, options.enableAgentVibes);
|
||||||
|
xml = ttsResult.content;
|
||||||
|
ttsInjected = ttsResult.hadInjection;
|
||||||
|
}
|
||||||
|
|
||||||
// Write compiled XML
|
// Write compiled XML
|
||||||
fs.writeFileSync(outputPath, result.xml, 'utf8');
|
fs.writeFileSync(outputPath, xml, 'utf8');
|
||||||
|
|
||||||
return {
|
return {
|
||||||
...result,
|
...result,
|
||||||
|
xml,
|
||||||
outputPath,
|
outputPath,
|
||||||
sourcePath: yamlPath,
|
sourcePath: yamlPath,
|
||||||
|
ttsInjected,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -363,11 +363,60 @@ class UI {
|
||||||
`🔧 Tools Configured: ${result.ides?.length > 0 ? result.ides.join(', ') : 'none'}`,
|
`🔧 Tools Configured: ${result.ides?.length > 0 ? result.ides.join(', ') : 'none'}`,
|
||||||
];
|
];
|
||||||
|
|
||||||
|
// Add AgentVibes TTS info if enabled
|
||||||
|
if (result.agentVibesEnabled) {
|
||||||
|
summary.push(`🎤 AgentVibes TTS: Enabled`);
|
||||||
|
}
|
||||||
|
|
||||||
CLIUtils.displayBox(summary.join('\n\n'), {
|
CLIUtils.displayBox(summary.join('\n\n'), {
|
||||||
borderColor: 'green',
|
borderColor: 'green',
|
||||||
borderStyle: 'round',
|
borderStyle: 'round',
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// Display TTS injection details if present
|
||||||
|
if (result.ttsInjectedFiles && result.ttsInjectedFiles.length > 0) {
|
||||||
|
console.log('\n' + chalk.cyan.bold('═══════════════════════════════════════════════════'));
|
||||||
|
console.log(chalk.cyan.bold(' AgentVibes TTS Injection Summary'));
|
||||||
|
console.log(chalk.cyan.bold('═══════════════════════════════════════════════════\n'));
|
||||||
|
|
||||||
|
// Explain what TTS injection is
|
||||||
|
console.log(chalk.white.bold('What is TTS Injection?\n'));
|
||||||
|
console.log(chalk.dim(' TTS (Text-to-Speech) injection adds voice instructions to BMAD agents,'));
|
||||||
|
console.log(chalk.dim(' enabling them to speak their responses aloud using AgentVibes.\n'));
|
||||||
|
console.log(chalk.dim(' Example: When you activate the PM agent, it will greet you with'));
|
||||||
|
console.log(chalk.dim(' spoken audio like "Hey! I\'m your Project Manager. How can I help?"\n'));
|
||||||
|
|
||||||
|
console.log(chalk.green(`✅ TTS injection applied to ${result.ttsInjectedFiles.length} file(s):\n`));
|
||||||
|
|
||||||
|
// Group by type
|
||||||
|
const partyModeFiles = result.ttsInjectedFiles.filter((f) => f.type === 'party-mode');
|
||||||
|
const agentTTSFiles = result.ttsInjectedFiles.filter((f) => f.type === 'agent-tts');
|
||||||
|
|
||||||
|
if (partyModeFiles.length > 0) {
|
||||||
|
console.log(chalk.yellow(' Party Mode (multi-agent conversations):'));
|
||||||
|
for (const file of partyModeFiles) {
|
||||||
|
console.log(chalk.dim(` • ${file.path}`));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (agentTTSFiles.length > 0) {
|
||||||
|
console.log(chalk.yellow(' Agent TTS (individual agent voices):'));
|
||||||
|
for (const file of agentTTSFiles) {
|
||||||
|
console.log(chalk.dim(` • ${file.path}`));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Show backup info and restore command
|
||||||
|
console.log('\n' + chalk.white.bold('Backups & Recovery:\n'));
|
||||||
|
console.log(chalk.dim(' Pre-injection backups are stored in:'));
|
||||||
|
console.log(chalk.cyan(' ~/.bmad-tts-backups/\n'));
|
||||||
|
console.log(chalk.dim(' To restore original files (removes TTS instructions):'));
|
||||||
|
console.log(chalk.cyan(` bmad-tts-injector.sh --restore ${result.path}\n`));
|
||||||
|
|
||||||
|
console.log(chalk.cyan('💡 BMAD agents will now speak when activated!'));
|
||||||
|
console.log(chalk.dim(' Ensure AgentVibes is installed: https://agentvibes.org'));
|
||||||
|
}
|
||||||
|
|
||||||
console.log('\n' + chalk.green.bold('✨ BMAD is ready to use!'));
|
console.log('\n' + chalk.green.bold('✨ BMAD is ready to use!'));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -0,0 +1,356 @@
#!/bin/bash
#
# Visual SVG Validation Script
#
# Compares old vs new SVG files using browser-accurate rendering (Playwright)
# and pixel-level comparison (ImageMagick), then generates a prompt for AI analysis.
#
# Usage: ./tools/validate-svg-changes.sh <path-to-svg>
#

set -e

SVG_FILE="${1:-src/modules/bmm/docs/images/workflow-method-greenfield.svg}"
TMP_DIR="/tmp/svg-validation-$$"

echo "🎨 Visual SVG Validation"
echo ""

# Check if file exists
if [ ! -f "$SVG_FILE" ]; then
echo "❌ Error: SVG file not found: $SVG_FILE"
exit 1
fi

# Check for ImageMagick
if ! command -v magick &> /dev/null; then
echo "❌ ImageMagick not found"
echo ""
echo "Install with:"
echo " brew install imagemagick"
echo ""
exit 1
fi

echo "✓ ImageMagick found"

# Check for Node.js
if ! command -v node &> /dev/null; then
echo "❌ Node.js not found"
exit 1
fi

echo "✓ Node.js found ($(node -v))"

# Check for Playwright (local install)
if [ ! -d "node_modules/playwright" ]; then
echo ""
echo "📦 Playwright not found locally"
echo "Installing Playwright (local to this project, no package.json changes)..."
echo ""
npm install --no-save playwright
echo ""
echo "✓ Playwright installed"
else
echo "✓ Playwright found"
fi

echo ""
echo "🔄 Rendering SVGs to PNG..."
echo ""

# Create temp directory
mkdir -p "$TMP_DIR"

# Extract old SVG from git
git show HEAD:"$SVG_FILE" > "$TMP_DIR/old.svg" 2>/dev/null || {
echo "❌ Could not extract old SVG from git HEAD"
echo " Make sure you have uncommitted changes to compare"
exit 1
}

# Copy new SVG
cp "$SVG_FILE" "$TMP_DIR/new.svg"

# Create Node.js renderer script in project directory (so it can find node_modules)
cat > "tools/render-svg-temp.js" << 'EOJS'
const { chromium } = require('playwright');
const fs = require('fs');

async function renderSVG(svgPath, pngPath) {
const browser = await chromium.launch({ headless: true });
const page = await browser.newPage();

const svgContent = fs.readFileSync(svgPath, 'utf8');
const widthMatch = svgContent.match(/width="([^"]+)"/);
const heightMatch = svgContent.match(/height="([^"]+)"/);
const width = Math.ceil(parseFloat(widthMatch[1]));
const height = Math.ceil(parseFloat(heightMatch[1]));

const html = `
<!DOCTYPE html>
<html>
<head>
<style>
body { margin: 0; padding: 0; background: white; }
svg { display: block; }
</style>
</head>
<body>${svgContent}</body>
</html>
`;

await page.setContent(html);
await page.setViewportSize({ width, height });
await page.waitForTimeout(1000);
await page.screenshot({ path: pngPath, fullPage: true });
await browser.close();

console.log(`✓ Rendered ${pngPath}`);
}

(async () => {
await renderSVG(process.argv[2], process.argv[3]);
await renderSVG(process.argv[4], process.argv[5]);
})();
EOJS

# Render both SVGs (run from project dir so node_modules is accessible)
node tools/render-svg-temp.js \
"$TMP_DIR/old.svg" "$TMP_DIR/old.png" \
"$TMP_DIR/new.svg" "$TMP_DIR/new.png"

# Clean up temp script
rm tools/render-svg-temp.js

echo ""
echo "🔍 Comparing pixels..."
echo ""

# Compare using ImageMagick
DIFF_OUTPUT=$(magick compare -metric AE "$TMP_DIR/old.png" "$TMP_DIR/new.png" "$TMP_DIR/diff.png" 2>&1 || true)
DIFF_PIXELS=$(echo "$DIFF_OUTPUT" | awk '{print $1}')

# Get image dimensions
DIMENSIONS=$(magick identify -format "%wx%h" "$TMP_DIR/old.png")
WIDTH=$(echo "$DIMENSIONS" | cut -d'x' -f1)
HEIGHT=$(echo "$DIMENSIONS" | cut -d'x' -f2)
TOTAL_PIXELS=$((WIDTH * HEIGHT))

# Calculate percentage
DIFF_PERCENT=$(echo "scale=4; $DIFF_PIXELS / $TOTAL_PIXELS * 100" | bc)

echo "📊 Results:"
echo " Dimensions: ${WIDTH} × ${HEIGHT}"
echo " Total pixels: $(printf "%'d" $TOTAL_PIXELS)"
echo " Different pixels: $(printf "%'d" $DIFF_PIXELS)"
echo " Difference: ${DIFF_PERCENT}%"
echo ""

if (( $(echo "$DIFF_PERCENT < 0.01" | bc -l) )); then
echo "✅ ESSENTIALLY IDENTICAL (< 0.01% difference)"
VERDICT="essentially identical"
elif (( $(echo "$DIFF_PERCENT < 0.1" | bc -l) )); then
echo "⚠️ MINOR DIFFERENCES (< 0.1%)"
VERDICT="minor differences detected"
else
echo "❌ SIGNIFICANT DIFFERENCES (≥ 0.1%)"
VERDICT="significant differences detected"
fi

echo ""
echo "📁 Output files:"
echo " Old render: $TMP_DIR/old.png"
echo " New render: $TMP_DIR/new.png"
echo " Diff image: $TMP_DIR/diff.png"
echo ""

# Generate HTML comparison page
cat > "$TMP_DIR/comparison.html" << 'EOHTML'
<!DOCTYPE html>
<html>
<head>
<title>SVG Comparison</title>
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
background: #f5f5f5;
padding: 20px;
}
.header {
background: white;
padding: 20px;
border-radius: 8px;
margin-bottom: 20px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
h1 { margin-bottom: 10px; color: #333; }
.stats {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 10px;
margin-top: 15px;
}
.stat {
background: #f8f9fa;
padding: 10px;
border-radius: 4px;
}
.stat-label { font-size: 12px; color: #666; text-transform: uppercase; }
.stat-value { font-size: 18px; font-weight: 600; color: #333; margin-top: 4px; }
.container {
display: grid;
grid-template-columns: 1fr 1fr 1fr;
gap: 20px;
margin-bottom: 20px;
}
.panel {
background: white;
padding: 20px;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
h2 {
margin: 0 0 15px 0;
color: #333;
font-size: 18px;
border-bottom: 2px solid #e0e0e0;
padding-bottom: 10px;
}
.image-container {
border: 1px solid #ddd;
background: white;
overflow: auto;
max-height: 600px;
}
img {
display: block;
max-width: 100%;
height: auto;
}
.verdict {
display: inline-block;
padding: 4px 12px;
border-radius: 12px;
font-size: 14px;
font-weight: 600;
}
.verdict.good { background: #d4edda; color: #155724; }
.verdict.warning { background: #fff3cd; color: #856404; }
.verdict.bad { background: #f8d7da; color: #721c24; }
</style>
</head>
<body>
<div class="header">
<h1>🎨 SVG Visual Comparison</h1>
<p><strong>File:</strong> FILENAME_PLACEHOLDER</p>
<div class="stats">
<div class="stat">
<div class="stat-label">Dimensions</div>
<div class="stat-value">DIMENSIONS_PLACEHOLDER</div>
</div>
<div class="stat">
<div class="stat-label">Different Pixels</div>
<div class="stat-value">DIFF_PIXELS_PLACEHOLDER</div>
</div>
<div class="stat">
<div class="stat-label">Difference</div>
<div class="stat-value">DIFF_PERCENT_PLACEHOLDER%</div>
</div>
<div class="stat">
<div class="stat-label">Verdict</div>
<div class="stat-value"><span class="verdict VERDICT_CLASS_PLACEHOLDER">VERDICT_PLACEHOLDER</span></div>
</div>
</div>
</div>

<div class="container">
<div class="panel">
<h2>📄 Old (HEAD)</h2>
<div class="image-container">
<img src="old.png" alt="Old SVG">
</div>
</div>

<div class="panel">
<h2>📝 New (Working)</h2>
<div class="image-container">
<img src="new.png" alt="New SVG">
</div>
</div>

<div class="panel">
<h2>🔍 Diff (Red = Changes)</h2>
<div class="image-container">
<img src="diff.png" alt="Diff">
</div>
</div>
</div>
</body>
</html>
EOHTML

# Determine verdict class for styling
if (( $(echo "$DIFF_PERCENT < 0.01" | bc -l) )); then
VERDICT_CLASS="good"
elif (( $(echo "$DIFF_PERCENT < 0.1" | bc -l) )); then
VERDICT_CLASS="warning"
else
VERDICT_CLASS="bad"
fi

# Replace placeholders in HTML
sed -i '' "s|FILENAME_PLACEHOLDER|$SVG_FILE|g" "$TMP_DIR/comparison.html"
sed -i '' "s|DIMENSIONS_PLACEHOLDER|${WIDTH} × ${HEIGHT}|g" "$TMP_DIR/comparison.html"
sed -i '' "s|DIFF_PIXELS_PLACEHOLDER|$(printf "%'d" $DIFF_PIXELS) / $(printf "%'d" $TOTAL_PIXELS)|g" "$TMP_DIR/comparison.html"
sed -i '' "s|DIFF_PERCENT_PLACEHOLDER|$DIFF_PERCENT|g" "$TMP_DIR/comparison.html"
sed -i '' "s|VERDICT_PLACEHOLDER|$VERDICT|g" "$TMP_DIR/comparison.html"
sed -i '' "s|VERDICT_CLASS_PLACEHOLDER|$VERDICT_CLASS|g" "$TMP_DIR/comparison.html"

echo "✓ Generated comparison page: $TMP_DIR/comparison.html"
echo ""
echo "🌐 Opening comparison in browser..."
open "$TMP_DIR/comparison.html"
echo ""

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "🤖 AI VISUAL ANALYSIS PROMPT"
echo ""
echo "Copy and paste this into Gemini/Claude with the diff image attached:"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
cat << PROMPT

I've made changes to an Excalidraw diagram SVG file. Please analyze the visual differences between the old and new versions.

**Automated Analysis:**
- Dimensions: ${WIDTH} × ${HEIGHT} pixels
- Different pixels: $(printf "%'d" $DIFF_PIXELS) out of $(printf "%'d" $TOTAL_PIXELS)
- Difference: ${DIFF_PERCENT}%
- Verdict: ${VERDICT}

**Attached Image:**
The attached image shows the pixel-level diff (red = differences).

**Questions:**
1. Are the differences purely anti-aliasing/rendering artifacts, or are there actual content changes?
2. If there are content changes, what specifically changed?
3. Do the changes align with the intent to remove zombie Excalidraw elements (elements marked as deleted but left in the JSON)?
4. Is this safe to commit?

**Context:**
- File: $SVG_FILE
- Changes: Removed 191 lines of zombie JSON from Excalidraw source
- Expected: Visual output should be identical (zombie elements were already marked as deleted)

PROMPT
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "📎 Attach this file to your AI prompt:"
echo " $TMP_DIR/diff.png"
echo ""
echo "💡 To open the diff image:"
echo " open $TMP_DIR/diff.png"
echo ""