From 53220420a5c382fa0822f28975e216bc15443c70 Mon Sep 17 00:00:00 2001 From: Alex Verkhovsky Date: Sat, 31 Jan 2026 16:25:31 -0700 Subject: [PATCH 1/9] fix: add disable-model-invocation to all generated slash commands (#1501) Prevents Claude from auto-invoking BMad skills without explicit user request. Adds disable-model-invocation: true frontmatter to all command templates and inline generators for Claude Code and Codex. Co-authored-by: Brian --- tools/cli/installers/lib/ide/_config-driven.js | 2 ++ tools/cli/installers/lib/ide/codex.js | 1 + .../installers/lib/ide/shared/task-tool-command-generator.js | 1 + .../cli/installers/lib/ide/templates/agent-command-template.md | 1 + .../cli/installers/lib/ide/templates/combined/default-agent.md | 1 + .../lib/ide/templates/combined/default-workflow-yaml.md | 1 + .../installers/lib/ide/templates/combined/default-workflow.md | 1 + .../installers/lib/ide/templates/workflow-command-template.md | 1 + tools/cli/installers/lib/ide/templates/workflow-commander.md | 1 + 9 files changed, 10 insertions(+) diff --git a/tools/cli/installers/lib/ide/_config-driven.js b/tools/cli/installers/lib/ide/_config-driven.js index 022bff7b..87be7300 100644 --- a/tools/cli/installers/lib/ide/_config-driven.js +++ b/tools/cli/installers/lib/ide/_config-driven.js @@ -283,6 +283,7 @@ class ConfigDrivenIdeSetup extends BaseIdeSetup { return `--- name: '{{name}}' description: '{{description}}' +disable-model-invocation: true --- You must fully embody this agent's persona and follow all activation instructions exactly as specified. @@ -297,6 +298,7 @@ You must fully embody this agent's persona and follow all activation instruction return `--- name: '{{name}}' description: '{{description}}' +disable-model-invocation: true --- # {{name}} diff --git a/tools/cli/installers/lib/ide/codex.js b/tools/cli/installers/lib/ide/codex.js index 60250a39..5cd503e2 100644 --- a/tools/cli/installers/lib/ide/codex.js +++ b/tools/cli/installers/lib/ide/codex.js @@ -411,6 +411,7 @@ class CodexSetup extends BaseIdeSetup { const launcherContent = `--- name: '${agentName}' description: '${agentName} agent' +disable-model-invocation: true --- You must fully embody this agent's persona and follow all activation instructions exactly as specified. NEVER break character until given an exit command. diff --git a/tools/cli/installers/lib/ide/shared/task-tool-command-generator.js b/tools/cli/installers/lib/ide/shared/task-tool-command-generator.js index 6b90de9f..a0c4bcf8 100644 --- a/tools/cli/installers/lib/ide/shared/task-tool-command-generator.js +++ b/tools/cli/installers/lib/ide/shared/task-tool-command-generator.js @@ -72,6 +72,7 @@ class TaskToolCommandGenerator { return `--- description: '${description.replaceAll("'", "''")}' +disable-model-invocation: true --- # ${item.displayName || item.name} diff --git a/tools/cli/installers/lib/ide/templates/agent-command-template.md b/tools/cli/installers/lib/ide/templates/agent-command-template.md index 89713631..90e176a0 100644 --- a/tools/cli/installers/lib/ide/templates/agent-command-template.md +++ b/tools/cli/installers/lib/ide/templates/agent-command-template.md @@ -1,6 +1,7 @@ --- name: '{{name}}' description: '{{description}}' +disable-model-invocation: true --- You must fully embody this agent's persona and follow all activation instructions exactly as specified. NEVER break character until given an exit command. 
diff --git a/tools/cli/installers/lib/ide/templates/combined/default-agent.md b/tools/cli/installers/lib/ide/templates/combined/default-agent.md index f8ad9380..17a0be4b 100644 --- a/tools/cli/installers/lib/ide/templates/combined/default-agent.md +++ b/tools/cli/installers/lib/ide/templates/combined/default-agent.md @@ -1,6 +1,7 @@ --- name: '{{name}}' description: '{{description}}' +disable-model-invocation: true --- You must fully embody this agent's persona and follow all activation instructions exactly as specified. NEVER break character until given an exit command. diff --git a/tools/cli/installers/lib/ide/templates/combined/default-workflow-yaml.md b/tools/cli/installers/lib/ide/templates/combined/default-workflow-yaml.md index eca90437..2a5e49b8 100644 --- a/tools/cli/installers/lib/ide/templates/combined/default-workflow-yaml.md +++ b/tools/cli/installers/lib/ide/templates/combined/default-workflow-yaml.md @@ -1,6 +1,7 @@ --- name: '{{name}}' description: '{{description}}' +disable-model-invocation: true --- IT IS CRITICAL THAT YOU FOLLOW THESE STEPS - while staying in character as the current agent persona you may have loaded: diff --git a/tools/cli/installers/lib/ide/templates/combined/default-workflow.md b/tools/cli/installers/lib/ide/templates/combined/default-workflow.md index afb0dea5..8c4fa818 100644 --- a/tools/cli/installers/lib/ide/templates/combined/default-workflow.md +++ b/tools/cli/installers/lib/ide/templates/combined/default-workflow.md @@ -1,6 +1,7 @@ --- name: '{{name}}' description: '{{description}}' +disable-model-invocation: true --- IT IS CRITICAL THAT YOU FOLLOW THIS COMMAND: LOAD the FULL @{project-root}/{{bmadFolderName}}/{{path}}, READ its entire contents and follow its directions exactly! diff --git a/tools/cli/installers/lib/ide/templates/workflow-command-template.md b/tools/cli/installers/lib/ide/templates/workflow-command-template.md index 5c9e436c..472c1553 100644 --- a/tools/cli/installers/lib/ide/templates/workflow-command-template.md +++ b/tools/cli/installers/lib/ide/templates/workflow-command-template.md @@ -1,5 +1,6 @@ --- description: '{{description}}' +disable-model-invocation: true --- IT IS CRITICAL THAT YOU FOLLOW THESE STEPS - while staying in character as the current agent persona you may have loaded: diff --git a/tools/cli/installers/lib/ide/templates/workflow-commander.md b/tools/cli/installers/lib/ide/templates/workflow-commander.md index 3645c1a2..d49c8319 100644 --- a/tools/cli/installers/lib/ide/templates/workflow-commander.md +++ b/tools/cli/installers/lib/ide/templates/workflow-commander.md @@ -1,5 +1,6 @@ --- description: '{{description}}' +disable-model-invocation: true --- IT IS CRITICAL THAT YOU FOLLOW THIS COMMAND: LOAD the FULL @{{workflow_path}}, READ its entire contents and follow its directions exactly! From 7fcfd4c1b8321e83e2bf5eb607be0fd90be546e9 Mon Sep 17 00:00:00 2001 From: Michael Pursifull Date: Sat, 31 Jan 2026 17:26:41 -0600 Subject: [PATCH 2/9] fix: correct party-mode workflow file extension in workflow.xml (#1499) The party-mode workflow reference uses .yaml extension but the actual file is workflow.md. This broken reference has been present since alpha.17. 
Fixes #1212 Co-authored-by: Brian --- src/core/tasks/workflow.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/tasks/workflow.xml b/src/core/tasks/workflow.xml index 137b6dd5..fcf6f96b 100644 --- a/src/core/tasks/workflow.xml +++ b/src/core/tasks/workflow.xml @@ -81,7 +81,7 @@ Continue to next step - Start the party-mode workflow {project-root}/_bmad/core/workflows/party-mode/workflow.yaml + Start the party-mode workflow {project-root}/_bmad/core/workflows/party-mode/workflow.md From 8c59fb96a7b71f856683aa60a28210ae7b5f62d2 Mon Sep 17 00:00:00 2001 From: Ramiz Date: Sun, 1 Feb 2026 00:27:57 +0100 Subject: [PATCH 3/9] Enable sidecar for tech writer agent (#1487) fix Pagie hasSidecar --- src/bmm/agents/tech-writer/tech-writer.agent.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bmm/agents/tech-writer/tech-writer.agent.yaml b/src/bmm/agents/tech-writer/tech-writer.agent.yaml index d1bb7e91..43f376c1 100644 --- a/src/bmm/agents/tech-writer/tech-writer.agent.yaml +++ b/src/bmm/agents/tech-writer/tech-writer.agent.yaml @@ -7,7 +7,7 @@ agent: title: Technical Writer icon: 📚 module: bmm - hasSidecar: false + hasSidecar: true persona: role: Technical Documentation Specialist + Knowledge Curator From 7afe018f82b401d741f84ccec5f40603d1d08e6d Mon Sep 17 00:00:00 2001 From: Michael Pursifull Date: Sat, 31 Jan 2026 19:27:17 -0600 Subject: [PATCH 4/9] fix: correct relative path to validation workflow in step-e-04-complete (#1498) The validationWorkflow reference uses ./steps-v/step-v-01-discovery.md but steps-v/ is a sibling of steps-e/, not a child. Corrected to ../steps-v/step-v-01-discovery.md. Fixes #1496 Co-authored-by: Brian --- .../2-plan-workflows/create-prd/steps-e/step-e-04-complete.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bmm/workflows/2-plan-workflows/create-prd/steps-e/step-e-04-complete.md b/src/bmm/workflows/2-plan-workflows/create-prd/steps-e/step-e-04-complete.md index 733f1a52..5d681fee 100644 --- a/src/bmm/workflows/2-plan-workflows/create-prd/steps-e/step-e-04-complete.md +++ b/src/bmm/workflows/2-plan-workflows/create-prd/steps-e/step-e-04-complete.md @@ -4,7 +4,7 @@ description: 'Complete & Validate - Present options for next steps including ful # File references (ONLY variables used in this step) prdFile: '{prd_file_path}' -validationWorkflow: './steps-v/step-v-01-discovery.md' +validationWorkflow: '../steps-v/step-v-01-discovery.md' --- # Step E-4: Complete & Validate From 0a7329ff235b82f1485dd04f836788e62aab225c Mon Sep 17 00:00:00 2001 From: Michael Pursifull Date: Sat, 31 Jan 2026 19:27:48 -0600 Subject: [PATCH 5/9] fix: correct relative path to prd-purpose.md in step-11-polish (#1497) The purposeFile reference uses ./data/prd-purpose.md but data/ is a sibling of steps-c/, not a child. Corrected to ../data/prd-purpose.md. 
Fixes #1495 --- .../2-plan-workflows/create-prd/steps-c/step-11-polish.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bmm/workflows/2-plan-workflows/create-prd/steps-c/step-11-polish.md b/src/bmm/workflows/2-plan-workflows/create-prd/steps-c/step-11-polish.md index 23200915..70bf198c 100644 --- a/src/bmm/workflows/2-plan-workflows/create-prd/steps-c/step-11-polish.md +++ b/src/bmm/workflows/2-plan-workflows/create-prd/steps-c/step-11-polish.md @@ -5,7 +5,7 @@ description: 'Optimize and polish the complete PRD document for flow, coherence, # File References nextStepFile: './step-12-complete.md' outputFile: '{planning_artifacts}/prd.md' -purposeFile: './data/prd-purpose.md' +purposeFile: '../data/prd-purpose.md' # Task References advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml' From d45eff15bf31bcbd2eee79751a34e57682e31bc9 Mon Sep 17 00:00:00 2001 From: Brian Madison Date: Sat, 31 Jan 2026 18:19:12 -0600 Subject: [PATCH 6/9] gh-skill in progress --- .claude/skills/gh-triage/SKILL.md | 60 ++ .claude/skills/gh-triage/scripts/gh_triage.py | 713 ++++++++++++++++++ .github/ISSUE_TEMPLATE/bug-report.yaml | 124 +++ .github/ISSUE_TEMPLATE/documentation.yaml | 55 ++ ...{feature_request.md => feature-request.md} | 0 .gitignore | 2 +- 6 files changed, 953 insertions(+), 1 deletion(-) create mode 100644 .claude/skills/gh-triage/SKILL.md create mode 100755 .claude/skills/gh-triage/scripts/gh_triage.py create mode 100644 .github/ISSUE_TEMPLATE/bug-report.yaml create mode 100644 .github/ISSUE_TEMPLATE/documentation.yaml rename .github/ISSUE_TEMPLATE/{feature_request.md => feature-request.md} (100%) diff --git a/.claude/skills/gh-triage/SKILL.md b/.claude/skills/gh-triage/SKILL.md new file mode 100644 index 00000000..a198fc0d --- /dev/null +++ b/.claude/skills/gh-triage/SKILL.md @@ -0,0 +1,60 @@ +--- +name: gh-triage +description: Fetch all GitHub issues via gh CLI and provide consolidated AI-powered analysis with clustering, prioritization, and actionable insights. Use for issue triage, backlog cleanup, or when user mentions "issues", "triage", or "backlog". +license: MIT +metadata: + author: bmad-code-org + version: "2.1.0" + anthropic-internal: Core team issue triage tool for BMad Method repositories + min-github-cli-version: "2.0" +compatibility: Requires gh CLI, Python 3.8+, and git repository +--- + +# GitHub Issue Triage + +**IMPORTANT:** Never include time or effort estimates in output or recommendations. + +## What This Does + +1. **Fetch all issues** from repository via gh CLI (configurable: open/closed/all) +2. **Extract data** into structured format (JSON + markdown tables) +3. **Generate AI analysis** with: + - Issue clustering by theme + - Priority recommendations + - Actionable insights + - Cross-repo detection + - Cleanup candidates + +## Steps + +```bash +# 1. Navigate to scripts directory +cd .claude/skills/gh-triage/scripts + +# 2. Run the triage tool (outputs to _bmad-output/triage-reports/) +python3 gh_triage.py --state open + +# 3. 
Review the generated report +cat _bmad-output/triage-reports/triage-*.md +``` + +## Command Reference + +| Parameter | Description | Default | +| ---------------- | ------------------------------------------ | -------------------------------------------------- | +| `--repo` | Repository (auto-detected from git remote) | current repo | +| `--state` | Filter: `all`, `open`, `closed` | `open` | +| `--focus` | Filter by keywords in title/body | none | +| `--output`, `-o` | Save output to file | `_bmad-output/triage-reports/triage-YYYY-MM-DD.md` | +| `--json` | Output as JSON instead of markdown | false (outputs to stdout) | +| `--limit` | Max issues to fetch | 1000 | + +## Output + +All reports automatically save to `_bmad-output/triage-reports/` with: +- Summary statistics +- Issue clusters by theme +- Priority matrix +- Actionable recommendations +- Cross-repo issues with close commands +- Cleanup candidates (duplicates, stale, outdated) diff --git a/.claude/skills/gh-triage/scripts/gh_triage.py b/.claude/skills/gh-triage/scripts/gh_triage.py new file mode 100755 index 00000000..e73da756 --- /dev/null +++ b/.claude/skills/gh-triage/scripts/gh_triage.py @@ -0,0 +1,713 @@ +#!/usr/bin/env python3 +""" +GitHub Issue Triage Tool + +Fetches, categorizes, and groups GitHub issues for efficient triage. +Optimized for large datasets with parallel processing support. + +IMPORTANT: Never provide time, date, or effort estimates in output. +AI execution speed varies greatly from human timelines. +Focus on what needs to be done, not how long it takes. +""" + +import argparse +import json +import os +import re +import subprocess +from collections import defaultdict +from dataclasses import dataclass, field +from datetime import datetime, timedelta, timezone +from typing import List, Dict, Any, Optional, Set, Tuple +from enum import Enum +from difflib import SequenceMatcher +import re + + +class Category(Enum): + """Issue categories""" + BUG = "bug" + FEATURE = "feature" + ENHANCEMENT = "enhancement" + DOCUMENTATION = "documentation" + PERFORMANCE = "performance" + SECURITY = "security" + QUESTION = "question" + REFACTOR = "refactor" + TECH_DEBT = "tech-debt" + OTHER = "other" + + +class Priority(Enum): + """Priority levels""" + CRITICAL = "critical" + HIGH = "high" + MEDIUM = "medium" + LOW = "low" + INFORMATIONAL = "informational" + + +class TriagingStatus(Enum): + """Triage status""" + NEEDS_TRIAGE = "needs-triage" + READY_FOR_DEV = "ready-for-dev" + BLOCKED = "blocked" + STALE = "stale" + DUPLICATE = "duplicate" + INVALID = "invalid" + IN_PROGRESS = "in-progress" + COMPLETED = "completed" + + +@dataclass +class Issue: + """Represents a GitHub issue with triage metadata""" + number: int + title: str + state: str + author: str + created_at: datetime + updated_at: Optional[datetime] + labels: List[str] + body: Optional[str] + comments: int + category: Category = field(default=Category.OTHER) + priority: Priority = field(default=Priority.MEDIUM) + triage_status: TriagingStatus = field(default=TriagingStatus.NEEDS_TRIAGE) + + @property + def age_days(self) -> int: + """Age of issue in days""" + return (datetime.now(timezone.utc) - self.created_at).days + + @property + def days_since_update(self) -> Optional[int]: + """Days since last update""" + if not self.updated_at: + return None + return (datetime.now(timezone.utc) - self.updated_at).days + + @property + def url(self) -> str: + """GitHub URL for the issue""" + # Get from parent triage object + return 
f"https://github.com/{self._repo_url}/issues/{self.number}" + + +class IssueCategorizer: + """Categorizes issues based on content and metadata""" + + # Keywords for categorization + CATEGORY_KEYWORDS = { + Category.BUG: ['bug', 'fix', 'crash', 'error', 'broken', 'fails', 'exception', 'segfault', 'leak'], + Category.FEATURE: ['feature', 'add ', 'implement', 'support for', 'new ', 'request', 'wish'], + Category.ENHANCEMENT: ['enhance', 'improve', 'optimize', 'better', 'enhancement'], + Category.DOCUMENTATION: ['doc', 'readme', 'tutorial', 'guide', 'documentation', 'example', 'comment'], + Category.PERFORMANCE: ['slow', 'performance', 'latency', 'speed', 'fast', 'optimize', 'memory'], + Category.SECURITY: ['security', 'vulnerability', 'exploit', 'xss', 'injection', 'csrf', 'auth'], + Category.QUESTION: ['question', 'how to', 'help', 'confusion', 'unclear', 'clarify'], + Category.REFACTOR: ['refactor', 'clean up', 'reorganize', 'restructure', 'simplify'], + Category.TECH_DEBT: ['tech debt', 'technical debt', 'legacy', 'deprecated', 'cleanup'], + } + + # Priority indicators from labels + PRIORITY_LABELS = { + Priority.CRITICAL: ['critical', 'blocker', 'urgent'], + Priority.HIGH: ['high', 'important', 'priority'], + Priority.MEDIUM: ['medium'], + Priority.LOW: ['low', 'minor', 'trivial'], + } + + def categorize(self, issue: Issue) -> Category: + """Determine category based on title, body, and labels""" + text = f"{issue.title} {issue.body or ''}".lower() + + # Check labels first + for label in issue.labels: + label_lower = label.lower() + if any(cat_str in label_lower for cat_str in ['bug', 'defect']): + return Category.BUG + if any(cat_str in label_lower for cat_str in ['feature', 'enhancement']): + return Category.FEATURE + if 'doc' in label_lower: + return Category.DOCUMENTATION + if 'perf' in label_lower: + return Category.PERFORMANCE + if 'security' in label_lower: + return Category.SECURITY + + # Check keywords + for category, keywords in self.CATEGORY_KEYWORDS.items(): + if any(keyword in text for keyword in keywords): + return category + + return Category.OTHER + + def determine_priority(self, issue: Issue) -> Priority: + """Determine priority based on labels and metadata""" + # Check labels + for label in issue.labels: + label_lower = label.lower() + for priority, keywords in self.PRIORITY_LABELS.items(): + if any(keyword in label_lower for keyword in keywords): + return priority + + # Infer from metadata + if issue.age_days > 90 and issue.state == 'open': + # Old open issues are lower priority + return Priority.LOW + + if issue.comments > 10: + # Highly discussed issues are important + return Priority.HIGH + + return Priority.MEDIUM + + def determine_triage_status(self, issue: Issue) -> TriagingStatus: + """Determine triage status""" + # Check labels + for label in issue.labels: + label_lower = label.lower() + if 'duplicate' in label_lower: + return TriagingStatus.DUPLICATE + if any(x in label_lower for x in ['invalid', 'wontfix', 'wont-fix']): + return TriagingStatus.INVALID + if 'blocked' in label_lower or 'blocking' in label_lower: + return TriagingStatus.BLOCKED + if any(x in label_lower for x in ['in-progress', 'in progress', 'working']): + return TriagingStatus.IN_PROGRESS + + # Check staleness + if issue.state.upper() == 'OPEN': + if issue.days_since_update and issue.days_since_update > 30: + return TriagingStatus.STALE + if not any(label.lower() in ['accepted', 'approved', 'ready'] for label in issue.labels): + return TriagingStatus.NEEDS_TRIAGE + return 
TriagingStatus.READY_FOR_DEV + + return TriagingStatus.COMPLETED + + +class IssueTriage: + """Main triage coordinator""" + + # Module repository mapping + MODULE_REPOS = { + 'builder': { + 'repo': 'bmad-code-org/bmad-builder', + 'names': ['bmb', 'builder', 'bmad-builder', 'agent builder', 'agent-builder'], + 'url': 'https://github.com/bmad-code-org/bmad-builder' + }, + 'tea': { + 'repo': 'bmad-code-org/bmad-method-test-architecture-enterprise', + 'names': ['tea', 'test architect', 'test-architect', 'test architecture'], + 'url': 'https://github.com/bmad-code-org/bmad-method-test-architecture-enterprise' + }, + 'bmgd': { + 'repo': 'bmad-code-org/bmad-module-game-dev-studio', + 'names': ['bmgd', 'game dev', 'game-dev', 'gamedev', 'game dev studio', 'game-dev-studio'], + 'url': 'https://github.com/bmad-code-org/bmad-module-game-dev-studio' + }, + 'cis': { + 'repo': 'bmad-code-org/bmad-module-creative-intelligence-suite', + 'names': ['cis', 'creative intelligence', 'creative-intelligence', 'creative intelligence suite'], + 'url': 'https://github.com/bmad-code-org/bmad-module-creative-intelligence-suite' + }, + } + + def __init__(self, repo: Optional[str] = None, state: str = 'open'): + self.repo = repo or self._detect_repo() + self.state = state + self.categorizer = IssueCategorizer() + self.issues: List[Issue] = [] + self._repo_url = self.repo # Store for issue URL generation + + def _detect_repo(self) -> str: + """Detect repository from git remote""" + try: + result = subprocess.run( + ['git', 'remote', 'get-url', 'origin'], + capture_output=True, + text=True, + check=True + ) + url = result.stdout.strip() + # Convert git@github.com:user/repo.git to user/repo + if url.startswith('git@github.com:'): + return url[15:-4] + if url.startswith('https://github.com/'): + return url[19:-4] + except subprocess.CalledProcessError: + pass + return 'unknown/repo' + + def fetch_issues(self) -> List[Issue]: + """Fetch issues using gh CLI""" + print(f"Fetching issues from {self.repo}...") + + cmd = [ + 'gh', 'issue', 'list', + '--repo', self.repo, + '--state', self.state, + '--limit', '1000', # Fetch up to 1000 issues (default is 30) + '--json', 'number,title,state,author,createdAt,updatedAt,labels,body,comments' + ] + + result = subprocess.run(cmd, capture_output=True, text=True, check=True) + data = json.loads(result.stdout) + + self.issues = [] + for item in data: + labels = [label['name'] for label in item.get('labels', [])] + issue = Issue( + number=item['number'], + title=item['title'], + state=item['state'], + author=item['author']['login'], + created_at=datetime.fromisoformat(item['createdAt'].replace('Z', '+00:00')), + updated_at=datetime.fromisoformat(item['updatedAt'].replace('Z', '+00:00')) if item.get('updatedAt') else None, + labels=labels, + body=item.get('body'), + comments=len(item.get('comments', [])) + ) + issue._repo_url = self._repo_url + self.issues.append(issue) + + print(f"Fetched {len(self.issues)} issues") + return self.issues + + def analyze_issues(self, focus_filter: Optional[str] = None) -> List[Issue]: + """Analyze and categorize all issues""" + print("Analyzing issues...") + + for issue in self.issues: + issue.category = self.categorizer.categorize(issue) + issue.priority = self.categorizer.determine_priority(issue) + issue.triage_status = self.categorizer.determine_triage_status(issue) + + # Apply focus filter if provided + if focus_filter: + keywords = focus_filter.lower().split() + self.issues = [ + issue for issue in self.issues + if any(keyword in f"{issue.title} 
{issue.body or ''}".lower() + for keyword in keywords) + ] + print(f"Filtered to {len(self.issues)} issues matching focus criteria") + + return self.issues + + def find_duplicates(self, threshold: float = 0.7) -> List[Tuple[Issue, Issue, float]]: + """Find potential duplicate issues based on title similarity""" + print("Detecting potential duplicates...") + duplicates = [] + open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] + + for i, issue1 in enumerate(open_issues): + for issue2 in open_issues[i+1:]: + # Skip if already marked as duplicate + if 'duplicate' in [l.lower() for l in issue1.labels + issue2.labels]: + continue + + # Calculate title similarity + similarity = SequenceMatcher(None, issue1.title.lower(), issue2.title.lower()).ratio() + + if similarity >= threshold: + duplicates.append((issue1, issue2, similarity)) + + # Sort by similarity (highest first) + duplicates.sort(key=lambda x: x[2], reverse=True) + return duplicates + + def find_outdated_issues(self, before_date: datetime = None) -> Dict[str, List[Issue]]: + """Find issues that are likely outdated""" + print("Identifying outdated issues...") + + if before_date is None: + # Default to December 1, 2025 + before_date = datetime(2025, 12, 1, tzinfo=timezone.utc) + + outdated = { + 'old_issues': [], + 'v4_issues': [], + 'ancient_stale': [] + } + + open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] + + for issue in open_issues: + # Issues created before cutoff date + if issue.created_at < before_date: + outdated['old_issues'].append(issue) + + # Issues mentioning v4 + text = f"{issue.title} {issue.body or ''}".lower() + if 'v4' in text or 'version 4' in text or 'v 4' in text: + outdated['v4_issues'].append(issue) + + # Issues very old and stale (>90 days since update) + if issue.age_days > 90 and (issue.days_since_update or 0) > 90: + outdated['ancient_stale'].append(issue) + + return outdated + + def generate_bulk_commands(self, issues: List[Issue], label: str) -> List[str]: + """Generate gh CLI commands for bulk operations""" + commands = [] + for issue in issues: + cmd = f"gh issue edit {issue.number} --repo {self.repo} --add-label '{label}'" + commands.append(cmd) + return commands + + def generate_close_commands(self, issues: List[Issue], reason: str) -> List[str]: + """Generate gh CLI commands to close issues with comment""" + commands = [] + for issue in issues: + comment = reason.replace("'", "'\\''") # Escape single quotes + cmd = (f"gh issue close {issue.number} --repo {self.repo} " + f"--comment '{comment}'") + commands.append(cmd) + return commands + + def find_cross_repo_issues(self) -> Dict[str, List[Tuple[Issue, str]]]: + """Find issues that belong in other module repositories""" + print("Detecting cross-repo issues...") + cross_repo = defaultdict(list) + + open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] + + for issue in open_issues: + text = f"{issue.title} {issue.body or ''}".lower() + + for module_key, module_info in self.MODULE_REPOS.items(): + # Check if issue mentions this module + for name in module_info['names']: + # Use word boundaries to avoid false positives + pattern = r'\b' + re.escape(name) + r'\b' + if re.search(pattern, text): + cross_repo[module_key].append((issue, name)) + break # Only add once per issue + + return cross_repo + + def generate_actionable_recommendations(self) -> str: + """Generate actionable recommendations with specific commands""" + lines = [] + open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] + + 
lines.append("## 🎯 Actionable Recommendations\n") + + # Cross-repo issues (show first!) + cross_repo_issues = self.find_cross_repo_issues() + if cross_repo_issues: + total_cross = sum(len(issues) for issues in cross_repo_issues.values()) + lines.append(f"### Issues in Wrong Repository ({total_cross} issues)\n") + lines.append("**High Priority.** These issues should be closed here and opened in the correct module repository:\n") + + for module_key, issues in cross_repo_issues.items(): + if issues: + module_info = self.MODULE_REPOS[module_key] + lines.append(f"#### {module_info['repo'].replace('bmad-code-org/', '').title()} ({len(issues)} issues)") + lines.append(f"**Correct repo:** [{module_info['repo']}]({module_info['url']}/issues/new)\n") + lines.append(f"**Close these and report in the correct repo:**") + lines.append(f"```bash") + + for issue, matched_name in issues[:10]: # Show first 10 + comment = (f"This issue relates to {matched_name} which is maintained in a separate repository. " + f"Please report this issue at {module_info['url']}/issues/new") + lines.append(f"gh issue close {issue.number} --repo {self.repo} --comment '{comment}'") + + if len(issues) > 10: + lines.append(f"# ... and {len(issues) - 10} more") + lines.append(f"```\n") + + # Find duplicates + duplicates = self.find_duplicates() + if duplicates: + lines.append(f"### Potential Duplicates ({len(duplicates)} pairs)\n") + lines.append("**Manual review required.** Close the older issue as a duplicate of the newer one:\n") + for issue1, issue2, similarity in duplicates[:20]: # Top 20 + older = issue1 if issue1.created_at < issue2.created_at else issue2 + newer = issue2 if issue1.created_at < issue2.created_at else issue1 + lines.append(f"#### {older.title}") + lines.append(f"- **Older:** #{older.number} ({older.age_days} days old)") + lines.append(f"- **Newer:** #{newer.number} ({newer.age_days} days old)") + lines.append(f"- **Similarity:** {similarity:.1%}") + lines.append(f"- **Command:** `gh issue close {older.number} --repo {self.repo} --comment 'Duplicate of #{newer.number}' --duplicate-of {newer.number}`") + lines.append("") + + # Find outdated issues + outdated = self.find_outdated_issues() + total_outdated = len(outdated['old_issues']) + len(outdated['v4_issues']) + len(outdated['ancient_stale']) + + if total_outdated > 0: + lines.append(f"### Outdated Issues ({total_outdated} total)\n") + + # Pre-Dec 2025 issues + if outdated['old_issues']: + cutoff_date = datetime(2025, 12, 1, tzinfo=timezone.utc).strftime('%B %Y') + lines.append(f"#### Issues from before {cutoff_date} ({len(outdated['old_issues'])})") + lines.append(f"These issues are quite old and may no longer be relevant. Consider reviewing and closing outdated ones.\n") + lines.append("**To add label for review:**") + lines.append(f"```bash") + for issue in outdated['old_issues'][:10]: # Show first 10 + lines.append(f"gh issue edit {issue.number} --repo {self.repo} --add-label 'outdated,needs-review'") + if len(outdated['old_issues']) > 10: + lines.append(f"# ... and {len(outdated['old_issues']) - 10} more") + lines.append(f"```\n") + + # v4-related issues + if outdated['v4_issues']: + lines.append(f"#### v4-Related Issues ({len(outdated['v4_issues'])})") + lines.append(f"BMad Method v4 is deprecated. 
These issues likely no longer apply to v6.\n") + lines.append("**Bulk close with comment:**") + lines.append(f"```bash") + for issue in outdated['v4_issues'][:10]: + lines.append(f"gh issue close {issue.number} --repo {self.repo} --comment 'Closing as this relates to BMad Method v4 which is deprecated. Please open a new issue if this still applies to v6.'") + if len(outdated['v4_issues']) > 10: + lines.append(f"# ... and {len(outdated['v4_issues']) - 10} more") + lines.append(f"```\n") + + # Ancient stale issues + if outdated['ancient_stale']: + lines.append(f"#### Ancient Stale Issues ({len(outdated['ancient_stale'])})") + lines.append(f"Issues that are both very old (>90 days) and haven't been updated in >90 days.\n") + lines.append("**Close as stale:**") + lines.append(f"```bash") + for issue in outdated['ancient_stale'][:10]: + lines.append(f"gh issue close {issue.number} --repo {self.repo} --comment 'Closing due to inactivity. Please reopen if this is still relevant.'") + if len(outdated['ancient_stale']) > 10: + lines.append(f"# ... and {len(outdated['ancient_stale']) - 10} more") + lines.append(f"```\n") + + # Bulk tagging suggestions + lines.append("### Bulk Tagging Suggestions\n") + lines.append("Add appropriate labels to untagged issues:\n") + + # Find issues without category labels + untagged = [i for i in open_issues if not any( + l.lower() in ['bug', 'feature', 'enhancement', 'documentation', 'performance', 'question', 'refactor', 'tech-debt'] + for l in i.labels + )] + + if untagged: + lines.append(f"**Issues without category labels ({len(untagged)}):**\n") + by_category = defaultdict(list) + for issue in untagged: + by_category[issue.category.value].append(issue) + + for category, issues in sorted(by_category.items(), key=lambda x: len(x[1]), reverse=True)[:5]: + lines.append(f"##### Label as `{category}` ({len(issues)} issues)") + lines.append(f"```bash") + for issue in issues[:5]: + lines.append(f"gh issue edit {issue.number} --repo {self.repo} --add-label '{category}'") + if len(issues) > 5: + lines.append(f"# ... and {len(issues) - 5} more") + lines.append(f"```\n") + + # Priority labeling + no_priority = [i for i in open_issues if not any( + l.lower() in ['critical', 'high', 'medium', 'low', 'priority'] + for l in i.labels + )] + + if no_priority: + # Group by priority + by_priority = defaultdict(list) + for issue in no_priority: + by_priority[issue.priority.value].append(issue) + + lines.append("**Add priority labels:**\n") + for priority_level in ['critical', 'high', 'medium', 'low']: + if priority_level in by_priority: + count = len(by_priority[priority_level]) + lines.append(f"##### Label as `{priority_level}` priority ({count} issues)") + lines.append(f"```bash") + for issue in by_priority[priority_level][:5]: + lines.append(f"gh issue edit {issue.number} --repo {self.repo} --add-label '{priority_level}'") + if count > 5: + lines.append(f"# ... 
and {count - 5} more") + lines.append(f"```\n") + + return "\n".join(lines) + + def generate_report(self) -> str: + """Generate markdown triage report""" + lines = [] + + # Summary + lines.append("# GitHub Issue Triage Report\n") + lines.append(f"**Repository:** {self.repo}\n") + lines.append(f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S UTC')}\n") + + # Count stats + total = len(self.issues) + open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] + closed_issues = [i for i in self.issues if i.state.upper() == 'CLOSED'] + + lines.append("## Summary\n") + lines.append(f"- **Total Issues:** {total}") + lines.append(f"- **Open:** {len(open_issues)} | **Closed:** {len(closed_issues)}") + + # Category breakdown + category_counts = defaultdict(int) + for issue in self.issues: + category_counts[issue.category] += 1 + top_categories = sorted(category_counts.items(), key=lambda x: x[1], reverse=True)[:5] + lines.append(f"- **Top Categories:** {', '.join(f'{cat.value} ({count})' for cat, count in top_categories)}") + lines.append("") + + # Priority action items + lines.append("## Priority Action Items\n") + + # Critical/High priority open issues + critical_issues = [i for i in open_issues if i.priority in [Priority.CRITICAL, Priority.HIGH]] + if critical_issues: + lines.append(f"### {'🚨 ' if critical_issues else ''}Critical & High Priority ({len(critical_issues)})\n") + for issue in sorted(critical_issues, key=lambda x: x.age_days, reverse=True): + lines.append(f"- [#{issue.number}]({issue.url}) {issue.title}") + lines.append(f" - {issue.category.value.upper()} | {issue.priority.value.upper()} | Age: {issue.age_days} days") + if issue.labels: + lines.append(f" - Labels: {', '.join(issue.labels)}") + lines.append("") + + # Stale issues needing review + stale_issues = [i for i in open_issues if i.triage_status == TriagingStatus.STALE] + if stale_issues: + lines.append(f"### Stale Issues - Needs Review ({len(stale_issues)})\n") + for issue in sorted(stale_issues, key=lambda x: x.days_since_update or 0, reverse=True)[:15]: + days_stale = issue.days_since_update or 0 + lines.append(f"- [#{issue.number}]({issue.url}) {issue.title}") + lines.append(f" - Last updated {days_stale} days ago | {issue.category.value}") + lines.append("") + + # Categories + lines.append("## Categories\n") + + for category in Category: + category_issues = [i for i in self.issues if i.category == category] + if not category_issues: + continue + + open_in_cat = [i for i in category_issues if i.state.upper() == 'OPEN'] + closed_in_cat = [i for i in category_issues if i.state.upper() == 'CLOSED'] + + lines.append(f"### {category.value.title()} ({len(open_in_cat)} open, {len(closed_in_cat)} closed)\n") + + # Sort open by priority + priority_order = {Priority.CRITICAL: 0, Priority.HIGH: 1, Priority.MEDIUM: 2, Priority.LOW: 3, Priority.INFORMATIONAL: 4} + open_in_cat_sorted = sorted(open_in_cat, key=lambda x: priority_order.get(x.priority, 5)) + + for issue in open_in_cat_sorted[:20]: # Limit to 20 per category + status_icon = { + TriagingStatus.NEEDS_TRIAGE: '🔍', + TriagingStatus.READY_FOR_DEV: '✅', + TriagingStatus.BLOCKED: '🚫', + TriagingStatus.STALE: '💤', + TriagingStatus.IN_PROGRESS: '🔧', + }.get(issue.triage_status, '') + + lines.append(f"{status_icon} [#{issue.number}]({issue.url}) {issue.title}") + lines.append(f"
Details") + lines.append(f" ") + lines.append(f" - **Priority:** {issue.priority.value}") + lines.append(f" - **Status:** {issue.triage_status.value}") + lines.append(f" - **Age:** {issue.age_days} days") + lines.append(f" - **Author:** {issue.author}") + if issue.labels: + lines.append(f" - **Labels:** {', '.join(issue.labels)}") + lines.append(f"
") + lines.append("") + + if len(open_in_cat) > 20: + lines.append(f"*... and {len(open_in_cat) - 20} more*\n") + + # Actionable recommendations + lines.append(self.generate_actionable_recommendations()) + + # Cleanup candidates + lines.append("## Cleanup Candidates\n") + + duplicates = [i for i in self.issues if i.triage_status == TriagingStatus.DUPLICATE] + if duplicates: + lines.append(f"### Duplicates ({len(duplicates)})\n") + for issue in duplicates: + lines.append(f"- [#{issue.number}]({issue.url}) {issue.title}") + + invalid = [i for i in self.issues if i.triage_status == TriagingStatus.INVALID] + if invalid: + lines.append(f"\n### Invalid/Wontfix ({len(invalid)})\n") + for issue in invalid: + lines.append(f"- [#{issue.number}]({issue.url}) {issue.title}") + + lines.append("\n---\n") + lines.append("*Report generated by BMad Issue Triage Tool*") + + return "\n".join(lines) + + +def main(): + parser = argparse.ArgumentParser( + description='Triage and categorize GitHub issues', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + %(prog)s # Triage open issues in current repo + %(prog)s --state all # All issues (including closed) + %(prog)s --focus "installer" # Filter for installer-related issues + %(prog)s --repo user/repo --state closed + """ + ) + + parser.add_argument('--repo', help='Repository (default: detect from git)') + parser.add_argument('--state', choices=['all', 'open', 'closed'], default='open', + help='Filter by state (default: open)') + parser.add_argument('--focus', help='Focus context to filter issues') + parser.add_argument('--output', '-o', help='Output file (default: _bmad-output/triage-reports/triage-.md)') + parser.add_argument('--json', action='store_true', help='Output as JSON') + + args = parser.parse_args() + + # Set default output to _bmad-output/triage-reports if not specified + if not args.output and not args.json: + os.makedirs('_bmad-output/triage-reports', exist_ok=True) + output_date = datetime.now().strftime('%Y-%m-%d') + args.output = f'_bmad-output/triage-reports/triage-{output_date}.md' + + triage = IssueTriage(repo=args.repo, state=args.state) + triage.fetch_issues() + triage.analyze_issues(focus_filter=args.focus) + + if args.json: + # Output as JSON for further processing + data = [ + { + 'number': i.number, + 'title': i.title, + 'state': i.state, + 'category': i.category.value, + 'priority': i.priority.value, + 'triage_status': i.triage_status.value, + 'age_days': i.age_days, + 'url': i.url, + 'labels': i.labels, + 'author': i.author + } + for i in triage.issues + ] + output = json.dumps(data, indent=2) + else: + output = triage.generate_report() + + if args.output: + with open(args.output, 'w') as f: + f.write(output) + # Get relative path for nicer output + rel_path = args.output.replace('./', '') + print(f"✅ Report saved to: {rel_path}") + else: + print(output) + + +if __name__ == '__main__': + main() diff --git a/.github/ISSUE_TEMPLATE/bug-report.yaml b/.github/ISSUE_TEMPLATE/bug-report.yaml new file mode 100644 index 00000000..6c5507d9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.yaml @@ -0,0 +1,124 @@ +name: Bug Report +description: File a bug report to help us improve BMad Method +title: "[BUG] " +labels: bug +assignees: [] +body: + - type: markdown + attributes: + value: | + Thanks for filing a bug report! Please fill out the information below to help us reproduce and fix the issue. 
+ + - type: textarea + id: description + attributes: + label: Description + description: Clear and concise description of what the bug is + placeholder: e.g., When I run /dev-story, it crashes on step 3 + validations: + required: true + + - type: textarea + id: steps + attributes: + label: Steps to reproduce + description: Step-by-step instructions to reproduce the behavior + placeholder: | + 1. Run 'npx bmad-method install' + 2. Select option X + 3. Run workflow Y + 4. See error + validations: + required: true + + - type: textarea + id: expected + attributes: + label: Expected behavior + description: What you expected to happen + placeholder: The workflow should complete successfully + validations: + required: true + + - type: textarea + id: actual + attributes: + label: Actual behavior + description: What actually happened + placeholder: The workflow crashed with error "..." + validations: + required: true + + - type: textarea + id: screenshots + attributes: + label: Screenshots + description: Add screenshots if applicable (paste images directly) + placeholder: Paste any relevant screenshots here + + - type: dropdown + id: module + attributes: + label: Which module is this for? + description: Select the BMad module this issue relates to + options: + - BMad Method (BMM) - Core Framework + - BMad Builder (BMB) - Agent Builder Tool + - Test Architect (TEA) - Test Strategy Module + - Game Dev Studio (BMGD) - Game Development Module + - Creative Intelligence Suite (CIS) - Innovation Module + - Not sure / Other + validations: + required: true + + - type: input + id: version + attributes: + label: BMad Version + description: "Check with: npx bmad-method --version or check package.json" + placeholder: e.g., 6.0.0-Beta.4 + validations: + required: true + + - type: dropdown + id: ide + attributes: + label: Which AI IDE are you using? + options: + - Claude Code + - Cursor + - Windsurf + - Copilot CLI / GitHub Copilot + - Kilo Code + - Other + validations: + required: true + + - type: dropdown + id: platform + attributes: + label: Operating System + options: + - macOS + - Windows + - Linux + - Other + validations: + required: true + + - type: textarea + id: logs + attributes: + label: Relevant log output + description: Copy and paste any relevant log output + render: shell + + - type: checkboxes + id: terms + attributes: + label: Confirm + options: + - label: I've searched for existing issues + required: true + - label: I'm using the latest version + required: false diff --git a/.github/ISSUE_TEMPLATE/documentation.yaml b/.github/ISSUE_TEMPLATE/documentation.yaml new file mode 100644 index 00000000..00729a36 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation.yaml @@ -0,0 +1,55 @@ +name: Documentation +description: Report issues or suggest improvements to documentation +title: "[DOCS] " +labels: documentation +assignees: [] +body: + - type: markdown + attributes: + value: | + Help us improve the BMad Method documentation! + + - type: dropdown + id: doc-type + attributes: + label: What type of documentation issue is this? + options: + - Error or inaccuracy + - Missing information + - Unclear or confusing + - Outdated content + - Request for new documentation + - Typo or grammar + validations: + required: true + + - type: textarea + id: location + attributes: + label: Documentation location + description: Where is the documentation that needs improvement? 
+ placeholder: e.g., http://docs.bmad-method.org/tutorials/getting-started/ or "In the README" + validations: + required: true + + - type: textarea + id: issue + attributes: + label: What's the issue? + description: Describe the documentation issue in detail + placeholder: e.g., Step 3 says to run command X but it should be command Y + validations: + required: true + + - type: textarea + id: suggestion + attributes: + label: Suggested improvement + description: How would you like to see this improved? + placeholder: e.g., Change the command to X and add an example + + - type: input + id: version + attributes: + label: BMad Version (if applicable) + placeholder: e.g., 6.0.0-Beta.4 diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature-request.md similarity index 100% rename from .github/ISSUE_TEMPLATE/feature_request.md rename to .github/ISSUE_TEMPLATE/feature-request.md diff --git a/.gitignore b/.gitignore index 885cb245..6af83303 100644 --- a/.gitignore +++ b/.gitignore @@ -50,7 +50,7 @@ _bmad-output .qwen .rovodev .kilocodemodes -.claude +.claude/commands .codex .github/chatmodes .github/agents From bdcd8afa422b5f12d90acc6bd2bd238e9397c7d9 Mon Sep 17 00:00:00 2001 From: Brian Madison Date: Sat, 31 Jan 2026 21:21:15 -0600 Subject: [PATCH 7/9] gh triage report creator --- .../bmad-bmm-generate-project-context.md | 13 +- .claude/skills/gh-triage/SKILL.md | 176 ++++- .claude/skills/gh-triage/scripts/gh_triage.py | 713 ------------------ 3 files changed, 140 insertions(+), 762 deletions(-) delete mode 100755 .claude/skills/gh-triage/scripts/gh_triage.py diff --git a/.claude/commands/bmad-bmm-generate-project-context.md b/.claude/commands/bmad-bmm-generate-project-context.md index 7c17f855..452871c3 100644 --- a/.claude/commands/bmad-bmm-generate-project-context.md +++ b/.claude/commands/bmad-bmm-generate-project-context.md @@ -1,14 +1,7 @@ --- name: 'generate-project-context' -description: 'Scan existing codebase to generate a lean LLM-optimized project-context.md with critical implementation rules and patterns for AI agents' +description: 'Creates a concise project-context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.' +disable-model-invocation: true --- -IT IS CRITICAL THAT YOU FOLLOW THESE STEPS - while staying in character as the current agent persona you may have loaded: - - -1. Always LOAD the FULL @{project-root}/_bmad/core/tasks/workflow.xml -2. READ its entire contents - this is the CORE OS for EXECUTING the specific workflow-config @{project-root}/_bmad/bmm/workflows/generate-project-context/workflow.md -3. Pass the yaml path @{project-root}/_bmad/bmm/workflows/generate-project-context/workflow.md as 'workflow-config' parameter to the workflow.xml instructions -4. Follow workflow.xml instructions EXACTLY as written to process and follow the specific workflow config and its instructions -5. Save outputs after EACH section when generating any documents from templates - +IT IS CRITICAL THAT YOU FOLLOW THIS COMMAND: LOAD the FULL @{project-root}/_bmad/bmm/workflows/generate-project-context/workflow.md, READ its entire contents and follow its directions exactly! 
diff --git a/.claude/skills/gh-triage/SKILL.md b/.claude/skills/gh-triage/SKILL.md index a198fc0d..59795159 100644 --- a/.claude/skills/gh-triage/SKILL.md +++ b/.claude/skills/gh-triage/SKILL.md @@ -1,60 +1,158 @@ --- name: gh-triage -description: Fetch all GitHub issues via gh CLI and provide consolidated AI-powered analysis with clustering, prioritization, and actionable insights. Use for issue triage, backlog cleanup, or when user mentions "issues", "triage", or "backlog". +description: Fetch all GitHub issues via gh CLI and use AI agents to deeply analyze, cluster, and prioritize issues with actual understanding. Use for issue triage, backlog cleanup, or when user mentions "issues", "triage", or "backlog". license: MIT +disable-model-invocation: true metadata: author: bmad-code-org - version: "2.1.0" + version: "3.0.0" anthropic-internal: Core team issue triage tool for BMad Method repositories min-github-cli-version: "2.0" -compatibility: Requires gh CLI, Python 3.8+, and git repository +compatibility: Requires gh CLI, git repository, and BMad Method with Task tool support --- -# GitHub Issue Triage +# GitHub Issue Triage with AI Analysis -**IMPORTANT:** Never include time or effort estimates in output or recommendations. +**CRITICAL RULES:** +- NEVER include time or effort estimates in output or recommendations +- Focus on WHAT needs to be done, not HOW LONG it takes +- Use Bash tool with gh CLI for all GitHub operations -## What This Does +## Execution Plan -1. **Fetch all issues** from repository via gh CLI (configurable: open/closed/all) -2. **Extract data** into structured format (JSON + markdown tables) -3. **Generate AI analysis** with: - - Issue clustering by theme - - Priority recommendations - - Actionable insights - - Cross-repo detection - - Cleanup candidates +You will perform GitHub issue triage using AI agents for deep analysis: -## Steps +### Step 1: Fetch Issues +Use `gh issue list` to fetch all open issues from the current repository in JSON format. -```bash -# 1. Navigate to scripts directory -cd .claude/skills/gh-triage/scripts +### Step 2: Batch Creation +Split issues into batches of ~10 issues each for parallel analysis. -# 2. Run the triage tool (outputs to _bmad-output/triage-reports/) -python3 gh_triage.py --state open +### Step 3: Parallel Agent Analysis +For EACH batch, use the Task tool with `subagent_type=general-purpose` to launch an agent with this prompt: -# 3. Review the generated report -cat _bmad-output/triage-reports/triage-*.md +``` +You are analyzing a batch of GitHub issues for deep understanding and triage. + +**YOUR TASK:** +Read the issues in your batch and provide DEEP analysis: + +1. **For EACH issue, analyze:** + - What is this ACTUALLY about? (beyond keywords) + - What component/system does it affect? + - What's the impact and severity? + - Is it a bug, feature request, or something else? + - What specific theme does it belong to? + +2. **PRIORITY ASSESSMENT:** + - CRITICAL: Blocks users, security issues, data loss, broken installers + - HIGH: Major functionality broken, important features missing + - MEDIUM: Workarounds available, minor bugs, nice-to-have features + - LOW: Edge cases, cosmetic issues, questions + +3. 
**RELATIONSHIPS:** + - Duplicates: Near-identical issues about the same problem + - Related: Issues connected by theme or root cause + - Dependencies: One issue blocks or requires another + +**YOUR BATCH:** +[Paste the batch of issues here - each with number, title, body, labels] + +**OUTPUT FORMAT (JSON only, no markdown):** +{ + "issues": [ + { + "number": 123, + "title": "issue title", + "deep_understanding": "2-3 sentences explaining what this is really about", + "affected_components": ["installer", "workflows", "docs"], + "issue_type": "bug/feature/question/tech-debt", + "priority": "CRITICAL/HIGH/MEDIUM/LOW", + "priority_rationale": "Why this priority level", + "theme": "installation/workflow/integration/docs/ide-support/etc", + "relationships": { + "duplicates_of": [456], + "related_to": [789, 101], + "blocks": [111] + } + } + ], + "cross_repo_issues": [ + {"number": 123, "target_repo": "bmad-builder", "reason": "about agent builder"} + ], + "cleanup_candidates": [ + {"number": 456, "reason": "v4-related/outdated/duplicate"} + ], + "themes_found": { + "Installation Blockers": { + "count": 5, + "root_cause": "Common pattern if identifiable" + } + } +} + +Return ONLY valid JSON. No explanations outside the JSON structure. ``` -## Command Reference +### Step 4: Consolidate & Generate Report +After all agents complete, create a comprehensive markdown report saved to `_bmad-output/triage-reports/triage-YYYY-MM-DD.md` with: -| Parameter | Description | Default | -| ---------------- | ------------------------------------------ | -------------------------------------------------- | -| `--repo` | Repository (auto-detected from git remote) | current repo | -| `--state` | Filter: `all`, `open`, `closed` | `open` | -| `--focus` | Filter by keywords in title/body | none | -| `--output`, `-o` | Save output to file | `_bmad-output/triage-reports/triage-YYYY-MM-DD.md` | -| `--json` | Output as JSON instead of markdown | false (outputs to stdout) | -| `--limit` | Max issues to fetch | 1000 | +## Report Structure -## Output +### Executive Summary +- Total issues analyzed +- Issue count by priority (CRITICAL, HIGH, MEDIUM, LOW) +- Major themes discovered +- Top 5 critical issues requiring immediate attention -All reports automatically save to `_bmad-output/triage-reports/` with: -- Summary statistics -- Issue clusters by theme -- Priority matrix -- Actionable recommendations -- Cross-repo issues with close commands -- Cleanup candidates (duplicates, stale, outdated) +### Critical Issues (CRITICAL Priority) +For each CRITICAL issue: +- **#123 - [Issue Title](url)** +- **What it's about:** [Deep understanding] +- **Affected:** [Components] +- **Why Critical:** [Rationale] +- **Suggested Action:** [Specific action] + +### High Priority Issues (HIGH Priority) +Same format as Critical, grouped by theme. + +### Theme Clusters +For each major theme: +- **Theme Name** (N issues) +- **What connects these:** [Pattern] +- **Root cause:** [If identifiable] +- **Consolidated actions:** [Bulk actions if applicable] +- **Issues:** #123, #456, #789 + +### Relationships & Dependencies +- **Duplicates:** List pairs with `gh issue close` commands +- **Related Issues:** Groups of related issues +- **Dependencies:** Blocking relationships + +### Cross-Repo Issues +Issues that should be migrated to other repositories (bmad-builder, bmad-module-creative-intelligence-suite, bmad-module-game-dev-studio, bmad-method-test-architecture-enterprise). 
+ +For each, provide: +``` +gh issue close XXX --repo CURRENT_REPO --comment "This issue belongs in REPO. Please report at https://github.com/TARGET_REPO/issues/new" +``` + +### Cleanup Candidates +- **v4-related:** Deprecated version issues with close commands +- **Stale:** No activity >30 days +- **Low priority + old:** Low priority issues >60 days old + +### Actionable Next Steps +Specific, prioritized actions: +1. [CRITICAL] Fix broken installer - affects all new users +2. [HIGH] Resolve Windows path escaping issues +3. [HIGH] Address workflow integration bugs +etc. + +Include `gh` commands where applicable for bulk actions. + +--- + +## Execute Now + +Begin by fetching issues from the current repository and follow the plan above. diff --git a/.claude/skills/gh-triage/scripts/gh_triage.py b/.claude/skills/gh-triage/scripts/gh_triage.py deleted file mode 100755 index e73da756..00000000 --- a/.claude/skills/gh-triage/scripts/gh_triage.py +++ /dev/null @@ -1,713 +0,0 @@ -#!/usr/bin/env python3 -""" -GitHub Issue Triage Tool - -Fetches, categorizes, and groups GitHub issues for efficient triage. -Optimized for large datasets with parallel processing support. - -IMPORTANT: Never provide time, date, or effort estimates in output. -AI execution speed varies greatly from human timelines. -Focus on what needs to be done, not how long it takes. -""" - -import argparse -import json -import os -import re -import subprocess -from collections import defaultdict -from dataclasses import dataclass, field -from datetime import datetime, timedelta, timezone -from typing import List, Dict, Any, Optional, Set, Tuple -from enum import Enum -from difflib import SequenceMatcher -import re - - -class Category(Enum): - """Issue categories""" - BUG = "bug" - FEATURE = "feature" - ENHANCEMENT = "enhancement" - DOCUMENTATION = "documentation" - PERFORMANCE = "performance" - SECURITY = "security" - QUESTION = "question" - REFACTOR = "refactor" - TECH_DEBT = "tech-debt" - OTHER = "other" - - -class Priority(Enum): - """Priority levels""" - CRITICAL = "critical" - HIGH = "high" - MEDIUM = "medium" - LOW = "low" - INFORMATIONAL = "informational" - - -class TriagingStatus(Enum): - """Triage status""" - NEEDS_TRIAGE = "needs-triage" - READY_FOR_DEV = "ready-for-dev" - BLOCKED = "blocked" - STALE = "stale" - DUPLICATE = "duplicate" - INVALID = "invalid" - IN_PROGRESS = "in-progress" - COMPLETED = "completed" - - -@dataclass -class Issue: - """Represents a GitHub issue with triage metadata""" - number: int - title: str - state: str - author: str - created_at: datetime - updated_at: Optional[datetime] - labels: List[str] - body: Optional[str] - comments: int - category: Category = field(default=Category.OTHER) - priority: Priority = field(default=Priority.MEDIUM) - triage_status: TriagingStatus = field(default=TriagingStatus.NEEDS_TRIAGE) - - @property - def age_days(self) -> int: - """Age of issue in days""" - return (datetime.now(timezone.utc) - self.created_at).days - - @property - def days_since_update(self) -> Optional[int]: - """Days since last update""" - if not self.updated_at: - return None - return (datetime.now(timezone.utc) - self.updated_at).days - - @property - def url(self) -> str: - """GitHub URL for the issue""" - # Get from parent triage object - return f"https://github.com/{self._repo_url}/issues/{self.number}" - - -class IssueCategorizer: - """Categorizes issues based on content and metadata""" - - # Keywords for categorization - CATEGORY_KEYWORDS = { - Category.BUG: ['bug', 'fix', 'crash', 'error', 
'broken', 'fails', 'exception', 'segfault', 'leak'], - Category.FEATURE: ['feature', 'add ', 'implement', 'support for', 'new ', 'request', 'wish'], - Category.ENHANCEMENT: ['enhance', 'improve', 'optimize', 'better', 'enhancement'], - Category.DOCUMENTATION: ['doc', 'readme', 'tutorial', 'guide', 'documentation', 'example', 'comment'], - Category.PERFORMANCE: ['slow', 'performance', 'latency', 'speed', 'fast', 'optimize', 'memory'], - Category.SECURITY: ['security', 'vulnerability', 'exploit', 'xss', 'injection', 'csrf', 'auth'], - Category.QUESTION: ['question', 'how to', 'help', 'confusion', 'unclear', 'clarify'], - Category.REFACTOR: ['refactor', 'clean up', 'reorganize', 'restructure', 'simplify'], - Category.TECH_DEBT: ['tech debt', 'technical debt', 'legacy', 'deprecated', 'cleanup'], - } - - # Priority indicators from labels - PRIORITY_LABELS = { - Priority.CRITICAL: ['critical', 'blocker', 'urgent'], - Priority.HIGH: ['high', 'important', 'priority'], - Priority.MEDIUM: ['medium'], - Priority.LOW: ['low', 'minor', 'trivial'], - } - - def categorize(self, issue: Issue) -> Category: - """Determine category based on title, body, and labels""" - text = f"{issue.title} {issue.body or ''}".lower() - - # Check labels first - for label in issue.labels: - label_lower = label.lower() - if any(cat_str in label_lower for cat_str in ['bug', 'defect']): - return Category.BUG - if any(cat_str in label_lower for cat_str in ['feature', 'enhancement']): - return Category.FEATURE - if 'doc' in label_lower: - return Category.DOCUMENTATION - if 'perf' in label_lower: - return Category.PERFORMANCE - if 'security' in label_lower: - return Category.SECURITY - - # Check keywords - for category, keywords in self.CATEGORY_KEYWORDS.items(): - if any(keyword in text for keyword in keywords): - return category - - return Category.OTHER - - def determine_priority(self, issue: Issue) -> Priority: - """Determine priority based on labels and metadata""" - # Check labels - for label in issue.labels: - label_lower = label.lower() - for priority, keywords in self.PRIORITY_LABELS.items(): - if any(keyword in label_lower for keyword in keywords): - return priority - - # Infer from metadata - if issue.age_days > 90 and issue.state == 'open': - # Old open issues are lower priority - return Priority.LOW - - if issue.comments > 10: - # Highly discussed issues are important - return Priority.HIGH - - return Priority.MEDIUM - - def determine_triage_status(self, issue: Issue) -> TriagingStatus: - """Determine triage status""" - # Check labels - for label in issue.labels: - label_lower = label.lower() - if 'duplicate' in label_lower: - return TriagingStatus.DUPLICATE - if any(x in label_lower for x in ['invalid', 'wontfix', 'wont-fix']): - return TriagingStatus.INVALID - if 'blocked' in label_lower or 'blocking' in label_lower: - return TriagingStatus.BLOCKED - if any(x in label_lower for x in ['in-progress', 'in progress', 'working']): - return TriagingStatus.IN_PROGRESS - - # Check staleness - if issue.state.upper() == 'OPEN': - if issue.days_since_update and issue.days_since_update > 30: - return TriagingStatus.STALE - if not any(label.lower() in ['accepted', 'approved', 'ready'] for label in issue.labels): - return TriagingStatus.NEEDS_TRIAGE - return TriagingStatus.READY_FOR_DEV - - return TriagingStatus.COMPLETED - - -class IssueTriage: - """Main triage coordinator""" - - # Module repository mapping - MODULE_REPOS = { - 'builder': { - 'repo': 'bmad-code-org/bmad-builder', - 'names': ['bmb', 'builder', 'bmad-builder', 
'agent builder', 'agent-builder'], - 'url': 'https://github.com/bmad-code-org/bmad-builder' - }, - 'tea': { - 'repo': 'bmad-code-org/bmad-method-test-architecture-enterprise', - 'names': ['tea', 'test architect', 'test-architect', 'test architecture'], - 'url': 'https://github.com/bmad-code-org/bmad-method-test-architecture-enterprise' - }, - 'bmgd': { - 'repo': 'bmad-code-org/bmad-module-game-dev-studio', - 'names': ['bmgd', 'game dev', 'game-dev', 'gamedev', 'game dev studio', 'game-dev-studio'], - 'url': 'https://github.com/bmad-code-org/bmad-module-game-dev-studio' - }, - 'cis': { - 'repo': 'bmad-code-org/bmad-module-creative-intelligence-suite', - 'names': ['cis', 'creative intelligence', 'creative-intelligence', 'creative intelligence suite'], - 'url': 'https://github.com/bmad-code-org/bmad-module-creative-intelligence-suite' - }, - } - - def __init__(self, repo: Optional[str] = None, state: str = 'open'): - self.repo = repo or self._detect_repo() - self.state = state - self.categorizer = IssueCategorizer() - self.issues: List[Issue] = [] - self._repo_url = self.repo # Store for issue URL generation - - def _detect_repo(self) -> str: - """Detect repository from git remote""" - try: - result = subprocess.run( - ['git', 'remote', 'get-url', 'origin'], - capture_output=True, - text=True, - check=True - ) - url = result.stdout.strip() - # Convert git@github.com:user/repo.git to user/repo - if url.startswith('git@github.com:'): - return url[15:-4] - if url.startswith('https://github.com/'): - return url[19:-4] - except subprocess.CalledProcessError: - pass - return 'unknown/repo' - - def fetch_issues(self) -> List[Issue]: - """Fetch issues using gh CLI""" - print(f"Fetching issues from {self.repo}...") - - cmd = [ - 'gh', 'issue', 'list', - '--repo', self.repo, - '--state', self.state, - '--limit', '1000', # Fetch up to 1000 issues (default is 30) - '--json', 'number,title,state,author,createdAt,updatedAt,labels,body,comments' - ] - - result = subprocess.run(cmd, capture_output=True, text=True, check=True) - data = json.loads(result.stdout) - - self.issues = [] - for item in data: - labels = [label['name'] for label in item.get('labels', [])] - issue = Issue( - number=item['number'], - title=item['title'], - state=item['state'], - author=item['author']['login'], - created_at=datetime.fromisoformat(item['createdAt'].replace('Z', '+00:00')), - updated_at=datetime.fromisoformat(item['updatedAt'].replace('Z', '+00:00')) if item.get('updatedAt') else None, - labels=labels, - body=item.get('body'), - comments=len(item.get('comments', [])) - ) - issue._repo_url = self._repo_url - self.issues.append(issue) - - print(f"Fetched {len(self.issues)} issues") - return self.issues - - def analyze_issues(self, focus_filter: Optional[str] = None) -> List[Issue]: - """Analyze and categorize all issues""" - print("Analyzing issues...") - - for issue in self.issues: - issue.category = self.categorizer.categorize(issue) - issue.priority = self.categorizer.determine_priority(issue) - issue.triage_status = self.categorizer.determine_triage_status(issue) - - # Apply focus filter if provided - if focus_filter: - keywords = focus_filter.lower().split() - self.issues = [ - issue for issue in self.issues - if any(keyword in f"{issue.title} {issue.body or ''}".lower() - for keyword in keywords) - ] - print(f"Filtered to {len(self.issues)} issues matching focus criteria") - - return self.issues - - def find_duplicates(self, threshold: float = 0.7) -> List[Tuple[Issue, Issue, float]]: - """Find potential duplicate 
issues based on title similarity""" - print("Detecting potential duplicates...") - duplicates = [] - open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] - - for i, issue1 in enumerate(open_issues): - for issue2 in open_issues[i+1:]: - # Skip if already marked as duplicate - if 'duplicate' in [l.lower() for l in issue1.labels + issue2.labels]: - continue - - # Calculate title similarity - similarity = SequenceMatcher(None, issue1.title.lower(), issue2.title.lower()).ratio() - - if similarity >= threshold: - duplicates.append((issue1, issue2, similarity)) - - # Sort by similarity (highest first) - duplicates.sort(key=lambda x: x[2], reverse=True) - return duplicates - - def find_outdated_issues(self, before_date: datetime = None) -> Dict[str, List[Issue]]: - """Find issues that are likely outdated""" - print("Identifying outdated issues...") - - if before_date is None: - # Default to December 1, 2025 - before_date = datetime(2025, 12, 1, tzinfo=timezone.utc) - - outdated = { - 'old_issues': [], - 'v4_issues': [], - 'ancient_stale': [] - } - - open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] - - for issue in open_issues: - # Issues created before cutoff date - if issue.created_at < before_date: - outdated['old_issues'].append(issue) - - # Issues mentioning v4 - text = f"{issue.title} {issue.body or ''}".lower() - if 'v4' in text or 'version 4' in text or 'v 4' in text: - outdated['v4_issues'].append(issue) - - # Issues very old and stale (>90 days since update) - if issue.age_days > 90 and (issue.days_since_update or 0) > 90: - outdated['ancient_stale'].append(issue) - - return outdated - - def generate_bulk_commands(self, issues: List[Issue], label: str) -> List[str]: - """Generate gh CLI commands for bulk operations""" - commands = [] - for issue in issues: - cmd = f"gh issue edit {issue.number} --repo {self.repo} --add-label '{label}'" - commands.append(cmd) - return commands - - def generate_close_commands(self, issues: List[Issue], reason: str) -> List[str]: - """Generate gh CLI commands to close issues with comment""" - commands = [] - for issue in issues: - comment = reason.replace("'", "'\\''") # Escape single quotes - cmd = (f"gh issue close {issue.number} --repo {self.repo} " - f"--comment '{comment}'") - commands.append(cmd) - return commands - - def find_cross_repo_issues(self) -> Dict[str, List[Tuple[Issue, str]]]: - """Find issues that belong in other module repositories""" - print("Detecting cross-repo issues...") - cross_repo = defaultdict(list) - - open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] - - for issue in open_issues: - text = f"{issue.title} {issue.body or ''}".lower() - - for module_key, module_info in self.MODULE_REPOS.items(): - # Check if issue mentions this module - for name in module_info['names']: - # Use word boundaries to avoid false positives - pattern = r'\b' + re.escape(name) + r'\b' - if re.search(pattern, text): - cross_repo[module_key].append((issue, name)) - break # Only add once per issue - - return cross_repo - - def generate_actionable_recommendations(self) -> str: - """Generate actionable recommendations with specific commands""" - lines = [] - open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] - - lines.append("## 🎯 Actionable Recommendations\n") - - # Cross-repo issues (show first!) 
- cross_repo_issues = self.find_cross_repo_issues() - if cross_repo_issues: - total_cross = sum(len(issues) for issues in cross_repo_issues.values()) - lines.append(f"### Issues in Wrong Repository ({total_cross} issues)\n") - lines.append("**High Priority.** These issues should be closed here and opened in the correct module repository:\n") - - for module_key, issues in cross_repo_issues.items(): - if issues: - module_info = self.MODULE_REPOS[module_key] - lines.append(f"#### {module_info['repo'].replace('bmad-code-org/', '').title()} ({len(issues)} issues)") - lines.append(f"**Correct repo:** [{module_info['repo']}]({module_info['url']}/issues/new)\n") - lines.append(f"**Close these and report in the correct repo:**") - lines.append(f"```bash") - - for issue, matched_name in issues[:10]: # Show first 10 - comment = (f"This issue relates to {matched_name} which is maintained in a separate repository. " - f"Please report this issue at {module_info['url']}/issues/new") - lines.append(f"gh issue close {issue.number} --repo {self.repo} --comment '{comment}'") - - if len(issues) > 10: - lines.append(f"# ... and {len(issues) - 10} more") - lines.append(f"```\n") - - # Find duplicates - duplicates = self.find_duplicates() - if duplicates: - lines.append(f"### Potential Duplicates ({len(duplicates)} pairs)\n") - lines.append("**Manual review required.** Close the older issue as a duplicate of the newer one:\n") - for issue1, issue2, similarity in duplicates[:20]: # Top 20 - older = issue1 if issue1.created_at < issue2.created_at else issue2 - newer = issue2 if issue1.created_at < issue2.created_at else issue1 - lines.append(f"#### {older.title}") - lines.append(f"- **Older:** #{older.number} ({older.age_days} days old)") - lines.append(f"- **Newer:** #{newer.number} ({newer.age_days} days old)") - lines.append(f"- **Similarity:** {similarity:.1%}") - lines.append(f"- **Command:** `gh issue close {older.number} --repo {self.repo} --comment 'Duplicate of #{newer.number}' --duplicate-of {newer.number}`") - lines.append("") - - # Find outdated issues - outdated = self.find_outdated_issues() - total_outdated = len(outdated['old_issues']) + len(outdated['v4_issues']) + len(outdated['ancient_stale']) - - if total_outdated > 0: - lines.append(f"### Outdated Issues ({total_outdated} total)\n") - - # Pre-Dec 2025 issues - if outdated['old_issues']: - cutoff_date = datetime(2025, 12, 1, tzinfo=timezone.utc).strftime('%B %Y') - lines.append(f"#### Issues from before {cutoff_date} ({len(outdated['old_issues'])})") - lines.append(f"These issues are quite old and may no longer be relevant. Consider reviewing and closing outdated ones.\n") - lines.append("**To add label for review:**") - lines.append(f"```bash") - for issue in outdated['old_issues'][:10]: # Show first 10 - lines.append(f"gh issue edit {issue.number} --repo {self.repo} --add-label 'outdated,needs-review'") - if len(outdated['old_issues']) > 10: - lines.append(f"# ... and {len(outdated['old_issues']) - 10} more") - lines.append(f"```\n") - - # v4-related issues - if outdated['v4_issues']: - lines.append(f"#### v4-Related Issues ({len(outdated['v4_issues'])})") - lines.append(f"BMad Method v4 is deprecated. These issues likely no longer apply to v6.\n") - lines.append("**Bulk close with comment:**") - lines.append(f"```bash") - for issue in outdated['v4_issues'][:10]: - lines.append(f"gh issue close {issue.number} --repo {self.repo} --comment 'Closing as this relates to BMad Method v4 which is deprecated. 
Please open a new issue if this still applies to v6.'") - if len(outdated['v4_issues']) > 10: - lines.append(f"# ... and {len(outdated['v4_issues']) - 10} more") - lines.append(f"```\n") - - # Ancient stale issues - if outdated['ancient_stale']: - lines.append(f"#### Ancient Stale Issues ({len(outdated['ancient_stale'])})") - lines.append(f"Issues that are both very old (>90 days) and haven't been updated in >90 days.\n") - lines.append("**Close as stale:**") - lines.append(f"```bash") - for issue in outdated['ancient_stale'][:10]: - lines.append(f"gh issue close {issue.number} --repo {self.repo} --comment 'Closing due to inactivity. Please reopen if this is still relevant.'") - if len(outdated['ancient_stale']) > 10: - lines.append(f"# ... and {len(outdated['ancient_stale']) - 10} more") - lines.append(f"```\n") - - # Bulk tagging suggestions - lines.append("### Bulk Tagging Suggestions\n") - lines.append("Add appropriate labels to untagged issues:\n") - - # Find issues without category labels - untagged = [i for i in open_issues if not any( - l.lower() in ['bug', 'feature', 'enhancement', 'documentation', 'performance', 'question', 'refactor', 'tech-debt'] - for l in i.labels - )] - - if untagged: - lines.append(f"**Issues without category labels ({len(untagged)}):**\n") - by_category = defaultdict(list) - for issue in untagged: - by_category[issue.category.value].append(issue) - - for category, issues in sorted(by_category.items(), key=lambda x: len(x[1]), reverse=True)[:5]: - lines.append(f"##### Label as `{category}` ({len(issues)} issues)") - lines.append(f"```bash") - for issue in issues[:5]: - lines.append(f"gh issue edit {issue.number} --repo {self.repo} --add-label '{category}'") - if len(issues) > 5: - lines.append(f"# ... and {len(issues) - 5} more") - lines.append(f"```\n") - - # Priority labeling - no_priority = [i for i in open_issues if not any( - l.lower() in ['critical', 'high', 'medium', 'low', 'priority'] - for l in i.labels - )] - - if no_priority: - # Group by priority - by_priority = defaultdict(list) - for issue in no_priority: - by_priority[issue.priority.value].append(issue) - - lines.append("**Add priority labels:**\n") - for priority_level in ['critical', 'high', 'medium', 'low']: - if priority_level in by_priority: - count = len(by_priority[priority_level]) - lines.append(f"##### Label as `{priority_level}` priority ({count} issues)") - lines.append(f"```bash") - for issue in by_priority[priority_level][:5]: - lines.append(f"gh issue edit {issue.number} --repo {self.repo} --add-label '{priority_level}'") - if count > 5: - lines.append(f"# ... 
and {count - 5} more") - lines.append(f"```\n") - - return "\n".join(lines) - - def generate_report(self) -> str: - """Generate markdown triage report""" - lines = [] - - # Summary - lines.append("# GitHub Issue Triage Report\n") - lines.append(f"**Repository:** {self.repo}\n") - lines.append(f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S UTC')}\n") - - # Count stats - total = len(self.issues) - open_issues = [i for i in self.issues if i.state.upper() == 'OPEN'] - closed_issues = [i for i in self.issues if i.state.upper() == 'CLOSED'] - - lines.append("## Summary\n") - lines.append(f"- **Total Issues:** {total}") - lines.append(f"- **Open:** {len(open_issues)} | **Closed:** {len(closed_issues)}") - - # Category breakdown - category_counts = defaultdict(int) - for issue in self.issues: - category_counts[issue.category] += 1 - top_categories = sorted(category_counts.items(), key=lambda x: x[1], reverse=True)[:5] - lines.append(f"- **Top Categories:** {', '.join(f'{cat.value} ({count})' for cat, count in top_categories)}") - lines.append("") - - # Priority action items - lines.append("## Priority Action Items\n") - - # Critical/High priority open issues - critical_issues = [i for i in open_issues if i.priority in [Priority.CRITICAL, Priority.HIGH]] - if critical_issues: - lines.append(f"### {'🚨 ' if critical_issues else ''}Critical & High Priority ({len(critical_issues)})\n") - for issue in sorted(critical_issues, key=lambda x: x.age_days, reverse=True): - lines.append(f"- [#{issue.number}]({issue.url}) {issue.title}") - lines.append(f" - {issue.category.value.upper()} | {issue.priority.value.upper()} | Age: {issue.age_days} days") - if issue.labels: - lines.append(f" - Labels: {', '.join(issue.labels)}") - lines.append("") - - # Stale issues needing review - stale_issues = [i for i in open_issues if i.triage_status == TriagingStatus.STALE] - if stale_issues: - lines.append(f"### Stale Issues - Needs Review ({len(stale_issues)})\n") - for issue in sorted(stale_issues, key=lambda x: x.days_since_update or 0, reverse=True)[:15]: - days_stale = issue.days_since_update or 0 - lines.append(f"- [#{issue.number}]({issue.url}) {issue.title}") - lines.append(f" - Last updated {days_stale} days ago | {issue.category.value}") - lines.append("") - - # Categories - lines.append("## Categories\n") - - for category in Category: - category_issues = [i for i in self.issues if i.category == category] - if not category_issues: - continue - - open_in_cat = [i for i in category_issues if i.state.upper() == 'OPEN'] - closed_in_cat = [i for i in category_issues if i.state.upper() == 'CLOSED'] - - lines.append(f"### {category.value.title()} ({len(open_in_cat)} open, {len(closed_in_cat)} closed)\n") - - # Sort open by priority - priority_order = {Priority.CRITICAL: 0, Priority.HIGH: 1, Priority.MEDIUM: 2, Priority.LOW: 3, Priority.INFORMATIONAL: 4} - open_in_cat_sorted = sorted(open_in_cat, key=lambda x: priority_order.get(x.priority, 5)) - - for issue in open_in_cat_sorted[:20]: # Limit to 20 per category - status_icon = { - TriagingStatus.NEEDS_TRIAGE: '🔍', - TriagingStatus.READY_FOR_DEV: '✅', - TriagingStatus.BLOCKED: '🚫', - TriagingStatus.STALE: '💤', - TriagingStatus.IN_PROGRESS: '🔧', - }.get(issue.triage_status, '') - - lines.append(f"{status_icon} [#{issue.number}]({issue.url}) {issue.title}") - lines.append(f"
Details") - lines.append(f" ") - lines.append(f" - **Priority:** {issue.priority.value}") - lines.append(f" - **Status:** {issue.triage_status.value}") - lines.append(f" - **Age:** {issue.age_days} days") - lines.append(f" - **Author:** {issue.author}") - if issue.labels: - lines.append(f" - **Labels:** {', '.join(issue.labels)}") - lines.append(f"
") - lines.append("") - - if len(open_in_cat) > 20: - lines.append(f"*... and {len(open_in_cat) - 20} more*\n") - - # Actionable recommendations - lines.append(self.generate_actionable_recommendations()) - - # Cleanup candidates - lines.append("## Cleanup Candidates\n") - - duplicates = [i for i in self.issues if i.triage_status == TriagingStatus.DUPLICATE] - if duplicates: - lines.append(f"### Duplicates ({len(duplicates)})\n") - for issue in duplicates: - lines.append(f"- [#{issue.number}]({issue.url}) {issue.title}") - - invalid = [i for i in self.issues if i.triage_status == TriagingStatus.INVALID] - if invalid: - lines.append(f"\n### Invalid/Wontfix ({len(invalid)})\n") - for issue in invalid: - lines.append(f"- [#{issue.number}]({issue.url}) {issue.title}") - - lines.append("\n---\n") - lines.append("*Report generated by BMad Issue Triage Tool*") - - return "\n".join(lines) - - -def main(): - parser = argparse.ArgumentParser( - description='Triage and categorize GitHub issues', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=""" -Examples: - %(prog)s # Triage open issues in current repo - %(prog)s --state all # All issues (including closed) - %(prog)s --focus "installer" # Filter for installer-related issues - %(prog)s --repo user/repo --state closed - """ - ) - - parser.add_argument('--repo', help='Repository (default: detect from git)') - parser.add_argument('--state', choices=['all', 'open', 'closed'], default='open', - help='Filter by state (default: open)') - parser.add_argument('--focus', help='Focus context to filter issues') - parser.add_argument('--output', '-o', help='Output file (default: _bmad-output/triage-reports/triage-.md)') - parser.add_argument('--json', action='store_true', help='Output as JSON') - - args = parser.parse_args() - - # Set default output to _bmad-output/triage-reports if not specified - if not args.output and not args.json: - os.makedirs('_bmad-output/triage-reports', exist_ok=True) - output_date = datetime.now().strftime('%Y-%m-%d') - args.output = f'_bmad-output/triage-reports/triage-{output_date}.md' - - triage = IssueTriage(repo=args.repo, state=args.state) - triage.fetch_issues() - triage.analyze_issues(focus_filter=args.focus) - - if args.json: - # Output as JSON for further processing - data = [ - { - 'number': i.number, - 'title': i.title, - 'state': i.state, - 'category': i.category.value, - 'priority': i.priority.value, - 'triage_status': i.triage_status.value, - 'age_days': i.age_days, - 'url': i.url, - 'labels': i.labels, - 'author': i.author - } - for i in triage.issues - ] - output = json.dumps(data, indent=2) - else: - output = triage.generate_report() - - if args.output: - with open(args.output, 'w') as f: - f.write(output) - # Get relative path for nicer output - rel_path = args.output.replace('./', '') - print(f"✅ Report saved to: {rel_path}") - else: - print(output) - - -if __name__ == '__main__': - main() From 6af79165d8bba47b8269543bfc4276c00b31e92a Mon Sep 17 00:00:00 2001 From: Brian Madison Date: Sat, 31 Jan 2026 21:22:36 -0600 Subject: [PATCH 8/9] fix agent scan --- tools/validate-agent-schema.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/validate-agent-schema.js b/tools/validate-agent-schema.js index b351f8ec..409bd551 100644 --- a/tools/validate-agent-schema.js +++ b/tools/validate-agent-schema.js @@ -28,7 +28,7 @@ async function main(customProjectRoot) { const project_root = customProjectRoot || path.join(__dirname, '..'); // Find all agent files - const agentFiles = await 
glob('src/{core,modules/*}/agents/*.agent.yaml', { + const agentFiles = await glob('src/{core,bmm/*}/agents/**/*.agent.yaml', { cwd: project_root, absolute: true, }); From 1665ad68df034ea07098743b36b1d796eeb00ce5 Mon Sep 17 00:00:00 2001 From: Brian Madison Date: Sat, 31 Jan 2026 21:58:37 -0600 Subject: [PATCH 9/9] fix agent scan and help csv files --- src/bmm/agents/quick-flow-solo-dev.agent.yaml | 4 +- src/bmm/agents/quinn.agent.yaml | 2 +- .../agents/tech-writer/tech-writer.agent.yaml | 4 +- src/bmm/module-help.csv | 37 +++++++++++-------- src/core/module-help.csv | 16 ++++---- tools/validate-agent-schema.js | 2 +- 6 files changed, 35 insertions(+), 30 deletions(-) diff --git a/src/bmm/agents/quick-flow-solo-dev.agent.yaml b/src/bmm/agents/quick-flow-solo-dev.agent.yaml index c488db6d..6d207a39 100644 --- a/src/bmm/agents/quick-flow-solo-dev.agent.yaml +++ b/src/bmm/agents/quick-flow-solo-dev.agent.yaml @@ -18,9 +18,9 @@ agent: - Specs are for building, not bureaucracy. Code that ships is better than perfect code that doesn't. menu: - - trigger: TS or fuzzy match on tech-spec + - trigger: QS or fuzzy match on quick-spec exec: "{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md" - description: "[TS] Tech Spec: Architect a quick but complete technical spec with implementation-ready stories/specs" + description: "[QS] Quick Spec: Architect a quick but complete technical spec with implementation-ready stories/specs" - trigger: QD or fuzzy match on quick-dev workflow: "{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md" diff --git a/src/bmm/agents/quinn.agent.yaml b/src/bmm/agents/quinn.agent.yaml index eafea2d0..73ed366c 100644 --- a/src/bmm/agents/quinn.agent.yaml +++ b/src/bmm/agents/quinn.agent.yaml @@ -27,7 +27,7 @@ agent: - Focus on realistic user scenarios menu: - - trigger: QA + - trigger: qa workflow: "{project-root}/_bmad/bmm/workflows/qa/automate/workflow.yaml" description: "[QA] Automate - Generate tests for existing features (simplified)" diff --git a/src/bmm/agents/tech-writer/tech-writer.agent.yaml b/src/bmm/agents/tech-writer/tech-writer.agent.yaml index 43f376c1..a742a6c9 100644 --- a/src/bmm/agents/tech-writer/tech-writer.agent.yaml +++ b/src/bmm/agents/tech-writer/tech-writer.agent.yaml @@ -28,9 +28,9 @@ agent: action: "Engage in multi-turn conversation until you fully understand the ask, use subprocess if available for any web search, research or document review required to extract and return only relevant info to parent context. Author final document following all `_bmad/_memory/tech-writer-sidecar/documentation-standards.md`. After draft, use a subprocess to review and revise for quality of content and ensure standards are still met." description: "[WD] Write Document: Describe in detail what you want, and the agent will follow the documentation best practices defined in agent memory." - - trigger: WD or fuzzy match on write-document + - trigger: US or fuzzy match on update-standards action: "Update `_bmad/_memory/tech-writer-sidecar/documentation-standards.md` adding user preferences to User Specified CRITICAL Rules section. Remove any contradictory rules as needed. Share with user the updates made." - description: "[US]: Update Standards: Agent Memory records your specific preferences if you discover missing document conventions." + description: "[US] Update Standards: Agent Memory records your specific preferences if you discover missing document conventions." 
- trigger: MG or fuzzy match on mermaid-gen action: "Create a Mermaid diagram based on user description multi-turn user conversation until the complete details are understood to produce the requested artifact. If not specified, suggest diagram types based on ask. Strictly follow Mermaid syntax and CommonMark fenced code block standards." diff --git a/src/bmm/module-help.csv b/src/bmm/module-help.csv index 9a235d44..af7c52c2 100644 --- a/src/bmm/module-help.csv +++ b/src/bmm/module-help.csv @@ -1,23 +1,28 @@ module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs, -bmm,anytime,Document Project,DP,10,_bmad/bmm/workflows/document-project/workflow.yaml,bmad-bmm-document-project,false,analyst,Create Mode,"Analyze an existing project to produce useful documentation",project-knowledge,*, -bmm,anytime,Generate Project Context,GPC,15,_bmad/bmm/workflows/generate-project-context/workflow.md,bmad-bmm-generate-project-context,false,analyst,Create Mode,"Scan existing codebase to generate a lean LLM-optimized project-context.md containing critical implementation rules patterns and conventions for AI agents. Essential for brownfield projects and quick-flow.",output_folder,"project context", -bmm,anytime,Quick Spec,TS,20,_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md,bmad-bmm-quick-spec,false,quick-flow-solo-dev,Create Mode,"Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method. Quick one-off tasks small changes simple apps utilities without extensive planning",planning_artifacts,"tech spec", -bmm,anytime,Quick Dev,QD,30,_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md,bmad-bmm-quick-dev,false,quick-flow-solo-dev,Create Mode,"Quick one-off tasks small changes simple apps utilities without extensive planning - Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method, unless the user is already working through the implementation phase and just requests a 1 off things not already in the plan",,, -bmm,anytime,Correct Course,CC,40,_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml,bmad-bmm-correct-course,false,sm,Create Mode,"Anytime: Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories",planning_artifacts,"change proposal", +bmm,anytime,Document Project,DP,,_bmad/bmm/workflows/document-project/workflow.yaml,bmad-bmm-document-project,false,analyst,Create Mode,"Analyze an existing project to produce useful documentation",project-knowledge,*, +bmm,anytime,Generate Project Context,GPC,,_bmad/bmm/workflows/generate-project-context/workflow.md,bmad-bmm-generate-project-context,false,analyst,Create Mode,"Scan existing codebase to generate a lean LLM-optimized project-context.md containing critical implementation rules patterns and conventions for AI agents. Essential for brownfield projects and quick-flow.",output_folder,"project context", +bmm,anytime,Quick Spec,QS,,_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md,bmad-bmm-quick-spec,false,quick-flow-solo-dev,Create Mode,"Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method. 
Quick one-off tasks small changes simple apps brownfield additions to well established patterns utilities without extensive planning",planning_artifacts,"tech spec", +bmm,anytime,Quick Dev,QD,,_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md,bmad-bmm-quick-dev,false,quick-flow-solo-dev,Create Mode,"Quick one-off tasks small changes simple apps utilities without extensive planning - Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method, unless the user is already working through the implementation phase and just requests a 1 off things not already in the plan",,, +bmm,anytime,Correct Course,CC,,_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml,bmad-bmm-correct-course,false,sm,Create Mode,"Anytime: Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories",planning_artifacts,"change proposal", +bmm,anytime,Create Dataflow,CDF,,_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml,bmad-bmm-create-excalidraw-dataflow,false,ux-designer,Create Mode,"Create data flow diagrams (DFD) in Excalidraw format - can be called standalone or during any workflow to add visual documentation",planning_artifacts,"dataflow diagram", +bmm,anytime,Create Diagram,CED,,_bmad/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml,bmad-bmm-create-excalidraw-diagram,false,ux-designer,Create Mode,"Create system architecture diagrams ERDs UML diagrams or general technical diagrams in Excalidraw format - use anytime or call from architecture workflow to add visual documentation",planning_artifacts,"diagram", +bmm,anytime,Create Flowchart,CFC,,_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml,bmad-bmm-create-excalidraw-flowchart,false,ux-designer,Create Mode,"Create a flowchart visualization in Excalidraw format for processes pipelines or logic flows - use anytime or during architecture to add process documentation",planning_artifacts,"flowchart", +bmm,anytime,Create Wireframe,CEW,,_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml,bmad-bmm-create-excalidraw-wireframe,false,ux-designer,Create Mode,"Create website or app wireframes in Excalidraw format - use anytime standalone or call from UX workflow to add UI mockups",planning_artifacts,"wireframe", +bmm,anytime,Write Document,WD,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-write-document,false,tech-writer,,"Describe in detail what you want, and the agent will follow the documentation best practices defined in agent memory. Multi-turn conversation with subprocess for research/review.",project-knowledge,"document", +bmm,anytime,Update Standards,US,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-update-standards,false,tech-writer,,"Update agent memory documentation-standards.md with your specific preferences if you discover missing document conventions.",_bmad/_memory/tech-writer-sidecar,"standards", +bmm,anytime,Mermaid Generate,MG,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-mermaid-generate,false,tech-writer,,"Create a Mermaid diagram based on user description. 
Will suggest diagram types if not specified.",planning_artifacts,"mermaid diagram", +bmm,anytime,Validate Document,VD,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-validate-document,false,tech-writer,,"Review the specified document against documentation standards and best practices. Returns specific actionable improvement suggestions organized by priority.",planning_artifacts,"validation report", +bmm,anytime,Explain Concept,EC,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-explain-concept,false,tech-writer,,"Create clear technical explanations with examples and diagrams for complex concepts. Breaks down into digestible sections using task-oriented approach.",project_knowledge,"explanation", bmm,1-analysis,Brainstorm Project,BP,10,_bmad/core/workflows/brainstorming/workflow.md,bmad-brainstorming,false,analyst,data=_bmad/bmm/data/project-context-template.md,"Expert Guided Facilitation through a single or multiple techniques",planning_artifacts,"brainstorming session", -bmm,1-analysis,Market Research,MR,20,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad-bmm-research,false,analyst,Create Mode research_type=market,"Market analysis competitive landscape customer needs and trends","planning_artifacts|project-knowledge","research documents" -bmm,1-analysis,Domain Research,DR,21,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad-bmm-research,false,analyst,Create Mode research_type=domain,"Industry domain deep dive subject matter expertise and terminology","planning_artifacts|project-knowledge","research documents" -bmm,1-analysis,Technical Research,TR,22,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad-bmm-research,false,analyst,Create Mode research_type=technical,"Technical feasibility architecture options and implementation approaches","planning_artifacts|project-knowledge","research documents" +bmm,1-analysis,Market Research,MR,20,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad-bmm-research,false,analyst,Create Mode research_type=market,"Market analysis competitive landscape customer needs and trends","planning_artifacts|project-knowledge","research documents", +bmm,1-analysis,Domain Research,DR,21,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad-bmm-research,false,analyst,Create Mode research_type=domain,"Industry domain deep dive subject matter expertise and terminology","planning_artifacts|project_knowledge","research documents", +bmm,1-analysis,Technical Research,TR,22,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad-bmm-research,false,analyst,Create Mode research_type=technical,"Technical feasibility architecture options and implementation approaches","planning_artifacts|project_knowledge","research documents", bmm,1-analysis,Create Brief,CB,30,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad-bmm-create-brief,false,analyst,Create Mode,"A guided experience to nail down your product idea",planning_artifacts,"product brief", bmm,1-analysis,Validate Brief,VB,40,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad-bmm-validate-brief,false,analyst,Validate Mode,"Validates product brief completeness",planning_artifacts,"brief validation report", -bmm,2-planning,Create PRD,CP,10,_bmad/bmm/workflows/2-plan-workflows/create-prd/workflow.md,bmad-bmm-prd,true,pm,Create Mode,"Expert led facilitation to produce your Product Requirements Document",planning_artifacts,prd, -bmm,2-planning,Validate 
PRD,VP,20,_bmad/bmm/workflows/2-plan-workflows/create-prd/workflow.md,bmad-bmm-prd,false,pm,Validate Mode,"Validate PRD is comprehensive lean well organized and cohesive",planning_artifacts,"prd validation report", +bmm,2-planning,Create PRD,CP,10,_bmad/bmm/workflows/2-plan-workflows/create-prd/workflow.md,bmad-bmm-create-prd,true,pm,Create Mode,"Expert led facilitation to produce your Product Requirements Document",planning_artifacts,prd, +bmm,2-planning,Validate PRD,VP,20,_bmad/bmm/workflows/2-plan-workflows/create-prd/workflow.md,bmad-bmm-validate-prd,false,pm,Validate Mode,"Validate PRD is comprehensive lean well organized and cohesive",planning_artifacts,"prd validation report", bmm,2-planning,Create UX,CU,30,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad-bmm-create-ux-design,false,ux-designer,Create Mode,"Guidance through realizing the plan for your UX, strongly recommended if a UI is a primary piece of the proposed project",planning_artifacts,"ux design", bmm,2-planning,Validate UX,VU,40,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad-bmm-create-ux-design,false,ux-designer,Validate Mode,"Validates UX design deliverables",planning_artifacts,"ux validation report", -,anytime,Create Dataflow,CDF,50,_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml,bmad-bmm-create-excalidraw-dataflow,false,ux-designer,Create Mode,"Create data flow diagrams (DFD) in Excalidraw format - can be called standalone or during any workflow to add visual documentation",planning_artifacts,"dataflow diagram", -,anytime,Create Diagram,CED,51,_bmad/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml,bmad-bmm-create-excalidraw-diagram,false,ux-designer,Create Mode,"Create system architecture diagrams ERDs UML diagrams or general technical diagrams in Excalidraw format - use anytime or call from architecture workflow to add visual documentation",planning_artifacts,"diagram", -,anytime,Create Flowchart,CFC,52,_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml,bmad-bmm-create-excalidraw-flowchart,false,ux-designer,Create Mode,"Create a flowchart visualization in Excalidraw format for processes pipelines or logic flows - use anytime or during architecture to add process documentation",planning_artifacts,"flowchart", -,anytime,Create Wireframe,CEW,53,_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml,bmad-bmm-create-excalidraw-wireframe,false,ux-designer,Create Mode,"Create website or app wireframes in Excalidraw format - use anytime standalone or call from UX workflow to add UI mockups",planning_artifacts,"wireframe", bmm,3-solutioning,Create Architecture,CA,10,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad-bmm-create-architecture,true,architect,Create Mode,"Guided Workflow to document technical decisions",planning_artifacts,architecture, bmm,3-solutioning,Validate Architecture,VA,20,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad-bmm-create-architecture,false,architect,Validate Mode,"Validates architecture completeness",planning_artifacts,"architecture validation report", bmm,3-solutioning,Create Epics and Stories,CE,30,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad-bmm-create-epics-and-stories,true,pm,Create Mode,"Create the Epics and Stories Listing",planning_artifacts,"epics and stories", @@ -25,9 +30,9 @@ bmm,3-solutioning,Validate Epics and Stories,VE,40,_bmad/bmm/workflows/3-solutio bmm,3-solutioning,Check Implementation 
Readiness,IR,70,_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md,bmad-bmm-check-implementation-readiness,true,architect,Validate Mode,"Ensure PRD UX Architecture and Epics Stories are aligned",planning_artifacts,"readiness report", bmm,4-implementation,Sprint Planning,SP,10,_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml,bmad-bmm-sprint-planning,true,sm,Create Mode,"Generate sprint plan for development tasks - this kicks off the implementation phase by producing a plan the implementation agents will follow in sequence for every story in the plan.",implementation_artifacts,"sprint status", bmm,4-implementation,Sprint Status,SS,20,_bmad/bmm/workflows/4-implementation/sprint-status/workflow.yaml,bmad-bmm-sprint-status,false,sm,Create Mode,"Anytime: Summarize sprint status and route to next workflow",,, -bmm,4-implementation,Create Story,CS,30,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad-bmm-create-story,true,sm,Create Mode,"Story cycle start: Prepare first found story in the sprint plan that is next, or if the command is run with a specific epic and story designation with context. Once complete, then VS then DS then CR then back to DS if needed or next CS or ER",implementation_artifacts,story, bmm,4-implementation,Validate Story,VS,35,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad-bmm-create-story,false,sm,Validate Mode,"Validates story readiness and completeness before development work begins",implementation_artifacts,"story validation report", +bmm,4-implementation,Create Story,CS,30,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad-bmm-create-story,true,sm,Create Mode,"Story cycle start: Prepare first found story in the sprint plan that is next, or if the command is run with a specific epic and story designation with context. Once complete, then VS then DS then CR then back to DS if needed or next CS or ER",implementation_artifacts,story, bmm,4-implementation,Dev Story,DS,40,_bmad/bmm/workflows/4-implementation/dev-story/workflow.yaml,bmad-bmm-dev-story,true,dev,Create Mode,"Story cycle: Execute story implementation tasks and tests then CR then back to DS if fixes needed",,, bmm,4-implementation,Code Review,CR,50,_bmad/bmm/workflows/4-implementation/code-review/workflow.yaml,bmad-bmm-code-review,false,dev,Create Mode,"Story cycle: If issues back to DS if approved then next CS or ER if epic complete",,, +bmm,4-implementation,QA Automation Test,QA,45,_bmad/bmm/workflows/qa/automate/workflow.yaml,bmad-bmm-qa-automate,false,quinn,Create Mode,"Generate automated API and E2E tests for implemented code using the project's existing test framework (detects existing well known in use test frameworks). Use after implementation to add test coverage. NOT for code review or story validation - use CR for that.",implementation_artifacts,"test suite", bmm,4-implementation,Retrospective,ER,60,_bmad/bmm/workflows/4-implementation/retrospective/workflow.yaml,bmad-bmm-retrospective,false,sm,Create Mode,"Optional at epic end: Review completed work lessons learned and next epic or if major issues consider CC",implementation_artifacts,retrospective, -bmm,4-implementation,Automate,QA,45,_bmad/bmm/workflows/qa/automate/workflow.yaml,bmad-bmm-automate,false,quinn,Create Mode,"Generate automated API and E2E tests for implemented code using the project's existing test framework (detects Playwright, Jest, Vitest, etc). Use after implementation to add test coverage. 
NOT for code review or story validation - use CR for that.",implementation_artifacts,"test suite", diff --git a/src/core/module-help.csv b/src/core/module-help.csv index 206f1cd3..599cef33 100644 --- a/src/core/module-help.csv +++ b/src/core/module-help.csv @@ -1,9 +1,9 @@ module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs -core,,Brainstorming,BS,20,_bmad/core/workflows/brainstorming/workflow.md,bmad-brainstorming,false,analyst,,Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods,{output_folder}/brainstorming/brainstorming-session-{{date}}.md,, -core,,Party Mode,PM,30,_bmad/core/workflows/party-mode/workflow.md,bmad-party-mode,false,party-mode facilitator,,Orchestrates group discussions between all installed BMAD agents enabling natural multi-agent conversations,, -core,,bmad-help,BH,40,_bmad/core/tasks/help.md,bmad-help,false,,,Get unstuck by showing what workflow steps come next or answering questions about what to do in the BMad Method,, -core,,Index Docs,ID,50,_bmad/core/tasks/index-docs.xml,bmad-index-docs,false,,,Generates or updates an index.md of all documents in the specified directory,, -core,,Shard Document,SD,70,_bmad/core/tasks/shard-doc.xml,bmad-shard-doc,false,,,Splits large markdown documents into smaller organized files based on level 2 sections,, -core,,Editorial Review - Prose,EP,80,_bmad/core/tasks/editorial-review-prose.xml,bmad-editorial-review-prose,false,,,Clinical copy-editor that reviews text for communication issues,,"three-column markdown table with suggested fixes", -core,,Editorial Review - Structure,ES,90,_bmad/core/tasks/editorial-review-structure.xml,bmad-editorial-review-structure,false,,,Structural editor that proposes cuts reorganization and simplification while preserving comprehension,, -core,,Adversarial Review (General),AR,100,_bmad/core/tasks/review-adversarial-general.xml,bmad-review-adversarial-general,false,,,Cynically review content and produce findings,, +core,anytime,Brainstorming,BS,,_bmad/core/workflows/brainstorming/workflow.md,bmad-brainstorming,false,analyst,,"Generate diverse ideas through interactive techniques. Use early in ideation phase or when stuck generating ideas.",{output_folder}/brainstorming/brainstorming-session-{{date}}.md,, +core,anytime,Party Mode,PM,,_bmad/core/workflows/party-mode/workflow.md,bmad-party-mode,false,party-mode facilitator,,"Orchestrate multi-agent discussions. Use when you need multiple agent perspectives or want agents to collaborate.",, +core,anytime,bmad-help,BH,,_bmad/core/tasks/help.md,bmad-help,false,,,"Get unstuck by showing what workflow steps come next or answering BMad Method questions.",, +core,anytime,Index Docs,ID,,_bmad/core/tasks/index-docs.xml,bmad-index-docs,false,,,"Create lightweight index for quick LLM scanning. Use when LLM needs to understand available docs without loading everything.",, +core,anytime,Shard Document,SD,,_bmad/core/tasks/shard-doc.xml,bmad-shard-doc,false,,,"Split large documents into smaller files by sections. Use when doc becomes too large (>500 lines) to manage effectively.",, +core,anytime,Editorial Review - Prose,EP,,_bmad/core/tasks/editorial-review-prose.xml,bmad-editorial-review-prose,false,,,"Review prose for clarity, tone, and communication issues. 
Use after drafting to polish written content.",report located with target document,"three-column markdown table with suggested fixes", +core,anytime,Editorial Review - Structure,ES,,_bmad/core/tasks/editorial-review-structure.xml,bmad-editorial-review-structure,false,,,"Propose cuts, reorganization, and simplification while preserving comprehension. Use when doc produced from multiple subprocesses or needs structural improvement.",report located with target document, +core,anytime,Adversarial Review (General),AR,,_bmad/core/tasks/review-adversarial-general.xml,bmad-review-adversarial-general,false,,,"Review content critically to find issues and weaknesses. Use for quality assurance or before finalizing deliverables. Code Review in other modules run this automatically, but its useful also for document reviews",, diff --git a/tools/validate-agent-schema.js b/tools/validate-agent-schema.js index 409bd551..9c3595fe 100644 --- a/tools/validate-agent-schema.js +++ b/tools/validate-agent-schema.js @@ -28,7 +28,7 @@ async function main(customProjectRoot) { const project_root = customProjectRoot || path.join(__dirname, '..'); // Find all agent files - const agentFiles = await glob('src/{core,bmm/*}/agents/**/*.agent.yaml', { + const agentFiles = await glob('src/{core,bmm}/agents/**/*.agent.yaml', { cwd: project_root, absolute: true, });
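Note on the final hunk above: the agent-schema scan glob is narrowed from `src/{core,bmm/*}/agents/**/*.agent.yaml` to `src/{core,bmm}/agents/**/*.agent.yaml`, since bmm agents live directly under `src/bmm/agents/` (including nested folders such as `src/bmm/agents/tech-writer/`). A minimal sketch of the corrected lookup, assuming the npm `glob` package's promise API already used by `tools/validate-agent-schema.js`; the usage call at the bottom is hypothetical:

```js
// Sketch only - mirrors the pattern applied in tools/validate-agent-schema.js.
//
// Previous pattern: 'src/{core,bmm/*}/agents/**/*.agent.yaml'
//   expands to src/core/agents/... and src/bmm/<subdir>/agents/...,
//   so agent files directly under src/bmm/agents/ are never matched.
// Corrected pattern: 'src/{core,bmm}/agents/**/*.agent.yaml'
//   expands to src/core/agents/... and src/bmm/agents/..., and '**'
//   also picks up nested agent folders like src/bmm/agents/tech-writer/.
const path = require('node:path');
const { glob } = require('glob');

async function findAgentFiles(projectRoot) {
  return glob('src/{core,bmm}/agents/**/*.agent.yaml', {
    cwd: projectRoot,
    absolute: true,
  });
}

// Hypothetical usage from the tools/ directory:
// findAgentFiles(path.join(__dirname, '..')).then((files) => console.log(files));
```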