feat: add event-driven SaaS operations modules

Introduces five new modules for production SaaS operations, built on an
event-driven architecture that keeps components loosely coupled:

## Event Infrastructure (src/core/events/)
- event-schema.yaml: Canonical event type definitions (30+ event types)
- file-queue-transport.xml: File-based queue for local development
- publish-event.xml: Event publishing task
- Updated workflow.xml with <publish> tag support

## New Modules

### bmm-metrics (KPI/SLA Tracking)
- Metrics Analyst agent with quality gate guardian persona
- Workflows: define-kpis, define-slas, track-metrics, quality-gate-check, metrics-review
- DORA metrics support, velocity tracking, SLA breach detection
- Events: metrics.quality.pass/fail, metrics.kpi.updated, metrics.sla.breach

### bmm-release (Release Management)
- Release Manager agent for deployment coordination
- Workflows: release-planning, release-notes, rollback-planning
- Integrates with quality gates before release approval
- Events: release.candidate.created, release.deployed, release.rollback.*

### bmm-feedback (Customer Feedback Loop)
- Feedback Analyst agent for customer voice analysis
- Workflows: collect-feedback, analyze-feedback, feedback-report
- Sentiment analysis, theme extraction, priority suggestions
- Events: feedback.received, feedback.insight.generated, feedback.priority.suggested

### bmm-priority (Backlog Prioritization)
- Priority Manager agent with WSJF/RICE/MoSCoW frameworks
- Workflows: prioritize-backlog, priority-review
- Integrates feedback signals into priority decisions
- Events: priority.updated, priority.queue.reordered

### bmm-roadmap (Product Roadmap)
- Roadmap Planner agent for capacity-aware planning
- Workflows: roadmap-planning, capacity-planning
- Velocity-based timeline projections
- Events: roadmap.updated, roadmap.milestone.completed, roadmap.at.risk

## Architecture Highlights
- Modules communicate only through events (no direct dependencies)
- File-based queue for local dev, designed for SMTP transport in production
- Each module has: manifest, config, agent, workflows, event handlers, state
- Backward compatible: existing BMAD workflows are unaffected

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

src/core/events/event-schema.yaml
@@ -0,0 +1,858 @@
# BMAD Event Schema Definition
# Version: 1.0.0
#
# This schema defines the standard format for all events in the BMAD event-driven architecture.
# Events enable decoupled, asynchronous communication between modules.
schema_version: "1.0.0"
event_envelope:
description: "Standard envelope for all BMAD events"
required_fields:
- id
- type
- source
- timestamp
- payload
fields:
id:
type: string
format: uuid
description: "Unique identifier for this event instance"
example: "550e8400-e29b-41d4-a716-446655440000"
type:
type: string
format: "{domain}.{action}"
description: "Event type following domain.action pattern"
examples:
- "story.created"
- "story.done"
- "metrics.quality.pass"
- "release.deployed"
source:
type: string
format: "{module}/{workflow}"
description: "Origin of the event"
examples:
- "bmm/create-story"
- "bmm-metrics/quality-gate-check"
- "bmm-release/deploy-release"
timestamp:
type: string
format: ISO8601
description: "When the event was created"
example: "2024-01-15T10:30:00.000Z"
correlation_id:
type: string
format: uuid
required: false
description: "Links related events together (e.g., all events from one story lifecycle)"
example: "660e8400-e29b-41d4-a716-446655440001"
causation_id:
type: string
format: uuid
required: false
description: "ID of the event that caused this event"
example: "550e8400-e29b-41d4-a716-446655440000"
sequence:
type: integer
required: false
description: "Sequence number for ordering within a correlation group"
example: 3
version:
type: string
required: false
default: "1.0"
description: "Schema version for this event type"
payload:
type: object
description: "Event-specific data (varies by event type)"
metadata:
type: object
required: false
description: "Additional context about the event"
fields:
user:
type: string
description: "User who triggered the event"
session_id:
type: string
description: "Session identifier"
agent:
type: string
description: "BMAD agent that generated the event"
workflow:
type: string
description: "Workflow that generated the event"
environment:
type: string
enum: ["development", "staging", "production"]
description: "Environment where event was generated"
# Event Type Registry
# Defines all known event types with their payload schemas
event_types:
# ============================================
# Core Story Lifecycle Events (from existing BMM)
# ============================================
story.created:
domain: story
action: created
description: "A new story has been created from an epic"
source_workflow: "create-story"
payload:
story_id:
type: string
required: true
epic_id:
type: string
required: true
title:
type: string
required: true
acceptance_criteria:
type: array
items: string
priority:
type: integer
required: false
estimated_effort:
type: string
required: false
story.ready:
domain: story
action: ready
description: "A story is ready for development (context assembled)"
source_workflow: "story-ready"
payload:
story_id:
type: string
required: true
context_files:
type: array
items: string
description: "Files included in story context"
ready_at:
type: string
format: ISO8601
story.started:
domain: story
action: started
description: "Development has started on a story"
source_workflow: "dev-story"
payload:
story_id:
type: string
required: true
developer:
type: string
required: false
story.reviewed:
domain: story
action: reviewed
description: "A story has completed code review"
source_workflow: "code-review"
payload:
story_id:
type: string
required: true
review_status:
type: string
enum: ["approved", "changes_requested", "rejected"]
reviewer_notes:
type: string
required: false
issues_found:
type: array
items: string
story.done:
domain: story
action: done
description: "A story is complete (all AC met, tests pass)"
source_workflow: "story-done"
payload:
story_id:
type: string
required: true
epic_id:
type: string
required: true
completion_time:
type: string
format: ISO8601
tests_passed:
type: boolean
files_changed:
type: array
items: string
# ============================================
# Sprint Lifecycle Events
# ============================================
sprint.started:
domain: sprint
action: started
description: "A new sprint has begun"
source_workflow: "sprint-planning"
payload:
sprint_id:
type: string
required: true
sprint_number:
type: integer
required: true
start_date:
type: string
format: ISO8601
planned_stories:
type: array
items: string
capacity:
type: integer
description: "Story points or count"
sprint.ended:
domain: sprint
action: ended
description: "A sprint has completed"
source_workflow: "retrospective"
payload:
sprint_id:
type: string
required: true
sprint_number:
type: integer
required: true
end_date:
type: string
format: ISO8601
completed_stories:
type: array
items: string
incomplete_stories:
type: array
items: string
velocity:
type: integer
description: "Actual points/count completed"
# ============================================
# Epic Lifecycle Events
# ============================================
epic.created:
domain: epic
action: created
description: "A new epic has been created"
source_workflow: "create-epics-and-stories"
payload:
epic_id:
type: string
required: true
title:
type: string
required: true
story_count:
type: integer
epic.completed:
domain: epic
action: completed
description: "All stories in an epic are done"
source_workflow: "retrospective"
payload:
epic_id:
type: string
required: true
completion_date:
type: string
format: ISO8601
total_stories:
type: integer
lessons_learned:
type: array
items: string
# ============================================
# Code Review Events
# ============================================
code.reviewed:
domain: code
action: reviewed
description: "Code review completed for a change"
source_workflow: "code-review"
payload:
story_id:
type: string
required: true
review_result:
type: string
enum: ["approved", "needs_changes", "rejected"]
quality_score:
type: number
minimum: 0
maximum: 100
coverage_percent:
type: number
required: false
issues:
type: array
items:
type: object
properties:
severity: string
message: string
file: string
line: integer
# ============================================
# Metrics Module Events (NEW)
# ============================================
metrics.kpi.defined:
domain: metrics
action: kpi.defined
description: "New KPIs have been defined"
source_workflow: "define-kpis"
module: bmm-metrics
payload:
kpi_set_id:
type: string
required: true
kpis:
type: array
items:
type: object
properties:
name: string
target: number
unit: string
frequency: string
metrics.kpi.updated:
domain: metrics
action: kpi.updated
description: "KPI values have been updated"
source_workflow: "track-metrics"
module: bmm-metrics
payload:
kpi_name:
type: string
required: true
previous_value:
type: number
current_value:
type: number
required: true
target:
type: number
status:
type: string
enum: ["on_track", "at_risk", "off_track"]
metrics.sla.breach:
domain: metrics
action: sla.breach
description: "An SLA threshold has been exceeded"
source_workflow: "track-metrics"
module: bmm-metrics
payload:
sla_name:
type: string
required: true
threshold:
type: number
required: true
actual_value:
type: number
required: true
breach_time:
type: string
format: ISO8601
severity:
type: string
enum: ["warning", "critical"]
metrics.quality.pass:
domain: metrics
action: quality.pass
description: "All quality gates have passed"
source_workflow: "quality-gate-check"
module: bmm-metrics
payload:
story_id:
type: string
required: true
gates_checked:
type: array
items: string
overall_score:
type: number
timestamp:
type: string
format: ISO8601
metrics.quality.fail:
domain: metrics
action: quality.fail
description: "One or more quality gates failed"
source_workflow: "quality-gate-check"
module: bmm-metrics
payload:
story_id:
type: string
required: true
failed_gates:
type: array
items:
type: object
properties:
gate_name: string
expected: string
actual: string
reason: string
blocking:
type: boolean
description: "Whether this blocks release"
# ============================================
# Release Module Events (NEW)
# ============================================
release.planned:
domain: release
action: planned
description: "A release has been planned"
source_workflow: "release-planning"
module: bmm-release
payload:
release_id:
type: string
required: true
version:
type: string
required: true
planned_date:
type: string
format: ISO8601
included_stories:
type: array
items: string
release_type:
type: string
enum: ["major", "minor", "patch", "hotfix"]
release.qa.approved:
domain: release
action: qa.approved
description: "QA has signed off on a release"
source_workflow: "qa-signoff"
module: bmm-release
payload:
release_id:
type: string
required: true
approved_by:
type: string
approval_time:
type: string
format: ISO8601
test_summary:
type: object
properties:
passed: integer
failed: integer
skipped: integer
release.deployed:
domain: release
action: deployed
description: "A release has been deployed to an environment"
source_workflow: "deploy-release"
module: bmm-release
payload:
release_id:
type: string
required: true
version:
type: string
required: true
environment:
type: string
enum: ["staging", "production"]
deployment_time:
type: string
format: ISO8601
deployment_strategy:
type: string
enum: ["full", "canary", "blue_green", "rolling"]
rollout_percentage:
type: number
required: false
release.rollback:
domain: release
action: rollback
description: "A release has been rolled back"
source_workflow: "rollback"
module: bmm-release
payload:
release_id:
type: string
required: true
rollback_to_version:
type: string
required: true
reason:
type: string
required: true
initiated_by:
type: string
rollback_time:
type: string
format: ISO8601
release.evaluated:
domain: release
action: evaluated
description: "Post-release evaluation completed"
source_workflow: "post-release-evaluation"
module: bmm-release
payload:
release_id:
type: string
required: true
evaluation_date:
type: string
format: ISO8601
success_metrics:
type: object
issues_found:
type: array
items: string
rollback_required:
type: boolean
# ============================================
# Feedback Module Events (NEW)
# ============================================
feedback.interview.synthesized:
domain: feedback
action: interview.synthesized
description: "Customer interviews have been synthesized"
source_workflow: "synthesize-interviews"
module: bmm-feedback
payload:
synthesis_id:
type: string
required: true
interview_count:
type: integer
required: true
themes:
type: array
items:
type: object
properties:
name: string
frequency: integer
sentiment: string
key_insights:
type: array
items: string
feedback.hypothesis.created:
domain: feedback
action: hypothesis.created
description: "A product hypothesis has been created"
source_workflow: "generate-hypotheses"
module: bmm-feedback
payload:
hypothesis_id:
type: string
required: true
statement:
type: string
required: true
metric:
type: string
description: "How success will be measured"
target:
type: string
description: "Target improvement"
confidence:
type: string
enum: ["low", "medium", "high"]
feedback.experiment.designed:
domain: feedback
action: experiment.designed
description: "An experiment has been designed"
source_workflow: "design-experiment"
module: bmm-feedback
payload:
experiment_id:
type: string
required: true
hypothesis_id:
type: string
required: true
type:
type: string
enum: ["a_b_test", "feature_flag", "survey", "prototype"]
duration_days:
type: integer
sample_size:
type: integer
required: false
feedback.experiment.completed:
domain: feedback
action: experiment.completed
description: "An experiment has concluded with results"
source_workflow: "analyze-telemetry"
module: bmm-feedback
payload:
experiment_id:
type: string
required: true
hypothesis_id:
type: string
required: true
result:
type: string
enum: ["validated", "invalidated", "inconclusive"]
metrics:
type: object
recommendation:
type: string
feedback.telemetry.analyzed:
domain: feedback
action: telemetry.analyzed
description: "Usage telemetry has been analyzed"
source_workflow: "analyze-telemetry"
module: bmm-feedback
payload:
analysis_id:
type: string
required: true
period:
type: object
properties:
start: string
end: string
key_findings:
type: array
items: string
anomalies:
type: array
items: string
# ============================================
# Priority Module Events (NEW)
# ============================================
priority.backlog.scored:
domain: priority
action: backlog.scored
description: "Backlog items have been scored"
source_workflow: "score-backlog"
module: bmm-priority
payload:
scoring_id:
type: string
required: true
framework:
type: string
enum: ["rice", "wsjf", "moscow", "custom"]
items_scored:
type: integer
required: true
scores:
type: array
items:
type: object
properties:
item_id: string
score: number
components: object
priority.backlog.reordered:
domain: priority
action: backlog.reordered
description: "Backlog has been reprioritized"
source_workflow: "prioritize-stories"
module: bmm-priority
payload:
reorder_id:
type: string
required: true
previous_order:
type: array
items: string
new_order:
type: array
items: string
reason:
type: string
priority.matrix.generated:
domain: priority
action: matrix.generated
description: "Priority matrix has been generated"
source_workflow: "value-risk-matrix"
module: bmm-priority
payload:
matrix_id:
type: string
required: true
axes:
type: object
properties:
x: string
y: string
quadrants:
type: object
description: "Items grouped by quadrant"
# ============================================
# Roadmap Module Events (NEW)
# ============================================
roadmap.initiative.created:
domain: roadmap
action: initiative.created
description: "A strategic initiative has been created"
source_workflow: "create-initiative"
module: bmm-roadmap
payload:
initiative_id:
type: string
required: true
title:
type: string
required: true
description:
type: string
target_quarter:
type: string
example: "Q2 2024"
linked_okrs:
type: array
items: string
epic_ids:
type: array
items: string
roadmap.quarter.planned:
domain: roadmap
action: quarter.planned
description: "Quarterly planning completed"
source_workflow: "quarterly-planning"
module: bmm-roadmap
payload:
quarter:
type: string
required: true
example: "Q2 2024"
initiatives:
type: array
items: string
capacity_allocated:
type: integer
themes:
type: array
items: string
roadmap.okr.aligned:
domain: roadmap
action: okr.aligned
description: "OKRs have been aligned with initiatives"
source_workflow: "okr-alignment"
module: bmm-roadmap
payload:
alignment_id:
type: string
required: true
okrs:
type: array
items:
type: object
properties:
objective: string
key_results: array
initiatives: array
roadmap.initiative.completed:
domain: roadmap
action: initiative.completed
description: "An initiative has been completed"
source_workflow: "roadmap-review"
module: bmm-roadmap
payload:
initiative_id:
type: string
required: true
completion_date:
type: string
format: ISO8601
outcome:
type: string
enum: ["successful", "partial", "cancelled"]
metrics_achieved:
type: object
# Transport Configuration
transport:
default: file_queue
options:
file_queue:
description: "File-based queue for local/development"
config:
queue_dir: "{project-root}/.bmad/_events"
poll_interval_ms: 1000
retention_days: 7
smtp:
description: "SMTP-based transport for production/team"
config:
host: "${BMAD_SMTP_HOST:localhost}"
port: "${BMAD_SMTP_PORT:25}"
from: "bmad-events@local"
sqlite:
description: "SQLite-based queue for reliable local"
config:
db_path: "{project-root}/.bmad/_events/queue.db"
# Delivery Guarantees
delivery:
mode: at_least_once
retry:
max_attempts: 3
backoff_ms: [1000, 5000, 15000]
dead_letter:
enabled: true
path: "{project-root}/.bmad/_events/dead-letter"

src/core/events/publish-event.xml
@@ -0,0 +1,186 @@
<task id="{bmad_folder}/core/events/publish-event.xml" name="Publish Event">
<objective>Publish an event to the BMAD event bus for consumption by subscribed modules</objective>
<description>
This task is invoked by workflows to emit events when significant actions occur.
It creates a properly formatted event envelope and routes it to all subscribed modules.
Supports both file-based queue (development) and SMTP (production) transports.
</description>
<usage>
<example title="Inline in workflow instructions">
<publish event="story.done">
<payload>
<story_id>{{story_id}}</story_id>
<epic_id>{{epic_id}}</epic_id>
<completion_time>{{timestamp}}</completion_time>
<tests_passed>true</tests_passed>
</payload>
</publish>
</example>
<example title="Via invoke-task">
<invoke-task path="{bmad_folder}/core/events/publish-event.xml">
<param name="event_type">story.done</param>
<param name="payload">
story_id: "{{story_id}}"
epic_id: "{{epic_id}}"
</param>
</invoke-task>
</example>
</usage>
<parameters>
<param name="event_type" type="string" required="true"
description="Event type (e.g., story.done, metrics.quality.pass)" />
<param name="payload" type="object" required="true"
description="Event-specific data as YAML/JSON object" />
<param name="correlation_id" type="string" required="false"
description="Links related events; inherited from parent if in workflow context" />
<param name="metadata" type="object" required="false"
description="Additional context (user, agent, environment)" />
</parameters>
<flow>
<step n="1" title="Validate Event Type">
<action>Load event schema from {bmad_folder}/core/events/event-schema.yaml</action>
<check if="event_type not in event_types">
<action>Log WARNING: "Unknown event type: {event_type} - publishing anyway"</action>
</check>
<check if="event_type in event_types">
<action>Validate payload against schema for {event_type}</action>
<action if="validation fails">Log WARNING with missing/invalid fields</action>
</check>
</step>
<step n="2" title="Build Event Envelope">
<action>Generate event.id as UUID</action>
<action>Set event.type = {event_type}</action>
<action>Set event.source = "{current_module}/{current_workflow}"</action>
<action>Set event.timestamp = current ISO8601 timestamp</action>
<action>Set event.version = "1.0"</action>
<substep title="Handle Correlation">
<check if="correlation_id provided">
<action>Set event.correlation_id = {correlation_id}</action>
</check>
<check if="correlation_id NOT provided AND parent_event exists">
<action>Inherit event.correlation_id from parent_event</action>
<action>Set event.causation_id = parent_event.id</action>
</check>
<check if="correlation_id NOT provided AND no parent_event">
<action>Generate new correlation_id (this event starts a new chain)</action>
</check>
</substep>
<substep title="Add Metadata">
<action>Set event.metadata.user = {user_name} from config</action>
<action>Set event.metadata.agent = {current_agent} if loaded</action>
<action>Set event.metadata.workflow = {current_workflow}</action>
<action>Set event.metadata.environment = {environment} from config or "development"</action>
<action>Merge any additional metadata provided</action>
</substep>
<action>Set event.payload = {payload}</action>
</step>
<step n="3" title="Determine Transport">
<action>Load transport config from {bmad_folder}/core/config.yaml</action>
<check if="event_transport.type == 'auto'">
<action if="BMAD_SMTP_HOST is set">Use SMTP transport</action>
<action if="BMAD_SMTP_HOST not set">Use file_queue transport</action>
</check>
<check if="event_transport.type == 'file_queue'">
<action>Use file_queue transport</action>
</check>
<check if="event_transport.type == 'smtp'">
<action>Use SMTP transport</action>
</check>
</step>
<step n="4" title="Route to Subscribers">
<substep title="Load Subscription Registry">
<action>Read {queue_base_dir}/registry.yaml</action>
<action>Find all modules subscribed to {event_type}</action>
<check if="no subscribers">
<action>Log DEBUG: "No subscribers for {event_type} - event will be logged only"</action>
<action>Write event to {queue_base_dir}/_unrouted/{filename} for audit</action>
<goto step="5" />
</check>
</substep>
<substep title="Publish via File Queue" if="transport == file_queue">
<iterate for-each="subscriber in subscribers">
<action>Generate filename: {timestamp}-{event_type_slug}-{event_id_short}.yaml</action>
<action>Write event YAML to {queue_base_dir}/{subscriber.module}/pending/{filename}</action>
</iterate>
</substep>
<substep title="Publish via SMTP" if="transport == smtp">
<iterate for-each="subscriber in subscribers">
<action>Format event as email body (YAML content)</action>
<action>Set Subject: "[BMAD-EVENT] {event_type} - {event_id_short}"</action>
<action>Send to: {subscriber.inbox}</action>
</iterate>
</substep>
</step>
<step n="5" title="Log and Return">
<action>Log INFO: "Published event {event_type} [{event.id}] to {subscriber_count} subscribers"</action>
<action>Return event envelope for caller reference</action>
<output>
<field name="event_id" value="{event.id}" />
<field name="correlation_id" value="{event.correlation_id}" />
<field name="subscribers_notified" value="{subscriber_list}" />
<field name="timestamp" value="{event.timestamp}" />
</output>
</step>
</flow>
<event_envelope_example>
<yaml>
id: "550e8400-e29b-41d4-a716-446655440000"
type: "story.done"
source: "bmm/story-done"
timestamp: "2024-01-15T10:30:00.000Z"
correlation_id: "660e8400-e29b-41d4-a716-446655440001"
causation_id: "440e8400-e29b-41d4-a716-446655440002"
sequence: 5
version: "1.0"
payload:
story_id: "STORY-123"
epic_id: "EPIC-001"
completion_time: "2024-01-15T10:30:00.000Z"
tests_passed: true
files_changed:
- "src/auth/login.ts"
- "src/auth/login.test.ts"
metadata:
user: "developer@example.com"
agent: "dev"
workflow: "story-done"
environment: "development"
</yaml>
</event_envelope_example>
<error_handling>
<on_registry_not_found>
<action>Create empty registry.yaml</action>
<action>Log WARN: "Event registry not found, created empty registry"</action>
<action>Continue with no subscribers</action>
</on_registry_not_found>
<on_write_failure>
<action>Log ERROR: "Failed to write event to queue: {error}"</action>
<action>Retry up to 3 times with backoff</action>
<action if="all retries fail">Throw error to caller</action>
</on_write_failure>
<on_smtp_failure>
<action>Log ERROR: "Failed to send event via SMTP: {error}"</action>
<action>Fall back to file_queue if available</action>
</on_smtp_failure>
</error_handling>
</task>
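
The correlation rules in step 2 reduce to a small decision: an explicit ID wins, a parent event lends its chain and becomes the cause, and otherwise the event starts a fresh chain. A rough Python rendering, with the function name and dict shape chosen for illustration only:

```
import uuid

def resolve_correlation(explicit_id=None, parent_event=None):
    """Mirror step 2: explicit ID wins, then the parent's chain,
    otherwise this event starts a new chain."""
    causation_id = None
    if explicit_id is not None:
        correlation_id = explicit_id
    elif parent_event is not None:
        correlation_id = parent_event["correlation_id"]
        causation_id = parent_event["id"]
    else:
        correlation_id = str(uuid.uuid4())
    return correlation_id, causation_id

# A child event inherits the chain and records what caused it
parent = {"id": "evt-1", "correlation_id": "chain-A"}
corr, cause = resolve_correlation(parent_event=parent)
assert (corr, cause) == ("chain-A", "evt-1")
```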

src/core/events/queue/file-queue-transport.xml
@@ -0,0 +1,332 @@
<task id="{bmad_folder}/core/events/queue/file-queue-transport.xml" name="File Queue Event Transport">
<objective>Handle event publishing and subscription using file-based queue for local/development environments</objective>
<description>
This transport implements a zero-dependency event queue using the filesystem.
Each module has its own queue directory with pending/processing/completed subdirectories.
Events are written as YAML files and processed in order.
</description>
<configuration>
<param name="queue_base_dir" default="{project-root}/.bmad/_events" />
<param name="poll_interval_ms" default="1000" />
<param name="retention_days" default="7" />
<param name="max_retries" default="3" />
</configuration>
<directory_structure>
<example>
.bmad/_events/
├── registry.yaml # Subscription registry
├── bmm-metrics/
│ ├── pending/ # Events waiting to be processed
│ │ └── 2024-01-15T10-30-00-story-done-abc123.yaml
│ ├── processing/ # Events currently being handled
│ └── completed/ # Successfully processed events
├── bmm-release/
│ ├── pending/
│ ├── processing/
│ └── completed/
├── bmm-feedback/
│ └── ...
└── dead-letter/ # Failed events after max retries
└── 2024-01-15T10-35-00-story-done-abc123.yaml
</example>
</directory_structure>
<operations>
<operation name="initialize">
<description>Set up queue directories for a module</description>
<input name="module_name" type="string" required="true" />
<steps>
<step n="1">
<action>Create directory: {queue_base_dir}/{module_name}/pending</action>
</step>
<step n="2">
<action>Create directory: {queue_base_dir}/{module_name}/processing</action>
</step>
<step n="3">
<action>Create directory: {queue_base_dir}/{module_name}/completed</action>
</step>
<step n="4">
<action>Update registry.yaml with module subscription info</action>
</step>
</steps>
</operation>
<operation name="publish">
<description>Publish an event to all subscribed modules</description>
<input name="event" type="object" required="true">
<field name="type" description="Event type (e.g., story.done)" />
<field name="payload" description="Event-specific data" />
<field name="correlation_id" required="false" />
</input>
<steps>
<step n="1" title="Generate Event Envelope">
<action>Generate UUID for event.id</action>
<action>Set event.timestamp to current ISO8601</action>
<action>Set event.source to current {module}/{workflow}</action>
<action>Inherit or generate correlation_id</action>
</step>
<step n="2" title="Load Subscription Registry">
<action>Read {queue_base_dir}/registry.yaml</action>
<action>Find all modules subscribed to event.type</action>
</step>
<step n="3" title="Write Event to Each Subscriber Queue">
<iterate for-each="subscriber in subscribers">
<action>Generate filename: {timestamp}-{event_type}-{event_id_short}.yaml</action>
<action>Write event YAML to {queue_base_dir}/{subscriber}/pending/{filename}</action>
<action>Log: "Published {event.type} to {subscriber}"</action>
</iterate>
</step>
<step n="4" title="Return Publish Result">
<action>Return event.id and list of subscribers notified</action>
</step>
</steps>
<event_file_format>
<example>
# File: 2024-01-15T10-30-00-story-done-abc123.yaml
id: "550e8400-e29b-41d4-a716-446655440000"
type: "story.done"
source: "bmm/story-done"
timestamp: "2024-01-15T10:30:00.000Z"
correlation_id: "660e8400-e29b-41d4-a716-446655440001"
sequence: 5
version: "1.0"
payload:
story_id: "STORY-123"
epic_id: "EPIC-001"
completion_time: "2024-01-15T10:30:00.000Z"
tests_passed: true
files_changed:
- "src/auth/login.ts"
- "src/auth/login.test.ts"
metadata:
user: "developer@example.com"
agent: "dev"
workflow: "story-done"
environment: "development"
_queue_metadata:
received_at: "2024-01-15T10:30:01.000Z"
attempt: 1
max_attempts: 3
</example>
</event_file_format>
</operation>
<operation name="consume">
<description>Process pending events for a module</description>
<input name="module_name" type="string" required="true" />
<input name="handler_path" type="string" required="true" description="Path to event handler XML" />
<steps>
<step n="1" title="List Pending Events">
<action>List files in {queue_base_dir}/{module_name}/pending/ sorted by filename (chronological)</action>
<check if="no files found">
<action>Return empty - no events to process</action>
</check>
</step>
<step n="2" title="Move to Processing">
<action>Move first event file to {queue_base_dir}/{module_name}/processing/</action>
<action>Update _queue_metadata.processing_started</action>
</step>
<step n="3" title="Execute Handler">
<action>Load event YAML content</action>
<action>Load handler from handler_path</action>
<action>Execute handler with event as input</action>
<on_success>
<action>Move event file to {queue_base_dir}/{module_name}/completed/</action>
<action>Update _queue_metadata.completed_at</action>
<action>Log: "Successfully processed {event.type} [{event.id}]"</action>
</on_success>
<on_failure>
<action>Increment _queue_metadata.attempt</action>
<check if="attempt > max_attempts">
<action>Move to {queue_base_dir}/dead-letter/</action>
<action>Log ERROR: "Event {event.id} moved to dead-letter after {max_attempts} failures"</action>
</check>
<check if="attempt <= max_attempts">
<action>Move back to {queue_base_dir}/{module_name}/pending/ with updated attempt count</action>
<action>Log WARN: "Event {event.id} failed, will retry (attempt {attempt}/{max_attempts})"</action>
</check>
</on_failure>
</step>
</steps>
</operation>
<operation name="subscribe">
<description>Register a module's interest in event types</description>
<input name="module_name" type="string" required="true" />
<input name="event_types" type="array" required="true" description="List of event types to subscribe to" />
<input name="handler_mapping" type="object" required="true" description="Map of event_type to handler path" />
<steps>
<step n="1">
<action>Read {queue_base_dir}/registry.yaml (create if doesn't exist)</action>
</step>
<step n="2">
<action>Add/update entry for module_name with subscriptions</action>
<example>
modules:
bmm-metrics:
subscriptions:
- event_type: "story.done"
handler: "handlers/on-story-done.xml"
- event_type: "sprint.ended"
handler: "handlers/on-sprint-ended.xml"
registered_at: "2024-01-15T10:00:00.000Z"
</example>
</step>
<step n="3">
<action>Save registry.yaml</action>
</step>
<step n="4">
<action>Initialize queue directories if needed (call initialize operation)</action>
</step>
</steps>
</operation>
<operation name="cleanup">
<description>Remove old completed events beyond retention period</description>
<input name="retention_days" type="integer" default="7" />
<steps>
<step n="1">
<action>For each module directory in {queue_base_dir}</action>
</step>
<step n="2">
<action>List files in completed/ older than {retention_days}</action>
</step>
<step n="3">
<action>Delete old files</action>
</step>
<step n="4">
<action>Log cleanup summary</action>
</step>
</steps>
</operation>
<operation name="replay">
<description>Replay events from completed queue (for debugging/recovery)</description>
<input name="module_name" type="string" required="true" />
<input name="event_id" type="string" required="false" description="Specific event to replay" />
<input name="from_timestamp" type="string" required="false" description="Replay all events after this time" />
<steps>
<step n="1">
<action>Find event(s) in {queue_base_dir}/{module_name}/completed/</action>
</step>
<step n="2">
<action>Copy selected event(s) to pending/ with new _queue_metadata</action>
</step>
<step n="3">
<action>Log replay action for audit</action>
</step>
</steps>
</operation>
</operations>
<registry_schema>
<description>Schema for registry.yaml that tracks all subscriptions</description>
<example>
# File: {queue_base_dir}/registry.yaml
schema_version: "1.0"
last_updated: "2024-01-15T10:00:00.000Z"
# Global event routing table
event_routes:
"story.done":
- module: "bmm-metrics"
handler: "handlers/on-story-done.xml"
- module: "bmm-release"
handler: "handlers/on-story-done.xml"
"metrics.quality.pass":
- module: "bmm-release"
handler: "handlers/on-quality-pass.xml"
"release.deployed":
- module: "bmm-feedback"
handler: "handlers/on-release-deployed.xml"
# Module subscription details
modules:
bmm-metrics:
status: active
registered_at: "2024-01-15T10:00:00.000Z"
subscriptions:
- event_type: "story.done"
handler: "handlers/on-story-done.xml"
- event_type: "story.ready"
handler: "handlers/on-story-ready.xml"
- event_type: "sprint.ended"
handler: "handlers/on-sprint-ended.xml"
- event_type: "code.reviewed"
handler: "handlers/on-code-reviewed.xml"
bmm-release:
status: active
registered_at: "2024-01-15T10:00:00.000Z"
subscriptions:
- event_type: "story.done"
handler: "handlers/on-story-done.xml"
- event_type: "metrics.quality.pass"
handler: "handlers/on-quality-pass.xml"
bmm-feedback:
status: active
registered_at: "2024-01-15T10:00:00.000Z"
subscriptions:
- event_type: "release.deployed"
handler: "handlers/on-release-deployed.xml"
- event_type: "release.evaluated"
handler: "handlers/on-release-evaluated.xml"
</example>
</registry_schema>
<error_handling>
<strategy name="retry_with_backoff">
<description>Retry failed events with exponential backoff</description>
<config>
<attempt n="1" delay_ms="1000" />
<attempt n="2" delay_ms="5000" />
<attempt n="3" delay_ms="15000" />
</config>
</strategy>
<dead_letter_policy>
<description>Events that fail all retries go to dead-letter queue</description>
<action>Write failure reason to event metadata</action>
<action>Alert: Log ERROR with event details</action>
<action>Manual intervention required to replay or discard</action>
</dead_letter_policy>
</error_handling>
<monitoring>
<metrics>
<metric name="events_published" description="Count of events published" />
<metric name="events_processed" description="Count of events successfully processed" />
<metric name="events_failed" description="Count of events that failed processing" />
<metric name="events_dead_lettered" description="Count of events in dead-letter queue" />
<metric name="queue_depth" description="Number of pending events per module" />
<metric name="processing_latency_ms" description="Time from publish to completion" />
</metrics>
<health_check>
<check name="queue_writable" description="Verify queue directories are writable" />
<check name="registry_valid" description="Verify registry.yaml is valid YAML" />
<check name="no_stuck_processing" description="No events stuck in processing > 5 minutes" />
</health_check>
</monitoring>
</task>
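
The consume operation above is essentially an atomic move between directories plus a retry counter. A simplified sketch, assuming the queue directories already exist; the persisted attempt counter is stubbed out, as noted in the comment:

```
import shutil
from pathlib import Path

MAX_ATTEMPTS = 3

def consume_one(queue_base: Path, module: str, handler) -> bool:
    """Process the oldest pending event: pending -> processing -> completed,
    with failures retried and eventually dead-lettered."""
    pending = sorted((queue_base / module / "pending").glob("*.yaml"))
    if not pending:
        return False  # queue is empty
    processing = queue_base / module / "processing" / pending[0].name
    shutil.move(str(pending[0]), str(processing))
    try:
        handler(processing.read_text())
        shutil.move(str(processing), str(queue_base / module / "completed" / processing.name))
    except Exception:
        # The real transport persists _queue_metadata.attempt in the file;
        # this sketch hard-codes the first attempt to keep the branch visible.
        attempt = 1
        if attempt >= MAX_ATTEMPTS:
            target = queue_base / "dead-letter" / processing.name
        else:
            target = queue_base / module / "pending" / processing.name
        shutil.move(str(processing), str(target))
    return True
```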

workflow.xml
@@ -64,6 +64,7 @@
<tag>invoke-task xml tag → Execute specified task</tag>
<tag>invoke-protocol name="protocol_name" xml tag → Execute reusable protocol from protocols section</tag>
<tag>goto step="x" → Jump to specified step</tag>
<tag>publish event="type" xml tag → Emit event to subscribed modules (see Event System)</tag>
</execute-tags>
</substep>
@@ -91,7 +92,27 @@
</if>
</substep>
<substep n="2d" title="Step Completion">
<substep n="2d" title="Handle publish Tags (Event System)">
<if tag="publish">
<description>Emit events to the BMAD event bus for async module communication</description>
<action>Extract event type from publish tag's event attribute</action>
<action>Extract payload from nested elements or attributes</action>
<action>Invoke {bmad_folder}/core/events/publish-event.xml with event_type and payload</action>
<action>Log: "Event {event_type} published to {subscriber_count} subscribers"</action>
<note>Events are non-blocking; the workflow continues immediately after publish</note>
<example>
&lt;publish event="story.done"&gt;
&lt;payload&gt;
&lt;story_id&gt;{{story_id}}&lt;/story_id&gt;
&lt;epic_id&gt;{{epic_id}}&lt;/epic_id&gt;
&lt;tests_passed&gt;true&lt;/tests_passed&gt;
&lt;/payload&gt;
&lt;/publish&gt;
</example>
</if>
</substep>
<substep n="2e" title="Step Completion">
<check>If no special tags and NOT #yolo:</check>
<ask>Continue to next step? (y/n/edit)</ask>
</substep>
@@ -126,6 +147,7 @@
<tag>invoke-workflow - Call another workflow</tag>
<tag>invoke-task - Call a task</tag>
<tag>invoke-protocol - Execute a reusable protocol (e.g., discover_inputs)</tag>
<tag>publish event="type" - Emit event to event bus for async module communication</tag>
</execution>
<output>
<tag>template-output - Save content checkpoint</tag>

src/modules/README.md
@@ -0,0 +1,122 @@
# BMAD SaaS Operations Modules
This directory contains the event-driven SaaS operations modules for the BMAD Method. These modules are designed to operate independently through an event bus, enabling loose coupling and a scalable architecture.
## Module Overview
| Module | Description | Agent | Key Events |
|--------|-------------|-------|------------|
| **bmm-metrics** | KPIs, SLAs, and Quality Gates | Metrics Analyst 📊 | `metrics.quality.*`, `metrics.kpi.*` |
| **bmm-release** | Release Management | Release Manager 🚀 | `release.*` |
| **bmm-feedback** | Customer Feedback Loop | Feedback Analyst 📣 | `feedback.*` |
| **bmm-priority** | Backlog Prioritization | Priority Manager 📊 | `priority.*` |
| **bmm-roadmap** | Product Roadmap Planning | Roadmap Planner 🗺️ | `roadmap.*` |
## Event-Driven Architecture
All modules communicate through events, enabling:
- **Loose Coupling**: Modules don't depend on each other directly
- **Async Processing**: Events are processed independently
- **Scalability**: Add new modules without modifying existing ones
- **Auditability**: All events are logged for tracking
### Event Flow Example
```
story.done event
├──→ bmm-metrics: Calculate cycle time, update velocity
│ │
│ └──→ metrics.kpi.updated event
│ │
│ └──→ bmm-roadmap: Update capacity projections
└──→ bmm-release: Add to pending release items
└──→ release.candidate.created event
└──→ bmm-metrics: Run quality gate check
├──→ metrics.quality.pass
│ │
│ └──→ bmm-release: Proceed with release
└──→ metrics.quality.fail
└──→ bmm-release: Block release
```
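
To make the fan-out concrete: a handler that reacts to `story.done` and publishes a follow-up stays on the same correlation chain by copying `correlation_id` and recording the parent as `causation_id`. A sketch, with the helper name and event shapes chosen for illustration:

```
import uuid
from datetime import datetime, timezone

def make_follow_up(parent, event_type, source, payload):
    """Build a follow-up event that stays on the parent's correlation chain."""
    return {
        "id": str(uuid.uuid4()),
        "type": event_type,
        "source": source,
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "correlation_id": parent.get("correlation_id", parent["id"]),
        "causation_id": parent["id"],
        "payload": payload,
    }

story_done = {"id": "evt-001", "type": "story.done", "correlation_id": "chain-A"}
kpi_event = make_follow_up(story_done, "metrics.kpi.updated",
                           "bmm-metrics/track-metrics", {"kpi_name": "velocity"})
assert kpi_event["correlation_id"] == "chain-A"
assert kpi_event["causation_id"] == "evt-001"
```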
## Core Event Infrastructure
Located in `src/core/events/`:
- **event-schema.yaml**: Canonical event type definitions
- **queue/file-queue-transport.xml**: File-based queue for local development
- **publish-event.xml**: Event publishing task
## Module Installation
Modules are installed to `{project-root}/.bmad/{module-name}/` and include:
- Configuration files
- Agent definitions (compiled to .md)
- Workflow files
- Event handlers
- State files
## Quick Start
1. Install modules via BMAD installer
2. Configure each module in `.bmad/{module}/config.yaml`
3. Activate agents using slash commands or menu
## Event Types Reference
### Story Events (Core)
- `story.started` - Story work begins
- `story.done` - Story completed
- `story.ready` - Story ready for development
### Sprint Events (Core)
- `sprint.started` - Sprint begins
- `sprint.ended` - Sprint completes
### Metrics Events (bmm-metrics)
- `metrics.kpi.defined` - KPIs configured
- `metrics.kpi.updated` - KPI values updated
- `metrics.sla.defined` - SLAs configured
- `metrics.sla.breach` - SLA threshold breached
- `metrics.quality.pass` - Quality gates passed
- `metrics.quality.fail` - Quality gates failed
- `metrics.velocity.calculated` - Sprint velocity calculated
### Release Events (bmm-release)
- `release.candidate.created` - New release candidate
- `release.approved` - Release approved
- `release.deployed` - Release deployed
- `release.failed` - Deployment failed
- `release.rollback.initiated` - Rollback started
- `release.rollback.completed` - Rollback finished
### Feedback Events (bmm-feedback)
- `feedback.received` - New feedback submitted
- `feedback.analyzed` - Feedback analyzed
- `feedback.insight.generated` - Insight identified
- `feedback.priority.suggested` - Priority change suggested
### Priority Events (bmm-priority)
- `priority.updated` - Story priority changed
- `priority.queue.reordered` - Backlog reordered
### Roadmap Events (bmm-roadmap)
- `roadmap.updated` - Roadmap modified
- `roadmap.milestone.completed` - Milestone achieved
- `roadmap.at.risk` - Timeline at risk
## Contributing
When adding new modules:
1. Follow the directory structure pattern
2. Define events in manifest.yaml
3. Register handlers in events/subscriptions.yaml
4. Document published events in events/publications.yaml
5. Create agent with menu and workflows

src/modules/bmm-feedback/README.md
@@ -0,0 +1,90 @@
# BMM-Feedback Module
Customer and User Feedback Loop module for the BMAD Method. Collects, analyzes, and routes feedback to influence product priorities.
## Overview
The bmm-feedback module provides:
- **Feedback Collection**: Gather feedback from multiple sources
- **Sentiment Analysis**: Categorize and score feedback
- **Feedback Routing**: Connect feedback to stories and priorities
- **Feedback Reports**: Generate insights for product decisions
## Event-Driven Architecture
This module operates through events, enabling loose coupling with other modules:
### Events Subscribed
| Event | Action |
|-------|--------|
| `release.deployed` | Trigger post-release feedback collection |
| `story.done` | Enable feature-specific feedback collection |
### Events Published
| Event | Description |
|-------|-------------|
| `feedback.received` | New feedback submitted |
| `feedback.analyzed` | Feedback analyzed and categorized |
| `feedback.insight.generated` | Actionable insight identified |
| `feedback.priority.suggested` | Feedback suggests priority change |
## Directory Structure
```
bmm-feedback/
├── README.md
├── manifest.yaml
├── config.yaml
├── agents/
│ └── feedback-analyst.agent.yaml
├── workflows/
│ ├── collect-feedback/
│ ├── analyze-feedback/
│ └── feedback-report/
├── events/
│ ├── subscriptions.yaml
│ ├── publications.yaml
│ └── handlers/
│ └── on-release-deployed.xml
├── tasks/
│ └── categorize-feedback.xml
├── templates/
│ └── feedback-report-template.md
└── state/
└── module-state.yaml
```
## Quick Start
1. Install the module via BMAD installer
2. Configure feedback sources in `.bmad/bmm-feedback/config.yaml`
3. Use the Feedback Analyst agent: `*feedback-analyst`
## Agent Commands
The Feedback Analyst agent provides:
- `*help` - Show available commands
- `*collect` - Start feedback collection
- `*analyze` - Analyze collected feedback
- `*report` - Generate feedback report
- `*trends` - View feedback trends
- `*exit` - Exit agent
## Integration with Other Modules
### bmm-release → bmm-feedback
When a release is deployed, bmm-feedback can automatically trigger feedback collection for the released features.
### bmm-feedback → bmm-priority
Feedback insights can suggest priority changes, publishing events that bmm-priority consumes to adjust the backlog.
## Feedback Sources
Configure which sources to collect feedback from:
- In-app feedback widgets
- Support tickets
- User surveys
- App store reviews
- Social media mentions
- NPS responses
- Customer interviews

src/modules/bmm-feedback/agents/feedback-analyst.agent.yaml
@@ -0,0 +1,185 @@
# Feedback Analyst Agent Definition
# Compiles to .md during BMAD installation
name: feedback-analyst
displayName: Feedback Analyst
title: Customer Voice Champion + Insight Generator
icon: "📣"
persona:
role: "Feedback Analyst + Customer Voice Champion + Insight Generator"
identity: |
Empathetic listener who transforms customer feedback into actionable insights.
Expert at identifying patterns, sentiment trends, and priority signals from
diverse feedback sources. Bridges the gap between customer voice and product
decisions, ensuring the team hears what users actually need.
communication_style: |
Speaks with customer empathy - often quotes actual feedback.
Uses phrases like "customers are telling us..." and "the pattern shows..."
Presents data with human stories. Highlights both pain points and praise.
Always connects feedback to potential actions.
principles:
- "Every piece of feedback is a gift - treat it with respect"
- "Patterns matter more than individual complaints"
- "Quantify sentiment but don't lose the human story"
- "Feedback should drive action, not just reports"
- "The loudest feedback isn't always the most important"
- "Close the loop - let customers know they were heard"
activation:
critical: true
steps:
- step: 1
action: "Load persona from this agent file"
- step: 2
action: "Load module config from {project-root}/.bmad/bmm-feedback/config.yaml"
mandate: true
- step: 3
action: "Store config values: {user_name}, {project_name}, {sources}, {categories}"
- step: 4
action: "Load current feedback state from {project-root}/.bmad/bmm-feedback/state/module-state.yaml if exists"
- step: 5
action: "Greet user and display menu"
format: |
📣 **Feedback Analyst** ready, {user_name}
Current project: **{project_name}**
Feedback items: **{feedback_count}**
Unprocessed: **{unprocessed_count}**
{menu_items}
menu:
- cmd: "*help"
action: "Show numbered menu"
- cmd: "*collect"
workflow: "{project-root}/.bmad/bmm-feedback/workflows/collect-feedback/workflow.yaml"
description: "Collect feedback from configured sources"
- cmd: "*analyze"
workflow: "{project-root}/.bmad/bmm-feedback/workflows/analyze-feedback/workflow.yaml"
description: "Analyze collected feedback"
- cmd: "*report"
workflow: "{project-root}/.bmad/bmm-feedback/workflows/feedback-report/workflow.yaml"
description: "Generate feedback insights report"
- cmd: "*trends"
action: "#show-trends"
description: "View feedback trends"
- cmd: "*sentiment"
action: "#sentiment-summary"
description: "View sentiment analysis summary"
- cmd: "*top-issues"
action: "#top-issues"
description: "Show top feedback themes"
- cmd: "*add-feedback"
action: "#manual-feedback"
description: "Manually add feedback item"
- cmd: "*exit"
action: "Exit agent with confirmation"
prompts:
show-trends:
id: show-trends
content: |
Display feedback trends:
1. Load feedback history from state
2. Calculate 30-day trend for volume
3. Calculate sentiment trend
4. Identify emerging themes
5. Show trend visualization
6. Highlight significant changes
sentiment-summary:
id: sentiment-summary
content: |
Display sentiment analysis summary:
1. Load recent feedback with sentiment scores
2. Calculate overall sentiment
3. Break down by category
4. Show sentiment over time
5. Highlight notable quotes
top-issues:
id: top-issues
content: |
Display top feedback themes:
1. Load categorized feedback
2. Rank by frequency and impact
3. Show top 10 themes
4. Include representative quotes
5. Link to related stories if any
manual-feedback:
id: manual-feedback
content: |
Add feedback manually:
1. Prompt for feedback source
2. Prompt for feedback content
3. Prompt for customer identifier (optional)
4. Auto-categorize and analyze sentiment
5. Store and publish feedback.received event
expertise:
domains:
- "Customer feedback analysis"
- "Sentiment analysis and NLP"
- "Voice of customer programs"
- "NPS and satisfaction metrics"
- "Feedback loop management"
- "Product insight generation"
frameworks:
- "Jobs to be Done (JTBD)"
- "Net Promoter Score (NPS)"
- "Customer Effort Score (CES)"
- "Sentiment analysis models"
- "Thematic analysis"
tools:
- "Feedback collection and aggregation"
- "Sentiment scoring"
- "Theme identification"
- "Trend analysis"
- "Report generation"
collaboration:
works_with:
- agent: "pm"
purpose: "Provide customer insights for prioritization"
- agent: "ux-designer"
purpose: "Share usability feedback for design improvements"
- agent: "analyst"
purpose: "Combine feedback with market analysis"
handoffs:
- to: "bmm-priority"
event: "feedback.priority.suggested"
description: "High-impact feedback suggests priority change"
- from: "bmm-release"
event: "release.deployed"
description: "Trigger post-release feedback collection"
rules:
- "Always preserve the customer's voice - quote actual feedback"
- "Never dismiss feedback without analysis"
- "Connect feedback to existing stories when possible"
- "Flag urgent issues (security, data loss) immediately"
- "Balance quantitative trends with qualitative insights"
- "Respect customer privacy - anonymize when sharing"

src/modules/bmm-feedback/config.yaml
@@ -0,0 +1,119 @@
# BMM-Feedback Module Configuration
# Copy to {project-root}/.bmad/bmm-feedback/config.yaml and customize
# Project identification
project_name: "{{project_name}}"
user_name: "{{user_name}}"
output_folder: "docs"
# Feedback collection sources
sources:
# In-app feedback
in_app:
enabled: true
widget_id: null # Configure your feedback widget
# Support tickets (integrations)
support:
enabled: false
provider: null # zendesk, freshdesk, intercom
api_key: null
# User surveys
surveys:
enabled: false
provider: null # typeform, surveymonkey, google_forms
# App store reviews
app_store:
enabled: false
ios_app_id: null
android_package: null
# NPS tracking
nps:
enabled: false
provider: null # delighted, satismeter, custom
# Feedback categorization
categories:
- name: "bug"
keywords: ["broken", "error", "crash", "doesn't work", "bug"]
priority_weight: 1.5
- name: "feature_request"
keywords: ["wish", "would be nice", "please add", "feature request"]
priority_weight: 1.0
- name: "usability"
keywords: ["confusing", "hard to", "can't find", "unclear"]
priority_weight: 1.2
- name: "performance"
keywords: ["slow", "takes forever", "loading", "timeout"]
priority_weight: 1.3
- name: "praise"
keywords: ["love", "great", "amazing", "thank you", "awesome"]
priority_weight: 0.5
# Sentiment analysis
sentiment:
enabled: true
# Threshold for negative sentiment alert
negative_threshold: -0.5
# Threshold for positive highlight
positive_threshold: 0.5
# Auto-collection triggers
auto_collect:
# Collect feedback after release
on_release: true
# Days after release to send survey
release_survey_delay_days: 7
# Collect feedback after story completion
on_story_done: false
# Feedback analysis settings
analysis:
# Minimum feedback items for trend analysis
min_items_for_trends: 10
# Rolling window for trend calculation
trend_window_days: 30
# Threshold for insight generation
insight_threshold: 5 # Same issue mentioned 5+ times
# Priority influence settings
priority_influence:
enabled: true
# Minimum feedback score to suggest priority change
min_score_for_suggestion: 10
# Auto-publish priority suggestions
auto_publish: false
# Notifications
notifications:
# Alert on negative sentiment spike
on_negative_spike:
enabled: true
channels:
- slack
# Report generation complete
on_report_ready:
enabled: true
channels:
- email
# Event subscriptions (for event bus)
events:
subscribe:
- "release.deployed"
- "story.done"
publish:
- "feedback.received"
- "feedback.analyzed"
- "feedback.insight.generated"
- "feedback.priority.suggested"

src/modules/bmm-feedback/events/publications.yaml
@@ -0,0 +1,78 @@
# BMM-Feedback Event Publications
# Events this module emits
version: "1.0.0"
module: "bmm-feedback"
publications:
# New feedback received
- event_type: "feedback.received"
description: "New feedback item submitted"
trigger: "Feedback collection or manual entry"
payload_schema:
feedback_id: { type: string, required: true }
source: { type: string, required: true }
content: { type: string, required: true }
category: { type: string, required: false }
customer_id: { type: string, required: false }
feature_id: { type: string, required: false }
timestamp: { type: datetime, required: true }
# Feedback analyzed
- event_type: "feedback.analyzed"
description: "Feedback item analyzed and categorized"
trigger: "Analysis workflow completion"
payload_schema:
feedback_id: { type: string, required: true }
category: { type: string, required: true }
sentiment_score: { type: number, required: true }
sentiment_label: { type: string, required: true }
themes: { type: array, required: true }
priority_score: { type: number, required: false }
linked_stories: { type: array, required: false }
# Actionable insight generated
- event_type: "feedback.insight.generated"
description: "Actionable insight identified from feedback patterns"
trigger: "Pattern threshold reached"
payload_schema:
insight_id: { type: string, required: true }
type: { type: string, required: true } # bug_pattern, feature_demand, usability_issue
title: { type: string, required: true }
description: { type: string, required: true }
feedback_count: { type: number, required: true }
feedback_ids: { type: array, required: true }
suggested_action: { type: string, required: false }
priority_impact: { type: number, required: false }
consumers:
- module: "bmm-priority"
action: "Consider for backlog prioritization"
# Priority change suggested
- event_type: "feedback.priority.suggested"
description: "Feedback suggests a priority change"
trigger: "High-impact feedback pattern detected"
payload_schema:
suggestion_id: { type: string, required: true }
story_id: { type: string, required: false }
current_priority: { type: string, required: false }
suggested_priority: { type: string, required: true }
reason: { type: string, required: true }
feedback_count: { type: number, required: true }
customer_impact: { type: string, required: true }
sample_feedback: { type: array, required: true }
consumers:
- module: "bmm-priority"
action: "Evaluate priority adjustment"
# Sentiment alert
- event_type: "feedback.sentiment.alert"
description: "Significant sentiment change detected"
trigger: "Sentiment threshold breach"
payload_schema:
alert_type: { type: string, required: true } # negative_spike, trend_change
category: { type: string, required: false }
current_sentiment: { type: number, required: true }
previous_sentiment: { type: number, required: true }
change_percent: { type: number, required: true }
sample_feedback: { type: array, required: true }
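
The alert schema above requires `change_percent` but does not pin down its formula. One plausible interpretation, relative change against the previous score with a zero guard, shown as a sketch:

```
def sentiment_change_percent(previous: float, current: float) -> float:
    """Relative change versus the previous sentiment score, in percent."""
    if previous == 0:
        return float("inf") if current else 0.0
    return (current - previous) / abs(previous) * 100.0

# A drop from 0.4 to 0.2 reads as a 50% decline
assert sentiment_change_percent(0.4, 0.2) == -50.0
```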

src/modules/bmm-feedback/events/subscriptions.yaml
@@ -0,0 +1,29 @@
# BMM-Feedback Event Subscriptions
# Events this module listens to
version: "1.0.0"
module: "bmm-feedback"
subscriptions:
# Release deployed - trigger feedback collection
- event_type: "release.deployed"
handler: "handlers/on-release-deployed.xml"
description: "Trigger post-release feedback collection"
condition: "config.auto_collect.on_release == true"
action: "schedule_feedback_collection"
# Story done - optionally collect feature feedback
- event_type: "story.done"
handler: "handlers/on-story-done.xml"
description: "Enable feature-specific feedback"
condition: "config.auto_collect.on_story_done == true"
action: "enable_feature_feedback"
# Routing configuration
routing:
# Map feedback to features/stories
feature_mapping:
enabled: true
match_by:
- story_keywords
- feature_tags
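
Dispatch under these subscriptions amounts to matching the event type and evaluating the condition against module config. A small sketch; the lambda-based condition encoding is an assumption standing in for whatever expression evaluator BMAD uses:

```
SUBSCRIPTIONS = [
    {"event_type": "release.deployed", "handler": "handlers/on-release-deployed.xml",
     "condition": lambda cfg: cfg["auto_collect"]["on_release"]},
    {"event_type": "story.done", "handler": "handlers/on-story-done.xml",
     "condition": lambda cfg: cfg["auto_collect"]["on_story_done"]},
]

def handlers_for(event_type: str, config: dict) -> list:
    """Return handler paths whose subscription matches and whose condition holds."""
    return [s["handler"] for s in SUBSCRIPTIONS
            if s["event_type"] == event_type and s["condition"](config)]

cfg = {"auto_collect": {"on_release": True, "on_story_done": False}}
assert handlers_for("release.deployed", cfg) == ["handlers/on-release-deployed.xml"]
assert handlers_for("story.done", cfg) == []
```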

src/modules/bmm-feedback/manifest.yaml
@@ -0,0 +1,117 @@
# BMM-Feedback Module Manifest
# Customer Feedback Loop for BMAD
name: bmm-feedback
version: "1.0.0"
display_name: "BMAD Feedback Module"
description: "Customer feedback collection, analysis, and insight generation with event-driven architecture"
author: "BMad"
license: "MIT"
# Module category and tags
category: "product"
tags:
- feedback
- customer-voice
- sentiment
- insights
- nps
# Dependencies
dependencies:
core:
version: ">=1.0.0"
required: true
# Event Integration
events:
subscribes:
- release.deployed
- story.done
publishes:
- feedback.received
- feedback.analyzed
- feedback.insight.generated
- feedback.priority.suggested
- feedback.sentiment.alert
# Agents provided by this module
agents:
- name: feedback-analyst
file: agents/feedback-analyst.agent.yaml
description: "Customer Voice Champion + Insight Generator"
icon: "📣"
# Workflows provided
workflows:
- name: collect-feedback
path: workflows/collect-feedback
description: "Collect feedback from configured sources"
standalone: true
- name: analyze-feedback
path: workflows/analyze-feedback
description: "Analyze feedback for sentiment and themes"
standalone: true
- name: feedback-report
path: workflows/feedback-report
description: "Generate feedback insights report"
standalone: true
# Configuration schema
config_schema:
project_name:
type: string
required: true
user_name:
type: string
required: true
output_folder:
type: string
required: true
default: "docs"
sources:
type: object
description: "Feedback source configuration"
categories:
type: array
description: "Feedback category definitions"
sentiment:
type: object
properties:
enabled: { type: boolean, default: true }
negative_threshold: { type: number, default: -0.5 }
priority_influence:
type: object
properties:
enabled: { type: boolean, default: true }
min_score_for_suggestion: { type: number, default: 10 }
# Installation hooks
install:
post:
- action: "Initialize module state file"
- action: "Subscribe to events"
- action: "Generate slash commands"
# Slash commands to generate
slash_commands:
- name: "feedback-collect"
workflow: "collect-feedback"
description: "Collect customer feedback"
- name: "feedback-analyze"
workflow: "analyze-feedback"
description: "Analyze feedback sentiment and themes"
- name: "feedback-report"
workflow: "feedback-report"
description: "Generate feedback report"

View File

@ -0,0 +1,83 @@
# BMM-Feedback Module State
# This file is auto-managed by the bmm-feedback module
# Manual edits may be overwritten
version: "1.0.0"
module: "bmm-feedback"
initialized: false
last_updated: null
# Collection tracking
collection:
last_collection: null
total_collected: 0
sources_used: []
# Feedback items
feedback_items: []
# Example:
# - id: "fb-001"
# source: "in_app"
# content: "The new dashboard is confusing to navigate"
# customer_id: "user-123"
# release_id: "v2.1.0"
# created_at: "2024-01-15T10:00:00Z"
# analyzed: true
# category: "usability"
# sentiment_score: -0.3
# sentiment_label: "negative"
# themes: ["navigation", "dashboard"]
# linked_stories: ["STORY-456"]
# Analysis results
analysis:
last_analysis: null
items_analyzed: 0
average_sentiment: null
# Category counts
categories:
bug: 0
feature_request: 0
usability: 0
performance: 0
praise: 0
# Generated insights
insights: []
# Example:
# - id: "insight-001"
# type: "usability_issue"
# title: "Dashboard navigation confusion"
# description: "Multiple users report difficulty navigating new dashboard"
# feedback_count: 12
# feedback_ids: ["fb-001", "fb-002", ...]
# created_at: "2024-01-15T12:00:00Z"
# status: "active" # active, addressed, dismissed
# Priority suggestions
priority_suggestions: []
# Example:
# - id: "ps-001"
# target_story: "STORY-456"
# suggested_change: "increase"
# reason: "High volume of negative feedback"
# feedback_count: 15
# created_at: "2024-01-15T12:00:00Z"
# status: "pending" # pending, accepted, rejected
# Sentiment history (for trends)
sentiment_history: []
# Example:
# - date: "2024-01-15"
# average_sentiment: 0.2
# feedback_count: 25
# categories:
# bug: -0.5
# praise: 0.8
# Event processing
event_processing:
last_event_id: null
last_event_time: null
events_processed_count: 0

View File

@ -0,0 +1,270 @@
# Analyze Feedback Instructions
## Objective
Analyze feedback items to determine sentiment, assign categories and themes, identify patterns, and generate actionable insights.
## Prerequisites
- Feedback items collected (via `*collect`)
- Category definitions in config
---
<step n="1" goal="Select feedback to analyze">
### Select Analysis Scope
<action>Load unanalyzed feedback from state</action>
**Unanalyzed Feedback:** {{unanalyzed_count}} items
<ask>What would you like to analyze?
[u] All unanalyzed feedback ({{unanalyzed_count}} items)
[r] Re-analyze all feedback
[s] Select specific items
[d] Date range
Choice: </ask>
<action>Store as {{analysis_scope}}</action>
<action>Load feedback items based on selection</action>
**Selected for Analysis:** {{selected_count}} items
</step>
---
<step n="2" goal="Perform sentiment analysis">
### Sentiment Analysis
<action>For each feedback item, calculate sentiment score</action>
<action>Score range: -1 (very negative) to +1 (very positive)</action>
**Sentiment Distribution:**
```
Very Negative [-1.0 to -0.5]: ████████ {{very_negative_count}}
Negative [-0.5 to -0.1]: ██████ {{negative_count}}
Neutral [-0.1 to +0.1]: ████ {{neutral_count}}
Positive [+0.1 to +0.5]: ██████████ {{positive_count}}
Very Positive [+0.5 to +1.0]: ████████████ {{very_positive_count}}
```
**Average Sentiment:** {{average_sentiment}}
**Sentiment Trend:** {{sentiment_trend}}
<check if="average_sentiment < config.sentiment.negative_threshold">
**ALERT: Negative sentiment spike detected!**
<publish event="feedback.sentiment.alert">
<payload>
<alert_type>negative_spike</alert_type>
<current_sentiment>{{average_sentiment}}</current_sentiment>
<previous_sentiment>{{previous_average}}</previous_sentiment>
<change_percent>{{sentiment_change}}</change_percent>
<sample_feedback>{{negative_samples}}</sample_feedback>
</payload>
</publish>
</check>
</step>
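The binning and alert rule above are mechanical, so they can be pinned down precisely. A minimal sketch in Python, assuming per-item scores already exist; the function names are illustrative, and `negative_threshold` mirrors the config default of -0.5:

```python
from collections import Counter

# Bucket edges mirror the distribution above; ranges are half-open except the top.
BUCKETS = [
    ("very_negative", -1.0, -0.5),
    ("negative",      -0.5, -0.1),
    ("neutral",       -0.1,  0.1),
    ("positive",       0.1,  0.5),
    ("very_positive",  0.5,  1.0),
]

def bucket(score: float) -> str:
    """Map a sentiment score in [-1, 1] to a distribution bucket."""
    for name, lo, hi in BUCKETS:
        if lo <= score < hi:
            return name
    return "very_positive"  # score == 1.0 falls through the half-open ranges

def summarize(scores, previous_average, negative_threshold=-0.5):
    """Return (bucket counts, average, alert payload or None)."""
    counts = Counter(bucket(s) for s in scores)
    average = sum(scores) / len(scores)
    alert = None
    if average < negative_threshold:  # config.sentiment.negative_threshold
        change = ((average - previous_average) / abs(previous_average) * 100
                  if previous_average else 0.0)
        alert = {"alert_type": "negative_spike",
                 "current_sentiment": round(average, 3),
                 "previous_sentiment": previous_average,
                 "change_percent": round(change, 1)}
    return counts, average, alert
```

Tightening `sentiment.negative_threshold` in config therefore directly changes when the alert fires.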
---
<step n="3" goal="Categorize feedback">
### Category Classification
<action>Match feedback content against category keywords</action>
<action>Assign primary and secondary categories</action>
<action>Calculate category confidence scores</action>
**Category Distribution:**
| Category | Count | % | Avg Sentiment |
|----------|-------|---|---------------|
{{#each categories}}
| {{name}} | {{count}} | {{percent}}% | {{avg_sentiment}} |
{{/each}}
**Top Categories:**
1. {{top_category_1.name}}: {{top_category_1.count}} items
2. {{top_category_2.name}}: {{top_category_2.count}} items
3. {{top_category_3.name}}: {{top_category_3.count}} items
</step>
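Keyword matching with a confidence score, as this step describes, might look like the following sketch. The keyword lists are placeholders; real definitions come from the `categories` section of config.yaml:

```python
# Illustrative keyword lists; real definitions come from config.yaml `categories`.
CATEGORY_KEYWORDS = {
    "bug": ["crash", "error", "broken"],
    "usability": ["confusing", "navigate", "hard to find"],
    "performance": ["slow", "lag", "timeout"],
    "praise": ["love", "great", "awesome"],
}

def classify(text: str):
    """Return (primary, secondary, confidence) by keyword hit counts."""
    text = text.lower()
    hits = {cat: sum(kw in text for kw in kws)
            for cat, kws in CATEGORY_KEYWORDS.items()}
    total = sum(hits.values())
    if total == 0:
        return None, None, 0.0  # unmatched; may be a new theme
    ranked = sorted(hits.items(), key=lambda kv: kv[1], reverse=True)
    secondary = ranked[1][0] if len(ranked) > 1 and ranked[1][1] else None
    return ranked[0][0], secondary, round(ranked[0][1] / total, 2)
```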
---
<step n="4" goal="Extract themes">
### Theme Extraction
<action>Identify common themes and topics</action>
<action>Group related feedback by theme</action>
<action>Rank themes by frequency and impact</action>
**Emerging Themes:**
{{#each themes}}
### {{rank}}. {{name}}
- **Mentions:** {{count}}
- **Sentiment:** {{sentiment}}
- **Sample Quotes:**
{{#each sample_quotes}}
> "{{quote}}" - {{source}}
{{/each}}
{{/each}}
</step>
---
<step n="5" goal="Link to stories/features">
### Story Linking
<action>Load story keywords and feature tags</action>
<action>Match feedback to existing stories</action>
<action>Identify feedback without story matches</action>
**Linked to Stories:**
| Feedback ID | Story ID | Confidence |
|-------------|----------|------------|
{{#each linked_feedback}}
| {{feedback_id}} | {{story_id}} | {{confidence}}% |
{{/each}}
**Unlinked Feedback:** {{unlinked_count}} items
(May represent new feature requests or unknown issues)
</step>
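One plausible reading of the matching above: treat each story's keywords as a set and score the overlap with the feedback text. The `min_confidence` cutoff and the keyword shape are assumptions, not module contract:

```python
def link_feedback(feedback_text, story_keywords, min_confidence=0.5):
    """story_keywords: story ID -> keyword list, e.g. {"STORY-456": ["dashboard"]}.

    Confidence is the share of a story's keywords found in the feedback text."""
    words = set(feedback_text.lower().split())
    best_id, best_conf = None, 0.0
    for story_id, keywords in story_keywords.items():
        kws = {k.lower() for k in keywords}
        conf = len(words & kws) / len(kws) if kws else 0.0
        if conf > best_conf:
            best_id, best_conf = story_id, conf
    if best_conf < min_confidence:
        return None, 0  # unlinked: candidate new feature request or unknown issue
    return best_id, round(best_conf * 100)
```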
---
<step n="6" goal="Generate insights">
### Insight Generation
<action>Identify patterns that exceed threshold</action>
<action>Generate actionable insights</action>
<check if="pattern_count >= config.analysis.insight_threshold">
{{#each insights}}
**Insight: {{title}}**
- **Type:** {{type}}
- **Feedback Count:** {{feedback_count}}
- **Impact Score:** {{impact_score}}
- **Description:** {{description}}
- **Suggested Action:** {{suggested_action}}
<publish event="feedback.insight.generated">
<payload>
<insight_id>{{id}}</insight_id>
<type>{{type}}</type>
<title>{{title}}</title>
<description>{{description}}</description>
<feedback_count>{{feedback_count}}</feedback_count>
<feedback_ids>{{feedback_ids}}</feedback_ids>
<suggested_action>{{suggested_action}}</suggested_action>
<priority_impact>{{priority_impact}}</priority_impact>
</payload>
</publish>
{{/each}}
</check>
</step>
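A sketch of the pattern detection above, under the assumption that an insight is a theme whose mention count reaches the configured threshold, with impact weighted toward negative sentiment:

```python
from collections import defaultdict

def generate_insights(items, insight_threshold=5):
    """Group analyzed feedback by theme; emit an insight per theme over threshold.

    `items` are dicts with "id", "themes", "sentiment_score" keys (shape assumed
    from the module state examples)."""
    by_theme = defaultdict(list)
    for item in items:
        for theme in item.get("themes", []):
            by_theme[theme].append(item)
    insights = []
    for theme, group in by_theme.items():
        if len(group) < insight_threshold:
            continue
        avg = sum(i["sentiment_score"] for i in group) / len(group)
        insights.append({
            "title": f"Recurring theme: {theme}",
            "feedback_count": len(group),
            "feedback_ids": [i["id"] for i in group],
            # Assumed weighting: more volume and more negative sentiment
            # means higher impact.
            "impact_score": round(len(group) * (1 - avg), 1),
        })
    return sorted(insights, key=lambda i: i["impact_score"], reverse=True)
```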
---
<step n="7" goal="Check priority suggestions">
### Priority Impact Analysis
<action>Evaluate if any insights should trigger priority changes</action>
<action>Calculate priority scores based on feedback volume and sentiment</action>
<check if="high_impact_feedback_detected">
{{#each priority_suggestions}}
**Priority Suggestion:**
- **Story/Feature:** {{target}}
- **Current Priority:** {{current_priority}}
- **Suggested Priority:** {{suggested_priority}}
- **Reason:** {{reason}}
- **Supporting Feedback:** {{feedback_count}} items
<check if="config.priority_influence.auto_publish == true">
<publish event="feedback.priority.suggested">
<payload>
<suggestion_id>{{id}}</suggestion_id>
<story_id>{{story_id}}</story_id>
<current_priority>{{current_priority}}</current_priority>
<suggested_priority>{{suggested_priority}}</suggested_priority>
<reason>{{reason}}</reason>
<feedback_count>{{feedback_count}}</feedback_count>
<customer_impact>{{customer_impact}}</customer_impact>
<sample_feedback>{{sample_feedback}}</sample_feedback>
</payload>
</publish>
</check>
{{/each}}
</check>
</step>
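The scoring behind these suggestions is not fixed by the module; one plausible rule, consistent with the `min_score_for_suggestion` config default of 10, weights feedback volume by how negative the average sentiment is:

```python
def priority_score(feedback_count: int, avg_sentiment: float) -> float:
    """Assumed rule: volume weighted by severity of negative sentiment.

    avg_sentiment is in [-1, 1]; neutral/positive feedback contributes
    volume only."""
    severity = max(0.0, -avg_sentiment)          # 0 for neutral/positive, up to 1
    return feedback_count * (1 + 2 * severity)   # negative feedback counts up to 3x

def suggest(story_id, feedback_count, avg_sentiment, current_priority,
            min_score_for_suggestion=10):
    score = priority_score(feedback_count, avg_sentiment)
    if score < min_score_for_suggestion:
        return None  # not enough signal to suggest a change
    return {
        "story_id": story_id,
        "current_priority": current_priority,
        "suggested_priority": "increase",
        "reason": (f"{feedback_count} items, avg sentiment "
                   f"{avg_sentiment:+.2f} (score {score:.0f})"),
        "feedback_count": feedback_count,
    }
```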
---
<step n="8" goal="Save analysis results">
### Save Results
<action>Update feedback items with analysis results</action>
<action>Save insights to state</action>
<action>Update analysis timestamp</action>
{{#each analyzed_items}}
<publish event="feedback.analyzed">
<payload>
<feedback_id>{{id}}</feedback_id>
<category>{{category}}</category>
<sentiment_score>{{sentiment_score}}</sentiment_score>
<sentiment_label>{{sentiment_label}}</sentiment_label>
<themes>{{themes}}</themes>
<priority_score>{{priority_score}}</priority_score>
<linked_stories>{{linked_stories}}</linked_stories>
</payload>
</publish>
{{/each}}
</step>
---
## Completion
Feedback analysis complete.
**Summary:**
- Items Analyzed: {{analyzed_count}}
- Average Sentiment: {{average_sentiment}} ({{sentiment_label}})
- Top Category: {{top_category}}
- Insights Generated: {{insights_count}}
- Priority Suggestions: {{priority_suggestions_count}}
**Attention Needed:**
{{#if negative_spike}}
- Negative sentiment spike detected
{{/if}}
{{#if high_volume_theme}}
- High volume theme: {{high_volume_theme}}
{{/if}}
**Next Steps:**
1. Review generated insights
2. Generate report with `*report`
3. Address priority suggestions
**Output:** {{output_file_path}}

View File

@ -0,0 +1,38 @@
# Analyze Feedback Workflow
name: analyze-feedback
description: "Analyze collected feedback for sentiment, categories, and insights"
author: "BMad"
module: bmm-feedback
# Configuration
config_source: "{project-root}/.bmad/bmm-feedback/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
categories: "{config_source}:categories"
sentiment_config: "{config_source}:sentiment"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-feedback/workflows/analyze-feedback"
instructions: "{installed_path}/instructions.md"
# Input
input:
feedback_ids:
description: "Specific feedback IDs to analyze (default: unanalyzed)"
required: false
default: "unanalyzed"
# Output
default_output_file: "{output_folder}/feedback/analysis-{{date}}.yaml"
# Events
publishes:
- event_type: "feedback.analyzed"
condition: "For each analyzed feedback item"
- event_type: "feedback.insight.generated"
condition: "When pattern threshold reached"
- event_type: "feedback.sentiment.alert"
condition: "When sentiment threshold breached"
standalone: true

View File

@ -0,0 +1,209 @@
# Collect Feedback Instructions
## Objective
Gather feedback from all configured sources, standardize the format, and prepare for analysis.
## Prerequisites
- At least one feedback source configured
- API credentials for external sources (if applicable)
---
<step n="1" goal="Select collection scope">
### Select Collection Scope
<ask>What feedback would you like to collect?
[a] All enabled sources
[s] Select specific sources
[m] Manual entry only
Choice: </ask>
<action>Store as {{collection_mode}}</action>
<check if="collection_mode == 's'">
**Enabled Sources:**
{{#each enabled_sources}}
[{{index}}] {{name}} - {{description}}
{{/each}}
<ask>Select sources (comma-separated numbers): </ask>
<action>Store as {{selected_sources}}</action>
</check>
<ask>Collect feedback from what time period?
[t] Today
[w] Last 7 days
[m] Last 30 days
[c] Custom date range
[a] All time (since last collection)
Period: </ask>
<action>Calculate {{since_date}} based on selection</action>
</step>
---
<step n="2" goal="Collect from in-app source" condition="in_app in selected_sources">
### In-App Feedback
<action>Connect to in-app feedback widget/database</action>
<action>Query feedback since {{since_date}}</action>
**In-App Feedback Retrieved:** {{in_app_count}} items
{{#each in_app_feedback}}
| ID | Date | Content (preview) | Rating |
|----|------|-------------------|--------|
| {{id}} | {{date}} | {{content_preview}} | {{rating}} |
{{/each}}
</step>
---
<step n="3" goal="Collect from support tickets" condition="support in selected_sources">
### Support Tickets
<action>Connect to support system ({{support.provider}})</action>
<action>Query tickets with feedback tag since {{since_date}}</action>
<action>Extract feedback content from tickets</action>
**Support Feedback Retrieved:** {{support_count}} items
</step>
---
<step n="4" goal="Collect from surveys" condition="surveys in selected_sources">
### Survey Responses
<action>Connect to survey provider ({{surveys.provider}})</action>
<action>Query responses since {{since_date}}</action>
<action>Map survey questions to feedback categories</action>
**Survey Responses Retrieved:** {{survey_count}} items
</step>
---
<step n="5" goal="Collect from app stores" condition="app_store in selected_sources">
### App Store Reviews
<action>Query iOS App Store reviews for {{app_store.ios_app_id}}</action>
<action>Query Google Play reviews for {{app_store.android_package}}</action>
<action>Filter reviews since {{since_date}}</action>
**App Store Reviews Retrieved:** {{app_store_count}} items
</step>
---
<step n="6" goal="Manual feedback entry" condition="collection_mode == 'm'">
### Manual Feedback Entry
<ask>Enter feedback details:
Source (e.g., email, call, meeting): </ask>
<action>Store as {{manual_source}}</action>
<ask>Customer identifier (optional): </ask>
<action>Store as {{manual_customer}}</action>
<ask>Feedback content: </ask>
<action>Store as {{manual_content}}</action>
<ask>Any additional context or notes: </ask>
<action>Store as {{manual_notes}}</action>
<action>Create feedback item from manual entry</action>
<ask>Add another feedback item?
[y] Yes
[n] No
Choice: </ask>
<check if="choice == 'y'">
<action>Loop back to manual entry</action>
</check>
</step>
---
<step n="7" goal="Standardize and deduplicate">
### Process Collected Feedback
<action>Standardize all feedback to common format</action>
<action>Detect and merge duplicates</action>
<action>Assign unique feedback IDs</action>
**Collection Summary:**
| Source | Raw | Deduplicated | New |
|--------|-----|--------------|-----|
| In-App | {{in_app_raw}} | {{in_app_dedup}} | {{in_app_new}} |
| Support | {{support_raw}} | {{support_dedup}} | {{support_new}} |
| Surveys | {{survey_raw}} | {{survey_dedup}} | {{survey_new}} |
| App Store | {{app_store_raw}} | {{app_store_dedup}} | {{app_store_new}} |
| Manual | {{manual_count}} | {{manual_count}} | {{manual_count}} |
| **Total** | {{total_raw}} | {{total_dedup}} | {{total_new}} |
</step>
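Deduplication here can be as simple as hashing normalized content. A sketch, assuming first occurrence wins and IDs are derived from the content hash (the real ID scheme is up to the module):

```python
import hashlib
import re

def normalize(content: str) -> str:
    """Collapse whitespace and case so near-identical submissions hash the same."""
    return re.sub(r"\s+", " ", content.strip().lower())

def dedupe(raw_items):
    """raw_items: dicts with at least "source" and "content" (shape assumed).

    Returns (new_items, duplicates_removed); first occurrence wins."""
    seen, new_items, dupes = set(), [], 0
    for item in raw_items:
        key = hashlib.sha1(normalize(item["content"]).encode()).hexdigest()
        if key in seen:
            dupes += 1
            continue
        seen.add(key)
        item = dict(item, id=f"fb-{key[:8]}")  # stable ID derived from content
        new_items.append(item)
    return new_items, dupes
```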
---
<step n="8" goal="Store and publish">
### Store Feedback
<action>Save new feedback items to state</action>
<action>Link to release if release_id provided</action>
{{#each new_feedback_items}}
<publish event="feedback.received">
<payload>
<feedback_id>{{id}}</feedback_id>
<source>{{source}}</source>
<content>{{content}}</content>
<customer_id>{{customer_id}}</customer_id>
<release_id>{{release_id}}</release_id>
<timestamp>{{timestamp}}</timestamp>
</payload>
</publish>
{{/each}}
<action>Log: "Collected {{total_new}} new feedback items from {{sources_used}}"</action>
</step>
---
## Completion
Feedback collection complete.
**Summary:**
- Sources queried: {{sources_count}}
- New feedback items: {{total_new}}
- Duplicates removed: {{duplicates_removed}}
- Period: {{since_date}} to {{now}}
**Next Steps:**
1. Run `*analyze` to categorize and score feedback
2. Review high-priority items manually
3. Generate report with `*report`
**Output:** {{output_file_path}}

View File

@ -0,0 +1,39 @@
# Collect Feedback Workflow
name: collect-feedback
description: "Collect feedback from configured sources"
author: "BMad"
module: bmm-feedback
# Configuration
config_source: "{project-root}/.bmad/bmm-feedback/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
sources: "{config_source}:sources"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-feedback/workflows/collect-feedback"
instructions: "{installed_path}/instructions.md"
# Input
input:
source:
description: "Specific source to collect from (default: all enabled)"
required: false
default: "all"
since:
description: "Collect feedback since this date"
required: false
release_id:
description: "Link feedback to specific release"
required: false
# Output
default_output_file: "{output_folder}/feedback/collection-{{date}}.yaml"
# Events
publishes:
- event_type: "feedback.received"
condition: "For each new feedback item collected"
standalone: true

View File

@ -0,0 +1,178 @@
# BMM-Metrics Module
KPI/SLA tracking and quality gate management for production SaaS operations.
## Overview
The BMM-Metrics module provides comprehensive metrics tracking, quality gate validation, and SLA monitoring to enable data-driven product decisions and ensure release quality.
## Key Capabilities
- **KPI Definition & Tracking**: Define and monitor product and engineering KPIs
- **SLA Management**: Set thresholds and alert on breaches
- **Quality Gates**: Validate releases against quality criteria
- **Metrics Review**: Periodic analysis and trend identification
- **Event-Driven Integration**: Subscribes to story/sprint events, publishes quality status
## Module Structure
```
bmm-metrics/
├── README.md
├── config.yaml
├── agents/
│ └── metrics-analyst.agent.yaml
├── events/
│ ├── subscriptions.yaml # Events this module listens to
│ ├── publications.yaml # Events this module emits
│ └── handlers/
│ ├── on-story-done.xml
│ ├── on-story-ready.xml
│ ├── on-sprint-ended.xml
│ └── on-code-reviewed.xml
├── workflows/
│ ├── define-kpis/ # Define product and engineering KPIs
│ ├── define-slas/ # Set SLA thresholds
│ ├── track-metrics/ # Collect and report metrics
│ ├── quality-gate-check/ # Pre-release quality validation
│ └── metrics-review/ # Weekly/monthly analysis
├── tasks/
│ ├── calculate-velocity.xml
│ ├── generate-dashboard.xml
│ └── check-sla-breach.xml
├── data/
│   ├── metric-types.yaml
│ └── sla-thresholds.yaml
└── state/
└── module-state.yaml # Private module state
```
## Agent
### Metrics Analyst
**Role**: KPI definition, SLA monitoring, dashboard generation, quality gate validation
**Responsibilities**:
- Define meaningful KPIs aligned with business objectives
- Set realistic SLA thresholds based on baseline data
- Monitor metric trends and identify anomalies
- Validate quality gates before releases
- Generate periodic metrics reports
## Workflows
### define-kpis
Define product and engineering KPIs with targets and measurement frequency.
**Inputs**: PRD success metrics, business objectives
**Outputs**: KPI definitions document
### define-slas
Set SLA thresholds for critical metrics with alerting rules.
**Inputs**: KPI definitions, baseline data
**Outputs**: SLA configuration
### track-metrics
Periodic collection and reporting of metric values.
**Inputs**: Source data (stories, sprints, code)
**Outputs**: Metrics report, SLA breach alerts
### quality-gate-check
Validate a release candidate against quality criteria.
**Inputs**: Story ID, quality gate definitions
**Outputs**: Pass/fail status, detailed gate results
**Events Published**: `metrics.quality.pass` or `metrics.quality.fail`
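As a rough illustration of the gate semantics (not the workflow's actual implementation): threshold gates pass when the measured value meets the minimum, boolean gates when their condition holds, and any failed gate marked `blocking` turns the outcome into `metrics.quality.fail`.

```python
def check_gates(gates: dict, measurements: dict):
    """gates: config-style dict (key -> {"name", "threshold" or "required", "blocking"}).
    measurements: gate key -> observed value; True on a boolean gate is assumed
    to mean the condition holds (e.g. no_critical_issues=True: none are open)."""
    results, blocking_failure = [], False
    for key, gate in gates.items():
        actual = measurements.get(key)
        if "threshold" in gate:
            passed = actual is not None and actual >= gate["threshold"]
        else:
            passed = bool(actual)
        results.append({"gate_name": gate.get("name", key),
                        "passed": passed, "actual_value": actual})
        if not passed and gate.get("blocking", False):
            blocking_failure = True
    return ("metrics.quality.fail" if blocking_failure else "metrics.quality.pass",
            results)
```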
### metrics-review
Weekly or monthly analysis of metric trends.
**Inputs**: Historical metric data
**Outputs**: Trend analysis, recommendations
## Event Integration
### Subscriptions (Listens To)
| Event | Handler | Purpose |
|-------|---------|---------|
| `story.done` | on-story-done.xml | Track story completion metrics |
| `story.ready` | on-story-ready.xml | Track cycle time from created to ready |
| `sprint.ended` | on-sprint-ended.xml | Calculate sprint velocity |
| `code.reviewed` | on-code-reviewed.xml | Track code quality metrics |
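The `sprint.ended` handler above computes velocity trend from a 6-sprint rolling average, calling it improving above 110% of the average and declining below 90%. That arithmetic, sketched in Python (assumes at least one completed sprint):

```python
def velocity_trend(history: list[float], window: int = 6):
    """history: completed points per sprint, oldest first; window matches the
    6-sprint rolling average used by the on-sprint-ended handler."""
    current = history[-1]
    recent = history[-window:]
    rolling_average = sum(recent) / len(recent)
    if current > rolling_average * 1.1:
        trend = "improving"
    elif current < rolling_average * 0.9:
        trend = "declining"
    else:
        trend = "stable"
    return rolling_average, trend
```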
### Publications (Emits)
| Event | Trigger | Purpose |
|-------|---------|---------|
| `metrics.kpi.defined` | define-kpis workflow | Notify KPIs are defined |
| `metrics.kpi.updated` | track-metrics workflow | Notify KPI value changes |
| `metrics.sla.breach` | track-metrics workflow | Alert on SLA violations |
| `metrics.quality.pass` | quality-gate-check | Release can proceed |
| `metrics.quality.fail` | quality-gate-check | Release blocked |
## Configuration
```yaml
# config.yaml
module_name: bmm-metrics
module_version: "1.0.0"
# Metric collection settings
collection:
frequency: daily
retention_days: 90
# Quality gate defaults
quality_gates:
test_coverage_min: 80
code_review_required: true
no_critical_issues: true
no_security_vulnerabilities: true
# SLA defaults
sla_defaults:
warning_threshold_percent: 80
critical_threshold_percent: 95
# Dashboard settings
dashboard:
refresh_interval_minutes: 15
default_time_range: 30d
```
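The `sla_defaults` percentages are fractions of an SLA limit. A sketch for an upper-bound SLA such as cycle time; lower-bound SLAs such as completion rate would invert the comparisons:

```python
def sla_status(value: float, limit: float,
               warning_pct: float = 80, critical_pct: float = 95) -> str:
    """Status for an upper-bound SLA (value must stay below limit).

    Percent thresholds are fractions of the limit, per sla_defaults above."""
    if value >= limit:
        return "breach"
    if value >= limit * critical_pct / 100:
        return "critical"
    if value >= limit * warning_pct / 100:
        return "warning"
    return "ok"
```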
## Cross-Module Integration
- **bmm-release**: Quality gate status triggers release decisions
- **bmm-roadmap**: Velocity metrics inform capacity planning
- **bmm-priority**: Delivered value feeds prioritization scoring
- **bmm-feedback**: Usage metrics inform feedback analysis
## Quick Start
1. **Initialize metrics tracking**:
```
/bmad:bmm-metrics:workflows:define-kpis
```
2. **Set SLA thresholds**:
```
/bmad:bmm-metrics:workflows:define-slas
```
3. **Run quality gate** (typically automated):
```
/bmad:bmm-metrics:workflows:quality-gate-check
```
## Dependencies
- **BMM Module**: Story and sprint data
- **Core Events**: Event publishing infrastructure
## Author
Created as part of BMAD SaaS Extension - November 2024

View File

@ -0,0 +1,185 @@
# Metrics Analyst Agent Definition
# Compiles to .md during BMAD installation
name: metrics-analyst
displayName: Metrics Analyst
title: KPI Strategist + Quality Gate Guardian
icon: "📊"
persona:
role: "Metrics Analyst + KPI Strategist + Quality Gate Guardian"
identity: |
Data-driven analyst with deep expertise in product and engineering metrics.
Specializes in defining meaningful KPIs, monitoring SLAs, validating quality gates,
and translating metrics into actionable insights. Combines quantitative rigor with
business context to ensure metrics drive real improvements.
communication_style: |
Speaks in precise, data-backed statements. Uses charts and visualizations mentally.
Balances "the numbers say" with "this means for the business." Asks clarifying
questions about measurement methodology. Celebrates improvements, flags concerning
trends without alarm, always suggests actionable next steps.
principles:
- "What gets measured gets managed - but measure what matters"
- "Metrics without context are just numbers"
- "Quality gates exist to protect users, not punish developers"
- "Trends matter more than point-in-time values"
- "Every metric should have a clear owner and action plan"
- "If you can't act on it, don't track it"
activation:
critical: true
steps:
- step: 1
action: "Load persona from this agent file"
- step: 2
action: "Load module config from {project-root}/.bmad/bmm-metrics/config.yaml"
mandate: true
- step: 3
action: "Store config values: {user_name}, {project_name}, {quality_gates}, {sla}"
- step: 4
action: "Load current metrics state from {project-root}/.bmad/bmm-metrics/state/module-state.yaml if exists"
- step: 5
action: "Greet user and display menu"
format: |
📊 **Metrics Analyst** ready, {user_name}
Current project: **{project_name}**
{menu_items}
menu:
- cmd: "*help"
action: "Show numbered menu"
- cmd: "*define-kpis"
workflow: "{project-root}/.bmad/bmm-metrics/workflows/define-kpis/workflow.yaml"
description: "Define product and engineering KPIs"
- cmd: "*define-slas"
workflow: "{project-root}/.bmad/bmm-metrics/workflows/define-slas/workflow.yaml"
description: "Set SLA thresholds and alerting rules"
- cmd: "*track-metrics"
workflow: "{project-root}/.bmad/bmm-metrics/workflows/track-metrics/workflow.yaml"
description: "Collect and report current metrics"
- cmd: "*quality-gate"
workflow: "{project-root}/.bmad/bmm-metrics/workflows/quality-gate-check/workflow.yaml"
description: "Validate quality gates for a story/release"
- cmd: "*metrics-review"
workflow: "{project-root}/.bmad/bmm-metrics/workflows/metrics-review/workflow.yaml"
description: "Analyze metric trends and patterns"
- cmd: "*show-dashboard"
action: "#show-dashboard"
description: "Display current metrics dashboard"
- cmd: "*velocity-report"
action: "#velocity-report"
description: "Show sprint velocity history and trends"
- cmd: "*sla-status"
action: "#sla-status"
description: "Check current SLA compliance"
- cmd: "*exit"
action: "Exit agent with confirmation"
prompts:
show-dashboard:
id: show-dashboard
content: |
Generate a metrics dashboard summary by:
1. Loading current state from module-state.yaml
2. Calculating key metrics:
- Current sprint velocity vs average
- Story cycle time (last 30 days)
- Quality gate pass rate
- SLA compliance percentage
3. Display in a formatted dashboard view
4. Highlight any metrics that need attention
velocity-report:
id: velocity-report
content: |
Generate velocity report:
1. Load sprint history from state
2. Show last 6 sprints velocity
3. Calculate and display rolling average
4. Identify trend (improving/stable/declining)
5. Provide analysis of any significant changes
sla-status:
id: sla-status
content: |
Check SLA compliance:
1. Load SLA definitions from config
2. Load current metric values from state
3. Compare each metric against its SLA threshold
4. Flag any breaches or warnings
5. Provide compliance percentage and details
expertise:
domains:
- "Product metrics and KPIs"
- "Engineering metrics (DORA, velocity)"
- "Quality gate definition and validation"
- "SLA management and monitoring"
- "Data visualization and dashboards"
- "Trend analysis and forecasting"
frameworks:
- "DORA metrics (Deployment Frequency, Lead Time, Change Failure Rate, MTTR)"
- "Agile velocity tracking"
- "RICE/WSJF scoring"
- "OKR measurement"
- "Six Sigma quality metrics"
tools:
- "Metrics tracking and collection"
- "Dashboard generation"
- "Alerting and notification"
- "Trend analysis"
- "Report generation"
collaboration:
works_with:
- agent: "pm"
purpose: "Align KPIs with business objectives"
- agent: "tea"
purpose: "Define quality gates and test metrics"
- agent: "architect"
purpose: "Set engineering metrics and NFR targets"
- agent: "sm"
purpose: "Track sprint velocity and delivery metrics"
- agent: "release-manager"
purpose: "Quality gate validation before releases"
handoffs:
- to: "bmm-release"
event: "metrics.quality.pass"
description: "Quality gates passed, ready for release"
- to: "bmm-roadmap"
event: "metrics.velocity.calculated"
description: "Velocity data for capacity planning"
rules:
- "Always provide context with metrics - numbers alone are not enough"
- "Flag trends, not just thresholds - early warning is better than breach"
- "Quality gates should be achievable but meaningful"
- "SLAs should be based on baseline data, not arbitrary targets"
- "Every metric needs an owner who can take action"
- "Celebrate improvements as much as flagging problems"

View File

@ -0,0 +1,223 @@
# BMM-Metrics Module Configuration
# Version: 1.0.0
module_name: bmm-metrics
module_version: "1.0.0"
description: "KPI/SLA tracking and quality gate management for production SaaS"
# Inherit from parent module
config_source: "{project-root}/.bmad/bmm/config.yaml"
project_name: "{config_source}:project_name"
user_name: "{config_source}:user_name"
communication_language: "{config_source}:communication_language"
# Paths (resolved at runtime)
output_folder: "{config_source}:output_folder"
metrics_folder: "{output_folder}/metrics"
# Metric Collection Settings
collection:
# How often to automatically collect metrics
frequency: daily # daily, weekly, on_event
# How long to retain historical metric data
retention_days: 90
# Auto-collect on these events
trigger_events:
- story.done
- sprint.ended
- code.reviewed
# KPI Categories
kpi_categories:
product:
description: "Product health and user value metrics"
examples:
- "User Adoption Rate"
- "Feature Usage"
- "Customer Satisfaction (NPS)"
- "Time to Value"
engineering:
description: "Development team performance metrics"
examples:
- "Sprint Velocity"
- "Cycle Time"
- "Defect Rate"
- "Code Coverage"
quality:
description: "Code and release quality metrics"
examples:
- "Test Pass Rate"
- "Code Review Turnaround"
- "Production Incidents"
- "Mean Time to Recovery"
delivery:
description: "Delivery and deployment metrics"
examples:
- "Deployment Frequency"
- "Lead Time for Changes"
- "Change Failure Rate"
- "Mean Time to Recovery"
# Quality Gate Definitions
quality_gates:
# Code quality gates
test_coverage:
name: "Test Coverage"
description: "Minimum code test coverage percentage"
threshold: 80
unit: "%"
blocking: true
test_pass_rate:
name: "Test Pass Rate"
description: "All tests must pass"
threshold: 100
unit: "%"
blocking: true
code_review:
name: "Code Review Approved"
description: "Code must be reviewed and approved"
required: true
blocking: true
no_critical_issues:
name: "No Critical Issues"
description: "No critical or blocker issues open"
required: true
blocking: true
no_security_vulnerabilities:
name: "No Security Vulnerabilities"
description: "No high/critical security vulnerabilities"
required: true
blocking: true
documentation_complete:
name: "Documentation Complete"
description: "Required documentation is updated"
required: false
blocking: false
# SLA Configuration
sla:
# Default thresholds for SLA alerts
defaults:
warning_threshold_percent: 80 # Alert at 80% of limit
critical_threshold_percent: 95 # Critical at 95% of limit
# Standard SLA definitions
definitions:
story_cycle_time:
name: "Story Cycle Time"
description: "Time from story start to completion"
target: 5
unit: "days"
measurement: "business_days"
code_review_turnaround:
name: "Code Review Turnaround"
description: "Time from review request to completion"
target: 24
unit: "hours"
measurement: "calendar_hours"
defect_resolution:
name: "Defect Resolution Time"
description: "Time to resolve production defects"
target:
critical: 4
high: 24
medium: 72
low: 168
unit: "hours"
deployment_success_rate:
name: "Deployment Success Rate"
description: "Percentage of successful deployments"
target: 99
unit: "%"
# Dashboard Configuration
dashboard:
# How often to refresh dashboard data
refresh_interval_minutes: 15
# Default time range for charts
default_time_range: 30d # 7d, 14d, 30d, 90d
# Widgets to display
widgets:
- name: "Sprint Velocity"
type: "line_chart"
metric: "velocity"
time_range: "6_sprints"
- name: "Cycle Time Trend"
type: "line_chart"
metric: "cycle_time"
time_range: "30d"
- name: "Quality Gates Status"
type: "status_board"
metric: "quality_gates"
- name: "SLA Compliance"
type: "gauge"
metric: "sla_compliance"
- name: "Defect Trend"
type: "bar_chart"
metric: "defects"
time_range: "30d"
# Event Integration
events:
# Events this module subscribes to
subscriptions:
- event_type: "story.done"
handler: "events/handlers/on-story-done.xml"
- event_type: "story.ready"
handler: "events/handlers/on-story-ready.xml"
- event_type: "sprint.ended"
handler: "events/handlers/on-sprint-ended.xml"
- event_type: "code.reviewed"
handler: "events/handlers/on-code-reviewed.xml"
# Events this module publishes
publications:
- event_type: "metrics.kpi.defined"
- event_type: "metrics.kpi.updated"
- event_type: "metrics.sla.breach"
- event_type: "metrics.quality.pass"
- event_type: "metrics.quality.fail"
# Alerting
alerts:
enabled: true
channels:
# Where to send alerts
- type: "event" # Publish as event
enabled: true
- type: "log" # Log to metrics log
enabled: true
rules:
sla_breach:
severity: "high"
message: "SLA breach detected: {sla_name} at {actual_value} (threshold: {threshold})"
quality_gate_fail:
severity: "high"
message: "Quality gate failed: {gate_name} - {reason}"
velocity_drop:
severity: "medium"
condition: "velocity < 0.7 * average_velocity"
message: "Sprint velocity dropped significantly: {current} vs {average}"

View File

@ -0,0 +1,221 @@
# Metric Type Definitions
# Canonical definitions for all metric types in bmm-metrics module
version: "1.0.0"
# Velocity Metrics
velocity_metrics:
sprint_velocity:
name: "Sprint Velocity"
description: "Number of story points (or stories) completed in a sprint"
unit: "points"
aggregation: "sum"
higher_is_better: true
category: "velocity"
rolling_velocity:
name: "Rolling Velocity Average"
description: "Average velocity over last 6 sprints"
unit: "points"
aggregation: "average"
higher_is_better: true
category: "velocity"
window: 6
sprint_completion_rate:
name: "Sprint Completion Rate"
description: "Percentage of committed stories completed"
unit: "percent"
aggregation: "average"
higher_is_better: true
category: "velocity"
target: 80
# Delivery Metrics
delivery_metrics:
story_cycle_time:
name: "Story Cycle Time"
description: "Days from story start to completion"
unit: "days"
aggregation: "percentile"
percentiles: [50, 90, 95]
higher_is_better: false
category: "delivery"
pr_review_time:
name: "PR Review Time"
description: "Hours from PR creation to first review"
unit: "hours"
aggregation: "percentile"
percentiles: [50, 90]
higher_is_better: false
category: "delivery"
deployment_frequency:
name: "Deployment Frequency"
description: "Number of deployments per week"
unit: "deployments/week"
aggregation: "count"
higher_is_better: true
category: "delivery"
lead_time_for_changes:
name: "Lead Time for Changes"
description: "Time from commit to production (DORA)"
unit: "hours"
aggregation: "percentile"
percentiles: [50]
higher_is_better: false
category: "delivery"
dora_metric: true
# Quality Metrics
quality_metrics:
test_coverage:
name: "Test Coverage"
description: "Percentage of code covered by tests"
unit: "percent"
aggregation: "latest"
higher_is_better: true
category: "quality"
test_pass_rate:
name: "Test Pass Rate"
description: "Percentage of tests passing"
unit: "percent"
aggregation: "latest"
higher_is_better: true
category: "quality"
target: 100
defect_escape_rate:
name: "Defect Escape Rate"
description: "Percentage of defects found in production vs total"
unit: "percent"
aggregation: "average"
higher_is_better: false
category: "quality"
change_failure_rate:
name: "Change Failure Rate"
description: "Percentage of deployments causing failures (DORA)"
unit: "percent"
aggregation: "average"
higher_is_better: false
category: "quality"
dora_metric: true
code_review_coverage:
name: "Code Review Coverage"
description: "Percentage of changes with peer review"
unit: "percent"
aggregation: "average"
higher_is_better: true
category: "quality"
target: 100
# Operations Metrics
operations_metrics:
service_uptime:
name: "Service Uptime"
description: "Percentage of time service is available"
unit: "percent"
aggregation: "average"
higher_is_better: true
category: "operations"
mttr:
name: "Mean Time to Recovery"
description: "Average time to restore service after failure (DORA)"
unit: "hours"
aggregation: "average"
higher_is_better: false
category: "operations"
dora_metric: true
incident_response_time:
name: "Incident Response Time"
description: "Time from incident report to acknowledgment"
unit: "minutes"
aggregation: "percentile"
percentiles: [50, 90]
higher_is_better: false
category: "operations"
error_rate:
name: "Error Rate"
description: "Percentage of requests resulting in errors"
unit: "percent"
aggregation: "average"
higher_is_better: false
category: "operations"
# Security Metrics
security_metrics:
critical_vulnerabilities:
name: "Critical Vulnerabilities"
description: "Count of open critical security vulnerabilities"
unit: "count"
aggregation: "latest"
higher_is_better: false
category: "security"
target: 0
high_vulnerabilities:
name: "High Vulnerabilities"
description: "Count of open high security vulnerabilities"
unit: "count"
aggregation: "latest"
higher_is_better: false
category: "security"
security_scan_pass_rate:
name: "Security Scan Pass Rate"
description: "Percentage of security scans passing"
unit: "percent"
aggregation: "average"
higher_is_better: true
category: "security"
# Product Metrics
product_metrics:
user_activation_rate:
name: "User Activation Rate"
description: "Percentage of new users completing key action"
unit: "percent"
aggregation: "average"
higher_is_better: true
category: "product"
feature_adoption_rate:
name: "Feature Adoption Rate"
description: "Percentage of users using new features"
unit: "percent"
aggregation: "average"
higher_is_better: true
category: "product"
user_retention_d7:
name: "7-Day User Retention"
description: "Percentage of users returning after 7 days"
unit: "percent"
aggregation: "average"
higher_is_better: true
category: "product"
user_retention_d30:
name: "30-Day User Retention"
description: "Percentage of users returning after 30 days"
unit: "percent"
aggregation: "average"
higher_is_better: true
category: "product"
nps_score:
name: "Net Promoter Score"
description: "Customer loyalty metric (-100 to +100)"
unit: "score"
aggregation: "latest"
higher_is_better: true
category: "product"
range: [-100, 100]

View File

@ -0,0 +1,192 @@
# Default SLA Threshold Definitions
# These are starting points - customize via *define-slas workflow
version: "1.0.0"
# Delivery SLAs
delivery:
story_cycle_time:
name: "Story Cycle Time"
description: "Maximum days from story start to completion"
metric: "story_cycle_time"
comparison: "less_than"
target: 3
warning_threshold: 5
breach_threshold: 7
blocking: false
unit: "days"
sprint_completion_rate:
name: "Sprint Completion Rate"
description: "Minimum percentage of committed stories completed"
metric: "sprint_completion_rate"
comparison: "greater_than"
target: 85
warning_threshold: 75
breach_threshold: 60
blocking: false
unit: "percent"
pr_review_turnaround:
name: "PR Review Turnaround"
description: "Maximum hours to first review"
metric: "pr_review_time"
comparison: "less_than"
target: 4
warning_threshold: 8
breach_threshold: 24
blocking: false
unit: "hours"
# Quality SLAs
quality:
test_coverage:
name: "Test Coverage"
description: "Minimum code coverage percentage"
metric: "test_coverage"
comparison: "greater_than"
target: 80
warning_threshold: 70
breach_threshold: 60
blocking: true
unit: "percent"
test_pass_rate:
name: "Test Pass Rate"
description: "Minimum test pass rate"
metric: "test_pass_rate"
comparison: "greater_than"
target: 100
warning_threshold: 98
breach_threshold: 95
blocking: true
unit: "percent"
code_review_coverage:
name: "Code Review Coverage"
description: "Minimum percentage of changes reviewed"
metric: "code_review_coverage"
comparison: "greater_than"
target: 100
warning_threshold: 95
breach_threshold: 90
blocking: true
unit: "percent"
defect_escape_rate:
name: "Defect Escape Rate"
description: "Maximum defects escaping to production"
metric: "defect_escape_rate"
comparison: "less_than"
target: 3
warning_threshold: 5
breach_threshold: 10
blocking: false
unit: "percent"
# Security SLAs
security:
critical_vulnerabilities:
name: "No Critical Vulnerabilities"
description: "Zero critical security vulnerabilities"
metric: "critical_vulnerabilities"
comparison: "less_than"
target: 0
warning_threshold: 0
breach_threshold: 1
blocking: true
unit: "count"
high_vulnerabilities:
name: "High Vulnerabilities"
description: "Maximum high severity vulnerabilities"
metric: "high_vulnerabilities"
comparison: "less_than"
target: 0
warning_threshold: 2
breach_threshold: 5
blocking: false
unit: "count"
# Operations SLAs (for reference, typically managed by ops)
operations:
service_uptime:
name: "Service Uptime"
description: "Minimum service availability"
metric: "service_uptime"
comparison: "greater_than"
target: 99.9
warning_threshold: 99.5
breach_threshold: 99.0
blocking: false
unit: "percent"
measurement_window: "monthly"
mttr:
name: "Mean Time to Recovery"
description: "Maximum average recovery time"
metric: "mttr"
comparison: "less_than"
target: 1
warning_threshold: 4
breach_threshold: 24
blocking: false
unit: "hours"
incident_response_p1:
name: "P1 Incident Response"
description: "Maximum time to acknowledge critical incident"
metric: "incident_response_time"
comparison: "less_than"
target: 15
warning_threshold: 30
breach_threshold: 60
blocking: false
unit: "minutes"
severity: "P1"
# Velocity SLAs
velocity:
velocity_stability:
name: "Velocity Stability"
description: "Velocity should not drop significantly from average"
metric: "sprint_velocity"
comparison: "greater_than"
target_formula: "rolling_average * 0.8"
warning_formula: "rolling_average * 0.7"
breach_formula: "rolling_average * 0.6"
blocking: false
unit: "points"
note: "Dynamic threshold based on rolling average"
# Alerting Configuration Defaults
alerting:
warning:
channels:
- dashboard
- slack
timing:
remind_every: 24 # hours
breach:
channels:
- dashboard
- slack
- email
timing:
remind_every: 4 # hours
escalate_after: 24 # hours
# Escalation Defaults
escalation:
level_1:
after_hours: 0
notify: "team_lead"
level_2:
after_hours: 24
notify: "engineering_manager"
level_3:
after_hours: 72
notify: "director"

View File

@ -0,0 +1,123 @@
<event-handler
id="bmm-metrics/on-sprint-ended"
event="sprint.ended"
description="Calculate sprint velocity and update rolling metrics">
<objective>
When a sprint ends, calculate final velocity, compare to plan,
update rolling averages, and publish velocity metrics.
</objective>
<preconditions>
<check>Event payload contains sprint_id</check>
<check>Event payload contains sprint_number</check>
<check>Event payload contains completed_stories</check>
<check>Event payload contains incomplete_stories</check>
</preconditions>
<flow>
<step n="1" title="Load Sprint Data">
<action>Load module state from {module_path}/state/module-state.yaml</action>
<action>Get sprint tracking record for {event.payload.sprint_id}</action>
<action>Get planned stories count from sprint start event</action>
</step>
<step n="2" title="Calculate Velocity">
<action>Count completed stories: {event.payload.completed_stories.length}</action>
<action>Count incomplete stories: {event.payload.incomplete_stories.length}</action>
<action>Calculate completion rate: completed / planned * 100</action>
<check if="stories have points">
<action>Sum points from completed stories</action>
<action>velocity_points = sum of completed story points</action>
</check>
<check if="stories don't have points">
<action>velocity_count = completed story count</action>
</check>
</step>
<step n="3" title="Update Rolling Average">
<action>Load historical velocity from state (last 6 sprints)</action>
<action>Add current velocity to history</action>
<action>Calculate rolling_average = average of last 6 sprints</action>
<action>Determine trend: improving/stable/declining</action>
<trend_calculation>
<improving>Current velocity > rolling_average * 1.1</improving>
<declining>Current velocity less than rolling_average * 0.9</declining>
<stable>Otherwise</stable>
</trend_calculation>
</step>
<step n="4" title="Calculate Sprint Metrics">
<action>Calculate average cycle time for sprint stories</action>
<action>Calculate defects found during sprint</action>
<action>Calculate code review turnaround average</action>
<metrics>
<metric name="sprint_velocity" value="{velocity}" />
<metric name="sprint_completion_rate" value="{completion_rate}%" />
<metric name="sprint_avg_cycle_time" value="{avg_cycle_time}d" />
<metric name="sprint_stories_completed" value="{completed_count}" />
<metric name="sprint_stories_incomplete" value="{incomplete_count}" />
</metrics>
</step>
<step n="5" title="Check for Velocity Anomalies">
<check if="velocity < rolling_average * 0.7">
<action>Flag significant velocity drop</action>
<publish event="metrics.sla.breach">
<payload>
<sla_name>velocity_stability</sla_name>
<threshold>{rolling_average * 0.7}</threshold>
<actual_value>{velocity}</actual_value>
<breach_time>{event.timestamp}</breach_time>
<severity>warning</severity>
<affected_item>
<type>sprint</type>
<id>{event.payload.sprint_id}</id>
</affected_item>
<remediation>Review sprint retrospective for blockers</remediation>
</payload>
</publish>
</check>
</step>
<step n="6" title="Save State">
<action>Update sprint record with final metrics</action>
<action>Mark sprint as completed</action>
<action>Save state to module-state.yaml</action>
</step>
<step n="7" title="Publish Velocity Event">
<publish event="metrics.velocity.calculated">
<payload>
<sprint_id>{event.payload.sprint_id}</sprint_id>
<sprint_number>{event.payload.sprint_number}</sprint_number>
<velocity>{velocity}</velocity>
<planned>{planned_count}</planned>
<completed>{completed_count}</completed>
<rolling_average>{rolling_average}</rolling_average>
<trend>{trend}</trend>
<completion_rate>{completion_rate}</completion_rate>
</payload>
</publish>
<action>Log: "Sprint {sprint_number} velocity: {velocity} (avg: {rolling_average}, trend: {trend})"</action>
</step>
</flow>
<on-error>
<action>Log ERROR: "Failed to process sprint.ended: {error_message}"</action>
<action>Store event for manual review</action>
</on-error>
<output>
<field name="sprint_id" value="{event.payload.sprint_id}" />
<field name="velocity" value="{velocity}" />
<field name="rolling_average" value="{rolling_average}" />
<field name="trend" value="{trend}" />
<field name="completion_rate" value="{completion_rate}" />
</output>
</event-handler>

View File

@ -0,0 +1,114 @@
<event-handler
id="bmm-metrics/on-story-done"
event="story.done"
description="Track story completion metrics, update velocity, check quality gates">
<objective>
When a story is marked done, calculate cycle time, update sprint metrics,
and optionally trigger quality gate validation.
</objective>
<preconditions>
<check>Event payload contains story_id</check>
<check>Event payload contains epic_id</check>
<check>Event payload contains completion_time</check>
</preconditions>
<flow>
<step n="1" title="Load Story Metrics State">
<action>Load module state from {module_path}/state/module-state.yaml</action>
<action>Find or create story tracking record for {event.payload.story_id}</action>
</step>
<step n="2" title="Calculate Cycle Time">
<action>Get story start time from state (story.started event timestamp)</action>
<action>Calculate cycle_time = completion_time - start_time</action>
<action>Convert to business days if configured</action>
<action>Store in story metrics record</action>
<metrics_update>
<metric name="story_cycle_time">
<value>{cycle_time_days}</value>
<unit>days</unit>
<story_id>{event.payload.story_id}</story_id>
<epic_id>{event.payload.epic_id}</epic_id>
</metric>
</metrics_update>
</step>
<step n="3" title="Update Sprint Velocity Tracking">
<action>Identify current sprint from state</action>
<action>Increment sprint completed count</action>
<check if="story has points">
<action>Add points to sprint velocity total</action>
</check>
<action>Update sprint progress percentage</action>
</step>
<step n="4" title="Check SLA Compliance">
<action>Load SLA thresholds from config</action>
<check if="cycle_time > sla.story_cycle_time.target">
<action>Mark SLA breach</action>
<publish event="metrics.sla.breach">
<payload>
<sla_name>story_cycle_time</sla_name>
<threshold>{sla.story_cycle_time.target}</threshold>
<actual_value>{cycle_time_days}</actual_value>
<breach_time>{event.payload.completion_time}</breach_time>
<severity>warning</severity>
<affected_item>
<type>story</type>
<id>{event.payload.story_id}</id>
</affected_item>
</payload>
</publish>
</check>
</step>
<step n="5" title="Update Quality Metrics">
<check if="event.payload.tests_passed is defined">
<action>Update test pass rate metric</action>
</check>
<check if="event.payload.files_changed is defined">
<action>Track files changed for change frequency metric</action>
</check>
</step>
<step n="6" title="Trigger Quality Gate (Optional)">
<check if="config.auto_quality_gate_on_done == true">
<action>Queue quality-gate-check workflow for this story</action>
</check>
</step>
<step n="7" title="Save State and Publish Update">
<action>Save updated state to module-state.yaml</action>
<action>Log: "Processed story.done for {event.payload.story_id}: cycle_time={cycle_time_days}d"</action>
<publish event="metrics.kpi.updated">
<payload>
<kpi_name>story_completion</kpi_name>
<current_value>1</current_value>
<target>1</target>
<status>on_track</status>
<context>
<story_id>{event.payload.story_id}</story_id>
<cycle_time_days>{cycle_time_days}</cycle_time_days>
</context>
</payload>
</publish>
</step>
</flow>
<on-error>
<action>Log ERROR: "Failed to process story.done: {error_message}"</action>
<action>Store event in retry queue if transient error</action>
</on-error>
<output>
<field name="story_id" value="{event.payload.story_id}" />
<field name="cycle_time_days" value="{cycle_time_days}" />
<field name="sla_compliant" value="{sla_compliant}" />
<field name="sprint_velocity_updated" value="true" />
</output>
</event-handler>
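The business-day conversion in step 2 is the only non-obvious arithmetic in this handler. A rough sketch, assuming a Monday-to-Friday week and no holiday calendar:

```python
from datetime import datetime, timedelta

def business_days(start: datetime, end: datetime) -> int:
    """Whole business days between two timestamps, weekends skipped.

    A simplification: ignores holidays and partial days."""
    days, cursor = 0, start.date()
    while cursor < end.date():
        cursor += timedelta(days=1)
        if cursor.weekday() < 5:  # Monday-Friday
            days += 1
    return days

# A story started Friday morning and finished the following Tuesday
# counts 2 business days, not 4 calendar days.
print(business_days(datetime(2024, 1, 12, 9), datetime(2024, 1, 16, 17)))  # -> 2
```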

View File

@ -0,0 +1,261 @@
# BMM-Metrics Event Publications
# Defines which events this module emits
module: bmm-metrics
schema_version: "1.0"
publications:
# ============================================
# KPI Events
# ============================================
- event_type: "metrics.kpi.defined"
description: "Published when new KPIs are defined or updated"
source_workflow: "define-kpis"
schema:
payload:
kpi_set_id:
type: string
required: true
description: "Unique identifier for this KPI definition set"
kpis:
type: array
required: true
description: "List of KPI definitions"
items:
name: string
category: string
target: number
unit: string
frequency: string
defined_at:
type: string
format: ISO8601
required: true
subscribers:
- bmm-roadmap # For capacity planning
- bmm-priority # For value tracking
- event_type: "metrics.kpi.updated"
description: "Published when KPI values change"
source_workflow: "track-metrics"
schema:
payload:
kpi_name:
type: string
required: true
previous_value:
type: number
required: false
current_value:
type: number
required: true
target:
type: number
required: true
status:
type: string
enum: ["on_track", "at_risk", "off_track"]
required: true
trend:
type: string
enum: ["improving", "stable", "declining"]
required: false
updated_at:
type: string
format: ISO8601
required: true
subscribers:
- bmm-roadmap # For tracking progress
- bmm-priority # For priority adjustments
# ============================================
# SLA Events
# ============================================
- event_type: "metrics.sla.breach"
description: "Published when an SLA threshold is exceeded"
source_workflow: "track-metrics"
schema:
payload:
sla_name:
type: string
required: true
threshold:
type: number
required: true
actual_value:
type: number
required: true
breach_time:
type: string
format: ISO8601
required: true
severity:
type: string
enum: ["warning", "critical"]
required: true
affected_item:
type: object
required: false
description: "The item that breached (story, sprint, etc.)"
remediation:
type: string
required: false
description: "Suggested action to address breach"
subscribers:
- bmm-release # May block releases
- bmm-roadmap # For planning adjustments
# ============================================
# Quality Gate Events
# ============================================
- event_type: "metrics.quality.pass"
description: "Published when all quality gates pass for a release candidate"
source_workflow: "quality-gate-check"
schema:
payload:
story_id:
type: string
required: true
release_candidate_id:
type: string
required: false
gates_checked:
type: array
items: string
required: true
description: "List of gates that were validated"
gate_results:
type: array
required: true
items:
gate_name: string
passed: boolean
actual_value: string
threshold: string
overall_score:
type: number
minimum: 0
maximum: 100
required: true
timestamp:
type: string
format: ISO8601
required: true
subscribers:
- bmm-release # Triggers release process
- event_type: "metrics.quality.fail"
description: "Published when one or more quality gates fail"
source_workflow: "quality-gate-check"
schema:
payload:
story_id:
type: string
required: true
release_candidate_id:
type: string
required: false
failed_gates:
type: array
required: true
items:
gate_name: string
expected: string
actual: string
reason: string
blocking:
type: boolean
required: true
description: "Whether this blocks the release"
remediation_steps:
type: array
items: string
required: false
timestamp:
type: string
format: ISO8601
required: true
subscribers:
- bmm-release # Blocks release process
# ============================================
# Velocity Events
# ============================================
- event_type: "metrics.velocity.calculated"
description: "Published when sprint velocity is calculated"
source_workflow: "track-metrics"
trigger_event: "sprint.ended"
schema:
payload:
sprint_id:
type: string
required: true
sprint_number:
type: integer
required: true
velocity:
type: number
required: true
description: "Points/count completed this sprint"
planned:
type: number
required: true
rolling_average:
type: number
required: true
description: "Average of last N sprints"
trend:
type: string
enum: ["improving", "stable", "declining"]
calculated_at:
type: string
format: ISO8601
required: true
subscribers:
- bmm-roadmap # For capacity planning
# ============================================
# Dashboard Events
# ============================================
- event_type: "metrics.dashboard.updated"
description: "Published when dashboard data is refreshed"
source_workflow: "track-metrics"
schema:
payload:
dashboard_id:
type: string
required: true
widgets_updated:
type: array
items: string
data_timestamp:
type: string
format: ISO8601
next_refresh:
type: string
format: ISO8601
subscribers: [] # Informational only
# Publishing Configuration
publishing:
# Default correlation behavior
correlation:
inherit_from_trigger: true
generate_if_missing: true
# Retry on publish failure
retry:
enabled: true
max_attempts: 3
backoff_ms: [500, 2000, 5000]
# Logging
logging:
log_all_publications: true
include_payload: false # Don't log payload for privacy
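The retry policy above maps to a small helper. A sketch under one reading of the config (`max_attempts` counts total attempts; `backoff_ms[i]` is the wait before the next attempt); `publish` stands in for whatever transport is configured:

```python
import time

def publish_with_retry(publish, event, max_attempts=3, backoff_ms=(500, 2000, 5000)):
    """Retry a failing publish call with the backoff schedule above.

    `publish` is a stand-in for the configured transport and must raise
    an exception on failure."""
    for attempt in range(max_attempts):
        try:
            return publish(event)
        except Exception:
            if attempt + 1 == max_attempts:
                raise  # exhausted; caller (or dead-letter handling) takes over
            time.sleep(backoff_ms[min(attempt, len(backoff_ms) - 1)] / 1000)
```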

View File

@ -0,0 +1,161 @@
# BMM-Metrics Event Subscriptions
# Defines which events this module listens to and how to handle them
module: bmm-metrics
schema_version: "1.0"
subscriptions:
# ============================================
# Story Lifecycle Events
# ============================================
- event_type: "story.done"
handler: "handlers/on-story-done.xml"
description: "Track story completion metrics and cycle time"
enabled: true
processing:
mode: async
priority: normal
timeout_ms: 30000
actions:
- "Calculate cycle time (ready → done)"
- "Update velocity tracking"
- "Check for SLA breaches"
- "Trigger quality gate if configured"
- event_type: "story.ready"
handler: "handlers/on-story-ready.xml"
description: "Track story readiness cycle time"
enabled: true
processing:
mode: async
priority: low
timeout_ms: 10000
actions:
- "Record ready timestamp"
- "Calculate prep time (created → ready)"
- "Update metrics state"
- event_type: "story.created"
handler: "handlers/on-story-created.xml"
description: "Initialize story tracking"
enabled: true
processing:
mode: async
priority: low
timeout_ms: 10000
actions:
- "Record creation timestamp"
- "Initialize story metrics record"
# ============================================
# Sprint Lifecycle Events
# ============================================
- event_type: "sprint.started"
handler: "handlers/on-sprint-started.xml"
description: "Initialize sprint metrics tracking"
enabled: true
processing:
mode: async
priority: normal
timeout_ms: 15000
actions:
- "Record sprint start"
- "Capture planned scope"
- "Initialize sprint metrics"
- event_type: "sprint.ended"
handler: "handlers/on-sprint-ended.xml"
description: "Calculate sprint velocity and metrics"
enabled: true
processing:
mode: async
priority: high
timeout_ms: 60000
actions:
- "Calculate sprint velocity"
- "Compare actual vs planned"
- "Update rolling velocity average"
- "Generate sprint metrics summary"
- "Publish metrics.kpi.updated event"
# ============================================
# Code Review Events
# ============================================
- event_type: "code.reviewed"
handler: "handlers/on-code-reviewed.xml"
description: "Track code review metrics and quality"
enabled: true
processing:
mode: async
priority: normal
timeout_ms: 20000
actions:
- "Record review completion"
- "Calculate review turnaround time"
- "Track quality score"
- "Check review SLA compliance"
# ============================================
# Epic Lifecycle Events
# ============================================
- event_type: "epic.completed"
handler: "handlers/on-epic-completed.xml"
description: "Calculate epic-level metrics"
enabled: true
processing:
mode: async
priority: normal
timeout_ms: 60000
actions:
- "Aggregate story metrics for epic"
- "Calculate epic delivery metrics"
- "Update roadmap progress metrics"
# ============================================
# Release Events (from bmm-release module)
# ============================================
- event_type: "release.deployed"
handler: "handlers/on-release-deployed.xml"
description: "Track deployment metrics"
enabled: true
processing:
mode: async
priority: high
timeout_ms: 30000
actions:
- "Record deployment timestamp"
- "Track deployment frequency"
- "Update lead time metrics"
- event_type: "release.rollback"
handler: "handlers/on-release-rollback.xml"
description: "Track rollback incidents"
enabled: true
processing:
mode: async
priority: high
timeout_ms: 15000
actions:
- "Record rollback incident"
- "Update change failure rate"
- "Calculate impact metrics"
# Retry Configuration
retry:
max_attempts: 3
backoff_ms: [1000, 5000, 15000]
dead_letter_enabled: true
# Filtering
filters:
# Only process events from these sources (empty = all)
source_modules: []
# Ignore events older than this
max_age_hours: 24
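
Putting the retry and filter settings together, a dispatcher for these subscriptions reduces to: look up the handler by event type, drop stale events, retry with the configured backoff, and dead-letter on exhaustion. A minimal Python sketch of that loop; the dict-shaped events and callable handlers are illustrative stand-ins for the XML handler runtime, not part of this module:

```python
import time
from datetime import datetime, timedelta, timezone

BACKOFF_MS = [1000, 5000, 15000]   # mirrors retry.backoff_ms
MAX_ATTEMPTS = 3                   # mirrors retry.max_attempts
MAX_AGE = timedelta(hours=24)      # mirrors filters.max_age_hours

def dispatch(event: dict, handlers: dict, dead_letter: list) -> bool:
    """Route one event to its subscribed handler with retry and dead-lettering."""
    handler = handlers.get(event["type"])
    if handler is None:
        return False  # no subscription for this event type
    created = datetime.fromisoformat(event["timestamp"].replace("Z", "+00:00"))
    if datetime.now(timezone.utc) - created > MAX_AGE:
        return False  # filtered: older than max_age_hours
    for attempt in range(MAX_ATTEMPTS):
        try:
            handler(event)
            return True
        except Exception:
            if attempt < MAX_ATTEMPTS - 1:
                time.sleep(BACKOFF_MS[attempt] / 1000)
    dead_letter.append(event)  # dead_letter_enabled: true
    return False
```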

View File

@ -0,0 +1,234 @@
# BMM-Metrics Module Manifest
# Metrics, KPIs, and Quality Gate Tracking for BMAD
name: bmm-metrics
version: "1.0.0"
display_name: "BMAD Metrics Module"
description: "Track KPIs, SLAs, quality gates, and delivery metrics with event-driven architecture"
author: "BMad"
license: "MIT"
# Module category and tags
category: "operations"
tags:
- metrics
- kpis
- sla
- quality-gates
- velocity
- dora-metrics
# Dependencies
dependencies:
core:
version: ">=1.0.0"
required: true
bmm-core:
version: ">=1.0.0"
required: false
reason: "Access to story and sprint data"
# Event Integration
events:
subscribes:
- story.created
- story.ready
- story.done
- sprint.started
- sprint.ended
- code.reviewed
- epic.completed
- release.deployed
- release.rollback
publishes:
- metrics.kpi.defined
- metrics.kpi.updated
- metrics.sla.defined
- metrics.sla.breach
- metrics.sla.warning
- metrics.quality.pass
- metrics.quality.fail
- metrics.velocity.calculated
- metrics.review.completed
- metrics.anomaly.detected
# Agents provided by this module
agents:
- name: metrics-analyst
file: agents/metrics-analyst.agent.yaml
description: "Metrics Analyst + KPI Strategist + Quality Gate Guardian"
icon: "📊"
# Workflows provided
workflows:
- name: define-kpis
path: workflows/define-kpis
description: "Define product and engineering KPIs"
standalone: true
- name: define-slas
path: workflows/define-slas
description: "Set SLA thresholds and alerting rules"
standalone: true
- name: track-metrics
path: workflows/track-metrics
description: "Collect and report current metrics"
standalone: true
- name: quality-gate-check
path: workflows/quality-gate-check
description: "Validate quality gates for release"
standalone: true
triggers:
- module: bmm-release
workflow: release-planning
condition: "on quality.pass event"
- name: metrics-review
path: workflows/metrics-review
description: "Analyze trends and generate insights"
standalone: true
# Tasks provided
tasks:
- name: calculate-velocity
file: tasks/calculate-velocity.xml
description: "Calculate sprint velocity"
- name: calculate-cycle-time
file: tasks/calculate-cycle-time.xml
description: "Calculate story cycle time"
- name: check-sla-breach
file: tasks/check-sla-breach.xml
description: "Check if metric breaches SLA"
- name: generate-dashboard
file: tasks/generate-dashboard.xml
description: "Generate metrics dashboard"
# Event Handlers
event_handlers:
- name: on-story-done
file: events/handlers/on-story-done.xml
event: story.done
description: "Track story completion metrics"
- name: on-sprint-ended
file: events/handlers/on-sprint-ended.xml
event: sprint.ended
description: "Calculate sprint velocity"
# Data files
data:
- name: metric-types
file: data/metric-types.yaml
description: "Canonical metric type definitions"
- name: sla-thresholds
file: data/sla-thresholds.yaml
description: "Default SLA threshold definitions"
# Configuration schema
config_schema:
project_name:
type: string
required: true
description: "Project name for reports"
user_name:
type: string
required: true
description: "User name for greetings"
output_folder:
type: string
required: true
default: "docs"
description: "Output folder for reports"
quality_gates:
type: object
required: true
description: "Quality gate configuration"
properties:
test_coverage:
type: object
properties:
threshold: { type: number, default: 80 }
blocking: { type: boolean, default: true }
test_pass_rate:
type: object
properties:
threshold: { type: number, default: 100 }
blocking: { type: boolean, default: true }
code_review:
type: object
properties:
required: { type: boolean, default: true }
blocking: { type: boolean, default: true }
no_critical_issues:
type: object
properties:
required: { type: boolean, default: true }
blocking: { type: boolean, default: true }
no_security_vulnerabilities:
type: object
properties:
required: { type: boolean, default: true }
blocking: { type: boolean, default: true }
documentation_complete:
type: object
properties:
required: { type: boolean, default: false }
blocking: { type: boolean, default: false }
sla:
type: object
description: "SLA configuration"
dashboard:
type: object
description: "Dashboard configuration"
properties:
refresh_interval: { type: string, default: "1h" }
default_period: { type: string, default: "current_sprint" }
# Installation hooks
install:
pre:
- action: "Verify core module installed"
- action: "Check event bus availability"
post:
- action: "Initialize module state file"
- action: "Subscribe to events"
- action: "Generate slash commands"
uninstall:
pre:
- action: "Unsubscribe from events"
post:
- action: "Remove module state (optional)"
# Slash commands to generate
slash_commands:
- name: "metrics-define-kpis"
workflow: "define-kpis"
description: "Define product and engineering KPIs"
- name: "metrics-define-slas"
workflow: "define-slas"
description: "Set SLA thresholds and alerting"
- name: "metrics-track"
workflow: "track-metrics"
description: "Track and report current metrics"
- name: "metrics-quality-gate"
workflow: "quality-gate-check"
description: "Run quality gate validation"
- name: "metrics-review"
workflow: "metrics-review"
description: "Analyze metric trends"

View File

@ -0,0 +1,88 @@
# BMM-Metrics Module State
# This file is auto-managed by the bmm-metrics module
# Manual edits may be overwritten
version: "1.0.0"
module: "bmm-metrics"
initialized: false
last_updated: null
# Current Sprint Tracking
current_sprint:
sprint_id: null
sprint_number: null
start_date: null
end_date: null
planned_stories: 0
completed_stories: 0
velocity_points: 0
# Velocity History (last 6 sprints for rolling average)
velocity_history: []
# Example entry:
# - sprint_id: "sprint-1"
# sprint_number: 1
# velocity: 21
# completion_rate: 85
# date: "2024-01-15"
# Cycle Time History
cycle_time_history: []
# Example entry:
# - story_id: "STORY-123"
# cycle_time_days: 3.5
# completed_date: "2024-01-15"
# Current KPI Values
kpis:
velocity:
current_velocity: null
rolling_average: null
trend: null # improving, stable, declining
quality:
test_coverage: null
test_pass_rate: null
defect_escape_rate: null
delivery:
cycle_time_p50: null
cycle_time_p90: null
pr_review_time_p50: null
deployment_frequency: null
# SLA Status
sla_status:
overall_compliance: null
breaches: []
# Example breach:
# - sla_name: "story_cycle_time"
# breach_time: "2024-01-15T10:30:00Z"
# actual_value: 8
# threshold: 7
# resolved: false
# Metric Snapshots (for trend analysis)
snapshots: []
# Example snapshot:
# - timestamp: "2024-01-15T00:00:00Z"
# period: "sprint-1"
# metrics:
# velocity: 21
# test_coverage: 82
# cycle_time_p50: 2.5
# Event Processing State
event_processing:
last_event_id: null
last_event_time: null
events_processed_count: 0
# Review History
reviews: []
# Example review:
# - type: "sprint"
# date: "2024-01-15"
# period: "sprint-1"
# overall_health: 85
# key_findings: ["Velocity improving", "Coverage below target"]

View File

@ -0,0 +1,81 @@
<?xml version="1.0" encoding="UTF-8"?>
<task
id="bmm-metrics/calculate-cycle-time"
name="Calculate Cycle Time"
description="Calculate story cycle time from start to completion">
<objective>
Calculate the cycle time for a story from when work started to when it was
marked as done. Supports business days calculation and percentile reporting.
</objective>
<input>
<param name="story_id" type="string" required="true" description="Story identifier" />
<param name="start_time" type="datetime" required="true" description="When work started on story" />
<param name="end_time" type="datetime" required="true" description="When story was completed" />
<param name="use_business_days" type="boolean" required="false" default="true" description="Exclude weekends" />
</input>
<flow>
<step n="1" title="Validate Timestamps">
<action>Verify start_time is before end_time</action>
<action>Parse timestamps to datetime objects</action>
<check if="end_time <= start_time">
<action>Error: Invalid time range</action>
</check>
</step>
<step n="2" title="Calculate Raw Duration">
<action>raw_duration = end_time - start_time</action>
<action>raw_hours = raw_duration.total_hours()</action>
<action>raw_days = raw_hours / 24</action>
</step>
<step n="3" title="Calculate Business Days" condition="use_business_days == true">
<action>Initialize business_days = 0</action>
<action>For each day in range(start_time, end_time):</action>
<action> If day.weekday() not in [Saturday, Sunday]:</action>
<action> business_days += 1</action>
<action>Adjust for partial start/end days</action>
<action>Store as {{cycle_time_days}}</action>
</step>
<step n="3-alt" title="Use Calendar Days" condition="use_business_days == false">
<action>cycle_time_days = raw_days</action>
</step>
<step n="4" title="Categorize Duration">
<action>Determine cycle time category</action>
<check if="cycle_time_days <= 1">
<action>category = "excellent"</action>
</check>
<check if="cycle_time_days <= 3">
<action>category = "good"</action>
</check>
<check if="cycle_time_days <= 5">
<action>category = "acceptable"</action>
</check>
<check if="cycle_time_days > 5">
<action>category = "needs_attention"</action>
</check>
</step>
<step n="5" title="Update Statistics">
<action>Load cycle time history from state</action>
<action>Add current cycle_time_days to history</action>
<action>Calculate p50 = percentile(history, 50)</action>
<action>Calculate p90 = percentile(history, 90)</action>
</step>
</flow>
<output>
<field name="story_id" value="{story_id}" />
<field name="cycle_time_days" value="{cycle_time_days}" />
<field name="cycle_time_hours" value="{raw_hours}" />
<field name="category" value="{category}" />
<field name="business_days_used" value="{use_business_days}" />
<field name="p50_cycle_time" value="{p50}" />
<field name="p90_cycle_time" value="{p90}" />
</output>
</task>
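
The business-day walk in step 3 and the percentile lookup in step 5 are the only non-trivial parts of this task. A sketch of both in Python, under the task's stated rules (Saturday and Sunday excluded); the partial-day adjustment mentioned in step 3 is simplified to whole days here:

```python
import math
from datetime import datetime, timedelta

def business_days(start: datetime, end: datetime) -> int:
    """Count days from start to end, skipping Saturday (5) and Sunday (6)."""
    days, cursor = 0, start
    while cursor < end:
        if cursor.weekday() < 5:
            days += 1
        cursor += timedelta(days=1)
    return days

def percentile(history: list, p: int) -> float:
    """Nearest-rank percentile over the recorded cycle times."""
    ordered = sorted(history)
    idx = max(0, math.ceil(p / 100 * len(ordered)) - 1)
    return ordered[idx]

start = datetime(2024, 1, 12, 9)   # a Friday morning
end = datetime(2024, 1, 16, 17)    # the following Tuesday
print(business_days(start, end))   # 3 -- the weekend is excluded
print(percentile([1.5, 2.0, 3.5, 4.0, 8.0], 90))  # 8.0
```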

View File

@ -0,0 +1,78 @@
<?xml version="1.0" encoding="UTF-8"?>
<task
id="bmm-metrics/calculate-velocity"
name="Calculate Velocity"
description="Calculate sprint velocity from completed stories">
<objective>
Calculate sprint velocity by counting completed stories or summing story points,
and update rolling averages for trend analysis.
</objective>
<input>
<param name="sprint_id" type="string" required="true" description="Sprint identifier" />
<param name="completed_stories" type="array" required="true" description="Array of completed story objects" />
<param name="use_points" type="boolean" required="false" default="true" description="Use story points vs count" />
</input>
<flow>
<step n="1" title="Validate Input">
<action>Verify sprint_id is provided</action>
<action>Verify completed_stories array exists</action>
<action>Load sprint configuration for points vs count preference</action>
</step>
<step n="2" title="Calculate Raw Velocity">
<check if="use_points == true">
<action>Sum story points from completed_stories</action>
<action>velocity = SUM(story.points for story in completed_stories)</action>
</check>
<check if="use_points == false">
<action>Count completed stories</action>
<action>velocity = COUNT(completed_stories)</action>
</check>
<action>Store as {{current_velocity}}</action>
</step>
<step n="3" title="Load Historical Data">
<action>Load velocity history from module state</action>
<action>Get last 6 sprint velocities</action>
<action>Store as {{velocity_history}}</action>
</step>
<step n="4" title="Calculate Rolling Average">
<action>Add current_velocity to velocity_history</action>
<action>Take last 6 entries for calculation</action>
<action>rolling_average = AVG(velocity_history[-6:])</action>
<action>Store as {{rolling_average}}</action>
</step>
<step n="5" title="Determine Trend">
<action>Compare current_velocity to rolling_average</action>
<check if="current_velocity > rolling_average * 1.1">
<action>trend = "improving"</action>
</check>
<check if="current_velocity < rolling_average * 0.9">
<action>trend = "declining"</action>
</check>
<check if="else">
<action>trend = "stable"</action>
</check>
</step>
<step n="6" title="Calculate Variance">
<action>variance = ((current_velocity - rolling_average) / rolling_average) * 100</action>
<action>Store as {{variance_percent}}</action>
</step>
</flow>
<output>
<field name="sprint_id" value="{sprint_id}" />
<field name="velocity" value="{current_velocity}" />
<field name="rolling_average" value="{rolling_average}" />
<field name="trend" value="{trend}" />
<field name="variance_percent" value="{variance_percent}" />
<field name="story_count" value="{completed_stories.length}" />
</output>
</task>
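
Steps 4-6 amount to a 6-sprint windowed average plus a plus-or-minus 10% band for the trend label. A compact sketch; the window size and thresholds come straight from the task, while the flat list shape for history is an assumption:

```python
def velocity_summary(history: list, current: int) -> dict:
    """Append the current sprint and derive rolling average, trend, variance."""
    window = (history + [current])[-6:]          # last 6 sprints incl. current
    rolling = sum(window) / len(window)
    if current > rolling * 1.1:
        trend = "improving"
    elif current < rolling * 0.9:
        trend = "declining"
    else:
        trend = "stable"
    variance = (current - rolling) / rolling * 100
    return {"velocity": current, "rolling_average": rolling,
            "trend": trend, "variance_percent": round(variance, 1)}

print(velocity_summary([18, 20, 22, 19, 21], 27))
# {'velocity': 27, 'rolling_average': 21.16..., 'trend': 'improving',
#  'variance_percent': 27.6}
```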

View File

@ -0,0 +1,109 @@
<?xml version="1.0" encoding="UTF-8"?>
<task
id="bmm-metrics/check-sla-breach"
name="Check SLA Breach"
description="Check if a metric value breaches its SLA threshold">
<objective>
Compare a metric value against its defined SLA thresholds and determine
if a warning or breach should be triggered.
</objective>
<input>
<param name="sla_name" type="string" required="true" description="Name of the SLA to check" />
<param name="actual_value" type="number" required="true" description="Current metric value" />
<param name="context" type="object" required="false" description="Additional context (story_id, sprint_id, etc.)" />
</input>
<flow>
<step n="1" title="Load SLA Definition">
<action>Load SLA configuration from module config</action>
<action>Find SLA by sla_name</action>
<check if="sla not found">
<action>Error: Unknown SLA '{sla_name}'</action>
</check>
<action>Extract target, warning_threshold, breach_threshold</action>
<action>Extract comparison_type (greater_than, less_than, equals)</action>
</step>
<step n="2" title="Determine Status">
<check if="comparison_type == 'less_than'">
<!-- Lower is better (e.g., cycle time, defect rate) -->
<check if="actual_value <= target">
<action>status = "healthy"</action>
<action>level = "green"</action>
</check>
<check if="actual_value > target AND actual_value <= warning_threshold">
<action>status = "warning"</action>
<action>level = "yellow"</action>
</check>
<check if="actual_value > warning_threshold AND actual_value <= breach_threshold">
<action>status = "at_risk"</action>
<action>level = "orange"</action>
</check>
<check if="actual_value > breach_threshold">
<action>status = "breach"</action>
<action>level = "red"</action>
</check>
</check>
<check if="comparison_type == 'greater_than'">
<!-- Higher is better (e.g., test coverage, completion rate) -->
<check if="actual_value >= target">
<action>status = "healthy"</action>
<action>level = "green"</action>
</check>
<check if="actual_value < target AND actual_value >= warning_threshold">
<action>status = "warning"</action>
<action>level = "yellow"</action>
</check>
<check if="actual_value < warning_threshold AND actual_value >= breach_threshold">
<action>status = "at_risk"</action>
<action>level = "orange"</action>
</check>
<check if="actual_value < breach_threshold">
<action>status = "breach"</action>
<action>level = "red"</action>
</check>
</check>
</step>
<step n="3" title="Calculate Deviation">
<action>deviation = actual_value - target</action>
<action>deviation_percent = (deviation / target) * 100</action>
<action>Store as {{deviation}} and {{deviation_percent}}</action>
</step>
<step n="4" title="Determine Blocking Status">
<action>Load blocking configuration for this SLA</action>
<check if="status == 'breach' AND sla.blocking == true">
<action>is_blocking = true</action>
</check>
<check if="else">
<action>is_blocking = false</action>
</check>
</step>
<step n="5" title="Generate Remediation">
<check if="status in ['warning', 'at_risk', 'breach']">
<action>Load remediation steps for this SLA</action>
<action>Generate remediation_message based on status</action>
</check>
</step>
</flow>
<output>
<field name="sla_name" value="{sla_name}" />
<field name="target" value="{target}" />
<field name="actual_value" value="{actual_value}" />
<field name="status" value="{status}" />
<field name="level" value="{level}" />
<field name="deviation" value="{deviation}" />
<field name="deviation_percent" value="{deviation_percent}" />
<field name="is_breach" value="{status == 'breach'}" />
<field name="is_blocking" value="{is_blocking}" />
<field name="remediation" value="{remediation_message}" />
<field name="context" value="{context}" />
</output>
</task>
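
The two threshold ladders in step 2 mirror each other, so a single parameterized comparison avoids duplicating the branches. A sketch, assuming thresholds arrive as plain numbers rather than the full SLA config object:

```python
import operator

def sla_status(actual: float, target: float, warning: float,
               breach: float, comparison: str) -> tuple:
    """Return (status, level) for a metric against its SLA thresholds."""
    # For less_than SLAs lower is better, so "within the limit" means <=;
    # for greater_than SLAs higher is better, so it means >=.
    within = operator.le if comparison == "less_than" else operator.ge
    for limit, status, level in [(target, "healthy", "green"),
                                 (warning, "warning", "yellow"),
                                 (breach, "at_risk", "orange")]:
        if within(actual, limit):
            return status, level
    return "breach", "red"

print(sla_status(8, 5, 6, 7, "less_than"))         # ('breach', 'red')
print(sla_status(85, 80, 70, 60, "greater_than"))  # ('healthy', 'green')
```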

View File

@ -0,0 +1,121 @@
<?xml version="1.0" encoding="UTF-8"?>
<task
id="bmm-metrics/generate-dashboard"
name="Generate Dashboard"
description="Generate a metrics dashboard summary from current state">
<objective>
Create a formatted dashboard view of current metrics, KPIs, and SLA status
with visual indicators and trend information.
</objective>
<input>
<param name="dashboard_type" type="string" required="false" default="summary" description="Type: summary, detailed, executive" />
<param name="include_trends" type="boolean" required="false" default="true" description="Include trend arrows" />
<param name="period" type="string" required="false" default="current_sprint" description="Time period for metrics" />
</input>
<flow>
<step n="1" title="Load Current State">
<action>Load module state from module-state.yaml</action>
<action>Load KPI definitions from config</action>
<action>Load SLA definitions from config</action>
<action>Get last update timestamp</action>
</step>
<step n="2" title="Calculate Summary Metrics">
<action>Calculate overall KPI health percentage</action>
<action>Calculate SLA compliance percentage</action>
<action>Count metrics by status (green, yellow, red)</action>
<action>Identify metrics needing attention</action>
</step>
<step n="3" title="Generate Velocity Section">
<action>Get current sprint velocity</action>
<action>Get rolling average velocity</action>
<action>Determine trend (up/down/stable arrow)</action>
<action>Calculate sprint progress percentage</action>
<format>
## Velocity
| Metric | Value | Trend |
|--------|-------|-------|
| Current Sprint | {velocity} | {trend_arrow} |
| Rolling Avg | {rolling_avg} | - |
| Completion | {completion}% | {completion_trend} |
</format>
</step>
<step n="4" title="Generate Quality Section">
<action>Get test coverage percentage</action>
<action>Get test pass rate</action>
<action>Get defect counts by severity</action>
<action>Calculate quality score</action>
<format>
## Quality
| Metric | Value | Status |
|--------|-------|--------|
| Test Coverage | {coverage}% | {coverage_status} |
| Pass Rate | {pass_rate}% | {pass_status} |
| Open Defects | {defect_count} | {defect_status} |
</format>
</step>
<step n="5" title="Generate Delivery Section">
<action>Get average cycle time</action>
<action>Get PR review turnaround</action>
<action>Get deployment frequency</action>
<format>
## Delivery
| Metric | Value | Target | Status |
|--------|-------|--------|--------|
| Cycle Time (P50) | {cycle_p50}d | {cycle_target}d | {cycle_status} |
| PR Review | {pr_time}h | {pr_target}h | {pr_status} |
| Deploys/Week | {deploy_freq} | {deploy_target} | {deploy_status} |
</format>
</step>
<step n="6" title="Generate SLA Status Section">
<action>List all SLAs with current status</action>
<action>Highlight any breaches or warnings</action>
<format>
## SLA Compliance: {sla_compliance}%
| SLA | Status | Value | Threshold |
|-----|--------|-------|-----------|
{#each slas}
| {name} | {status_emoji} | {value} | {threshold} |
{/each}
</format>
</step>
<step n="7" title="Generate Attention Items">
<action>List metrics that need attention</action>
<action>Sort by severity (red first, then yellow)</action>
<format>
## Needs Attention
{#each attention_items}
- {status_emoji} **{metric_name}**: {issue} ({recommendation})
{/each}
</format>
</step>
<step n="8" title="Assemble Dashboard">
<action>Combine all sections based on dashboard_type</action>
<action>Add header with timestamp and period</action>
<action>Add health score summary</action>
</step>
</flow>
<output>
<field name="dashboard_content" value="{assembled_dashboard}" />
<field name="overall_health" value="{health_percentage}" />
<field name="sla_compliance" value="{sla_compliance_percentage}" />
<field name="attention_count" value="{attention_items.length}" />
<field name="last_updated" value="{timestamp}" />
</output>
</task>
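
Most of this task is assembling markdown tables from state values, so the only real code is a small formatter. A sketch of the velocity section from step 3, assuming metrics arrive as a flat dict; the Unicode arrows are an illustrative choice, not mandated by the task:

```python
ARROWS = {"improving": "↑", "declining": "↓", "stable": "→"}

def velocity_section(m: dict) -> str:
    """Render the Velocity table from step 3 as markdown."""
    rows = [
        ("Current Sprint", m["velocity"], ARROWS[m["trend"]]),
        ("Rolling Avg", round(m["rolling_average"], 1), "-"),
        ("Completion", f"{m['completion']}%", ARROWS[m["completion_trend"]]),
    ]
    lines = ["## Velocity",
             "| Metric | Value | Trend |",
             "|--------|-------|-------|"]
    lines += [f"| {name} | {value} | {trend} |" for name, value, trend in rows]
    return "\n".join(lines)

print(velocity_section({"velocity": 27, "rolling_average": 21.2,
                        "trend": "improving", "completion": 85,
                        "completion_trend": "stable"}))
```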

View File

@ -0,0 +1,312 @@
# Define KPIs Instructions
## Objective
Define meaningful Key Performance Indicators (KPIs) for product and engineering metrics that align with business objectives and drive actionable improvements.
## Prerequisites
- Understanding of business objectives and success criteria
- Access to historical data for baseline calculation (if available)
- Stakeholder input on what matters most
---
<step n="1" goal="Understand KPI context">
### Understand Business Context
<ask>What type of KPIs would you like to define?
[p] Product KPIs - User engagement, adoption, retention
[e] Engineering KPIs - Velocity, quality, DORA metrics
[q] Quality KPIs - Test coverage, defect rates, SLA compliance
[a] All of the above
Choice: </ask>
<action>Store selection as {{kpi_category}}</action>
<ask>What are the primary business objectives these KPIs should support?
(e.g., "Improve user retention", "Increase deployment frequency", "Reduce defect escape rate")
Objectives: </ask>
<action>Store as {{business_objectives}}</action>
</step>
---
<step n="2" goal="Define product KPIs" condition="kpi_category in ['p', 'a']">
### Product KPIs
<template-guidance>
For each product KPI, we need:
- Name and description
- Formula/calculation method
- Target value and threshold levels
- Measurement frequency
- Owner responsible for tracking
</template-guidance>
**Recommended Product KPIs:**
| KPI | Description | Typical Target |
|-----|-------------|----------------|
| User Activation Rate | % of new users completing key action | 40-60% |
| Feature Adoption Rate | % of users using new features | 20-40% |
| User Retention (D7/D30) | % of users returning after 7/30 days | 40%/20% |
| NPS Score | Net Promoter Score | >30 |
| Time to Value | Time for user to achieve first success | <5 min |
<ask>Which product KPIs would you like to track?
Select by letter or provide custom KPIs:
[a] User Activation Rate
[b] Feature Adoption Rate
[c] User Retention
[d] NPS Score
[e] Time to Value
[f] Custom (describe)
Selection: </ask>
<action>For each selected KPI, gather target and threshold values</action>
<ask>For each selected KPI, provide:
- Target value (green zone)
- Warning threshold (yellow zone)
- Critical threshold (red zone)
Format: KPI: target / warning / critical</ask>
<action>Store as {{product_kpis}}</action>
</step>
---
<step n="3" goal="Define engineering KPIs" condition="kpi_category in ['e', 'a']">
### Engineering KPIs
**DORA Metrics (Industry Standard):**
| Metric | Description | Elite | High | Medium | Low |
|--------|-------------|-------|------|--------|-----|
| Deployment Frequency | How often code deploys to production | On-demand | Daily-Weekly | Weekly-Monthly | Monthly+ |
| Lead Time for Changes | Time from commit to production | <1 hour | 1 day-1 week | 1 week-1 month | 1-6 months |
| Change Failure Rate | % of deployments causing failures | 0-15% | 16-30% | 31-45% | 46-60% |
| Mean Time to Recovery | Time to restore service | <1 hour | <1 day | <1 week | 1 week+ |
**Velocity Metrics:**
| Metric | Description | Typical Range |
|--------|-------------|---------------|
| Sprint Velocity | Story points completed per sprint | Varies by team |
| Story Cycle Time | Days from start to done | 2-5 days |
| PR Review Time | Hours to first review | <4 hours |
| Build Success Rate | % of builds that pass | >95% |
<ask>Which engineering KPIs would you like to track?
[a] All DORA metrics
[b] Deployment Frequency only
[c] Lead Time for Changes only
[d] Change Failure Rate only
[e] MTTR only
[f] Sprint Velocity
[g] Story Cycle Time
[h] PR Review Time
[i] Build Success Rate
[j] Custom (describe)
Selection (comma-separated): </ask>
<action>Store as {{engineering_kpis}}</action>
<ask>What performance tier are you targeting?
[e] Elite
[h] High
[m] Medium
Target tier: </ask>
<action>Set thresholds based on selected tier</action>
</step>
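For reference, classifying a team's deployment frequency against the DORA table above could look like the sketch below. The numeric cut-offs per week are one interpretation of the table's ranges, not values defined by this workflow:

```python
def deploy_frequency_tier(deploys_per_week: float) -> str:
    """Map weekly deployment frequency onto the DORA tiers above."""
    if deploys_per_week >= 7:        # multiple per day, roughly on-demand
        return "elite"
    if deploys_per_week >= 1:        # daily to weekly
        return "high"
    if deploys_per_week >= 0.25:     # weekly to monthly
        return "medium"
    return "low"

print(deploy_frequency_tier(10))   # elite
print(deploy_frequency_tier(0.5))  # medium
```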
---
<step n="4" goal="Define quality KPIs" condition="kpi_category in ['q', 'a']">
### Quality KPIs
| Metric | Description | Typical Target |
|--------|-------------|----------------|
| Test Coverage | % of code covered by tests | >80% |
| Test Pass Rate | % of tests passing | 100% |
| Defect Escape Rate | Defects found in production vs total | <5% |
| Code Review Coverage | % of PRs with reviews | 100% |
| Security Vulnerability Count | Open high/critical vulns | 0 |
| Technical Debt Ratio | Debt time vs dev time | <5% |
<ask>Which quality KPIs would you like to track?
[a] Test Coverage
[b] Test Pass Rate
[c] Defect Escape Rate
[d] Code Review Coverage
[e] Security Vulnerabilities
[f] Technical Debt Ratio
[g] All of the above
Selection: </ask>
<action>Store as {{quality_kpis}}</action>
<ask>Set targets for selected quality KPIs:
Format: KPI: target
Example:
- Test Coverage: 85%
- Defect Escape Rate: <3%
Your targets: </ask>
<action>Parse and store quality KPI targets</action>
</step>
---
<step n="5" goal="Establish baselines">
### Establish Baselines
<ask>Do you have historical data to establish baselines?
[y] Yes - I can provide historical values
[n] No - Start fresh and establish baselines over time
[p] Partial - Some metrics have history
Response: </ask>
<check if="response == 'y' or response == 'p'">
<ask>Please provide baseline values for metrics with historical data:
Format: metric_name: baseline_value
Example:
- deployment_frequency: 2/week
- test_coverage: 72%
Baselines: </ask>
<action>Store as {{baselines}}</action>
</check>
<check if="response == 'n'">
<action>Mark all KPIs as "baseline_pending"</action>
<action>Use the {{baseline_period}} input (default 30d) as the window for automatic baseline calculation</action>
</check>
</step>
---
<step n="6" goal="Assign ownership">
### Assign KPI Ownership
<template-guidance>
Each KPI needs:
- An owner who monitors and reports on it
- A frequency of measurement
- An action plan when thresholds are breached
</template-guidance>
<ask>Who should own each category of KPIs?
- Product KPIs owner:
- Engineering KPIs owner:
- Quality KPIs owner:
(Enter names or roles): </ask>
<action>Store as {{kpi_owners}}</action>
<ask>How frequently should KPIs be reviewed?
[d] Daily dashboard updates
[w] Weekly review meetings
[s] Sprint-based review
[m] Monthly business review
Frequency: </ask>
<action>Store as {{review_frequency}}</action>
</step>
---
<step n="7" goal="Generate KPI definition file">
### Generate KPI Definitions
<action>Generate KPI definition YAML file with all collected information</action>
<template-output section="kpi-definitions">
Generate a YAML file containing:
- All defined KPIs organized by category
- Each KPI with: name, description, formula, target, warning_threshold, critical_threshold
- Baseline values where available
- Ownership assignments
- Review frequency
- Business objectives alignment
</template-output>
**Generated KPI Definition:**
- File: {{output_file_path}}
- Total KPIs defined: {{total_kpi_count}}
- Categories: {{categories_list}}
</step>
---
<step n="8" goal="Publish KPI definition event" critical="true">
### Publish KPI Definition Event
<publish event="metrics.kpi.defined">
<payload>
<kpi_count>{{total_kpi_count}}</kpi_count>
<categories>{{categories_list}}</categories>
<kpis>{{kpis_array}}</kpis>
<baselines>{{baselines}}</baselines>
<owners>{{kpi_owners}}</owners>
<review_frequency>{{review_frequency}}</review_frequency>
<timestamp>{{current_timestamp}}</timestamp>
</payload>
</publish>
<action>Log: "KPI definitions created - {{total_kpi_count}} KPIs across {{categories_list}}"</action>
</step>
---
## Completion
KPI definition complete.
**Summary:**
- **Total KPIs Defined:** {{total_kpi_count}}
- **Categories:** {{categories_list}}
- **Review Frequency:** {{review_frequency}}
- **Definition File:** {{output_file_path}}
**Next Steps:**
1. Share KPI definitions with stakeholders for review
2. Configure dashboards to display KPI values
3. Set up alerting for threshold breaches
4. Begin baseline measurement period if needed
Use `*track-metrics` to start collecting KPI values.

View File

@ -0,0 +1,43 @@
# Define KPIs Workflow
name: define-kpis
description: "Define and configure product and engineering KPIs aligned with business objectives"
author: "BMad"
module: bmm-metrics
# Configuration
config_source: "{project-root}/.bmad/bmm-metrics/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
user_name: "{config_source}:user_name"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-metrics/workflows/define-kpis"
instructions: "{installed_path}/instructions.md"
template: "{installed_path}/kpi-template.md"
# Input
input:
kpi_category:
description: "Category of KPIs to define (product, engineering, quality, all)"
required: false
default: "all"
baseline_period:
description: "Period to use for baseline calculation (e.g., 30d, 90d)"
required: false
default: "30d"
# Output
default_output_file: "{output_folder}/metrics/kpi-definitions-{{date}}.yaml"
# Events this workflow publishes
publishes:
- event_type: "metrics.kpi.defined"
condition: "When KPIs are successfully defined"
# Dependencies
requires:
- module: "bmm-core"
reason: "Access to project context and story data"
standalone: true

View File

@ -0,0 +1,297 @@
# Define SLAs Instructions
## Objective
Define Service Level Agreements (SLAs) with clear thresholds, monitoring rules, and escalation procedures to ensure consistent delivery and quality standards.
## Prerequisites
- KPIs defined (run `*define-kpis` first if not done)
- Understanding of team capacity and realistic targets
- Stakeholder agreement on acceptable service levels
---
<step n="1" goal="Understand SLA requirements">
### Understand SLA Context
<ask>What type of SLAs would you like to define?
[d] Delivery SLAs - Story cycle time, sprint completion, release frequency
[q] Quality SLAs - Test coverage, defect rates, review turnaround
[o] Operations SLAs - Uptime, response time, incident resolution
[a] All of the above
Choice: </ask>
<action>Store as {{sla_category}}</action>
<ask>What is the business context for these SLAs?
(e.g., "Client contract requirements", "Internal quality standards", "Compliance requirements")
Context: </ask>
<action>Store as {{sla_context}}</action>
</step>
---
<step n="2" goal="Define delivery SLAs" condition="sla_category in ['d', 'a']">
### Delivery SLAs
| SLA | Description | Typical Target |
|-----|-------------|----------------|
| Story Cycle Time | Max days from start to done | 5 days |
| Sprint Completion Rate | Min % of committed stories completed | 80% |
| Release Frequency | Min releases per period | 1/week |
| PR Review Turnaround | Max hours to first review | 4 hours |
| Bug Fix SLA | Max time to fix by severity | P1: 4h, P2: 24h, P3: 1w |
<ask>Define your delivery SLAs:
**Story Cycle Time**
- Target (green): ___ days
- Warning (yellow): ___ days
- Breach (red): ___ days
**Sprint Completion Rate**
- Target (green): ____%
- Warning (yellow): ____%
- Breach (red): ____%
**PR Review Turnaround**
- Target (green): ___ hours
- Warning (yellow): ___ hours
- Breach (red): ___ hours
Enter values: </ask>
<action>Parse and store as {{delivery_slas}}</action>
<ask>Should delivery SLAs be blocking for releases?
[y] Yes - Breach prevents release
[n] No - Warning only, allow release
[s] Some - Select which are blocking
Response: </ask>
<action>Store blocking configuration</action>
</step>
---
<step n="3" goal="Define quality SLAs" condition="sla_category in ['q', 'a']">
### Quality SLAs
| SLA | Description | Typical Target |
|-----|-------------|----------------|
| Test Coverage | Min code coverage percentage | 80% |
| Test Pass Rate | Min tests passing | 100% |
| Code Review Coverage | % of changes with review | 100% |
| Defect Escape Rate | Max defects escaping to prod | <5% |
| Security Scan Pass | No high/critical vulnerabilities | 0 |
<ask>Define your quality SLAs:
**Test Coverage**
- Target (green): ____%
- Warning (yellow): ____%
- Breach (red): ____%
**Test Pass Rate**
- Target (green): ____%
- Warning (yellow): ____%
- Breach (red): ____%
**Code Review Coverage**
- Required: [y/n]
- Blocking for release: [y/n]
**Security Vulnerabilities**
- Max critical allowed: ___
- Max high allowed: ___
Enter values: </ask>
<action>Parse and store as {{quality_slas}}</action>
</step>
---
<step n="4" goal="Define operations SLAs" condition="sla_category in ['o', 'a']">
### Operations SLAs
| SLA | Description | Typical Target |
|-----|-------------|----------------|
| Service Uptime | Min availability percentage | 99.9% |
| API Response Time | P95 response time | <200ms |
| Incident Response | Max time to acknowledge | P1: 15m, P2: 1h |
| MTTR | Mean time to recovery | <4 hours |
| Error Rate | Max error percentage | <1% |
<ask>Define your operations SLAs:
**Service Uptime**
- Target: ____%
- Measurement window: [daily/weekly/monthly]
**API Response Time (P95)**
- Target: ___ ms
- Warning: ___ ms
- Breach: ___ ms
**Incident Response Time**
- P1 (Critical): ___ minutes
- P2 (High): ___ hours
- P3 (Medium): ___ hours
**MTTR Target**
- Target: ___ hours
- Max allowed: ___ hours
Enter values: </ask>
<action>Parse and store as {{operations_slas}}</action>
</step>
---
<step n="5" goal="Configure alerting rules">
### Configure Alerting
<ask>How should SLA breaches be communicated?
**Warning Level (approaching threshold):**
[a] Dashboard indicator only
[b] Slack/Teams notification
[c] Email notification
[d] All of the above
**Breach Level (threshold exceeded):**
[a] Dashboard indicator only
[b] Slack/Teams notification
[c] Email notification
[d] PagerDuty/On-call alert
[e] All of the above
Warning notification:
Breach notification: </ask>
<action>Store as {{alerting_config}}</action>
<ask>Who should receive SLA notifications?
- Delivery SLAs: (team/role/email)
- Quality SLAs: (team/role/email)
- Operations SLAs: (team/role/email)
Recipients: </ask>
<action>Store as {{notification_recipients}}</action>
<ask>Set notification timing:
- Warn at ___% of threshold (e.g., 80%)
- Remind every ___ hours if breach persists
Values: </ask>
<action>Store as {{notification_timing}}</action>
</step>
---
<step n="6" goal="Define escalation procedures">
### Escalation Procedures
<ask>Define escalation for persistent breaches:
**Level 1 (Immediate):**
- Who: ___
- Action: ___
**Level 2 (After ___ hours):**
- Who: ___
- Action: ___
**Level 3 (After ___ hours):**
- Who: ___
- Action: ___
Escalation details: </ask>
<action>Store as {{escalation_procedures}}</action>
</step>
---
<step n="7" goal="Generate SLA definition file">
### Generate SLA Definitions
<action>Generate comprehensive SLA definition file</action>
<template-output section="sla-definitions">
Generate a YAML file containing:
- All SLA definitions organized by category
- Each SLA with: name, target, warning_threshold, breach_threshold, blocking status
- Alerting configuration
- Notification recipients
- Escalation procedures
- Measurement methodology
</template-output>
**Generated SLA Definition:**
- File: {{output_file_path}}
- Total SLAs defined: {{total_sla_count}}
- Blocking SLAs: {{blocking_sla_count}}
</step>
---
<step n="8" goal="Publish SLA definition event" critical="true">
### Publish SLA Definition Event
<publish event="metrics.sla.defined">
<payload>
<sla_count>{{total_sla_count}}</sla_count>
<categories>{{categories_list}}</categories>
<blocking_slas>{{blocking_slas_list}}</blocking_slas>
<alerting_config>{{alerting_config}}</alerting_config>
<timestamp>{{current_timestamp}}</timestamp>
</payload>
</publish>
<action>Log: "SLA definitions created - {{total_sla_count}} SLAs defined"</action>
</step>
---
## Completion
SLA definition complete.
**Summary:**
- **Total SLAs Defined:** {{total_sla_count}}
- **Blocking SLAs:** {{blocking_sla_count}}
- **Categories:** {{categories_list}}
- **Definition File:** {{output_file_path}}
**Next Steps:**
1. Review SLA definitions with stakeholders
2. Configure monitoring dashboards
3. Set up alerting integrations
4. Communicate SLAs to the team
Use `*track-metrics` to check current SLA compliance.

View File

@ -0,0 +1,37 @@
# Define SLAs Workflow
name: define-slas
description: "Set Service Level Agreement thresholds and alerting rules for metrics"
author: "BMad"
module: bmm-metrics
# Configuration
config_source: "{project-root}/.bmad/bmm-metrics/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
user_name: "{config_source}:user_name"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-metrics/workflows/define-slas"
instructions: "{installed_path}/instructions.md"
template: "{installed_path}/sla-template.yaml"
# Input
input:
sla_category:
description: "Category of SLAs to define (delivery, quality, operations)"
required: false
default: "all"
inherit_from:
description: "Existing SLA file to extend"
required: false
# Output
default_output_file: "{output_folder}/metrics/sla-definitions-{{date}}.yaml"
# Events this workflow publishes
publishes:
- event_type: "metrics.sla.defined"
condition: "When SLAs are successfully defined"
standalone: true

View File

@ -0,0 +1,343 @@
# Metrics Review Instructions
## Objective
Perform in-depth analysis of metric trends, identify patterns and anomalies, correlate metrics with events, and generate actionable insights for continuous improvement.
## Prerequisites
- Historical metric data (at least 3 periods recommended)
- Module state with recorded metric history
- Context about recent changes (releases, team changes, process changes)
---
<step n="1" goal="Define review scope">
### Define Review Scope
<ask>What type of metrics review is this?
[s] Sprint retrospective review
[m] Monthly business review
[q] Quarterly planning review
[a] Ad-hoc investigation
Review type: </ask>
<action>Store as {{review_type}}</action>
<ask>Is there a specific focus area for this review?
[v] Velocity and delivery
[q] Quality and testing
[p] Process efficiency
[c] Capacity and planning
[n] No specific focus - comprehensive review
Focus: </ask>
<action>Store as {{focus_area}}</action>
<ask>What period should we analyze?
- From: (date or "last_sprint")
- To: (date or "current")
Period: </ask>
<action>Store as {{analysis_period}}</action>
</step>
---
<step n="2" goal="Load historical data">
### Load Historical Data
<action>Load metric history from module-state.yaml</action>
<action>Load previous {{review_type}} review if exists</action>
<action>Identify comparison baseline period</action>
**Data Loaded:**
- Historical periods available: {{periods_count}}
- Earliest data: {{earliest_date}}
- Latest data: {{latest_date}}
- Baseline period: {{baseline_period}}
</step>
---
<step n="3" goal="Trend analysis">
### Trend Analysis
<action>Calculate trends for each tracked metric</action>
<action>Identify improving, stable, and declining metrics</action>
**Velocity Trends:**
```
Sprint Velocity (Last 6 Sprints)
│ ▲
│ ▲ ▲ ▲
│ ▲ ▲ ▲
│ ▲ ▲
├─────────────────
S1 S2 S3 S4 S5 S6
Rolling Average: {{velocity_avg}}
Trend: {{velocity_trend}} ({{velocity_change}}%)
```
**Quality Trends:**
| Metric | 3 Periods Ago | 2 Periods Ago | Last Period | Current | Trend |
|--------|---------------|---------------|-------------|---------|-------|
| Test Coverage | {{cov_3}}% | {{cov_2}}% | {{cov_1}}% | {{cov_0}}% | {{cov_trend}} |
| Pass Rate | {{pass_3}}% | {{pass_2}}% | {{pass_1}}% | {{pass_0}}% | {{pass_trend}} |
| Defect Rate | {{def_3}} | {{def_2}} | {{def_1}} | {{def_0}} | {{def_trend}} |
**Delivery Trends:**
| Metric | Trend | Change | Assessment |
|--------|-------|--------|------------|
| Cycle Time | {{cycle_trend}} | {{cycle_change}} | {{cycle_assessment}} |
| PR Review Time | {{pr_trend}} | {{pr_change}} | {{pr_assessment}} |
| Deploy Frequency | {{deploy_trend}} | {{deploy_change}} | {{deploy_assessment}} |
</step>
---
<step n="4" goal="Anomaly detection">
### Anomaly Detection
<action>Identify metrics with significant deviations from baseline</action>
<action>Flag sudden changes (>20% swing)</action>
<action>Check for correlation with known events</action>
**Anomalies Detected:**
{{#each anomalies}}
#### {{metric_name}}
- **Current Value:** {{current}}
- **Expected Range:** {{expected_min}} - {{expected_max}}
- **Deviation:** {{deviation_percent}}%
- **First Detected:** {{first_detected}}
- **Possible Causes:** {{possible_causes}}
{{/each}}
<check if="significant_anomaly_detected">
<publish event="metrics.anomaly.detected">
<payload>
<metric_name>{{anomaly_metric}}</metric_name>
<current_value>{{anomaly_current}}</current_value>
<expected_value>{{anomaly_expected}}</expected_value>
<deviation_percent>{{anomaly_deviation}}</deviation_percent>
<severity>{{anomaly_severity}}</severity>
<timestamp>{{current_timestamp}}</timestamp>
</payload>
</publish>
</check>
</step>
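The ">20% swing" rule in this step is a relative-deviation check against a baseline window. A sketch, assuming each metric's history is a list of numbers with the newest value last:

```python
def detect_anomalies(metrics: dict, swing: float = 0.20) -> list:
    """Flag metrics whose latest value deviates more than swing from the prior mean."""
    anomalies = []
    for name, series in metrics.items():
        if len(series) < 3:
            continue  # need a baseline of at least two prior periods
        *baseline, current = series
        expected = sum(baseline) / len(baseline)
        if expected == 0:
            continue
        deviation = (current - expected) / expected
        if abs(deviation) > swing:
            anomalies.append({"metric": name, "current": current,
                              "expected": round(expected, 2),
                              "deviation_percent": round(deviation * 100, 1)})
    return anomalies

print(detect_anomalies({"test_coverage": [82, 81, 83, 62],
                        "velocity": [20, 21, 22, 23]}))
# flags test_coverage (-24.4%), leaves velocity alone
```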
---
<step n="5" goal="Correlation analysis">
### Correlation Analysis
<action>Analyze relationships between metrics</action>
<action>Identify leading and lagging indicators</action>
**Key Correlations Found:**
| Metric A | Metric B | Correlation | Insight |
|----------|----------|-------------|---------|
| Team Size | Velocity | {{team_vel_corr}} | {{team_vel_insight}} |
| PR Review Time | Cycle Time | {{pr_cycle_corr}} | {{pr_cycle_insight}} |
| Test Coverage | Defect Escape | {{cov_def_corr}} | {{cov_def_insight}} |
| Sprint Planning Accuracy | Completion Rate | {{plan_comp_corr}} | {{plan_comp_insight}} |
**Leading Indicators:**
These metrics predict future performance:
{{#each leading_indicators}}
- {{indicator}}: {{prediction}}
{{/each}}
</step>
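The correlation table above can be produced with a plain Pearson coefficient over paired period values. A dependency-free sketch; aligning the two series by period is assumed to have happened already:

```python
import math

def pearson(xs: list, ys: list) -> float:
    """Pearson correlation between two equally long metric series."""
    n = len(xs)
    mx, my = sum(xs) / n, sum(ys) / n
    cov = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
    sx = math.sqrt(sum((x - mx) ** 2 for x in xs))
    sy = math.sqrt(sum((y - my) ** 2 for y in ys))
    return cov / (sx * sy) if sx and sy else 0.0

pr_review_hours = [2, 4, 8, 12, 6]
cycle_time_days = [2.0, 2.5, 4.0, 5.5, 3.0]
print(round(pearson(pr_review_hours, cycle_time_days), 2))  # ~0.99: strong link
```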
---
<step n="6" goal="Period comparison">
### Period-over-Period Comparison
<action>Compare current period to baseline</action>
<action>Calculate improvements and regressions</action>
**Comparison: {{current_period}} vs {{baseline_period}}**
| Category | Baseline | Current | Change | Status |
|----------|----------|---------|--------|--------|
| Velocity | {{vel_base}} | {{vel_curr}} | {{vel_change}} | {{vel_status}} |
| Quality | {{qual_base}} | {{qual_curr}} | {{qual_change}} | {{qual_status}} |
| Delivery | {{del_base}} | {{del_curr}} | {{del_change}} | {{del_status}} |
| SLA Compliance | {{sla_base}}% | {{sla_curr}}% | {{sla_change}} | {{sla_status}} |
**Improvements:**
{{#each improvements}}
- {{metric}}: Improved by {{improvement}} ({{cause}})
{{/each}}
**Regressions:**
{{#each regressions}}
- {{metric}}: Declined by {{decline}} ({{cause}})
{{/each}}
</step>
---
<step n="7" goal="Root cause analysis" condition="regressions_exist or anomalies_exist">
### Root Cause Analysis
<ask>For the identified regressions/anomalies, can you provide context?
Recent events to consider:
- Team changes (joiners/leavers)
- Process changes
- Technical changes (new tools, migrations)
- External factors (holidays, incidents)
Context: </ask>
<action>Store as {{external_context}}</action>
**Root Cause Analysis:**
{{#each issues_to_analyze}}
#### {{metric_name}} {{direction}}
**Symptoms:**
- {{symptom_1}}
- {{symptom_2}}
**Potential Root Causes:**
1. {{cause_1}} (likelihood: {{likelihood_1}})
2. {{cause_2}} (likelihood: {{likelihood_2}})
3. {{cause_3}} (likelihood: {{likelihood_3}})
**Recommended Investigation:**
- {{investigation_step_1}}
- {{investigation_step_2}}
{{/each}}
</step>
---
<step n="8" goal="Generate recommendations">
### Recommendations
<action>Generate actionable recommendations based on analysis</action>
<action>Prioritize by impact and effort</action>
**High Priority Recommendations:**
{{#each high_priority_recs}}
#### {{number}}. {{title}}
- **Issue:** {{issue}}
- **Impact:** {{impact}}
- **Recommendation:** {{recommendation}}
- **Expected Outcome:** {{expected_outcome}}
- **Effort:** {{effort_level}}
{{/each}}
**Medium Priority Recommendations:**
{{#each medium_priority_recs}}
- {{recommendation}}
{{/each}}
**Monitoring Suggestions:**
{{#each monitoring_suggestions}}
- {{suggestion}}
{{/each}}
</step>
---
<step n="9" goal="Generate review report">
### Generate Review Report
<template-output section="metrics-review-report">
Generate a comprehensive metrics review report including:
- Executive summary
- Trend analysis with visualizations
- Anomaly findings
- Period comparison
- Root cause analysis (if applicable)
- Prioritized recommendations
- Action items with owners
- Follow-up metrics to watch
</template-output>
**Report Generated:** {{output_file_path}}
</step>
---
<step n="10" goal="Publish review completion">
### Complete Review
<publish event="metrics.review.completed">
<payload>
<review_type>{{review_type}}</review_type>
<period>{{analysis_period}}</period>
<key_findings>{{key_findings_summary}}</key_findings>
<recommendations_count>{{recommendations_count}}</recommendations_count>
<anomalies_count>{{anomalies_count}}</anomalies_count>
<overall_health_change>{{health_change}}</overall_health_change>
<report_path>{{output_file_path}}</report_path>
<timestamp>{{current_timestamp}}</timestamp>
</payload>
</publish>
<action>Update module state with review timestamp and summary</action>
</step>
---
## Completion
Metrics review complete for **{{analysis_period}}**.
**Key Findings:**
{{#each key_findings}}
- {{finding}}
{{/each}}
**Health Assessment:**
- Overall trend: {{overall_trend}}
- Areas improving: {{improving_count}}
- Areas declining: {{declining_count}}
- Anomalies detected: {{anomalies_count}}
**Next Steps:**
1. Share report with stakeholders
2. Discuss recommendations in retrospective
3. Assign owners to action items
4. Schedule follow-up review: {{next_review_date}}
**Report Location:** {{output_file_path}}

View File

@ -0,0 +1,43 @@
# Metrics Review Workflow
name: metrics-review
description: "Analyze metric trends, identify patterns, and generate insights for continuous improvement"
author: "BMad"
module: bmm-metrics
# Configuration
config_source: "{project-root}/.bmad/bmm-metrics/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
user_name: "{config_source}:user_name"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-metrics/workflows/metrics-review"
instructions: "{installed_path}/instructions.md"
template: "{installed_path}/review-template.md"
state_file: "{project-root}/.bmad/bmm-metrics/state/module-state.yaml"
# Input
input:
review_type:
description: "Type of review (sprint, monthly, quarterly)"
required: false
default: "sprint"
focus_area:
description: "Specific area to focus analysis on"
required: false
compare_to:
description: "Previous period to compare against"
required: false
# Output
default_output_file: "{output_folder}/metrics/metrics-review-{{date}}.md"
# Events this workflow publishes
publishes:
- event_type: "metrics.review.completed"
condition: "When review analysis is complete"
- event_type: "metrics.anomaly.detected"
condition: "When significant metric anomaly is found"
standalone: true

View File

@ -0,0 +1,260 @@
# Quality Gate Check Instructions
## Objective
Validate a story or release candidate against defined quality gates. This is a critical workflow that determines whether code can proceed to release.
## Prerequisites
- Story ID or release candidate ID to validate
- Quality gate definitions in module config
- Access to story context and test results
---
<step n="1" goal="Identify validation target">
### Identify What We're Validating
<ask>What would you like to validate?
- Provide a **story ID** (e.g., STORY-123)
- Or a **release candidate ID** if validating a batch
Story/Release ID: </ask>
<action>Store response as {{target_id}}</action>
<action>Determine if this is a single story or release candidate</action>
</step>
---
<step n="2" goal="Load quality gate definitions">
### Load Quality Gate Configuration
<action>Load quality gates from {config_source}:quality_gates</action>
**Configured Quality Gates:**
| Gate | Threshold | Blocking |
|------|-----------|----------|
| Test Coverage | {quality_gates.test_coverage.threshold}% | {quality_gates.test_coverage.blocking} |
| Test Pass Rate | {quality_gates.test_pass_rate.threshold}% | {quality_gates.test_pass_rate.blocking} |
| Code Review | Required: {quality_gates.code_review.required} | {quality_gates.code_review.blocking} |
| No Critical Issues | Required: {quality_gates.no_critical_issues.required} | {quality_gates.no_critical_issues.blocking} |
| No Security Vulns | Required: {quality_gates.no_security_vulnerabilities.required} | {quality_gates.no_security_vulnerabilities.blocking} |
| Documentation | Required: {quality_gates.documentation_complete.required} | {quality_gates.documentation_complete.blocking} |
<ask>Check all gates or specific ones?
[a] All gates
[s] Select specific gates
Choice: </ask>
</step>
---
<step n="3" goal="Gather current values for each gate">
### Gather Quality Data
For each gate, we need to gather the current value:
#### 3.1 Test Coverage
<action>Query test coverage for {{target_id}}</action>
<ask>What is the current test coverage percentage?
(Enter number, e.g., 85): </ask>
<action>Store as {{test_coverage_actual}}</action>
#### 3.2 Test Pass Rate
<action>Query test results for {{target_id}}</action>
<ask>Test results:
- Total tests:
- Passed:
- Failed:
Or enter pass rate directly (e.g., 100): </ask>
<action>Calculate or store as {{test_pass_rate_actual}}</action>
#### 3.3 Code Review Status
<ask>Has the code been reviewed and approved?
[y] Yes - approved
[n] No - not yet reviewed
[c] Changes requested (not approved)
Status: </ask>
<action>Store as {{code_review_actual}}</action>
#### 3.4 Critical Issues
<ask>Are there any open critical or blocker issues?
[n] No critical issues
[y] Yes - list them
Status: </ask>
<action>Store as {{critical_issues_actual}}</action>
#### 3.5 Security Vulnerabilities
<ask>Are there any high or critical security vulnerabilities?
[n] No vulnerabilities found
[y] Yes - describe them
Status: </ask>
<action>Store as {{security_vulns_actual}}</action>
#### 3.6 Documentation (if applicable)
<check if="quality_gates.documentation_complete.required == true">
<ask>Is required documentation complete?
[y] Yes - documentation updated
[n] No - documentation pending
Status: </ask>
<action>Store as {{docs_actual}}</action>
</check>
</step>
---
<step n="4" goal="Evaluate each gate">
### Evaluate Quality Gates
<action>For each gate, compare actual value against threshold</action>
**Gate Evaluation Results:**
| Gate | Threshold | Actual | Status | Blocking |
|------|-----------|--------|--------|----------|
| Test Coverage | ≥{quality_gates.test_coverage.threshold}% | {{test_coverage_actual}}% | {{coverage_status}} | {quality_gates.test_coverage.blocking} |
| Test Pass Rate | ={quality_gates.test_pass_rate.threshold}% | {{test_pass_rate_actual}}% | {{tests_status}} | {quality_gates.test_pass_rate.blocking} |
| Code Review | Approved | {{code_review_actual}} | {{review_status}} | {quality_gates.code_review.blocking} |
| Critical Issues | None | {{critical_issues_actual}} | {{issues_status}} | {quality_gates.no_critical_issues.blocking} |
| Security Vulns | None | {{security_vulns_actual}} | {{security_status}} | {quality_gates.no_security_vulnerabilities.blocking} |
| Documentation | Complete | {{docs_actual}} | {{docs_status}} | {quality_gates.documentation_complete.blocking} |
<action>Calculate overall_score as percentage of gates passed</action>
<action>Determine if any BLOCKING gates failed</action>
</step>
---
<step n="5" goal="Determine overall result">
### Overall Quality Gate Result
<check if="all blocking gates passed">
<action>Set result = PASS</action>
<action>Set blocking = false</action>
## ✅ QUALITY GATES PASSED
All blocking quality gates have been validated successfully.
**Overall Score:** {{overall_score}}%
**Blocking Gates Failed:** 0
**Non-Blocking Warnings:** {{warning_count}}
This story/release is **cleared for deployment**.
<check if="warning_count > 0">
**Warnings (non-blocking):**
{{warnings_list}}
</check>
</check>
<check if="any blocking gate failed">
<action>Set result = FAIL</action>
<action>Set blocking = true</action>
## ❌ QUALITY GATES FAILED
One or more blocking quality gates have failed.
**Overall Score:** {{overall_score}}%
**Blocking Gates Failed:** {{blocking_failures_count}}
This story/release is **NOT cleared for deployment**.
**Failed Gates:**
{{failed_gates_list}}
**Remediation Steps:**
{{remediation_steps}}
</check>
</step>
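Steps 4 and 5 reduce to: evaluate each gate, then fail only if a failed gate is marked blocking. A sketch with gates modeled as (passed, blocking) pairs instead of the full config schema:

```python
def evaluate_gates(gates: dict) -> dict:
    """gates maps gate name -> (passed, blocking). Returns the overall verdict."""
    failed = [name for name, (passed, _) in gates.items() if not passed]
    blocking_failures = [name for name in failed if gates[name][1]]
    warnings = [name for name in failed if not gates[name][1]]
    score = round(100 * (len(gates) - len(failed)) / len(gates))
    return {"result": "FAIL" if blocking_failures else "PASS",
            "overall_score": score,
            "blocking_failures": blocking_failures,
            "warnings": warnings}

print(evaluate_gates({
    "test_coverage": (True, True),
    "test_pass_rate": (True, True),
    "code_review": (True, True),
    "documentation_complete": (False, False),  # non-blocking warning
}))
# {'result': 'PASS', 'overall_score': 75, 'blocking_failures': [],
#  'warnings': ['documentation_complete']}
```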
---
<step n="6" goal="Publish quality gate event" critical="true">
### Publish Quality Gate Result
<check if="result == PASS">
<publish event="metrics.quality.pass">
<payload>
<story_id>{{target_id}}</story_id>
<release_candidate_id>{{release_candidate_id}}</release_candidate_id>
<gates_checked>{{gates_checked_list}}</gates_checked>
<gate_results>{{gate_results_array}}</gate_results>
<overall_score>{{overall_score}}</overall_score>
<timestamp>{{current_timestamp}}</timestamp>
</payload>
</publish>
<action>Log: "Quality gates PASSED for {{target_id}} - event published to bmm-release"</action>
</check>
<check if="result == FAIL">
<publish event="metrics.quality.fail">
<payload>
<story_id>{{target_id}}</story_id>
<release_candidate_id>{{release_candidate_id}}</release_candidate_id>
<failed_gates>{{failed_gates_array}}</failed_gates>
<blocking>{{blocking}}</blocking>
<remediation_steps>{{remediation_steps_array}}</remediation_steps>
<timestamp>{{current_timestamp}}</timestamp>
</payload>
</publish>
<action>Log: "Quality gates FAILED for {{target_id}} - release blocked"</action>
</check>
</step>
---
<step n="7" goal="Generate report">
### Generate Quality Gate Report
<template-output section="quality-gate-report">
Generate a detailed quality gate report including:
- Target validated (story/release ID)
- Timestamp of validation
- Each gate checked with actual vs threshold
- Overall pass/fail status
- Recommendations if any gates failed
- Link to detailed test/coverage reports if available
</template-output>
</step>
---
## Completion
Quality gate validation complete for **{{target_id}}**.
**Result:** {{result}}
**Event Published:** metrics.quality.{{result_lower}}
**Report Saved:** {{output_file_path}}
<check if="result == PASS">
The bmm-release module will receive the `metrics.quality.pass` event and can proceed with release planning.
</check>
<check if="result == FAIL">
Address the failed gates and run quality-gate-check again when ready.
</check>

View File

@ -0,0 +1,50 @@
# Quality Gate Check Workflow
name: quality-gate-check
description: "Validate a release candidate against quality criteria. Publishes metrics.quality.pass or metrics.quality.fail events."
author: "BMad"
module: bmm-metrics
# Configuration
config_source: "{project-root}/.bmad/bmm-metrics/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
user_name: "{config_source}:user_name"
quality_gates: "{config_source}:quality_gates"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-metrics/workflows/quality-gate-check"
instructions: "{installed_path}/instructions.md"
template: "{installed_path}/gate-report-template.md"
# Input - what we're validating
input:
story_id:
description: "Story or release candidate to validate"
required: true
release_candidate_id:
description: "Optional release candidate ID"
required: false
gates_to_check:
description: "Specific gates to check (default: all)"
required: false
default: "all"
# Output
default_output_file: "{output_folder}/metrics/quality-gate-{story_id}-{{date}}.md"
# Events this workflow publishes
publishes:
- event_type: "metrics.quality.pass"
condition: "All blocking gates pass"
- event_type: "metrics.quality.fail"
condition: "Any blocking gate fails"
# Cross-module integration
triggers_workflows:
on_pass:
- module: "bmm-release"
workflow: "release-planning"
condition: "if auto_release_on_pass is true"
standalone: true

View File

@ -0,0 +1,284 @@
# Track Metrics Instructions
## Objective
Collect current metric values, calculate KPIs, check SLA compliance, and generate a metrics report with actionable insights.
## Prerequisites
- KPIs defined (via `*define-kpis`)
- SLAs defined (via `*define-slas`)
- Access to source data (stories, sprints, test results)
---
<step n="1" goal="Select tracking scope">
### Select Metrics Scope
<ask>What metrics would you like to track?
[a] All configured metrics
[v] Velocity metrics only
[q] Quality metrics only
[d] Delivery metrics only
[c] Custom selection
Choice: </ask>
<action>Store as {{metric_category}}</action>
<ask>What time period should we report on?
[s] Current sprint
[l] Last 30 days
[w] Last week
[m] Last month
[c] Custom date range
Period: </ask>
<action>Store as {{reporting_period}}</action>
<action>Calculate {{period_start}} and {{period_end}} dates</action>
</step>
---
<step n="2" goal="Load configuration and state">
### Load Configuration
<action>Load KPI definitions from config</action>
<action>Load SLA thresholds from config</action>
<action>Load current state from module-state.yaml</action>
**Active KPIs:** {{active_kpi_count}}
**Active SLAs:** {{active_sla_count}}
**Last Update:** {{last_update_timestamp}}
</step>
---
<step n="3" goal="Collect velocity metrics" condition="metric_category in ['a', 'v']">
### Velocity Metrics
<action>Query completed stories in {{reporting_period}}</action>
<action>Calculate sprint velocity (points or count)</action>
<action>Calculate rolling average velocity</action>
**Current Sprint Velocity:**
| Metric | Value | Target | Status |
|--------|-------|--------|--------|
| Stories Completed | {{stories_completed}} | {{stories_planned}} | {{completion_status}} |
| Points Completed | {{points_completed}} | {{points_planned}} | {{points_status}} |
| Completion Rate | {{completion_rate}}% | {{target_completion}}% | {{rate_status}} |
**Velocity Trend:**
- Current: {{current_velocity}}
- Rolling Average (6 sprints): {{rolling_average}}
- Trend: {{velocity_trend}}
<check if="completion_rate < sla.sprint_completion.warning">
<action>Flag velocity warning</action>
</check>
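The velocity math is simple enough to sketch. A minimal example, assuming past sprint point totals are available as a plain list (function and field names are illustrative, not part of the BMAD runtime):
```python
from statistics import mean

def velocity_summary(points_completed, points_planned, history, window=6):
    """Compute the velocity figures shown in the tables above.

    `history` holds point totals from previous sprints, most recent last.
    Illustrative sketch only - not the module's actual implementation.
    """
    completion_rate = 100.0 * points_completed / points_planned if points_planned else 0.0
    recent = history[-window:]
    rolling_average = mean(recent) if recent else 0.0
    if points_completed > rolling_average:
        trend = "up"
    elif points_completed < rolling_average:
        trend = "down"
    else:
        trend = "flat"
    return {
        "current_velocity": points_completed,
        "rolling_average": round(rolling_average, 1),
        "completion_rate": round(completion_rate, 1),
        "velocity_trend": trend,
    }

print(velocity_summary(21, 25, [18, 20, 22, 19, 23, 21]))
# {'current_velocity': 21, 'rolling_average': 20.5, 'completion_rate': 84.0, 'velocity_trend': 'up'}
```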
</step>
---
<step n="4" goal="Collect delivery metrics" condition="metric_category in ['a', 'd']">
### Delivery Metrics
<action>Calculate story cycle times for period</action>
<action>Calculate PR review turnaround times</action>
<action>Count deployments in period</action>
**Cycle Time Analysis:**
| Metric | P50 | P90 | Target | Status |
|--------|-----|-----|--------|--------|
| Story Cycle Time | {{cycle_p50}}d | {{cycle_p90}}d | {{cycle_target}}d | {{cycle_status}} |
| PR Review Time | {{pr_p50}}h | {{pr_p90}}h | {{pr_target}}h | {{pr_status}} |
**Deployment Frequency:**
- Deployments this period: {{deployment_count}}
- Average per week: {{deployments_per_week}}
- Target: {{deployment_target}}
<check if="cycle_p90 > sla.story_cycle_time.breach">
<publish event="metrics.sla.breach">
<payload>
<sla_name>story_cycle_time</sla_name>
<threshold>{{sla.story_cycle_time.breach}}</threshold>
<actual_value>{{cycle_p90}}</actual_value>
<severity>warning</severity>
<period>{{reporting_period}}</period>
</payload>
</publish>
</check>
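P50/P90 here can be computed with a simple nearest-rank percentile; a small sketch with hypothetical cycle times and threshold:
```python
def percentile(values, pct):
    """Nearest-rank percentile: pct=50 -> P50, pct=90 -> P90."""
    if not values:
        return None
    ordered = sorted(values)
    rank = max(1, round(pct / 100 * len(ordered)))
    return ordered[rank - 1]

sla_breach_days = 3.5                              # hypothetical SLA threshold
cycle_times_days = [1.5, 2.0, 2.5, 3.0, 4.0, 9.0]  # hypothetical samples
cycle_p50 = percentile(cycle_times_days, 50)       # 2.5
cycle_p90 = percentile(cycle_times_days, 90)       # 4.0 with nearest-rank rounding
if cycle_p90 > sla_breach_days:
    print("would publish metrics.sla.breach for story_cycle_time")
```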
</step>
---
<step n="5" goal="Collect quality metrics" condition="metric_category in ['a', 'q']">
### Quality Metrics
<action>Get current test coverage percentage</action>
<action>Get test pass rate from latest run</action>
<action>Count open defects by severity</action>
<action>Check security scan results</action>
**Quality Dashboard:**
| Metric | Current | Target | Status |
|--------|---------|--------|--------|
| Test Coverage | {{test_coverage}}% | {{coverage_target}}% | {{coverage_status}} |
| Test Pass Rate | {{test_pass_rate}}% | 100% | {{test_status}} |
| Code Review Coverage | {{review_coverage}}% | 100% | {{review_status}} |
**Defects:**
- Critical: {{critical_defects}}
- High: {{high_defects}}
- Medium: {{medium_defects}}
- Low: {{low_defects}}
**Security:**
- Critical Vulnerabilities: {{critical_vulns}}
- High Vulnerabilities: {{high_vulns}}
<check if="test_coverage < quality_gates.test_coverage.threshold">
<action>Flag coverage below threshold</action>
</check>
<check if="critical_vulns > 0 or high_vulns > quality_gates.max_high_vulns">
<publish event="metrics.sla.breach">
<payload>
<sla_name>security_vulnerabilities</sla_name>
<threshold>0 critical, {{quality_gates.max_high_vulns}} high</threshold>
<actual_value>{{critical_vulns}} critical, {{high_vulns}} high</actual_value>
<severity>critical</severity>
<blocking>true</blocking>
</payload>
</publish>
</check>
</step>
---
<step n="6" goal="Calculate KPI status">
### KPI Status Summary
<action>For each defined KPI, compare current value to target</action>
<action>Calculate overall KPI health score</action>
**KPI Health Dashboard:**
| KPI | Current | Target | Status | Trend |
|-----|---------|--------|--------|-------|
{{#each kpis}}
| {{name}} | {{current_value}} | {{target}} | {{status_emoji}} {{status}} | {{trend_arrow}} |
{{/each}}
**Overall KPI Health:** {{overall_health_percentage}}%
<action>Categorize KPIs: {{green_count}} on-track, {{yellow_count}} at-risk, {{red_count}} off-track</action>
</step>
---
<step n="7" goal="Check SLA compliance">
### SLA Compliance Check
<action>Check each SLA against current values</action>
<action>Calculate compliance percentage</action>
**SLA Compliance Status:**
| SLA | Threshold | Current | Compliant | Blocking |
|-----|-----------|---------|-----------|----------|
{{#each slas}}
| {{name}} | {{threshold}} | {{current_value}} | {{compliant_emoji}} | {{blocking}} |
{{/each}}
**Overall SLA Compliance:** {{sla_compliance_percentage}}%
<check if="any_sla_breached">
**SLA Breaches Detected:**
{{breached_slas_list}}
<action>Trigger notifications per alerting config</action>
</check>
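A sketch of the compliance check, assuming each SLA record carries a threshold, a current value, and a direction flag (all data below is hypothetical):
```python
slas = [  # hypothetical records mirroring the table columns above
    {"name": "sprint_completion", "threshold": 80, "current": 85, "higher_is_better": True},
    {"name": "story_cycle_time", "threshold": 5, "current": 6, "higher_is_better": False},
]

def is_compliant(sla):
    if sla["higher_is_better"]:
        return sla["current"] >= sla["threshold"]
    return sla["current"] <= sla["threshold"]

breached_slas = [s["name"] for s in slas if not is_compliant(s)]
sla_compliance_percentage = round(100 * (len(slas) - len(breached_slas)) / len(slas))
any_sla_breached = bool(breached_slas)
print(sla_compliance_percentage, breached_slas)  # 50 ['story_cycle_time']
```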
</step>
---
<step n="8" goal="Update state and publish">
### Update State
<action>Update module-state.yaml with current metric values</action>
<action>Record metric history for trend analysis</action>
<action>Save timestamp of last update</action>
<publish event="metrics.kpi.updated">
<payload>
<kpi_count>{{active_kpi_count}}</kpi_count>
<kpis>{{kpi_values_array}}</kpis>
<overall_health>{{overall_health_percentage}}</overall_health>
<sla_compliance>{{sla_compliance_percentage}}</sla_compliance>
<period>{{reporting_period}}</period>
<timestamp>{{current_timestamp}}</timestamp>
</payload>
</publish>
</step>
---
<step n="9" goal="Generate report">
### Generate Metrics Report
<template-output section="metrics-report">
Generate a comprehensive metrics report including:
- Executive summary with health scores
- Velocity section with trends
- Quality section with recommendations
- Delivery section with bottleneck analysis
- SLA compliance summary
- Action items for at-risk metrics
</template-output>
**Report Generated:** {{output_file_path}}
</step>
---
## Completion
Metrics tracking complete for **{{reporting_period}}**.
**Summary:**
- **KPI Health:** {{overall_health_percentage}}%
- **SLA Compliance:** {{sla_compliance_percentage}}%
- **Metrics Updated:** {{metrics_updated_count}}
- **Report:** {{output_file_path}}
**Attention Needed:**
{{#if red_kpis}}
- {{red_count}} KPIs off-track: {{red_kpis_list}}
{{/if}}
{{#if breached_slas}}
- {{breach_count}} SLA breaches: {{breached_slas_list}}
{{/if}}
Use `*metrics-review` for deeper trend analysis.
@@ -0,0 +1,50 @@
# Track Metrics Workflow
name: track-metrics
description: "Collect, calculate, and report current metric values against defined KPIs and SLAs"
author: "BMad"
module: bmm-metrics
# Configuration
config_source: "{project-root}/.bmad/bmm-metrics/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
user_name: "{config_source}:user_name"
kpis: "{config_source}:kpis"
slas: "{config_source}:sla"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-metrics/workflows/track-metrics"
instructions: "{installed_path}/instructions.md"
template: "{installed_path}/metrics-report-template.md"
state_file: "{project-root}/.bmad/bmm-metrics/state/module-state.yaml"
# Input
input:
metric_category:
description: "Category of metrics to track (all, velocity, quality, delivery)"
required: false
default: "all"
period:
description: "Time period to report on (current_sprint, last_30d, custom)"
required: false
default: "current_sprint"
# Output
default_output_file: "{output_folder}/metrics/metrics-report-{date}.md"
# Events this workflow publishes
publishes:
- event_type: "metrics.kpi.updated"
condition: "When KPI values are recorded"
- event_type: "metrics.sla.breach"
condition: "When SLA threshold is breached"
# Events this workflow subscribes to (for automatic data)
subscribes:
- event_type: "story.done"
use_for: "Automatic cycle time calculation"
- event_type: "sprint.ended"
use_for: "Automatic velocity calculation"
standalone: true
@@ -0,0 +1,76 @@
# BMM-Priority Module
Backlog Prioritization Engine for the BMAD Method. Provides data-driven prioritization using configurable scoring frameworks and integrates feedback signals into priority decisions.
## Overview
The bmm-priority module provides:
- **Prioritization Frameworks**: WSJF, RICE, MoSCoW, custom scoring
- **Priority Queue Management**: Ordered backlog management
- **Signal Integration**: Incorporates feedback and metrics into priority decisions
- **Priority Reviews**: Periodic re-evaluation of priorities
## Event-Driven Architecture
### Events Subscribed
| Event | Action |
|-------|--------|
| `feedback.priority.suggested` | Evaluate priority adjustment suggestion |
| `feedback.insight.generated` | Consider insight for new backlog items |
| `metrics.velocity.calculated` | Update capacity for prioritization |
| `story.done` | Remove from priority queue |
### Events Published
| Event | Description |
|-------|-------------|
| `priority.updated` | Story priority changed |
| `priority.queue.reordered` | Backlog reordering complete |
| `priority.review.completed` | Priority review session complete |
## Directory Structure
```
bmm-priority/
├── README.md
├── manifest.yaml
├── config.yaml
├── agents/
│ └── priority-manager.agent.yaml
├── workflows/
│ ├── prioritize-backlog/
│ └── priority-review/
├── events/
│ ├── subscriptions.yaml
│ ├── publications.yaml
│ └── handlers/
└── state/
    └── module-state.yaml
```
## Prioritization Frameworks
### WSJF (Weighted Shortest Job First)
```
WSJF Score = (Business Value + Time Criticality + Risk Reduction) / Job Size
```
### RICE
```
RICE Score = (Reach × Impact × Confidence) / Effort
```
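Both formulas reduce to one-liners; a minimal sketch with hypothetical inputs on the scales from `config.yaml`:
```python
def wsjf(business_value, time_criticality, risk_reduction, job_size):
    """WSJF = (Business Value + Time Criticality + Risk Reduction) / Job Size."""
    return (business_value + time_criticality + risk_reduction) / job_size

def rice(reach, impact, confidence, effort):
    """RICE = (Reach x Impact x Confidence) / Effort."""
    return (reach * impact * confidence) / effort

print(wsjf(business_value=8, time_criticality=5, risk_reduction=3, job_size=3))  # ~5.33
print(rice(reach=1000, impact=2, confidence=0.8, effort=4))                      # 400.0
```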
### MoSCoW
- **Must Have**: Critical for release
- **Should Have**: Important but not critical
- **Could Have**: Nice to have
- **Won't Have**: Explicitly excluded
## Quick Start
1. Install the module via BMAD installer
2. Configure prioritization framework in `.bmad/bmm-priority/config.yaml`
3. Use the Priority Manager agent: `*priority-manager`
## Integration with Feedback
When bmm-feedback detects high-impact patterns, it publishes `feedback.priority.suggested` events. This module evaluates these suggestions and can automatically adjust priorities based on configured rules.
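A sketch of that rule evaluation, using the `suggestion_handling` keys from `config.yaml` (the function and return values are illustrative, not a BMAD contract):
```python
def handle_suggestion(suggestion, cfg):
    """Decide what to do with a feedback.priority.suggested event."""
    mode = cfg.get("mode", "review")
    if mode == "ignore":
        return "discarded"
    if mode == "auto" and suggestion["feedback_count"] >= cfg.get("auto_accept_threshold", 15):
        return "auto-accepted"   # a priority.updated event would follow
    return "queued-for-review"   # surfaces in the agent's *suggestions menu

cfg = {"mode": "auto", "auto_accept_threshold": 15}
print(handle_suggestion({"story_id": "STORY-123", "feedback_count": 16}, cfg))  # auto-accepted
```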
@@ -0,0 +1,166 @@
# Priority Manager Agent Definition
name: priority-manager
displayName: Priority Manager
title: Backlog Strategist + Priority Guardian
icon: "📊"
persona:
role: "Priority Manager + Backlog Strategist + Priority Guardian"
identity: |
Data-driven prioritization expert who ensures the team works on the most
valuable items. Balances business value, customer feedback, technical debt,
and team capacity. Makes tough priority decisions with clear rationale
and defends the backlog from scope creep and priority inflation.
communication_style: |
Speaks in terms of value, impact, and trade-offs. Uses frameworks consistently.
Challenges assumptions about priority with data. Says "compared to what?"
frequently. Transparent about prioritization criteria and decisions.
principles:
- "Everything can't be P0 - if everything is urgent, nothing is"
- "Prioritization is about saying no to good things"
- "Data informs priority, but doesn't decide it alone"
- "Customer feedback is a signal, not a mandate"
- "Review priorities regularly - context changes"
- "Document the 'why' behind priority decisions"
activation:
critical: true
steps:
- step: 1
action: "Load persona from this agent file"
- step: 2
action: "Load module config from {project-root}/.bmad/bmm-priority/config.yaml"
mandate: true
- step: 3
action: "Store config values: {user_name}, {project_name}, {framework}"
- step: 4
action: "Load priority state from {project-root}/.bmad/bmm-priority/state/module-state.yaml"
- step: 5
action: "Greet user and display menu"
format: |
📊 **Priority Manager** ready, {user_name}
Current project: **{project_name}**
Framework: **{framework.primary}**
Queue size: **{queue_size}** items
Pending suggestions: **{pending_suggestions}**
{menu_items}
menu:
- cmd: "*help"
action: "Show numbered menu"
- cmd: "*prioritize"
workflow: "{project-root}/.bmad/bmm-priority/workflows/prioritize-backlog/workflow.yaml"
description: "Score and prioritize backlog items"
- cmd: "*review"
workflow: "{project-root}/.bmad/bmm-priority/workflows/priority-review/workflow.yaml"
description: "Review and adjust priorities"
- cmd: "*queue"
action: "#show-queue"
description: "Show current priority queue"
- cmd: "*suggestions"
action: "#show-suggestions"
description: "Review pending priority suggestions"
- cmd: "*score"
action: "#score-item"
description: "Calculate priority score for an item"
- cmd: "*compare"
action: "#compare-items"
description: "Compare two items' priorities"
- cmd: "*exit"
action: "Exit agent with confirmation"
prompts:
show-queue:
id: show-queue
content: |
Display priority queue:
1. Load queue from state
2. Show top 20 items with scores
3. Highlight stale items
4. Show pending suggestions
show-suggestions:
id: show-suggestions
content: |
Show pending priority suggestions:
1. Load suggestions from state
2. Show source (feedback, metrics, etc.)
3. Show suggested change and rationale
4. Provide accept/reject options
score-item:
id: score-item
content: |
Calculate priority score:
1. Prompt for item ID or description
2. Gather framework-specific inputs
3. Calculate score using configured framework
4. Show breakdown and recommendation
compare-items:
id: compare-items
content: |
Compare two items:
1. Prompt for two item IDs
2. Show side-by-side scores
3. Highlight differentiating factors
4. Make recommendation
expertise:
domains:
- "Backlog prioritization"
- "Value-based decision making"
- "Priority frameworks (WSJF, RICE, MoSCoW)"
- "Technical debt management"
- "Capacity-based planning"
frameworks:
- "WSJF (Weighted Shortest Job First)"
- "RICE (Reach, Impact, Confidence, Effort)"
- "MoSCoW"
- "Cost of Delay"
- "ICE (Impact, Confidence, Ease)"
collaboration:
works_with:
- agent: "pm"
purpose: "Align priorities with product strategy"
- agent: "feedback-analyst"
purpose: "Incorporate customer feedback signals"
- agent: "sm"
purpose: "Plan sprint based on priorities"
handoffs:
- from: "bmm-feedback"
event: "feedback.priority.suggested"
description: "Evaluate priority change suggestion"
- to: "bmm-roadmap"
event: "priority.queue.reordered"
description: "Update roadmap with new priorities"
rules:
- "Never change priority without documenting rationale"
- "Review feedback suggestions before auto-accepting"
- "Flag items that haven't been reviewed in 14+ days"
- "Consider technical debt in priority decisions"
- "Balance quick wins with strategic investments"
@@ -0,0 +1,88 @@
# BMM-Priority Module Configuration
# Copy to {project-root}/.bmad/bmm-priority/config.yaml and customize
# Project identification
project_name: "{{project_name}}"
user_name: "{{user_name}}"
output_folder: "docs"
# Prioritization framework
framework:
# Primary framework: wsjf, rice, moscow, custom
primary: "wsjf"
# WSJF configuration
wsjf:
weights:
business_value: 1.0
time_criticality: 1.0
risk_reduction: 1.0
job_size_scale: [1, 2, 3, 5, 8, 13] # Fibonacci
# RICE configuration
rice:
reach_scale: [10, 100, 1000, 10000] # Users affected
impact_scale: [0.25, 0.5, 1, 2, 3] # Minimal to Massive
confidence_scale: [0.5, 0.8, 1.0] # Low, Medium, High
effort_unit: "person-weeks"
# MoSCoW configuration
moscow:
categories:
- must
- should
- could
- wont
# Priority queue settings
queue:
# Maximum items in active priority queue
max_active: 50
# Auto-reorder triggers
auto_reorder:
on_new_item: true
on_priority_change: true
on_feedback_suggestion: false # Require manual review
# Staleness threshold (days without update)
staleness_days: 14
# Feedback integration
feedback_integration:
enabled: true
# How to handle feedback priority suggestions
suggestion_handling:
# auto, review, ignore
mode: "review"
# Threshold for auto-accept (if mode is auto)
auto_accept_threshold: 15 # feedback count
# Weight feedback sentiment in priority score
sentiment_weight: 0.2
# Priority review settings
review:
# Recommended review frequency
frequency: "weekly"
# Auto-flag items for review
flag_criteria:
- stale_items: true
- low_confidence: true
- feedback_volume_change: true
# Events
events:
subscribe:
- "feedback.priority.suggested"
- "feedback.insight.generated"
- "metrics.velocity.calculated"
- "story.done"
publish:
- "priority.updated"
- "priority.queue.reordered"
- "priority.review.completed"
@@ -0,0 +1,38 @@
# BMM-Priority Event Publications
version: "1.0.0"
module: "bmm-priority"
publications:
- event_type: "priority.updated"
description: "Story priority has been changed"
payload_schema:
story_id: { type: string, required: true }
previous_priority: { type: number, required: true }
new_priority: { type: number, required: true }
reason: { type: string, required: true }
updated_by: { type: string, required: true }
framework: { type: string, required: true }
score_breakdown: { type: object, required: false }
consumers:
- module: "bmm-roadmap"
action: "Update roadmap positioning"
- event_type: "priority.queue.reordered"
description: "Backlog priority queue has been reordered"
payload_schema:
queue_size: { type: number, required: true }
items_changed: { type: number, required: true }
top_items: { type: array, required: true }
trigger: { type: string, required: true }
consumers:
- module: "bmm-roadmap"
action: "Refresh roadmap with new order"
- event_type: "priority.review.completed"
description: "Priority review session completed"
payload_schema:
items_reviewed: { type: number, required: true }
items_changed: { type: number, required: true }
suggestions_processed: { type: number, required: true }
stale_items_flagged: { type: number, required: true }
@@ -0,0 +1,25 @@
# BMM-Priority Event Subscriptions
version: "1.0.0"
module: "bmm-priority"
subscriptions:
- event_type: "feedback.priority.suggested"
handler: "handlers/on-priority-suggested.xml"
description: "Evaluate feedback-based priority suggestion"
action: "evaluate_suggestion"
- event_type: "feedback.insight.generated"
handler: "handlers/on-insight-generated.xml"
description: "Consider insight for backlog"
action: "evaluate_for_backlog"
- event_type: "metrics.velocity.calculated"
handler: "handlers/on-velocity-calculated.xml"
description: "Update capacity for prioritization"
action: "update_capacity"
- event_type: "story.done"
handler: "handlers/on-story-done.xml"
description: "Remove completed story from queue"
action: "remove_from_queue"
@@ -0,0 +1,59 @@
# BMM-Priority Module Manifest
name: bmm-priority
version: "1.0.0"
display_name: "BMAD Priority Module"
description: "Backlog prioritization with WSJF, RICE, and feedback integration"
author: "BMad"
license: "MIT"
category: "planning"
tags:
- prioritization
- backlog
- wsjf
- rice
- planning
dependencies:
core:
version: ">=1.0.0"
required: true
events:
subscribes:
- feedback.priority.suggested
- feedback.insight.generated
- metrics.velocity.calculated
- story.done
publishes:
- priority.updated
- priority.queue.reordered
- priority.review.completed
agents:
- name: priority-manager
file: agents/priority-manager.agent.yaml
description: "Backlog Strategist + Priority Guardian"
icon: "📊"
workflows:
- name: prioritize-backlog
path: workflows/prioritize-backlog
description: "Score and prioritize backlog items"
standalone: true
- name: priority-review
path: workflows/priority-review
description: "Review and adjust priorities"
standalone: true
slash_commands:
- name: "priority-score"
workflow: "prioritize-backlog"
description: "Score and prioritize items"
- name: "priority-review"
workflow: "priority-review"
description: "Review priorities"
@@ -0,0 +1,46 @@
# BMM-Priority Module State
version: "1.0.0"
module: "bmm-priority"
initialized: false
last_updated: null
# Priority queue
queue:
items: []
# Example:
# - story_id: "STORY-123"
# score: 85
# framework: "wsjf"
# score_breakdown:
# business_value: 8
# time_criticality: 5
# risk_reduction: 3
# job_size: 3
# last_reviewed: "2024-01-15"
# feedback_score: 12
# Pending suggestions from feedback
suggestions: []
# Example:
# - id: "sug-001"
# source_event: "feedback.priority.suggested"
# story_id: "STORY-123"
# suggested_change: "increase"
# reason: "High customer feedback volume"
# feedback_count: 15
# status: "pending"
# Capacity tracking (from velocity events)
capacity:
current_velocity: null
rolling_average: null
last_updated: null
# Review history
reviews: []
# Event processing
event_processing:
last_event_id: null
events_processed_count: 0
@@ -0,0 +1,114 @@
# BMM-Release Module
Release Management module for the BMAD Method. Handles release planning, candidate validation, release notes generation, and rollback procedures.
## Overview
The bmm-release module provides:
- **Release Planning**: Create and manage release candidates
- **Release Notes Generation**: Auto-generate release notes from completed stories
- **Rollback Planning**: Define and execute rollback procedures
- **Release Validation**: Coordinate with bmm-metrics for quality gate checks
## Event-Driven Architecture
This module operates through events, making it completely decoupled from other modules:
### Events Subscribed
| Event | Action |
|-------|--------|
| `metrics.quality.pass` | Proceeds with release if quality gates pass |
| `metrics.quality.fail` | Blocks release and notifies stakeholders |
| `story.done` | Adds story to pending release items |
| `sprint.ended` | Triggers release candidate creation (if configured) |
### Events Published
| Event | Description |
|-------|-------------|
| `release.candidate.created` | New release candidate ready for validation |
| `release.approved` | Release approved after all gates pass |
| `release.deployed` | Release successfully deployed |
| `release.failed` | Release deployment failed |
| `release.rollback.initiated` | Rollback procedure started |
| `release.rollback.completed` | Rollback completed successfully |
## Directory Structure
```
bmm-release/
├── README.md
├── manifest.yaml
├── config.yaml
├── agents/
│ └── release-manager.agent.yaml
├── workflows/
│ ├── release-planning/
│ ├── release-notes/
│ └── rollback-planning/
├── events/
│ ├── subscriptions.yaml
│ ├── publications.yaml
│ └── handlers/
│ ├── on-quality-pass.xml
│ └── on-quality-fail.xml
├── tasks/
│ ├── create-release-candidate.xml
│ └── validate-release.xml
├── templates/
│ └── release-notes-template.md
└── state/
└── module-state.yaml
```
## Quick Start
1. Install the module via BMAD installer
2. Configure release settings in `.bmad/bmm-release/config.yaml`
3. Use the Release Manager agent: `*release-manager`
## Agent Commands
The Release Manager agent provides:
- `*help` - Show available commands
- `*plan-release` - Create a new release candidate
- `*release-notes` - Generate release notes
- `*rollback-plan` - Create rollback procedure
- `*release-status` - Check current release status
- `*exit` - Exit agent
## Integration with bmm-metrics
The release workflow integrates with bmm-metrics through events:
1. Release candidate created → `release.candidate.created`
2. bmm-metrics receives event → runs quality gate check
3. Quality gate result → `metrics.quality.pass` or `metrics.quality.fail`
4. bmm-release receives result → proceeds or blocks release
This event-driven approach means modules are completely independent and can operate asynchronously.
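As a rough sketch of the consuming side, a toy router keyed on the envelope's `type` field (the lambdas stand in for the XML handlers under `events/handlers/`; none of this is the actual file-queue transport):
```python
HANDLERS = {  # event type -> reaction, mirroring events/subscriptions.yaml
    "metrics.quality.pass": lambda e: f"validate candidate containing {e['payload']['story_id']}",
    "metrics.quality.fail": lambda e: f"block release containing {e['payload']['story_id']}",
    "story.done":           lambda e: f"add {e['payload']['story_id']} to pending release items",
}

def dispatch(event):
    handler = HANDLERS.get(event["type"])
    return handler(event) if handler else "ignored (not subscribed)"

print(dispatch({"type": "story.done", "payload": {"story_id": "STORY-125"}}))
```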
## Configuration
```yaml
# .bmad/bmm-release/config.yaml
project_name: "My Project"
user_name: "Developer"
release:
versioning: "semver" # semver, calver, custom
auto_create_on_sprint_end: true
require_quality_gates: true
require_changelog: true
deployment:
environments:
- staging
- production
approval_required:
staging: false
production: true
rollback:
auto_rollback_on_failure: true
health_check_timeout: 300 # seconds
```
@@ -0,0 +1,208 @@
# Release Manager Agent Definition
# Compiles to .md during BMAD installation
name: release-manager
displayName: Release Manager
title: Release Coordinator + Deployment Guardian
icon: "🚀"
persona:
role: "Release Manager + Deployment Coordinator + Rollback Guardian"
identity: |
Meticulous release coordinator ensuring smooth, safe deployments.
Manages release candidates, coordinates quality gate validation,
generates comprehensive release notes, and maintains rollback readiness.
Balances velocity with stability, always prioritizing production safety.
communication_style: |
Clear, status-focused communication. Uses checklists and gates.
Speaks in terms of "release candidates," "gates," and "rollback points."
Celebrates successful releases, quickly addresses failures with clear actions.
Always has a rollback plan ready.
principles:
- "Never deploy what hasn't passed quality gates"
- "Always have a rollback plan before deploying"
- "Release notes are documentation, not afterthoughts"
- "Small, frequent releases beat big, scary releases"
- "Production stability trumps feature velocity"
- "If in doubt, don't release - investigate first"
activation:
critical: true
steps:
- step: 1
action: "Load persona from this agent file"
- step: 2
action: "Load module config from {project-root}/.bmad/bmm-release/config.yaml"
mandate: true
- step: 3
action: "Store config values: {user_name}, {project_name}, {release}, {deployment}"
- step: 4
action: "Load current release state from {project-root}/.bmad/bmm-release/state/module-state.yaml if exists"
- step: 5
action: "Greet user and display menu"
format: |
🚀 **Release Manager** ready, {user_name}
Current project: **{project_name}**
Current version: **{current_version}**
Release candidate: {candidate_status}
{menu_items}
menu:
- cmd: "*help"
action: "Show numbered menu"
- cmd: "*plan-release"
workflow: "{project-root}/.bmad/bmm-release/workflows/release-planning/workflow.yaml"
description: "Create a new release candidate"
- cmd: "*release-notes"
workflow: "{project-root}/.bmad/bmm-release/workflows/release-notes/workflow.yaml"
description: "Generate release notes"
- cmd: "*rollback-plan"
workflow: "{project-root}/.bmad/bmm-release/workflows/rollback-planning/workflow.yaml"
description: "Create rollback procedure"
- cmd: "*release-status"
action: "#release-status"
description: "Check current release status"
- cmd: "*approve-release"
action: "#approve-release"
description: "Approve release candidate for deployment"
- cmd: "*deploy"
action: "#deploy"
description: "Deploy approved release"
- cmd: "*rollback"
action: "#rollback"
description: "Execute rollback to previous version"
- cmd: "*release-history"
action: "#release-history"
description: "View release history"
- cmd: "*exit"
action: "Exit agent with confirmation"
prompts:
release-status:
id: release-status
content: |
Display current release status:
1. Load state from module-state.yaml
2. Show current version and last release date
3. Show release candidate status (if any)
4. Show quality gate status
5. Show pending stories for next release
6. Show deployment pipeline status
approve-release:
id: approve-release
content: |
Approve release candidate:
1. Verify quality gates have passed
2. Verify release notes are complete
3. Verify rollback plan exists
4. Collect approval confirmation
5. Mark release as approved
6. Publish release.approved event
deploy:
id: deploy
content: |
Deploy approved release:
1. Verify release is approved
2. Confirm target environment
3. Check deployment prerequisites
4. Execute deployment
5. Run health checks
6. Publish release.deployed or release.failed event
rollback:
id: rollback
content: |
Execute rollback:
1. Confirm rollback decision
2. Load rollback plan
3. Identify target version
4. Execute rollback procedure
5. Verify rollback success
6. Publish release.rollback.completed event
release-history:
id: release-history
content: |
Display release history:
1. Load release history from state
2. Show last 10 releases
3. Include version, date, stories, status
4. Highlight any rollbacks
expertise:
domains:
- "Release management and planning"
- "Deployment strategies (rolling, blue-green, canary)"
- "Rollback procedures and disaster recovery"
- "Version management (semver, calver)"
- "Release notes and changelog generation"
- "Quality gate coordination"
frameworks:
- "Semantic Versioning (SemVer)"
- "Calendar Versioning (CalVer)"
- "Continuous Delivery"
- "GitFlow release process"
- "Feature flags for gradual rollout"
tools:
- "Release candidate management"
- "Deployment coordination"
- "Health check monitoring"
- "Rollback automation"
- "Release notes generation"
collaboration:
works_with:
- agent: "metrics-analyst"
purpose: "Quality gate validation before release"
- agent: "sm"
purpose: "Sprint completion and release timing"
- agent: "dev"
purpose: "Deployment execution and verification"
- agent: "tea"
purpose: "Release testing and validation"
handoffs:
- from: "bmm-metrics"
event: "metrics.quality.pass"
description: "Quality gates passed, can proceed with release"
- from: "bmm-metrics"
event: "metrics.quality.fail"
description: "Quality gates failed, release blocked"
- to: "bmm-feedback"
event: "release.deployed"
description: "Trigger post-release feedback collection"
rules:
- "Never deploy without quality gate pass event"
- "Always generate release notes before deployment"
- "Always verify rollback plan exists before production deploy"
- "Block release if any blocking quality gates fail"
- "Notify stakeholders on all release state changes"
- "Keep release history for audit trail"
@@ -0,0 +1,147 @@
# BMM-Release Module Configuration
# Copy to {project-root}/.bmad/bmm-release/config.yaml and customize
# Project identification
project_name: "{{project_name}}"
user_name: "{{user_name}}"
output_folder: "docs"
# Release configuration
release:
# Versioning scheme: semver, calver, custom
versioning: "semver"
# Version format for semver
semver_format: "MAJOR.MINOR.PATCH"
# Version format for calver (e.g., YYYY.MM.DD, YY.MM.PATCH)
calver_format: "YYYY.MM.PATCH"
# Automatically create release candidate when sprint ends
auto_create_on_sprint_end: false
# Require quality gates to pass before release
require_quality_gates: true
# Require changelog/release notes
require_changelog: true
# Release branch naming
branch_prefix: "release/"
# Tag format (supports {version} placeholder)
tag_format: "v{version}"
# Release candidate settings
candidate:
# How long a candidate is valid before expiring
expiry_hours: 72
# Auto-include stories marked done since last release
auto_include_done_stories: true
# Minimum stories required for release
min_stories: 1
# Deployment configuration
deployment:
# Available environments
environments:
- name: "staging"
order: 1
auto_deploy: true
approval_required: false
- name: "production"
order: 2
auto_deploy: false
approval_required: true
approvers:
- "tech_lead"
- "product_owner"
# Deployment strategy: rolling, blue_green, canary
strategy: "rolling"
# Health check settings
health_check:
enabled: true
endpoint: "/health"
timeout_seconds: 300
success_threshold: 3
failure_threshold: 2
# Rollback configuration
rollback:
# Automatically rollback on deployment failure
auto_rollback_on_failure: true
# Keep N previous releases for rollback
keep_releases: 5
# Health check timeout before triggering rollback
health_check_timeout: 300
# Notification on rollback
notify_on_rollback:
- "team_lead"
- "on_call"
# Release notes configuration
release_notes:
# Include in release notes
include:
- features
- bug_fixes
- improvements
- breaking_changes
- security_updates
- deprecations
# Categorize by story type
categorize_by_type: true
# Include contributor names
include_contributors: true
# Template to use
template: "templates/release-notes-template.md"
# Notifications
notifications:
# Notify on release candidate created
on_candidate_created:
enabled: true
channels:
- slack
# Notify on release deployed
on_deployed:
enabled: true
channels:
- slack
- email
# Notify on release failed
on_failed:
enabled: true
channels:
- slack
- email
- pagerduty
# Event subscriptions (for event bus)
events:
subscribe:
- "metrics.quality.pass"
- "metrics.quality.fail"
- "story.done"
- "sprint.ended"
publish:
- "release.candidate.created"
- "release.approved"
- "release.deployed"
- "release.failed"
- "release.rollback.initiated"
- "release.rollback.completed"
@@ -0,0 +1,118 @@
<event-handler
id="bmm-release/on-quality-fail"
event="metrics.quality.fail"
description="Handle quality gate failure - block release">
<objective>
When quality gates fail for a release candidate, block the release,
notify stakeholders, and provide remediation guidance.
</objective>
<preconditions>
<check>Event payload contains story_id or release_candidate_id</check>
<check>Event payload contains failed_gates</check>
<check>Event payload contains blocking flag</check>
</preconditions>
<flow>
<step n="1" title="Identify Release Candidate">
<action>Load module state from {module_path}/state/module-state.yaml</action>
<check if="event.payload.release_candidate_id exists">
<action>Find release candidate by ID</action>
</check>
<check if="event.payload.story_id exists">
<action>Find release candidate containing this story</action>
</check>
<check if="no release candidate found">
<action>Log: "No release candidate found for quality fail event"</action>
<action>Exit handler - may be a story-level check that is not part of a release</action>
</check>
</step>
<step n="2" title="Analyze Failure">
<action>Extract failed gates from event payload</action>
<action>Categorize failures as blocking vs non-blocking</action>
<action>Extract remediation steps from payload</action>
<failure_analysis>
<blocking_failures>{event.payload.failed_gates.filter(g => g.blocking)}</blocking_failures>
<non_blocking_failures>{event.payload.failed_gates.filter(g => !g.blocking)}</non_blocking_failures>
<blocking_count>{blocking_failures.length}</blocking_count>
</failure_analysis>
</step>
<step n="3" title="Update Release Candidate Status">
<check if="blocking_count > 0">
<action>Update release candidate status to "blocked"</action>
<action>Set quality_gate_passed = false</action>
<action>Set blocked_at = {event.timestamp}</action>
<action>Set blocked_reason = "Quality gates failed"</action>
<action>Set blocking_gates = {blocking_failures}</action>
</check>
<check if="blocking_count == 0">
<action>Update release candidate status to "warning"</action>
<action>Set quality_gate_passed = true (with warnings)</action>
<action>Set warnings = {non_blocking_failures}</action>
</check>
</step>
<step n="4" title="Publish Rejection Event" condition="blocking_count > 0">
<publish event="release.rejected">
<payload>
<release_id>{release_candidate.id}</release_id>
<version>{release_candidate.version}</version>
<rejected_by>quality_gates</rejected_by>
<reason>Quality gates failed: {blocking_failures.map(g => g.name).join(', ')}</reason>
<failed_gates>{blocking_failures}</failed_gates>
<remediation_steps>{event.payload.remediation_steps}</remediation_steps>
<timestamp>{current_timestamp}</timestamp>
</payload>
</publish>
</step>
<step n="5" title="Notify Stakeholders">
<action>Load notification config</action>
<action>Identify stakeholders for release</action>
<notification>
<subject>Release {release_candidate.version} Blocked - Quality Gates Failed</subject>
<body>
Release candidate **{release_candidate.version}** has been blocked.
**Failed Gates ({blocking_count}):**
{#each blocking_failures}
- {name}: {actual} (threshold: {threshold})
{/each}
**Remediation Required:**
{#each event.payload.remediation_steps}
- {step}
{/each}
Address these issues and run quality gates again.
</body>
<channels>{config.notifications.on_failed.channels}</channels>
</notification>
</step>
<step n="6" title="Save State">
<action>Save updated state to module-state.yaml</action>
<action>Log: "Release {release_candidate.version} BLOCKED - {blocking_count} gates failed"</action>
</step>
</flow>
<on-error>
<action>Log ERROR: "Failed to process quality.fail event: {error_message}"</action>
<action>Mark release candidate as requiring manual review</action>
<action>Send alert to release team</action>
</on-error>
<output>
<field name="release_id" value="{release_candidate.id}" />
<field name="version" value="{release_candidate.version}" />
<field name="status" value="{status}" />
<field name="blocking_gates_count" value="{blocking_count}" />
<field name="blocked" value="{blocking_count > 0}" />
</output>
</event-handler>
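The heart of this handler is the blocking/non-blocking partition in step 2; a minimal sketch with hypothetical gate data:
```python
failed_gates = [  # shape mirrors the handler's failure_analysis block (hypothetical)
    {"name": "test_coverage", "actual": "72%", "threshold": "80%", "blocking": True},
    {"name": "lint_warnings", "actual": 14, "threshold": 10, "blocking": False},
]

blocking = [g for g in failed_gates if g["blocking"]]
non_blocking = [g for g in failed_gates if not g["blocking"]]

status = "blocked" if blocking else "warning"
reason = ("Quality gates failed: " + ", ".join(g["name"] for g in blocking)) if blocking else None
print(status, reason)  # blocked Quality gates failed: test_coverage
```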
@@ -0,0 +1,91 @@
<event-handler
id="bmm-release/on-quality-pass"
event="metrics.quality.pass"
description="Handle quality gate pass - proceed with release approval">
<objective>
When quality gates pass for a release candidate, mark the candidate as
validated and proceed with release approval workflow.
</objective>
<preconditions>
<check>Event payload contains story_id or release_candidate_id</check>
<check>Event payload contains overall_score</check>
<check>Event payload contains gate_results</check>
</preconditions>
<flow>
<step n="1" title="Identify Release Candidate">
<action>Load module state from {module_path}/state/module-state.yaml</action>
<check if="event.payload.release_candidate_id exists">
<action>Find release candidate by ID</action>
</check>
<check if="event.payload.story_id exists">
<action>Find release candidate containing this story</action>
</check>
<check if="no release candidate found">
<action>Log: "No release candidate found for quality pass event"</action>
<action>Exit handler</action>
</check>
</step>
<step n="2" title="Validate Gate Results">
<action>Extract gate results from event payload</action>
<action>Verify all blocking gates passed</action>
<action>Store overall_score for release record</action>
<check if="any blocking gate failed">
<action>Log WARNING: "Received quality.pass but blocking gate failed"</action>
<action>Mark as requiring manual review</action>
</check>
</step>
<step n="3" title="Update Release Candidate Status">
<action>Update release candidate status to "validated"</action>
<action>Set quality_gate_passed = true</action>
<action>Set quality_gate_score = {event.payload.overall_score}</action>
<action>Set validated_at = {event.timestamp}</action>
<action>Record gate_results for audit</action>
</step>
<step n="4" title="Check Auto-Approval">
<action>Load release config</action>
<check if="config.deployment.environments[staging].auto_deploy == true">
<action>Auto-approve for staging deployment</action>
<publish event="release.approved">
<payload>
<release_id>{release_candidate.id}</release_id>
<version>{release_candidate.version}</version>
<approved_by>auto</approved_by>
<approved_at>{current_timestamp}</approved_at>
<target_environment>staging</target_environment>
<quality_gate_score>{event.payload.overall_score}</quality_gate_score>
</payload>
</publish>
</check>
<check if="else">
<action>Queue for manual approval</action>
<action>Notify approvers that release is ready</action>
</check>
</step>
<step n="5" title="Save State">
<action>Save updated state to module-state.yaml</action>
<action>Log: "Release {release_candidate.version} validated - quality score: {overall_score}%"</action>
</step>
</flow>
<on-error>
<action>Log ERROR: "Failed to process quality.pass event: {error_message}"</action>
<action>Mark release candidate as requiring manual review</action>
</on-error>
<output>
<field name="release_id" value="{release_candidate.id}" />
<field name="version" value="{release_candidate.version}" />
<field name="quality_score" value="{event.payload.overall_score}" />
<field name="status" value="validated" />
<field name="auto_approved" value="{auto_approved}" />
</output>
</event-handler>
@@ -0,0 +1,129 @@
# BMM-Release Event Publications
# Events this module emits
version: "1.0.0"
module: "bmm-release"
publications:
# Release candidate lifecycle
- event_type: "release.candidate.created"
description: "New release candidate has been created"
trigger: "release-planning workflow completion"
payload_schema:
release_id: { type: string, required: true }
version: { type: string, required: true }
stories: { type: array, required: true }
story_count: { type: number, required: true }
created_by: { type: string, required: true }
expires_at: { type: datetime, required: false }
consumers:
- module: "bmm-metrics"
action: "Run quality gate check"
- event_type: "release.candidate.expired"
description: "Release candidate expired without deployment"
trigger: "Automatic expiry check"
payload_schema:
release_id: { type: string, required: true }
version: { type: string, required: true }
expired_at: { type: datetime, required: true }
reason: { type: string, required: true }
# Release approval
- event_type: "release.approved"
description: "Release candidate approved for deployment"
trigger: "Manual approval or auto-approval"
payload_schema:
release_id: { type: string, required: true }
version: { type: string, required: true }
approved_by: { type: string, required: true }
approved_at: { type: datetime, required: true }
target_environment: { type: string, required: true }
quality_gate_score: { type: number, required: false }
- event_type: "release.rejected"
description: "Release candidate rejected"
trigger: "Manual rejection or quality gate failure"
payload_schema:
release_id: { type: string, required: true }
version: { type: string, required: true }
rejected_by: { type: string, required: true }
reason: { type: string, required: true }
failed_gates: { type: array, required: false }
# Deployment lifecycle
- event_type: "release.deploying"
description: "Release deployment in progress"
trigger: "Deployment initiation"
payload_schema:
release_id: { type: string, required: true }
version: { type: string, required: true }
environment: { type: string, required: true }
started_at: { type: datetime, required: true }
- event_type: "release.deployed"
description: "Release successfully deployed"
trigger: "Health check pass after deployment"
payload_schema:
release_id: { type: string, required: true }
version: { type: string, required: true }
environment: { type: string, required: true }
deployed_at: { type: datetime, required: true }
stories_released: { type: array, required: true }
release_notes_url: { type: string, required: false }
consumers:
- module: "bmm-feedback"
action: "Trigger post-release feedback collection"
- module: "bmm-metrics"
action: "Update deployment frequency metric"
- event_type: "release.failed"
description: "Release deployment failed"
trigger: "Deployment error or health check failure"
payload_schema:
release_id: { type: string, required: true }
version: { type: string, required: true }
environment: { type: string, required: true }
failed_at: { type: datetime, required: true }
error: { type: string, required: true }
auto_rollback_triggered: { type: boolean, required: true }
consumers:
- module: "bmm-metrics"
action: "Update change failure rate metric"
# Rollback lifecycle
- event_type: "release.rollback.initiated"
description: "Rollback procedure started"
trigger: "Manual rollback or auto-rollback on failure"
payload_schema:
release_id: { type: string, required: true }
from_version: { type: string, required: true }
to_version: { type: string, required: true }
environment: { type: string, required: true }
initiated_by: { type: string, required: true }
reason: { type: string, required: true }
- event_type: "release.rollback.completed"
description: "Rollback completed successfully"
trigger: "Health check pass after rollback"
payload_schema:
release_id: { type: string, required: true }
from_version: { type: string, required: true }
to_version: { type: string, required: true }
environment: { type: string, required: true }
completed_at: { type: datetime, required: true }
duration_seconds: { type: number, required: true }
consumers:
- module: "bmm-metrics"
action: "Update MTTR metric"
- event_type: "release.rollback.failed"
description: "Rollback failed - critical alert"
trigger: "Rollback procedure failure"
payload_schema:
release_id: { type: string, required: true }
from_version: { type: string, required: true }
to_version: { type: string, required: true }
environment: { type: string, required: true }
error: { type: string, required: true }
requires_manual_intervention: { type: boolean, required: true }
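For reference, a minimal sketch of wrapping one of these payloads in the standard envelope from `event-schema.yaml`, with a required-field check (the publisher function is illustrative, not the module's actual transport):
```python
import uuid
from datetime import datetime, timezone

REQUIRED = ("release_id", "version", "environment", "deployed_at", "stories_released")

def release_deployed_event(payload):
    """Wrap a release.deployed payload in the standard event envelope."""
    missing = [field for field in REQUIRED if field not in payload]
    if missing:
        raise ValueError(f"payload missing required fields: {missing}")
    return {
        "id": str(uuid.uuid4()),
        "type": "release.deployed",
        "source": "bmm-release/deploy-release",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "payload": payload,
    }
```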
@@ -0,0 +1,56 @@
# BMM-Release Event Subscriptions
# Events this module listens to
version: "1.0.0"
module: "bmm-release"
subscriptions:
# Quality gate events from bmm-metrics
- event_type: "metrics.quality.pass"
handler: "handlers/on-quality-pass.xml"
description: "Quality gates passed - can proceed with release"
filter:
# Only handle events whose story is part of this module's release candidate
match_field: "payload.story_id"
match_type: "in_release_candidate"
action: "proceed_with_release"
- event_type: "metrics.quality.fail"
handler: "handlers/on-quality-fail.xml"
description: "Quality gates failed - block release"
filter:
match_field: "payload.story_id"
match_type: "in_release_candidate"
action: "block_release"
# Story events from core
- event_type: "story.done"
handler: "handlers/on-story-done.xml"
description: "Story completed - add to pending release items"
action: "add_to_pending_release"
# Sprint events
- event_type: "sprint.ended"
handler: "handlers/on-sprint-ended.xml"
description: "Sprint ended - optionally create release candidate"
condition: "config.release.auto_create_on_sprint_end == true"
action: "create_release_candidate"
# Deployment events (from CI/CD or external systems)
- event_type: "deploy.completed"
handler: "handlers/on-deploy-completed.xml"
description: "Deployment finished - run health checks"
action: "verify_deployment"
- event_type: "deploy.failed"
handler: "handlers/on-deploy-failed.xml"
description: "Deployment failed - initiate rollback if configured"
action: "handle_deployment_failure"
# Event routing configuration
routing:
# How to find the right release candidate for an event
release_candidate_lookup:
by_story_id: "state.pending_stories[payload.story_id]"
by_sprint_id: "state.sprint_releases[payload.sprint_id]"
by_release_id: "state.releases[payload.release_id]"
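A sketch of the by-story lookup described above, with hypothetical state data (the function name is illustrative):
```python
state = {
    "release_candidates": [
        {"id": "rc-2024-001", "version": "2.1.0", "stories": ["STORY-123", "STORY-124"]},
    ],
}

def candidate_for_story(story_id):
    """Resolve which release candidate an incoming event refers to."""
    for rc in state["release_candidates"]:
        if story_id in rc["stories"]:
            return rc
    return None  # handlers exit quietly when nothing matches

print(candidate_for_story("STORY-124")["id"])  # rc-2024-001
```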
@@ -0,0 +1,154 @@
# BMM-Release Module Manifest
# Release Management for BMAD
name: bmm-release
version: "1.0.0"
display_name: "BMAD Release Module"
description: "Release planning, deployment coordination, and rollback management with event-driven architecture"
author: "BMad"
license: "MIT"
# Module category and tags
category: "operations"
tags:
- release
- deployment
- rollback
- versioning
- changelog
# Dependencies
dependencies:
core:
version: ">=1.0.0"
required: true
bmm-metrics:
version: ">=1.0.0"
required: false
reason: "Quality gate validation before release"
# Event Integration
events:
subscribes:
- metrics.quality.pass
- metrics.quality.fail
- story.done
- sprint.ended
- deploy.completed
- deploy.failed
publishes:
- release.candidate.created
- release.candidate.expired
- release.approved
- release.rejected
- release.deploying
- release.deployed
- release.failed
- release.rollback.initiated
- release.rollback.completed
- release.rollback.failed
# Agents provided by this module
agents:
- name: release-manager
file: agents/release-manager.agent.yaml
description: "Release Coordinator + Deployment Guardian"
icon: "🚀"
# Workflows provided
workflows:
- name: release-planning
path: workflows/release-planning
description: "Create a new release candidate"
standalone: true
triggers:
- module: bmm-metrics
workflow: quality-gate-check
event: release.candidate.created
- name: release-notes
path: workflows/release-notes
description: "Generate release notes"
standalone: true
- name: rollback-planning
path: workflows/rollback-planning
description: "Create rollback procedure"
standalone: true
# Event Handlers
event_handlers:
- name: on-quality-pass
file: events/handlers/on-quality-pass.xml
event: metrics.quality.pass
description: "Proceed with release when quality gates pass"
- name: on-quality-fail
file: events/handlers/on-quality-fail.xml
event: metrics.quality.fail
description: "Block release when quality gates fail"
# Templates
templates:
- name: release-notes-template
file: templates/release-notes-template.md
description: "Release notes markdown template"
# Configuration schema
config_schema:
project_name:
type: string
required: true
user_name:
type: string
required: true
output_folder:
type: string
required: true
default: "docs"
release:
type: object
properties:
versioning: { type: string, enum: ["semver", "calver", "custom"], default: "semver" }
auto_create_on_sprint_end: { type: boolean, default: false }
require_quality_gates: { type: boolean, default: true }
require_changelog: { type: boolean, default: true }
deployment:
type: object
properties:
environments: { type: array }
strategy: { type: string, default: "rolling" }
rollback:
type: object
properties:
auto_rollback_on_failure: { type: boolean, default: true }
keep_releases: { type: number, default: 5 }
# Installation hooks
install:
pre:
- action: "Verify bmm-metrics module if quality gates enabled"
post:
- action: "Initialize module state file"
- action: "Subscribe to events"
- action: "Generate slash commands"
# Slash commands to generate
slash_commands:
- name: "release-plan"
workflow: "release-planning"
description: "Create a new release candidate"
- name: "release-notes"
workflow: "release-notes"
description: "Generate release notes"
- name: "release-rollback"
workflow: "rollback-planning"
description: "Create or execute rollback plan"
@@ -0,0 +1,76 @@
# BMM-Release Module State
# This file is auto-managed by the bmm-release module
# Manual edits may be overwritten
version: "1.0.0"
module: "bmm-release"
initialized: false
last_updated: null
# Current version tracking
current_version:
version: null
released_at: null
environment: null
# Release candidates
release_candidates: []
# Example:
# - id: "rc-2024-001"
# version: "2.1.0"
# status: "pending" # pending, validated, approved, deployed, rejected, expired
# created_at: "2024-01-15T10:00:00Z"
# stories:
# - "STORY-123"
# - "STORY-124"
# quality_gate_passed: null
# quality_gate_score: null
# approved_by: null
# approved_at: null
# deployed_at: null
# rollback_plan: null
# Pending stories (done but not released)
pending_stories: []
# Example:
# - id: "STORY-125"
# title: "Add user profile page"
# type: "feature"
# completed_at: "2024-01-16T14:00:00Z"
# Release history
release_history: []
# Example:
# - version: "2.0.0"
# release_id: "rc-2024-001"
# released_at: "2024-01-15T18:00:00Z"
# environment: "production"
# story_count: 5
# status: "deployed"
# rollback_available: true
# Deployment tracking
deployments: []
# Example:
# - release_id: "rc-2024-001"
# environment: "staging"
# deployed_at: "2024-01-15T16:00:00Z"
# status: "success"
# health_check_passed: true
# Rollback history
rollbacks: []
# Example:
# - from_version: "2.1.0"
# to_version: "2.0.0"
# initiated_at: "2024-01-15T20:00:00Z"
# completed_at: "2024-01-15T20:15:00Z"
# reason: "Critical bug in payment processing"
# initiated_by: "user"
# status: "completed"
# Event processing
event_processing:
last_event_id: null
last_event_time: null
events_processed_count: 0
@@ -0,0 +1,113 @@
# Release {version}
**Release Date:** {release_date}
**Release Type:** {release_type}
---
{#if highlights}
## Highlights
{highlights}
{/if}
{#if breaking_changes}
## Breaking Changes
> **Important:** This release includes breaking changes. Please review before upgrading.
{#each breaking_changes}
### {title}
{description}
**Migration Steps:**
{migration_steps}
{/each}
{/if}
## What's New
{#if features}
### Features
{#each features}
- **{title}** {#if issue_link}([#{issue_id}]({issue_link})){/if}
{description}
{/each}
{/if}
{#if improvements}
### Improvements
{#each improvements}
- {title} {#if issue_link}([#{issue_id}]({issue_link})){/if}
{/each}
{/if}
{#if bug_fixes}
### Bug Fixes
{#each bug_fixes}
- {title} {#if issue_link}([#{issue_id}]({issue_link})){/if}
{/each}
{/if}
{#if security_updates}
### Security Updates
{#each security_updates}
- **{severity}:** {title}
{description}
{/each}
{/if}
{#if deprecations}
### Deprecations
The following features are deprecated and will be removed in a future release:
{#each deprecations}
- **{feature}**: {alternative}
{/each}
{/if}
{#if known_issues}
## Known Issues
{#each known_issues}
- {description} {#if workaround}(Workaround: {workaround}){/if}
{/each}
{/if}
{#if contributors}
## Contributors
Thanks to everyone who contributed to this release:
{#each contributors}
- @{username} {#if contributions}({contributions}){/if}
{/each}
{/if}
---
## Installation
```bash
# Using npm
npm install {package_name}@{version}
# Using yarn
yarn add {package_name}@{version}
```
## Full Changelog
See the full list of changes: [{previous_version}...{version}]({changelog_url})
---
*This release was generated by [BMAD Release Manager](https://github.com/bmad-method)*
@@ -0,0 +1,260 @@
# Release Notes Instructions
## Objective
Generate comprehensive, well-organized release notes from a release candidate, ready for publication to stakeholders.
## Prerequisites
- Release candidate exists and is validated
- Stories in release have descriptions and types
- Template file available
---
<step n="1" goal="Identify release candidate">
### Select Release
<action>Load release state from module-state.yaml</action>
<check if="multiple release candidates exist">
<ask>Which release would you like to generate notes for?
{{#each release_candidates}}
[{{index}}] {{version}} - {{status}} ({{story_count}} stories)
{{/each}}
Selection: </ask>
<action>Load selected release candidate</action>
</check>
<check if="single release candidate exists">
<action>Auto-select current release candidate</action>
</check>
<check if="no release candidates exist">
<action>Error: No release candidates found. Run *plan-release first.</action>
</check>
**Selected Release:** {{version}}
**Status:** {{status}}
**Stories:** {{story_count}}
</step>
---
<step n="2" goal="Load story details">
### Load Story Information
<action>Load full details for each story in release</action>
<action>Categorize stories by type</action>
<action>Extract key information for notes</action>
**Release Contents:**
| Category | Count |
|----------|-------|
| Features | {{features.length}} |
| Bug Fixes | {{bug_fixes.length}} |
| Improvements | {{improvements.length}} |
| Security | {{security.length}} |
| Breaking Changes | {{breaking_changes.length}} |
</step>
---
<step n="3" goal="Choose output format">
### Output Format
<ask>What format should the release notes be in?
[m] Markdown (GitHub/GitLab compatible)
[h] HTML (web publishing)
[s] Slack (for announcements)
[a] All formats
Format: </ask>
<action>Store as {{output_format}}</action>
</step>
---
<step n="4" goal="Customize release notes">
### Customize Content
<ask>Do you want to add any custom highlights or summary?
[y] Yes - add custom content
[n] No - use auto-generated summary
Choice: </ask>
<check if="choice == 'y'">
<ask>Enter release summary/highlights (1-3 sentences): </ask>
<action>Store as {{custom_summary}}</action>
</check>
<ask>Include contributor acknowledgments?
[y] Yes
[n] No
Choice: </ask>
<action>Store as {{include_contributors}}</action>
<ask>Include links to issues/PRs?
[y] Yes
[n] No
Choice: </ask>
<action>Store as {{include_links}}</action>
</step>
---
<step n="5" goal="Generate release notes">
### Generate Release Notes
<action>Load release notes template</action>
<action>Populate template with release data</action>
<template-output section="release-notes">
# Release {{version}}
**Release Date:** {{release_date}}
**Release Type:** {{release_type}}
{{#if custom_summary}}
## Highlights
{{custom_summary}}
{{/if}}
{{#if breaking_changes.length}}
## Breaking Changes
{{#each breaking_changes}}
- **{{title}}**: {{description}}
{{/each}}
{{/if}}
## What's New
### Features
{{#each features}}
- {{title}} {{#if include_links}}([#{{id}}]({{link}})){{/if}}
{{description}}
{{/each}}
### Bug Fixes
{{#each bug_fixes}}
- {{title}} {{#if include_links}}([#{{id}}]({{link}})){{/if}}
{{/each}}
### Improvements
{{#each improvements}}
- {{title}} {{#if include_links}}([#{{id}}]({{link}})){{/if}}
{{/each}}
{{#if security.length}}
### Security Updates
{{#each security}}
- {{title}}
{{/each}}
{{/if}}
{{#if include_contributors}}
## Contributors
Thanks to everyone who contributed to this release:
{{#each contributors}}
- @{{username}}
{{/each}}
{{/if}}
---
*Generated by BMAD Release Manager*
</template-output>
</step>
---
<step n="6" goal="Generate additional formats" condition="output_format in ['h', 'a']">
### Generate HTML Format
<action>Convert markdown to HTML</action>
<action>Apply styling template</action>
<action>Save as {{version}}-release-notes.html</action>
</step>
---
<step n="7" goal="Generate Slack format" condition="output_format in ['s', 'a']">
### Generate Slack Announcement
<action>Create Slack-formatted message</action>
**Slack Message Preview:**
```
:rocket: *Release {{version}}* is now available!
*Highlights:*
{{#if custom_summary}}{{custom_summary}}{{else}}{{auto_summary}}{{/if}}
*What's New:*
• {{features.length}} new features
• {{bug_fixes.length}} bug fixes
• {{improvements.length}} improvements
{{#if breaking_changes.length}}
:warning: This release includes breaking changes. See full notes.
{{/if}}
<{{release_notes_url}}|View Full Release Notes>
```
</step>
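Posting the announcement is outside this workflow (it only generates the text), but if you use a Slack incoming webhook, a minimal sketch looks like this (the webhook URL is a placeholder):

```python
import json
import urllib.request

def post_to_slack(webhook_url: str, text: str) -> None:
    # Slack incoming webhooks accept a JSON body with a "text" field.
    req = urllib.request.Request(
        webhook_url,
        data=json.dumps({"text": text}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    urllib.request.urlopen(req)  # raises on a non-2xx response

post_to_slack("https://hooks.slack.com/services/T000/B000/XXXX",  # placeholder
              ":rocket: *Release 2.1.0* is now available!")
```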
---
<step n="8" goal="Save and display">
### Save Release Notes
<action>Save markdown file to output folder</action>
<action>Update release candidate with notes path</action>
<action>Display preview</action>
**Files Generated:**
- Markdown: {{markdown_path}}
{{#if html_generated}}- HTML: {{html_path}}{{/if}}
{{#if slack_generated}}- Slack: {{slack_path}}{{/if}}
</step>
---
## Completion
Release notes generated for **{{version}}**.
**Summary:**
- Features: {{features.length}}
- Bug Fixes: {{bug_fixes.length}}
- Improvements: {{improvements.length}}
{{#if breaking_changes.length}}- Breaking Changes: {{breaking_changes.length}}{{/if}}
**Output:** {{output_file_path}}
**Next Steps:**
1. Review and edit release notes if needed
2. Publish to changelog/documentation
3. Share Slack announcement with team
4. Proceed with deployment: `*deploy`
View File
@ -0,0 +1,33 @@
# Release Notes Workflow
name: release-notes
description: "Generate comprehensive release notes from release candidate"
author: "BMad"
module: bmm-release
# Configuration
config_source: "{project-root}/.bmad/bmm-release/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
release_notes_config: "{config_source}:release_notes"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-release/workflows/release-notes"
instructions: "{installed_path}/instructions.md"
template: "{project-root}/.bmad/bmm-release/templates/release-notes-template.md"
# Input
input:
release_id:
description: "Release candidate ID to generate notes for"
required: false
default: "current"
format:
description: "Output format: markdown, html, slack"
required: false
default: "markdown"
# Output
default_output_file: "{output_folder}/releases/RELEASE-NOTES-{version}.md"
standalone: true
View File
@ -0,0 +1,251 @@
# Release Planning Instructions
## Objective
Create a new release candidate by selecting stories, determining the version number, and preparing for quality gate validation.
## Prerequisites
- Stories marked as "done" available for release
- Previous release version known (for version increment)
- Quality gate configuration in bmm-metrics
---
<step n="1" goal="Determine release type">
### Determine Release Type
<ask>What type of release is this?
[M] Major - Breaking changes, significant new features
[m] Minor - New features, backward compatible
[p] Patch - Bug fixes only
[h] Hotfix - Critical production fix
Release type: </ask>
<action>Store as {{release_type}}</action>
<ask>Do you want to specify a version number, or auto-generate?
[a] Auto-generate based on release type
[s] Specify version manually
Choice: </ask>
<check if="choice == 's'">
<ask>Enter version number (e.g., 2.1.0): </ask>
<action>Store as {{version}}</action>
</check>
<check if="choice == 'a'">
<action>Load current version from state</action>
<action>Increment based on release_type</action>
<action>Store as {{version}}</action>
</check>
**Release Version:** {{version}}
</step>
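Auto-generation follows standard semantic versioning; a minimal sketch of the increment rule (treating a hotfix as a patch bump is an assumption — the workflow does not pin down hotfix numbering):

```python
def next_version(current: str, release_type: str) -> str:
    major, minor, patch = (int(part) for part in current.split("."))
    if release_type == "major":        # breaking changes
        return f"{major + 1}.0.0"
    if release_type == "minor":        # backward-compatible features
        return f"{major}.{minor + 1}.0"
    # "patch" and "hotfix" both bump the last segment (hotfix handling assumed)
    return f"{major}.{minor}.{patch + 1}"

assert next_version("2.1.3", "minor") == "2.2.0"
assert next_version("2.1.3", "hotfix") == "2.1.4"
```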
---
<step n="2" goal="Select stories for release">
### Select Stories
<action>Load pending stories from state (stories marked done since last release)</action>
**Pending Stories:**
| Story ID | Title | Type | Points |
|----------|-------|------|--------|
{{#each pending_stories}}
| {{id}} | {{title}} | {{type}} | {{points}} |
{{/each}}
**Total:** {{pending_stories.length}} stories, {{total_points}} points
<ask>Which stories should be included in this release?
[a] All pending stories
[s] Select specific stories (comma-separated IDs)
[e] Exclude specific stories
Choice: </ask>
<check if="choice == 'a'">
<action>Include all pending stories</action>
</check>
<check if="choice == 's'">
<ask>Enter story IDs to include (comma-separated): </ask>
<action>Parse and validate story IDs</action>
<action>Include only specified stories</action>
</check>
<check if="choice == 'e'">
<ask>Enter story IDs to exclude (comma-separated): </ask>
<action>Parse and validate story IDs</action>
<action>Include all except excluded stories</action>
</check>
**Stories Selected:** {{selected_stories.length}}
</step>
---
<step n="3" goal="Categorize changes">
### Categorize Changes
<action>Categorize selected stories by type</action>
**Release Contents:**
**Features ({{features.length}}):**
{{#each features}}
- {{id}}: {{title}}
{{/each}}
**Bug Fixes ({{bug_fixes.length}}):**
{{#each bug_fixes}}
- {{id}}: {{title}}
{{/each}}
**Improvements ({{improvements.length}}):**
{{#each improvements}}
- {{id}}: {{title}}
{{/each}}
<ask>Are there any breaking changes in this release?
[y] Yes - describe them
[n] No breaking changes
Response: </ask>
<check if="response == 'y'">
<ask>Describe the breaking changes: </ask>
<action>Store as {{breaking_changes}}</action>
</check>
</step>
---
<step n="4" goal="Set release metadata">
### Release Metadata
<ask>Who is the release owner/coordinator?
(Default: {{user_name}}): </ask>
<action>Store as {{release_owner}} or default to user_name</action>
<ask>Target release date (YYYY-MM-DD or "asap"): </ask>
<action>Store as {{target_date}}</action>
<ask>Any release notes or highlights to include?
(Press Enter to skip): </ask>
<action>Store as {{release_highlights}}</action>
</step>
---
<step n="5" goal="Create release candidate">
### Create Release Candidate
<action>Generate release candidate ID</action>
<action>Calculate expiry time based on config</action>
**Release Candidate Summary:**
| Field | Value |
|-------|-------|
| Release ID | {{release_id}} |
| Version | {{version}} |
| Type | {{release_type}} |
| Stories | {{selected_stories.length}} |
| Points | {{total_points}} |
| Owner | {{release_owner}} |
| Target Date | {{target_date}} |
| Expires | {{expiry_time}} |
<ask>Create this release candidate?
[y] Yes - create and trigger quality gates
[n] No - cancel
Confirm: </ask>
<check if="confirm != 'y'">
<action>Cancel workflow</action>
</check>
</step>
---
<step n="6" goal="Save and publish">
### Save Release Candidate
<action>Create release candidate record</action>
<action>Save to state file</action>
<action>Generate release candidate YAML file</action>
<template-output section="release-candidate">
Generate release candidate file with:
- Release ID and version
- Selected stories with full details
- Categorized changes
- Breaking changes (if any)
- Release metadata
- Quality gate requirements
</template-output>
</step>
---
<step n="7" goal="Publish release candidate event" critical="true">
### Publish Release Candidate Event
<publish event="release.candidate.created">
<payload>
<release_id>{{release_id}}</release_id>
<version>{{version}}</version>
<release_type>{{release_type}}</release_type>
<stories>{{selected_story_ids}}</stories>
<story_count>{{selected_stories.length}}</story_count>
<total_points>{{total_points}}</total_points>
<created_by>{{release_owner}}</created_by>
<target_date>{{target_date}}</target_date>
<expires_at>{{expiry_time}}</expires_at>
<breaking_changes>{{breaking_changes}}</breaking_changes>
<timestamp>{{current_timestamp}}</timestamp>
</payload>
</publish>
<action>Log: "Release candidate {{version}} created with {{selected_stories.length}} stories"</action>
</step>
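With the file-queue transport used for local development, publishing reduces to writing an enveloped event into the queue directory; a minimal sketch (the queue path and filename scheme are assumptions — the actual transport is defined by file-queue-transport.xml):

```python
import json
import uuid
from datetime import datetime, timezone
from pathlib import Path

QUEUE_DIR = Path(".bmad/events/queue")  # assumed location of the file-based queue

def publish(event_type: str, source: str, payload: dict) -> str:
    """Wrap the payload in the standard event envelope and enqueue it."""
    event = {
        "id": str(uuid.uuid4()),
        "type": event_type,                                    # {domain}.{action}
        "source": source,                                      # {module}/{workflow}
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "payload": payload,
    }
    QUEUE_DIR.mkdir(parents=True, exist_ok=True)
    (QUEUE_DIR / f"{event['id']}.json").write_text(json.dumps(event, indent=2))
    return event["id"]

publish("release.candidate.created", "bmm-release/release-planning",
        {"release_id": "rc-001", "version": "2.1.0", "story_count": 12})  # values illustrative
```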
---
## Completion
Release candidate **{{version}}** created successfully.
**Release ID:** {{release_id}}
**Stories:** {{selected_stories.length}}
**Status:** Pending Quality Gate Validation
**Next Steps:**
1. Quality gates will be automatically validated (bmm-metrics will receive the event)
2. Monitor for `metrics.quality.pass` or `metrics.quality.fail` event
3. Once validated, run `*release-notes` to generate release notes
4. After approval, run `*deploy` to deploy the release
**File Saved:** {{output_file_path}}
View File
@ -0,0 +1,47 @@
# Release Planning Workflow
name: release-planning
description: "Create and configure a new release candidate"
author: "BMad"
module: bmm-release
# Configuration
config_source: "{project-root}/.bmad/bmm-release/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
user_name: "{config_source}:user_name"
release_config: "{config_source}:release"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-release/workflows/release-planning"
instructions: "{installed_path}/instructions.md"
# Input
input:
version:
description: "Version number for the release (auto-generated if not provided)"
required: false
include_stories:
description: "Specific story IDs to include (default: all pending)"
required: false
release_type:
description: "Type of release: major, minor, patch, hotfix"
required: false
default: "minor"
# Output
default_output_file: "{output_folder}/releases/release-{version}.yaml"
# Events this workflow publishes
publishes:
- event_type: "release.candidate.created"
condition: "When release candidate is successfully created"
# Triggers quality gate check
triggers:
- module: "bmm-metrics"
workflow: "quality-gate-check"
event: "release.candidate.created"
description: "Automatically validate quality gates for new release"
standalone: true
View File
@ -0,0 +1,298 @@
# Rollback Planning Instructions
## Objective
Create a comprehensive rollback plan for a release, ensuring quick recovery if deployment issues occur.
## Prerequisites
- Release candidate or deployed release identified
- Previous stable version available
- Understanding of data migration impacts
---
<step n="1" goal="Identify rollback context">
### Identify Release and Target
<action>Load release history from state</action>
<ask>What is the context for this rollback plan?
[p] Pre-deployment - Planning ahead for new release
[a] Active issue - Need to rollback now
[r] Review - Updating existing rollback plan
Context: </ask>
<action>Store as {{rollback_context}}</action>
<check if="context == 'a'">
**URGENT MODE ACTIVATED**
<action>Load current deployed version</action>
<action>Identify last known good version</action>
<action>Skip optional steps, focus on execution</action>
</check>
</step>
---
<step n="2" goal="Select versions">
### Version Selection
**Current Version:** {{current_version}}
**Available Rollback Targets:**
| Version | Deployed | Status | Age |
|---------|----------|--------|-----|
{{#each available_versions}}
| {{version}} | {{deployed_date}} | {{status}} | {{age}} |
{{/each}}
<ask>Which version should we rollback to?
(Enter version number or select from list): </ask>
<action>Store as {{target_version}}</action>
<action>Validate target version exists and is stable</action>
**Rollback Path:** {{current_version}} → {{target_version}}
</step>
---
<step n="3" goal="Analyze rollback impact">
### Impact Analysis
<action>Load changes between versions</action>
<action>Identify database migrations</action>
<action>Check for breaking changes</action>
**Changes to Revert:**
- Stories: {{stories_between_versions.length}}
- Database migrations: {{migrations.length}}
- API changes: {{api_changes.length}}
- Configuration changes: {{config_changes.length}}
{{#if migrations.length}}
**Database Migrations:**
| Migration | Type | Reversible |
|-----------|------|------------|
{{#each migrations}}
| {{name}} | {{type}} | {{#if reversible}}Yes{{else}}NO{{/if}} |
{{/each}}
<check if="any migration not reversible">
**WARNING:** Some migrations are not reversible. Manual intervention required.
</check>
{{/if}}
{{#if breaking_changes}}
**Breaking Changes to Consider:**
{{#each breaking_changes}}
- {{description}}
{{/each}}
{{/if}}
</step>
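The reversibility check reduces to scanning the migrations between the two versions for a usable down script; a minimal sketch (the migration record shape is assumed):

```python
def split_migrations(migrations: list[dict]) -> tuple[list[dict], list[dict]]:
    """Separate reversible migrations from ones that need manual intervention."""
    reversible, blocked = [], []
    for migration in migrations:
        # Reversible only if a down script exists and the change is not
        # destructive (a dropped column's data cannot be restored by SQL alone).
        if migration.get("down_script") and not migration.get("destructive", False):
            reversible.append(migration)
        else:
            blocked.append(migration)
    return reversible, blocked

reversible, blocked = split_migrations([
    {"name": "add_users_email_index", "down_script": "DROP INDEX users_email_idx;"},
    {"name": "drop_legacy_column", "destructive": True},   # illustrative records
])
if blocked:
    print(f"WARNING: {len(blocked)} migration(s) are not reversible")
```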
---
<step n="4" goal="Define rollback steps">
### Rollback Procedure
<action>Generate rollback steps based on deployment type</action>
**Pre-Rollback Checklist:**
- [ ] Notify stakeholders of planned rollback
- [ ] Ensure target version artifacts are available
- [ ] Verify database backup exists
- [ ] Confirm rollback team is available
- [ ] Document current error/issue being resolved
**Rollback Steps:**
1. **Announce Maintenance**
- Notify users of planned maintenance
- Enable maintenance mode if available
2. **Stop Traffic**
- Route traffic away from affected services
- Drain existing connections
3. **Database Rollback** {{#if migrations.length}}(if needed){{/if}}
{{#each migrations_to_revert}}
- Revert: {{name}}
```sql
{{down_script}}
```
{{/each}}
4. **Deploy Previous Version**
- Target version: {{target_version}}
- Deployment command: `{{deploy_command}}`
5. **Verify Deployment**
- Run health checks
- Verify critical endpoints
- Check error rates
6. **Restore Traffic**
- Re-enable traffic routing
- Monitor for issues
7. **Post-Rollback**
- Disable maintenance mode
- Notify stakeholders
- Document rollback reason
</step>
---
<step n="5" goal="Define verification steps">
### Verification Checklist
<ask>What are the critical checks after rollback?
(Enter comma-separated or press Enter for defaults): </ask>
**Health Checks:**
- [ ] Service responds to health endpoint
- [ ] Database connectivity verified
- [ ] Cache connectivity verified
- [ ] External service integrations working
**Functional Checks:**
- [ ] User authentication works
- [ ] Core workflows functional
- [ ] Critical API endpoints responding
- [ ] No elevated error rates
**Performance Checks:**
- [ ] Response times within SLA
- [ ] No memory leaks
- [ ] CPU utilization normal
</step>
---
<step n="6" goal="Define escalation path">
### Escalation Procedures
<ask>Define escalation contacts for rollback issues:
Level 1 (On-call):
Level 2 (Engineering Lead):
Level 3 (VP/Director):
Enter contacts: </ask>
**Escalation Matrix:**
| Issue | Contact | Response Time |
|-------|---------|---------------|
| Rollback fails | {{level_1}} | Immediate |
| Data inconsistency | {{level_2}} | 15 minutes |
| Extended outage | {{level_3}} | 30 minutes |
</step>
---
<step n="7" goal="Generate rollback document">
### Generate Rollback Plan
<template-output section="rollback-plan">
# Rollback Plan: {{current_version}} → {{target_version}}
**Created:** {{date}}
**Context:** {{rollback_context}}
**Estimated Duration:** {{estimated_duration}}
## Summary
- Current Version: {{current_version}}
- Target Version: {{target_version}}
- Stories Affected: {{stories_between_versions.length}}
- Database Changes: {{migrations.length}}
## Pre-Rollback Checklist
{{pre_rollback_checklist}}
## Rollback Steps
{{rollback_steps}}
## Verification Checklist
{{verification_checklist}}
## Escalation Contacts
{{escalation_matrix}}
## Rollback Command
```bash
{{rollback_command}}
```
---
*Plan generated by BMAD Release Manager*
</template-output>
</step>
---
<step n="8" goal="Save rollback plan">
### Save Plan
<action>Save rollback plan to output file</action>
<action>Link plan to release candidate</action>
<action>Update state with rollback plan reference</action>
**Plan Saved:** {{output_file_path}}
</step>
---
## Completion
Rollback plan created for **{{current_version}} → {{target_version}}**.
**Summary:**
- Estimated Duration: {{estimated_duration}}
- Database Migrations: {{migrations.length}}
- Manual Steps Required: {{manual_steps_count}}
**Plan Location:** {{output_file_path}}
<check if="context == 'a'">
## EXECUTE ROLLBACK NOW?
<ask>Ready to execute rollback?
[y] Yes - execute rollback procedure
[n] No - save plan for later
Choice: </ask>
<check if="choice == 'y'">
<ask>Briefly describe the reason for this rollback: </ask>
<action>Store as {{rollback_reason}}</action>
<action>Execute rollback procedure</action>
<publish event="release.rollback.initiated">
<payload>
<release_id>{{release_id}}</release_id>
<from_version>{{current_version}}</from_version>
<to_version>{{target_version}}</to_version>
<environment>{{environment}}</environment>
<initiated_by>{{user_name}}</initiated_by>
<reason>{{rollback_reason}}</reason>
<timestamp>{{current_timestamp}}</timestamp>
</payload>
</publish>
</check>
</check>
View File
@ -0,0 +1,31 @@
# Rollback Planning Workflow
name: rollback-planning
description: "Create rollback procedure for a release"
author: "BMad"
module: bmm-release
# Configuration
config_source: "{project-root}/.bmad/bmm-release/config.yaml"
project_name: "{config_source}:project_name"
output_folder: "{config_source}:output_folder"
rollback_config: "{config_source}:rollback"
date: system-generated
# Paths
installed_path: "{project-root}/.bmad/bmm-release/workflows/rollback-planning"
instructions: "{installed_path}/instructions.md"
# Input
input:
release_id:
description: "Release to create rollback plan for"
required: false
default: "current"
target_version:
description: "Version to rollback to"
required: false
# Output
default_output_file: "{output_folder}/releases/rollback-plan-{version}.md"
standalone: true
View File
@ -0,0 +1,78 @@
# BMM-Roadmap Module
Product Roadmap Planning module for the BMAD Method. Creates and maintains capacity-aware roadmaps that integrate with prioritization and velocity data.
## Overview
The bmm-roadmap module provides:
- **Roadmap Planning**: Create quarterly/annual roadmaps
- **Capacity Planning**: Velocity-aware timeline estimation
- **Milestone Tracking**: Track progress against roadmap
- **Roadmap Visualization**: Generate roadmap artifacts
## Event-Driven Architecture
### Events Subscribed
| Event | Action |
|-------|--------|
| `priority.queue.reordered` | Refresh roadmap with new priorities |
| `metrics.velocity.calculated` | Update capacity projections |
| `release.deployed` | Update milestone completion |
| `sprint.ended` | Update roadmap progress |
### Events Published
| Event | Description |
|-------|-------------|
| `roadmap.updated` | Roadmap has been modified |
| `roadmap.milestone.completed` | Milestone achieved |
| `roadmap.at.risk` | Timeline at risk based on velocity |
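
At-risk detection compares a velocity-projected completion date against the planned date; a minimal sketch of the arithmetic behind `roadmap.at.risk` (the risk threshold and field names are assumptions):

```python
from datetime import date, timedelta

def assess_milestone(remaining_points: float, capacity_per_sprint: float,
                     sprint_length_days: int, planned: date) -> dict:
    sprints_left = remaining_points / capacity_per_sprint
    projected = date.today() + timedelta(days=sprints_left * sprint_length_days)
    variance = (projected - planned).days
    return {
        "projected_date": projected.isoformat(),
        "variance_days": variance,
        "at_risk": variance > 0,  # any projected slip flags the milestone (threshold assumed)
    }

print(assess_milestone(40, 13.6, 14, date(2025, 3, 1)))  # values illustrative
```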
## Directory Structure
```
bmm-roadmap/
├── README.md
├── manifest.yaml
├── config.yaml
├── agents/
│ └── roadmap-planner.agent.yaml
├── workflows/
│ ├── roadmap-planning/
│ └── capacity-planning/
├── events/
│ ├── subscriptions.yaml
│ ├── publications.yaml
│ └── handlers/
├── templates/
│ └── roadmap-template.md
└── state/
└── module-state.yaml
```
## Quick Start
1. Install the module via BMAD installer
2. Configure roadmap settings in `.bmad/bmm-roadmap/config.yaml`
3. Use the Roadmap Planner agent: `*roadmap-planner`
## Agent Commands
- `*help` - Show available commands
- `*plan` - Create or update roadmap
- `*capacity` - Run capacity planning
- `*timeline` - View estimated timeline
- `*milestones` - View milestone status
## Integration Flow
```
bmm-priority (priority.queue.reordered)
        ↓
bmm-roadmap (receives priority order)
        ↓
bmm-metrics (metrics.velocity.calculated)
        ↓
bmm-roadmap (updates capacity projections)
        ↓
roadmap.updated event
```
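
A handler in this flow is a consumer that reacts to one event type and may publish a follow-up; a minimal Python sketch of the `priority.queue.reordered` handler's logic (the real handlers are XML files wired up in `events/subscriptions.yaml`; the payload field names and the PyYAML dependency are assumptions):

```python
from pathlib import Path

import yaml  # third-party: pip install pyyaml

STATE = Path(".bmad/bmm-roadmap/state/module-state.yaml")

def on_priority_reordered(event: dict) -> dict:
    """Reorder roadmap items to match the new priority queue, then announce the change."""
    new_order = event["payload"]["ordered_story_ids"]  # payload field name assumed
    state = yaml.safe_load(STATE.read_text())
    rank = {story_id: i for i, story_id in enumerate(new_order)}
    state["roadmap"]["items"].sort(
        key=lambda item: rank.get(item["story_id"], len(new_order)))
    STATE.write_text(yaml.safe_dump(state))
    # Follow-up event for downstream subscribers (see events/publications.yaml).
    return {"type": "roadmap.updated",
            "payload": {"roadmap_id": state["roadmap"]["id"],
                        "change_type": "reorder",
                        "items_affected": new_order}}
```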
View File
@ -0,0 +1,152 @@
# Roadmap Planner Agent Definition
name: roadmap-planner
displayName: Roadmap Planner
title: Strategic Planner + Timeline Guardian
icon: "🗺️"
persona:
role: "Roadmap Planner + Strategic Planner + Timeline Guardian"
identity: |
Strategic planner who translates priorities into realistic, capacity-aware
roadmaps. Balances ambition with reality, always grounding timelines in
actual velocity data. Protects the team from overcommitment while ensuring
stakeholders have clear visibility into delivery expectations.
communication_style: |
Speaks in terms of capacity, velocity, and confidence levels.
Uses visual timelines and Gantt-style representations.
Always includes caveats about estimates. Says "based on current velocity..."
frequently. Transparent about risks and dependencies.
principles:
- "Roadmaps are living documents, not contracts"
- "Velocity doesn't lie - use it for planning"
- "Include buffer - surprises always happen"
- "Dependencies are where timelines die"
- "Communicate changes early and often"
- "A realistic roadmap beats an optimistic one"
activation:
critical: true
steps:
- step: 1
action: "Load persona from this agent file"
- step: 2
action: "Load module config from {project-root}/.bmad/bmm-roadmap/config.yaml"
mandate: true
- step: 3
action: "Store config values: {user_name}, {project_name}, {roadmap}, {capacity}"
- step: 4
action: "Load roadmap state from {project-root}/.bmad/bmm-roadmap/state/module-state.yaml"
- step: 5
action: "Greet user and display menu"
format: |
🗺️ **Roadmap Planner** ready, {user_name}
Current project: **{project_name}**
Current velocity: **{velocity}** points/sprint
Planning horizon: **{horizon}**
{menu_items}
menu:
- cmd: "*help"
action: "Show numbered menu"
- cmd: "*plan"
workflow: "{project-root}/.bmad/bmm-roadmap/workflows/roadmap-planning/workflow.yaml"
description: "Create or update product roadmap"
- cmd: "*capacity"
workflow: "{project-root}/.bmad/bmm-roadmap/workflows/capacity-planning/workflow.yaml"
description: "Run capacity planning analysis"
- cmd: "*timeline"
action: "#show-timeline"
description: "View estimated timeline"
- cmd: "*milestones"
action: "#show-milestones"
description: "View milestone status"
- cmd: "*risk"
action: "#show-risks"
description: "View timeline risks"
- cmd: "*exit"
action: "Exit agent with confirmation"
prompts:
show-timeline:
id: show-timeline
content: |
Display estimated timeline:
1. Load priority queue
2. Calculate timeline based on velocity
3. Show Gantt-style visualization
4. Highlight milestones and dependencies
show-milestones:
id: show-milestones
content: |
Display milestone status:
1. Load milestones from state
2. Show progress percentage
3. Show estimated completion date
4. Flag at-risk milestones
show-risks:
id: show-risks
content: |
Display timeline risks:
1. Compare planned vs actual velocity
2. Identify scope creep
3. Flag dependency issues
4. Calculate confidence level
expertise:
domains:
- "Product roadmap planning"
- "Capacity planning"
- "Timeline estimation"
- "Dependency management"
- "Milestone tracking"
frameworks:
- "Agile roadmapping"
- "Now/Next/Later planning"
- "Theme-based roadmaps"
- "Outcome-based roadmaps"
collaboration:
works_with:
- agent: "pm"
purpose: "Align roadmap with product strategy"
- agent: "priority-manager"
purpose: "Receive priority queue for planning"
- agent: "sm"
purpose: "Coordinate sprint planning"
handoffs:
- from: "bmm-priority"
event: "priority.queue.reordered"
description: "Refresh roadmap with new priorities"
- from: "bmm-metrics"
event: "metrics.velocity.calculated"
description: "Update capacity projections"
rules:
- "Always base timelines on actual velocity data"
- "Include planning buffer in all estimates"
- "Flag timeline risks as soon as detected"
- "Update roadmap when priorities change"
- "Communicate milestone changes to stakeholders"
View File
@ -0,0 +1,60 @@
# BMM-Roadmap Module Configuration
project_name: "{{project_name}}"
user_name: "{{user_name}}"
output_folder: "docs"
# Roadmap settings
roadmap:
# Planning horizon
horizon: "quarterly" # quarterly, half-year, annual
# Sprint length in days
sprint_length_days: 14
# Planning buffer percentage
planning_buffer: 20
# Milestone types
milestone_types:
- mvp
- release
- beta
- ga
# Capacity planning
capacity:
# Use velocity from bmm-metrics
use_velocity_data: true
# Default velocity if no data
default_velocity: 20
# Team size for planning
team_size: 5
# Account for holidays/PTO
availability_factor: 0.85
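# Worked example (illustrative): assuming effective capacity is
#   velocity * availability_factor * (1 - planning_buffer / 100),
# the defaults give 20 * 0.85 * 0.8 = 13.6 points per sprint, so a
# 120-point backlog projects to ceil(120 / 13.6) = 9 sprints.
# (The exact formula is an assumption; the module may weight these differently.)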
# Visualization
visualization:
# Output formats
formats:
- markdown
- mermaid
# Show dependencies
show_dependencies: true
# Events
events:
subscribe:
- "priority.queue.reordered"
- "metrics.velocity.calculated"
- "release.deployed"
- "sprint.ended"
publish:
- "roadmap.updated"
- "roadmap.milestone.completed"
- "roadmap.at.risk"
View File
@ -0,0 +1,32 @@
# BMM-Roadmap Event Publications
version: "1.0.0"
module: "bmm-roadmap"
publications:
- event_type: "roadmap.updated"
description: "Roadmap has been modified"
payload_schema:
roadmap_id: { type: string, required: true }
change_type: { type: string, required: true }
items_affected: { type: array, required: true }
new_timeline: { type: object, required: false }
- event_type: "roadmap.milestone.completed"
description: "Milestone has been achieved"
payload_schema:
milestone_id: { type: string, required: true }
milestone_name: { type: string, required: true }
completed_at: { type: datetime, required: true }
planned_date: { type: datetime, required: true }
variance_days: { type: number, required: true }
- event_type: "roadmap.at.risk"
description: "Timeline at risk based on velocity"
payload_schema:
milestone_id: { type: string, required: true }
planned_date: { type: datetime, required: true }
projected_date: { type: datetime, required: true }
variance_days: { type: number, required: true }
confidence: { type: number, required: true }
risk_factors: { type: array, required: true }
View File
@ -0,0 +1,25 @@
# BMM-Roadmap Event Subscriptions
version: "1.0.0"
module: "bmm-roadmap"
subscriptions:
- event_type: "priority.queue.reordered"
handler: "handlers/on-priority-reordered.xml"
description: "Refresh roadmap with new priority order"
action: "refresh_roadmap"
- event_type: "metrics.velocity.calculated"
handler: "handlers/on-velocity-calculated.xml"
description: "Update capacity projections"
action: "update_capacity"
- event_type: "release.deployed"
handler: "handlers/on-release-deployed.xml"
description: "Update milestone completion"
action: "mark_milestone_progress"
- event_type: "sprint.ended"
handler: "handlers/on-sprint-ended.xml"
description: "Update roadmap progress"
action: "update_progress"
View File
@ -0,0 +1,67 @@
# BMM-Roadmap Module Manifest
name: bmm-roadmap
version: "1.0.0"
display_name: "BMAD Roadmap Module"
description: "Capacity-aware product roadmap planning"
author: "BMad"
license: "MIT"
category: "planning"
tags:
- roadmap
- planning
- capacity
- timeline
- milestones
dependencies:
core:
version: ">=1.0.0"
required: true
bmm-priority:
version: ">=1.0.0"
required: false
reason: "Priority data for roadmap planning"
bmm-metrics:
version: ">=1.0.0"
required: false
reason: "Velocity data for capacity planning"
events:
subscribes:
- priority.queue.reordered
- metrics.velocity.calculated
- release.deployed
- sprint.ended
publishes:
- roadmap.updated
- roadmap.milestone.completed
- roadmap.at.risk
agents:
- name: roadmap-planner
file: agents/roadmap-planner.agent.yaml
description: "Strategic Planner + Timeline Guardian"
icon: "🗺️"
workflows:
- name: roadmap-planning
path: workflows/roadmap-planning
description: "Create or update product roadmap"
standalone: true
- name: capacity-planning
path: workflows/capacity-planning
description: "Run capacity planning analysis"
standalone: true
slash_commands:
- name: "roadmap-plan"
workflow: "roadmap-planning"
description: "Plan product roadmap"
- name: "roadmap-capacity"
workflow: "capacity-planning"
description: "Run capacity planning"
View File
@ -0,0 +1,53 @@
# BMM-Roadmap Module State
version: "1.0.0"
module: "bmm-roadmap"
initialized: false
last_updated: null
# Current roadmap
roadmap:
id: null
name: null
horizon: null
created_at: null
last_modified: null
# Items in roadmap
items: []
# Example:
# - story_id: "STORY-123"
# epic_id: "EPIC-1"
# estimated_sprint: 3
# estimated_date: "2024-02-15"
# dependencies: []
# Milestones
milestones: []
# Example:
# - id: "ms-001"
# name: "MVP Launch"
# type: "mvp"
# target_date: "2024-03-01"
# stories: ["STORY-123", "STORY-124"]
# progress: 40
# status: "on_track"
# Capacity data (from velocity events)
capacity:
current_velocity: null
rolling_average: null
team_size: null
availability_factor: null
effective_capacity: null
# Timeline projections
projections:
last_calculated: null
confidence: null
risk_level: null
# Event processing
event_processing:
last_event_id: null
events_processed_count: 0