exorcise the demons part 1 - remove dead, unneeded artifacts and items no longer supported beyond the beta of the BMM or core - but that could return later as a module

This commit is contained in:
Brian Madison 2026-02-04 15:43:48 -06:00
parent 11d2fc6d5e
commit f699a3683f
53 changed files with 139 additions and 3798 deletions

View File

@ -58,8 +58,7 @@
"tmpl",
"Trae",
"Unsharded",
"VNET",
"webskip"
"VNET"
],
"json.schemas": [
{

View File

@ -1,7 +1,6 @@
# Dev Implementation Agent Definition (v6)
agent:
webskip: true
metadata:
id: "_bmad/bmm/agents/dev.md"
name: Amelia

View File

@ -4,15 +4,11 @@ bmm,anytime,Generate Project Context,GPC,,_bmad/bmm/workflows/generate-project-c
bmm,anytime,Quick Spec,QS,,_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md,bmad-bmm-quick-spec,false,quick-flow-solo-dev,Create Mode,"Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method. Quick one-off tasks small changes simple apps brownfield additions to well established patterns utilities without extensive planning",planning_artifacts,"tech spec",
bmm,anytime,Quick Dev,QD,,_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md,bmad-bmm-quick-dev,false,quick-flow-solo-dev,Create Mode,"Quick one-off tasks small changes simple apps utilities without extensive planning - Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method, unless the user is already working through the implementation phase and just requests a one-off thing not already in the plan",,,
bmm,anytime,Correct Course,CC,,_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml,bmad-bmm-correct-course,false,sm,Create Mode,"Anytime: Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories",planning_artifacts,"change proposal",
bmm,anytime,Create Dataflow,CDF,,_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml,bmad-bmm-create-excalidraw-dataflow,false,ux-designer,Create Mode,"Create data flow diagrams (DFD) in Excalidraw format - can be called standalone or during any workflow to add visual documentation",planning_artifacts,"dataflow diagram",
bmm,anytime,Create Diagram,CED,,_bmad/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml,bmad-bmm-create-excalidraw-diagram,false,ux-designer,Create Mode,"Create system architecture diagrams ERDs UML diagrams or general technical diagrams in Excalidraw format - use anytime or call from architecture workflow to add visual documentation",planning_artifacts,"diagram",
bmm,anytime,Create Flowchart,CFC,,_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml,bmad-bmm-create-excalidraw-flowchart,false,ux-designer,Create Mode,"Create a flowchart visualization in Excalidraw format for processes pipelines or logic flows - use anytime or during architecture to add process documentation",planning_artifacts,"flowchart",
bmm,anytime,Create Wireframe,CEW,,_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml,bmad-bmm-create-excalidraw-wireframe,false,ux-designer,Create Mode,"Create website or app wireframes in Excalidraw format - use anytime standalone or call from UX workflow to add UI mockups",planning_artifacts,"wireframe",
bmm,anytime,Write Document,WD,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-write-document,false,tech-writer,,"Describe in detail what you want, and the agent will follow the documentation best practices defined in agent memory. Multi-turn conversation with subprocess for research/review.",project-knowledge,"document",
bmm,anytime,Update Standards,US,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-update-standards,false,tech-writer,,"Update agent memory documentation-standards.md with your specific preferences if you discover missing document conventions.",_bmad/_memory/tech-writer-sidecar,"standards",
bmm,anytime,Mermaid Generate,MG,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-mermaid-generate,false,tech-writer,,"Create a Mermaid diagram based on user description. Will suggest diagram types if not specified.",planning_artifacts,"mermaid diagram",
bmm,anytime,Validate Document,VD,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-validate-document,false,tech-writer,,"Review the specified document against documentation standards and best practices. Returns specific actionable improvement suggestions organized by priority.",planning_artifacts,"validation report",
bmm,anytime,Explain Concept,EC,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,bmad-bmm-explain-concept,false,tech-writer,,"Create clear technical explanations with examples and diagrams for complex concepts. Breaks down into digestible sections using task-oriented approach.",project_knowledge,"explanation",
bmm,anytime,Write Document,WD,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,,false,tech-writer,,"Describe in detail what you want, and the agent will follow the documentation best practices defined in agent memory. Multi-turn conversation with subprocess for research/review.",project-knowledge,"document",
bmm,anytime,Update Standards,US,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,,false,tech-writer,,"Update agent memory documentation-standards.md with your specific preferences if you discover missing document conventions.",_bmad/_memory/tech-writer-sidecar,"standards",
bmm,anytime,Mermaid Generate,MG,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,,false,tech-writer,,"Create a Mermaid diagram based on user description. Will suggest diagram types if not specified.",planning_artifacts,"mermaid diagram",
bmm,anytime,Validate Document,VD,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,,false,tech-writer,,"Review the specified document against documentation standards and best practices. Returns specific actionable improvement suggestions organized by priority.",planning_artifacts,"validation report",
bmm,anytime,Explain Concept,EC,,_bmad/bmm/agents/tech-writer/tech-writer.agent.yaml,,false,tech-writer,,"Create clear technical explanations with examples and diagrams for complex concepts. Breaks down into digestible sections using task-oriented approach.",project_knowledge,"explanation",
bmm,1-analysis,Brainstorm Project,BP,10,_bmad/core/workflows/brainstorming/workflow.md,bmad-brainstorming,false,analyst,data=_bmad/bmm/data/project-context-template.md,"Expert Guided Facilitation through a single or multiple techniques",planning_artifacts,"brainstorming session",
bmm,1-analysis,Market Research,MR,20,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad-bmm-research,false,analyst,Create Mode research_type=market,"Market analysis competitive landscape customer needs and trends","planning_artifacts|project-knowledge","research documents",
bmm,1-analysis,Domain Research,DR,21,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad-bmm-research,false,analyst,Create Mode research_type=domain,"Industry domain deep dive subject matter expertise and terminology","planning_artifacts|project_knowledge","research documents",


View File

@ -47,5 +47,4 @@ input_file_patterns:
sharded_single: "{planning_artifacts}/*epic*/epic-{{epic_num}}.md"
load_strategy: "SELECTIVE_LOAD"
standalone: true
web_bundle: false

View File

@ -55,6 +55,4 @@ validation: "{installed_path}/checklist.md"
checklist: "{installed_path}/checklist.md"
default_output_file: "{planning_artifacts}/sprint-change-proposal-{date}.md"
standalone: true
web_bundle: false

View File

@ -56,6 +56,4 @@ input_file_patterns:
sharded: "{planning_artifacts}/*epic*/*.md"
load_strategy: "SELECTIVE_LOAD" # Only load needed epic
standalone: true
web_bundle: false

View File

@ -22,6 +22,4 @@ implementation_artifacts: "{config_source}:implementation_artifacts"
sprint_status: "{implementation_artifacts}/sprint-status.yaml"
project_context: "**/project-context.md"
standalone: true
web_bundle: false

View File

@ -54,5 +54,4 @@ sprint_status_file: "{implementation_artifacts}/sprint-status.yaml"
story_directory: "{implementation_artifacts}"
retrospectives_folder: "{implementation_artifacts}"
standalone: true
web_bundle: false

View File

@ -50,6 +50,4 @@ input_file_patterns:
# Output configuration
default_output_file: "{status_file}"
standalone: true
web_bundle: false

View File

@ -29,8 +29,5 @@ input_file_patterns:
whole: "{implementation_artifacts}/sprint-status.yaml"
load_strategy: "FULL_LOAD"
# Standalone so IDE commands get generated
standalone: true
# No web bundle needed
web_bundle: false

View File

@ -21,10 +21,4 @@ validation: "{installed_path}/checklist.md"
# Required data files - CRITICAL for project type detection and documentation requirements
documentation_requirements_csv: "{installed_path}/documentation-requirements.csv"
# Output configuration - Multiple files generated in output folder
# Primary output: {output_folder}/project-documentation/
# Additional files generated by sub-workflows based on project structure
standalone: true
web_bundle: false

View File

@ -1,90 +0,0 @@
{
"type": "excalidrawlib",
"version": 2,
"library": [
{
"id": "start-end-circle",
"status": "published",
"elements": [
{
"type": "ellipse",
"width": 120,
"height": 60,
"strokeColor": "#1976d2",
"backgroundColor": "#e3f2fd",
"fillStyle": "solid",
"strokeWidth": 2,
"roughness": 0
}
]
},
{
"id": "process-rectangle",
"status": "published",
"elements": [
{
"type": "rectangle",
"width": 160,
"height": 80,
"strokeColor": "#1976d2",
"backgroundColor": "#e3f2fd",
"fillStyle": "solid",
"strokeWidth": 2,
"roughness": 0,
"roundness": {
"type": 3,
"value": 8
}
}
]
},
{
"id": "decision-diamond",
"status": "published",
"elements": [
{
"type": "diamond",
"width": 140,
"height": 100,
"strokeColor": "#f57c00",
"backgroundColor": "#fff3e0",
"fillStyle": "solid",
"strokeWidth": 2,
"roughness": 0
}
]
},
{
"id": "data-store",
"status": "published",
"elements": [
{
"type": "rectangle",
"width": 140,
"height": 80,
"strokeColor": "#388e3c",
"backgroundColor": "#e8f5e9",
"fillStyle": "solid",
"strokeWidth": 2,
"roughness": 0
}
]
},
{
"id": "external-entity",
"status": "published",
"elements": [
{
"type": "rectangle",
"width": 120,
"height": 80,
"strokeColor": "#7b1fa2",
"backgroundColor": "#f3e5f5",
"fillStyle": "solid",
"strokeWidth": 3,
"roughness": 0
}
]
}
]
}

View File

@ -1,127 +0,0 @@
flowchart:
  viewport:
    x: 0
    y: 0
    zoom: 1
  grid:
    size: 20
  spacing:
    vertical: 100
    horizontal: 180
  elements:
    start:
      type: ellipse
      width: 120
      height: 60
      label: "Start"
    process:
      type: rectangle
      width: 160
      height: 80
      roundness: 8
    decision:
      type: diamond
      width: 140
      height: 100
    end:
      type: ellipse
      width: 120
      height: 60
      label: "End"
diagram:
  viewport:
    x: 0
    y: 0
    zoom: 1
  grid:
    size: 20
  spacing:
    vertical: 120
    horizontal: 200
  elements:
    component:
      type: rectangle
      width: 180
      height: 100
      roundness: 8
    database:
      type: rectangle
      width: 140
      height: 80
    service:
      type: rectangle
      width: 160
      height: 90
      roundness: 12
    external:
      type: rectangle
      width: 140
      height: 80
wireframe:
  viewport:
    x: 0
    y: 0
    zoom: 0.8
  grid:
    size: 20
  spacing:
    vertical: 40
    horizontal: 40
  elements:
    container:
      type: rectangle
      width: 800
      height: 600
      strokeStyle: solid
      strokeWidth: 2
    header:
      type: rectangle
      width: 800
      height: 80
    button:
      type: rectangle
      width: 120
      height: 40
      roundness: 4
    input:
      type: rectangle
      width: 300
      height: 40
      roundness: 4
    text:
      type: text
      fontSize: 16
dataflow:
  viewport:
    x: 0
    y: 0
    zoom: 1
  grid:
    size: 20
  spacing:
    vertical: 120
    horizontal: 200
  elements:
    process:
      type: ellipse
      width: 140
      height: 80
      label: "Process"
    datastore:
      type: rectangle
      width: 140
      height: 80
      label: "Data Store"
    external:
      type: rectangle
      width: 120
      height: 80
      strokeWidth: 3
      label: "External Entity"
    dataflow:
      type: arrow
      strokeWidth: 2
      label: "Data Flow"

View File

@ -1,39 +0,0 @@
# Create Data Flow Diagram - Validation Checklist
## DFD Notation
- [ ] Processes shown as circles/ellipses
- [ ] Data stores shown as parallel lines or rectangles
- [ ] External entities shown as rectangles
- [ ] Data flows shown as labeled arrows
- [ ] Follows standard DFD notation
## Structure
- [ ] All processes numbered correctly
- [ ] All data flows labeled with data names
- [ ] All data stores named appropriately
- [ ] External entities clearly identified
## Completeness
- [ ] All inputs and outputs accounted for
- [ ] No orphaned processes (unconnected)
- [ ] Data conservation maintained
- [ ] Level appropriate (context/level 0/level 1)
## Layout
- [ ] Logical flow direction (left to right, top to bottom)
- [ ] No crossing data flows where avoidable
- [ ] Balanced layout
- [ ] Grid alignment maintained
## Technical Quality
- [ ] All elements properly grouped
- [ ] Arrows have proper bindings
- [ ] Text readable and properly sized
- [ ] No elements with `isDeleted: true`
- [ ] JSON is valid
- [ ] File saved to correct location

View File

@ -1,130 +0,0 @@
# Create Data Flow Diagram - Workflow Instructions
```xml
<critical>The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml</critical>
<critical>You MUST have already loaded and processed: {installed_path}/workflow.yaml</critical>
<critical>This workflow creates data flow diagrams (DFD) in Excalidraw format.</critical>
<workflow>
<step n="0" goal="Contextual Analysis">
<action>Review user's request and extract: DFD level, processes, data stores, external entities</action>
<check if="ALL requirements clear"><action>Skip to Step 4</action></check>
</step>
<step n="1" goal="Identify DFD Level" elicit="true">
<action>Ask: "What level of DFD do you need?"</action>
<action>Present options:
1. Context Diagram (Level 0) - Single process showing system boundaries
2. Level 1 DFD - Major processes and data flows
3. Level 2 DFD - Detailed sub-processes
4. Custom - Specify your requirements
</action>
<action>WAIT for selection</action>
</step>
<step n="2" goal="Gather Requirements" elicit="true">
<action>Ask: "Describe the processes, data stores, and external entities in your system"</action>
<action>WAIT for user description</action>
<action>Summarize what will be included and confirm with user</action>
</step>
<step n="3" goal="Theme Setup" elicit="true">
<action>Check for existing theme.json, ask to use if exists</action>
<check if="no existing theme">
<action>Ask: "Choose a DFD color scheme:"</action>
<action>Present numbered options:
1. Standard DFD
- Process: #e3f2fd (light blue)
- Data Store: #e8f5e9 (light green)
- External Entity: #f3e5f5 (light purple)
- Border: #1976d2 (blue)
2. Colorful DFD
- Process: #fff9c4 (light yellow)
- Data Store: #c5e1a5 (light lime)
- External Entity: #ffccbc (light coral)
- Border: #f57c00 (orange)
3. Minimal DFD
- Process: #f5f5f5 (light gray)
- Data Store: #eeeeee (gray)
- External Entity: #e0e0e0 (medium gray)
- Border: #616161 (dark gray)
4. Custom - Define your own colors
</action>
<action>WAIT for selection</action>
<action>Create theme.json based on selection</action>
</check>
</step>
<step n="4" goal="Plan DFD Structure">
<action>List all processes with numbers (1.0, 2.0, etc.)</action>
<action>List all data stores (D1, D2, etc.)</action>
<action>List all external entities</action>
<action>Map all data flows with labels</action>
<action>Show planned structure, confirm with user</action>
</step>
<step n="5" goal="Load Resources">
<action>Load {{templates}} and extract `dataflow` section</action>
<action>Load {{library}}</action>
<action>Load theme.json</action>
<action>Load {{helpers}}</action>
</step>
<step n="6" goal="Build DFD Elements">
<critical>Follow standard DFD notation from {{helpers}}</critical>
<substep>Build Order:
1. External entities (rectangles, bold border)
2. Processes (circles/ellipses with numbers)
3. Data stores (parallel lines or rectangles)
4. Data flows (labeled arrows)
</substep>
<substep>DFD Rules:
- Processes: Numbered (1.0, 2.0), verb phrases
- Data stores: Named (D1, D2), noun phrases
- External entities: Named, noun phrases
- Data flows: Labeled with data names, arrows show direction
- No direct flow between external entities
- No direct flow between data stores
</substep>
<substep>Layout:
- External entities at edges
- Processes in center
- Data stores between processes
- Minimize crossing flows
- Left-to-right or top-to-bottom flow
</substep>
</step>
<step n="7" goal="Optimize and Save">
<action>Verify DFD rules compliance</action>
<action>Strip unused elements and elements with isDeleted: true</action>
<action>Save to {{default_output_file}}</action>
</step>
<step n="8" goal="Validate JSON Syntax">
<critical>NEVER delete the file if validation fails - always fix syntax errors</critical>
<action>Run: node -e "JSON.parse(require('fs').readFileSync('{{default_output_file}}', 'utf8')); console.log('✓ Valid JSON')"</action>
<check if="validation fails (exit code 1)">
<action>Read the error message carefully - it shows the syntax error and position</action>
<action>Open the file and navigate to the error location</action>
<action>Fix the syntax error (add missing comma, bracket, or quote as indicated)</action>
<action>Save the file</action>
<action>Re-run validation with the same command</action>
<action>Repeat until validation passes</action>
</check>
<action>Once validation passes, confirm with user</action>
</step>
<step n="9" goal="Validate Content">
<invoke-task>Validate against {{validation}}</invoke-task>
</step>
</workflow>
```

View File

@ -1,27 +0,0 @@
name: create-excalidraw-dataflow
description: "Create data flow diagrams (DFD) in Excalidraw format"
author: "BMad"
# Config values
config_source: "{project-root}/_bmad/bmm/config.yaml"
output_folder: "{config_source}:output_folder"
# Workflow components
installed_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow"
shared_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/_shared"
instructions: "{installed_path}/instructions.md"
validation: "{installed_path}/checklist.md"
# Core Excalidraw resources (universal knowledge)
helpers: "{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md"
json_validation: "{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md"
# Domain-specific resources (technical diagrams)
templates: "{shared_path}/excalidraw-templates.yaml"
library: "{shared_path}/excalidraw-library.json"
# Output file (respects user's configured output_folder)
default_output_file: "{output_folder}/excalidraw-diagrams/dataflow-{timestamp}.excalidraw"
standalone: true
web_bundle: false

View File

@ -1,43 +0,0 @@
# Create Diagram - Validation Checklist
## Element Structure
- [ ] All components with labels have matching `groupIds`
- [ ] All text elements have `containerId` pointing to parent component
- [ ] Text width calculated properly (no cutoff)
- [ ] Text alignment appropriate for diagram type
## Layout and Alignment
- [ ] All elements snapped to 20px grid
- [ ] Component spacing consistent (40px/60px)
- [ ] Hierarchical alignment maintained
- [ ] No overlapping elements
## Connections
- [ ] All arrows have `startBinding` and `endBinding`
- [ ] `boundElements` array updated on connected components
- [ ] Arrow routing avoids overlaps
- [ ] Relationship types clearly indicated
## Notation and Standards
- [ ] Follows specified notation standard (UML/ERD/etc)
- [ ] Symbols used correctly
- [ ] Cardinality/multiplicity shown where needed
- [ ] Labels and annotations clear
## Theme and Styling
- [ ] Theme colors applied consistently
- [ ] Component types visually distinguishable
- [ ] Text is readable
- [ ] Professional appearance
## Output Quality
- [ ] Element count under 80
- [ ] No elements with `isDeleted: true`
- [ ] JSON is valid
- [ ] File saved to correct location

View File

@ -1,141 +0,0 @@
# Create Diagram - Workflow Instructions
```xml
<critical>The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml</critical>
<critical>You MUST have already loaded and processed: {installed_path}/workflow.yaml</critical>
<critical>This workflow creates system architecture diagrams, ERDs, UML diagrams, or general technical diagrams in Excalidraw format.</critical>
<workflow>
<step n="0" goal="Contextual Analysis">
<action>Review user's request and extract: diagram type, components/entities, relationships, notation preferences</action>
<check if="ALL requirements clear"><action>Skip to Step 5</action></check>
<check if="SOME requirements clear"><action>Only ask about missing info in Steps 1-2</action></check>
</step>
<step n="1" goal="Identify Diagram Type" elicit="true">
<action>Ask: "What type of technical diagram do you need?"</action>
<action>Present options:
1. System Architecture
2. Entity-Relationship Diagram (ERD)
3. UML Class Diagram
4. UML Sequence Diagram
5. UML Use Case Diagram
6. Network Diagram
7. Other
</action>
<action>WAIT for selection</action>
</step>
<step n="2" goal="Gather Requirements" elicit="true">
<action>Ask: "Describe the components/entities and their relationships"</action>
<action>Ask: "What notation standard? (Standard/Simplified/Strict UML-ERD)"</action>
<action>WAIT for user input</action>
<action>Summarize what will be included and confirm with user</action>
</step>
<step n="3" goal="Check for Existing Theme" elicit="true">
<action>Check if theme.json exists at output location</action>
<check if="exists"><action>Ask to use it, load if yes, else proceed to Step 4</action></check>
<check if="not exists"><action>Proceed to Step 4</action></check>
</step>
<step n="4" goal="Create Theme" elicit="true">
<action>Ask: "Choose a color scheme for your diagram:"</action>
<action>Present numbered options:
1. Professional
- Component: #e3f2fd (light blue)
- Database: #e8f5e9 (light green)
- Service: #fff3e0 (light orange)
- Border: #1976d2 (blue)
2. Colorful
- Component: #e1bee7 (light purple)
- Database: #c5e1a5 (light lime)
- Service: #ffccbc (light coral)
- Border: #7b1fa2 (purple)
3. Minimal
- Component: #f5f5f5 (light gray)
- Database: #eeeeee (gray)
- Service: #e0e0e0 (medium gray)
- Border: #616161 (dark gray)
4. Custom - Define your own colors
</action>
<action>WAIT for selection</action>
<action>Create theme.json based on selection</action>
<action>Show preview and confirm</action>
</step>
<step n="5" goal="Plan Diagram Structure">
<action>List all components/entities</action>
<action>Map all relationships</action>
<action>Show planned layout</action>
<action>Ask: "Structure looks correct? (yes/no)"</action>
<check if="no"><action>Adjust and repeat</action></check>
</step>
<step n="6" goal="Load Resources">
<action>Load {{templates}} and extract `diagram` section</action>
<action>Load {{library}}</action>
<action>Load theme.json and merge with template</action>
<action>Load {{helpers}} for guidelines</action>
</step>
<step n="7" goal="Build Diagram Elements">
<critical>Follow {{helpers}} for proper element creation</critical>
<substep>For Each Component:
- Generate unique IDs (component-id, text-id, group-id)
- Create shape with groupIds
- Calculate text width
- Create text with containerId and matching groupIds
- Add boundElements
</substep>
<substep>For Each Connection:
- Determine arrow type (straight/elbow)
- Create with startBinding and endBinding
- Update boundElements on both components
</substep>
<substep>Build Order by Type:
- Architecture: Services → Databases → Connections → Labels
- ERD: Entities → Attributes → Relationships → Cardinality
- UML Class: Classes → Attributes → Methods → Relationships
- UML Sequence: Actors → Lifelines → Messages → Returns
- UML Use Case: Actors → Use Cases → Relationships
</substep>
<substep>Alignment:
- Snap to 20px grid
- Space: 40px between components, 60px between sections
</substep>
</step>
<step n="8" goal="Optimize and Save">
<action>Strip unused elements and elements with isDeleted: true</action>
<action>Save to {{default_output_file}}</action>
</step>
<step n="9" goal="Validate JSON Syntax">
<critical>NEVER delete the file if validation fails - always fix syntax errors</critical>
<action>Run: node -e "JSON.parse(require('fs').readFileSync('{{default_output_file}}', 'utf8')); console.log('✓ Valid JSON')"</action>
<check if="validation fails (exit code 1)">
<action>Read the error message carefully - it shows the syntax error and position</action>
<action>Open the file and navigate to the error location</action>
<action>Fix the syntax error (add missing comma, bracket, or quote as indicated)</action>
<action>Save the file</action>
<action>Re-run validation with the same command</action>
<action>Repeat until validation passes</action>
</check>
<action>Once validation passes, confirm: "Diagram created at {{default_output_file}}. Open to view?"</action>
</step>
<step n="10" goal="Validate Content">
<invoke-task>Validate against {{validation}} using {_bmad}/core/tasks/validate-workflow.xml</invoke-task>
</step>
</workflow>
```

View File

@ -1,27 +0,0 @@
name: create-excalidraw-diagram
description: "Create system architecture diagrams, ERDs, UML diagrams, or general technical diagrams in Excalidraw format"
author: "BMad"
# Config values
config_source: "{project-root}/_bmad/bmm/config.yaml"
output_folder: "{config_source}:output_folder"
# Workflow components
installed_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/create-diagram"
shared_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/_shared"
instructions: "{installed_path}/instructions.md"
validation: "{installed_path}/checklist.md"
# Core Excalidraw resources (universal knowledge)
helpers: "{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md"
json_validation: "{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md"
# Domain-specific resources (technical diagrams)
templates: "{shared_path}/excalidraw-templates.yaml"
library: "{shared_path}/excalidraw-library.json"
# Output file (respects user's configured output_folder)
default_output_file: "{output_folder}/excalidraw-diagrams/diagram-{timestamp}.excalidraw"
standalone: true
web_bundle: false

View File

@ -1,49 +0,0 @@
# Create Flowchart - Validation Checklist
## Element Structure
- [ ] All shapes with labels have matching `groupIds`
- [ ] All text elements have `containerId` pointing to parent shape
- [ ] Text width calculated properly (no cutoff)
- [ ] Text alignment set (`textAlign` + `verticalAlign`)
## Layout and Alignment
- [ ] All elements snapped to 20px grid
- [ ] Consistent spacing between elements (60px minimum)
- [ ] Vertical alignment maintained for flow direction
- [ ] No overlapping elements
## Connections
- [ ] All arrows have `startBinding` and `endBinding`
- [ ] `boundElements` array updated on connected shapes
- [ ] Arrow types appropriate (straight for forward, elbow for backward/upward)
- [ ] Gap set to 10 for all bindings
## Theme and Styling
- [ ] Theme colors applied consistently
- [ ] All shapes use theme primary fill color
- [ ] All borders use theme accent color
- [ ] Text color is readable (#1e1e1e)
## Composition
- [ ] Element count under 50
- [ ] Library components referenced where possible
- [ ] No duplicate element definitions
## Output Quality
- [ ] No elements with `isDeleted: true`
- [ ] JSON is valid
- [ ] File saved to correct location
## Functional Requirements
- [ ] Start point clearly marked
- [ ] End point clearly marked
- [ ] All process steps labeled
- [ ] Decision points use diamond shapes
- [ ] Flow direction is clear and logical

View File

@ -1,241 +0,0 @@
# Create Flowchart - Workflow Instructions
```xml
<critical>The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml</critical>
<critical>You MUST have already loaded and processed: {installed_path}/workflow.yaml</critical>
<critical>This workflow creates a flowchart visualization in Excalidraw format for processes, pipelines, or logic flows.</critical>
<workflow>
<step n="0" goal="Contextual Analysis (Smart Elicitation)">
<critical>Before asking any questions, analyze what the user has already told you</critical>
<action>Review the user's initial request and conversation history</action>
<action>Extract any mentioned: flowchart type, complexity, decision points, save location</action>
<check if="ALL requirements are clear from context">
<action>Summarize your understanding</action>
<action>Skip directly to Step 4 (Plan Flowchart Layout)</action>
</check>
<check if="SOME requirements are clear">
<action>Note what you already know</action>
<action>Only ask about missing information in Step 1</action>
</check>
<check if="requirements are unclear or minimal">
<action>Proceed with full elicitation in Step 1</action>
</check>
</step>
<step n="1" goal="Gather Requirements" elicit="true">
<action>Ask Question 1: "What type of process flow do you need to visualize?"</action>
<action>Present numbered options:
1. Business Process Flow - Document business workflows, approval processes, or operational procedures
2. Algorithm/Logic Flow - Visualize code logic, decision trees, or computational processes
3. User Journey Flow - Map user interactions, navigation paths, or experience flows
4. Data Processing Pipeline - Show data transformation, ETL processes, or processing stages
5. Other - Describe your specific flowchart needs
</action>
<action>WAIT for user selection (1-5)</action>
<action>Ask Question 2: "How many main steps are in this flow?"</action>
<action>Present numbered options:
1. Simple (3-5 steps) - Quick process with few decision points
2. Medium (6-10 steps) - Standard workflow with some branching
3. Complex (11-20 steps) - Detailed process with multiple decision points
4. Very Complex (20+ steps) - Comprehensive workflow requiring careful layout
</action>
<action>WAIT for user selection (1-4)</action>
<action>Store selection in {{complexity}}</action>
<action>Ask Question 3: "Does your flow include decision points (yes/no branches)?"</action>
<action>Present numbered options:
1. No decisions - Linear flow from start to end
2. Few decisions (1-2) - Simple branching with yes/no paths
3. Multiple decisions (3-5) - Several conditional branches
4. Complex decisions (6+) - Extensive branching logic
</action>
<action>WAIT for user selection (1-4)</action>
<action>Store selection in {{decision_points}}</action>
<action>Ask Question 4: "Where should the flowchart be saved?"</action>
<action>Present numbered options:
1. Default location - docs/flowcharts/[auto-generated-name].excalidraw
2. Custom path - Specify your own file path
3. Project root - Save in main project directory
4. Specific folder - Choose from existing folders
</action>
<action>WAIT for user selection (1-4)</action>
<check if="selection is 2 or 4">
<action>Ask for specific path</action>
<action>WAIT for user input</action>
</check>
<action>Store final path in {{default_output_file}}</action>
</step>
<step n="2" goal="Check for Existing Theme" elicit="true">
<action>Check if theme.json exists at output location</action>
<check if="theme.json exists">
<action>Ask: "Found existing theme. Use it? (yes/no)"</action>
<action>WAIT for user response</action>
<check if="user says yes">
<action>Load and use existing theme</action>
<action>Skip to Step 4</action>
</check>
<check if="user says no">
<action>Proceed to Step 3</action>
</check>
</check>
<check if="theme.json does not exist">
<action>Proceed to Step 3</action>
</check>
</step>
<step n="3" goal="Create Theme" elicit="true">
<action>Ask: "Let's create a theme for your flowchart. Choose a color scheme:"</action>
<action>Present numbered options:
1. Professional Blue
- Primary Fill: #e3f2fd (light blue)
- Accent/Border: #1976d2 (blue)
- Decision: #fff3e0 (light orange)
- Text: #1e1e1e (dark gray)
2. Success Green
- Primary Fill: #e8f5e9 (light green)
- Accent/Border: #388e3c (green)
- Decision: #fff9c4 (light yellow)
- Text: #1e1e1e (dark gray)
3. Neutral Gray
- Primary Fill: #f5f5f5 (light gray)
- Accent/Border: #616161 (gray)
- Decision: #e0e0e0 (medium gray)
- Text: #1e1e1e (dark gray)
4. Warm Orange
- Primary Fill: #fff3e0 (light orange)
- Accent/Border: #f57c00 (orange)
- Decision: #ffe0b2 (peach)
- Text: #1e1e1e (dark gray)
5. Custom Colors - Define your own color palette
</action>
<action>WAIT for user selection (1-5)</action>
<action>Store selection in {{theme_choice}}</action>
<check if="selection is 5 (Custom)">
<action>Ask: "Primary fill color (hex code)?"</action>
<action>WAIT for user input</action>
<action>Store in {{custom_colors.primary_fill}}</action>
<action>Ask: "Accent/border color (hex code)?"</action>
<action>WAIT for user input</action>
<action>Store in {{custom_colors.accent}}</action>
<action>Ask: "Decision color (hex code)?"</action>
<action>WAIT for user input</action>
<action>Store in {{custom_colors.decision}}</action>
</check>
<action>Create theme.json with selected colors</action>
<action>Show theme preview with all colors</action>
<action>Ask: "Theme looks good?"</action>
<action>Present numbered options:
1. Yes, use this theme - Proceed with theme
2. No, adjust colors - Modify color selections
3. Start over - Choose different preset
</action>
<action>WAIT for selection (1-3)</action>
<check if="selection is 2 or 3">
<action>Repeat Step 3</action>
</check>
</step>
<step n="4" goal="Plan Flowchart Layout">
<action>List all steps and decision points based on gathered requirements</action>
<action>Show user the planned structure</action>
<action>Ask: "Structure looks correct? (yes/no)"</action>
<action>WAIT for user response</action>
<check if="user says no">
<action>Adjust structure based on feedback</action>
<action>Repeat this step</action>
</check>
</step>
<step n="5" goal="Load Template and Resources">
<action>Load {{templates}} file</action>
<action>Extract `flowchart` section from YAML</action>
<action>Load {{library}} file</action>
<action>Load theme.json and merge colors with template</action>
<action>Load {{helpers}} for element creation guidelines</action>
</step>
<step n="6" goal="Build Flowchart Elements">
<critical>Follow guidelines from {{helpers}} for proper element creation</critical>
<action>Build ONE section at a time following these rules:</action>
<substep>For Each Shape with Label:
1. Generate unique IDs (shape-id, text-id, group-id)
2. Create shape with groupIds: [group-id]
3. Calculate text width: (text.length × fontSize × 0.6) + 20, round to nearest 10
4. Create text element with:
- containerId: shape-id
- groupIds: [group-id] (SAME as shape)
- textAlign: "center"
- verticalAlign: "middle"
- width: calculated width
5. Add boundElements to shape referencing text
</substep>
<substep>For Each Arrow:
1. Determine arrow type needed:
- Straight: For forward flow (left-to-right, top-to-bottom)
- Elbow: For upward flow, backward flow, or complex routing
2. Create arrow with startBinding and endBinding
3. Set startBinding.elementId to source shape ID
4. Set endBinding.elementId to target shape ID
5. Set gap: 10 for both bindings
6. If elbow arrow, add intermediate points for direction changes
7. Update boundElements on both connected shapes
</substep>
<substep>Alignment:
- Snap all x, y to 20px grid
- Align shapes vertically (same x for vertical flow)
- Space elements: 60px between shapes
</substep>
<substep>Build Order:
1. Start point (circle) with label
2. Each process step (rectangle) with label
3. Each decision point (diamond) with label
4. End point (circle) with label
5. Connect all with bound arrows
</substep>
</step>
<step n="7" goal="Optimize and Save">
<action>Strip unused elements and elements with isDeleted: true</action>
<action>Save to {{default_output_file}}</action>
</step>
<step n="8" goal="Validate JSON Syntax">
<critical>NEVER delete the file if validation fails - always fix syntax errors</critical>
<action>Run: node -e "JSON.parse(require('fs').readFileSync('{{default_output_file}}', 'utf8')); console.log('✓ Valid JSON')"</action>
<check if="validation fails (exit code 1)">
<action>Read the error message carefully - it shows the syntax error and position</action>
<action>Open the file and navigate to the error location</action>
<action>Fix the syntax error (add missing comma, bracket, or quote as indicated)</action>
<action>Save the file</action>
<action>Re-run validation with the same command</action>
<action>Repeat until validation passes</action>
</check>
<action>Once validation passes, confirm with user: "Flowchart created at {{default_output_file}}. Open to view?"</action>
</step>
<step n="9" goal="Validate Content">
<invoke-task>Validate against checklist at {{validation}} using {_bmad}/core/tasks/validate-workflow.xml</invoke-task>
</step>
</workflow>
```

View File

@ -1,27 +0,0 @@
name: create-excalidraw-flowchart
description: "Create a flowchart visualization in Excalidraw format for processes, pipelines, or logic flows"
author: "BMad"
# Config values
config_source: "{project-root}/_bmad/bmm/config.yaml"
output_folder: "{config_source}:output_folder"
# Workflow components
installed_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart"
shared_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/_shared"
instructions: "{installed_path}/instructions.md"
validation: "{installed_path}/checklist.md"
# Core Excalidraw resources (universal knowledge)
helpers: "{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md"
json_validation: "{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md"
# Domain-specific resources (technical diagrams)
templates: "{shared_path}/excalidraw-templates.yaml"
library: "{shared_path}/excalidraw-library.json"
# Output file (respects user's configured output_folder)
default_output_file: "{output_folder}/excalidraw-diagrams/flowchart-{timestamp}.excalidraw"
standalone: true
web_bundle: false

View File

@ -1,38 +0,0 @@
# Create Wireframe - Validation Checklist
## Layout Structure
- [ ] Screen dimensions appropriate for device type
- [ ] Grid alignment (20px) maintained
- [ ] Consistent spacing between UI elements
- [ ] Proper hierarchy (header, content, footer)
## UI Elements
- [ ] All interactive elements clearly marked
- [ ] Buttons, inputs, and controls properly sized
- [ ] Text labels readable and appropriately sized
- [ ] Navigation elements clearly indicated
## Fidelity
- [ ] Matches requested fidelity level (low/medium/high)
- [ ] Appropriate level of detail
- [ ] Placeholder content used where needed
- [ ] No unnecessary decoration for low-fidelity
## Annotations
- [ ] Key interactions annotated
- [ ] Flow indicators present if multi-screen
- [ ] Important notes included
- [ ] Element purposes clear
## Technical Quality
- [ ] All elements properly grouped
- [ ] Text elements have containerId
- [ ] Snapped to grid
- [ ] No elements with `isDeleted: true`
- [ ] JSON is valid
- [ ] File saved to correct location

View File

@ -1,133 +0,0 @@
# Create Wireframe - Workflow Instructions
```xml
<critical>The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml</critical>
<critical>You MUST have already loaded and processed: {installed_path}/workflow.yaml</critical>
<critical>This workflow creates website or app wireframes in Excalidraw format.</critical>
<workflow>
<step n="0" goal="Contextual Analysis">
<action>Review user's request and extract: wireframe type, fidelity level, screen count, device type, save location</action>
<check if="ALL requirements clear"><action>Skip to Step 5</action></check>
</step>
<step n="1" goal="Identify Wireframe Type" elicit="true">
<action>Ask: "What type of wireframe do you need?"</action>
<action>Present options:
1. Website (Desktop)
2. Mobile App (iOS/Android)
3. Web App (Responsive)
4. Tablet App
5. Multi-platform
</action>
<action>WAIT for selection</action>
</step>
<step n="2" goal="Gather Requirements" elicit="true">
<action>Ask fidelity level (Low/Medium/High)</action>
<action>Ask screen count (Single/Few 2-3/Multiple 4-6/Many 7+)</action>
<action>Ask device dimensions or use standard</action>
<action>Ask save location</action>
</step>
<step n="3" goal="Check Theme" elicit="true">
<action>Check for existing theme.json, ask to use if exists</action>
</step>
<step n="4" goal="Create Theme" elicit="true">
<action>Ask: "Choose a wireframe style:"</action>
<action>Present numbered options:
1. Classic Wireframe
- Background: #ffffff (white)
- Container: #f5f5f5 (light gray)
- Border: #9e9e9e (gray)
- Text: #424242 (dark gray)
2. High Contrast
- Background: #ffffff (white)
- Container: #eeeeee (light gray)
- Border: #212121 (black)
- Text: #000000 (black)
3. Blueprint Style
- Background: #1a237e (dark blue)
- Container: #3949ab (blue)
- Border: #7986cb (light blue)
- Text: #ffffff (white)
4. Custom - Define your own colors
</action>
<action>WAIT for selection</action>
<action>Create theme.json based on selection</action>
<action>Confirm with user</action>
</step>
<step n="5" goal="Plan Wireframe Structure">
<action>List all screens and their purposes</action>
<action>Map navigation flow between screens</action>
<action>Identify key UI elements for each screen</action>
<action>Show planned structure, confirm with user</action>
</step>
<step n="6" goal="Load Resources">
<action>Load {{templates}} and extract `wireframe` section</action>
<action>Load {{library}}</action>
<action>Load theme.json</action>
<action>Load {{helpers}}</action>
</step>
<step n="7" goal="Build Wireframe Elements">
<critical>Follow {{helpers}} for proper element creation</critical>
<substep>For Each Screen:
- Create container/frame
- Add header section
- Add content areas
- Add navigation elements
- Add interactive elements (buttons, inputs)
- Add labels and annotations
</substep>
<substep>Build Order:
1. Screen containers
2. Layout sections (header, content, footer)
3. Navigation elements
4. Content blocks
5. Interactive elements
6. Labels and annotations
7. Flow indicators (if multi-screen)
</substep>
<substep>Fidelity Guidelines:
- Low: Basic shapes, minimal detail, placeholder text
- Medium: More defined elements, some styling, representative content
- High: Detailed elements, realistic sizing, actual content examples
</substep>
</step>
<step n="8" goal="Optimize and Save">
<action>Strip unused elements and elements with isDeleted: true</action>
<action>Save to {{default_output_file}}</action>
</step>
<step n="9" goal="Validate JSON Syntax">
<critical>NEVER delete the file if validation fails - always fix syntax errors</critical>
<action>Run: node -e "JSON.parse(require('fs').readFileSync('{{default_output_file}}', 'utf8')); console.log('✓ Valid JSON')"</action>
<check if="validation fails (exit code 1)">
<action>Read the error message carefully - it shows the syntax error and position</action>
<action>Open the file and navigate to the error location</action>
<action>Fix the syntax error (add missing comma, bracket, or quote as indicated)</action>
<action>Save the file</action>
<action>Re-run validation with the same command</action>
<action>Repeat until validation passes</action>
</check>
<action>Once validation passes, confirm with user</action>
</step>
<step n="10" goal="Validate Content">
<invoke-task>Validate against {{validation}}</invoke-task>
</step>
</workflow>
```

View File

@ -1,27 +0,0 @@
name: create-excalidraw-wireframe
description: "Create website or app wireframes in Excalidraw format"
author: "BMad"
# Config values
config_source: "{project-root}/_bmad/bmm/config.yaml"
output_folder: "{config_source}:output_folder"
# Workflow components
installed_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe"
shared_path: "{project-root}/_bmad/bmm/workflows/excalidraw-diagrams/_shared"
instructions: "{installed_path}/instructions.md"
validation: "{installed_path}/checklist.md"
# Core Excalidraw resources (universal knowledge)
helpers: "{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md"
json_validation: "{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md"
# Domain-specific resources (technical diagrams)
templates: "{shared_path}/excalidraw-templates.yaml"
library: "{shared_path}/excalidraw-library.json"
# Output file (respects user's configured output_folder)
default_output_file: "{output_folder}/excalidraw-diagrams/wireframe-{timestamp}.excalidraw"
standalone: true
web_bundle: false

View File

@ -1,160 +0,0 @@
# Core Excalidraw Resources
Universal knowledge for creating Excalidraw diagrams. All agents that create Excalidraw files should reference these resources.
## Purpose
Provides the **HOW** (universal knowledge) while agents provide the **WHAT** (domain-specific application).
**Core = "How to create Excalidraw elements"**
- How to group shapes with text labels
- How to calculate text width
- How to create arrows with proper bindings
- How to validate JSON syntax
- Base structure and primitives
**Agents = "What diagrams to create"**
- Frame Expert (BMM): Technical flowcharts, architecture diagrams, wireframes
- Presentation Master (CIS): Pitch decks, creative visuals, Rube Goldberg machines
- Tech Writer (BMM): Documentation diagrams, concept explanations
## Files in This Directory
### excalidraw-helpers.md
**Universal element creation patterns**
- Text width calculation
- Element grouping rules (shapes + labels)
- Grid alignment
- Arrow creation (straight, elbow)
- Theme application
- Validation checklist
- Optimization rules
**Agents reference this to:**
- Create properly grouped shapes
- Calculate text dimensions
- Connect elements with arrows
- Ensure valid structure
### validate-json-instructions.md
**Universal JSON validation process**
- How to validate Excalidraw JSON
- Common errors and fixes
- Workflow integration
- Error recovery
**Agents reference this to:**
- Validate files after creation
- Fix syntax errors
- Ensure files can be opened in Excalidraw
### library-loader.md (Future)
**How to load external .excalidrawlib files**
- Programmatic library loading
- Community library integration
- Custom library management
**Status:** To be developed when implementing external library support.
## How Agents Use These Resources
### Example: Frame Expert (Technical Diagrams)
```yaml
# workflows/excalidraw-diagrams/create-flowchart/workflow.yaml
helpers: '{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md'
json_validation: '{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md'
```
**Domain-specific additions:**
```yaml
# workflows/excalidraw-diagrams/_shared/flowchart-templates.yaml
flowchart:
start_node:
type: ellipse
width: 120
height: 60
process_box:
type: rectangle
width: 160
height: 80
decision_diamond:
type: diamond
width: 140
height: 100
```
### Example: Presentation Master (Creative Visuals)
```yaml
# workflows/create-visual-metaphor/workflow.yaml
helpers: '{project-root}/_bmad/core/resources/excalidraw/excalidraw-helpers.md'
json_validation: '{project-root}/_bmad/core/resources/excalidraw/validate-json-instructions.md'
```
**Domain-specific additions:**
```yaml
# workflows/_shared/creative-templates.yaml
rube_goldberg:
whimsical_connector:
type: arrow
strokeStyle: dashed
roughness: 2
playful_box:
type: rectangle
roundness: 12
```
## What Doesn't Belong in Core
**Domain-Specific Elements:**
- Flowchart-specific templates (belongs in Frame Expert)
- Pitch deck layouts (belongs in Presentation Master)
- Documentation-specific styles (belongs in Tech Writer)
**Agent Workflows:**
- How to create a flowchart (Frame Expert workflow)
- How to create a pitch deck (Presentation Master workflow)
- Step-by-step diagram creation (agent-specific)
**Theming:**
- Currently in agent workflows
- **Future:** Will be refactored to core as user-configurable themes
## Architecture Principle
**Single Source of Truth:**
- Core holds universal knowledge
- Agents reference core, don't duplicate
- Updates to core benefit all agents
- Agents specialize with domain knowledge
**DRY (Don't Repeat Yourself):**
- Element creation logic: ONCE in core
- Text width calculation: ONCE in core
- Validation process: ONCE in core
- Arrow binding patterns: ONCE in core
## Future Enhancements
1. **External Library Loader** - Load .excalidrawlib files from libraries.excalidraw.com
2. **Theme Management** - User-configurable color themes saved in core
3. **Component Library** - Shared reusable components across agents
4. **Layout Algorithms** - Auto-layout helpers for positioning elements

View File

@ -1,127 +0,0 @@
# Excalidraw Element Creation Guidelines
## Text Width Calculation
For text elements inside shapes (labels):
```
text_width = (text.length × fontSize × 0.6) + 20
```
Round to nearest 10 for grid alignment.
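A minimal sketch of the calculation as a reusable helper (the function name is illustrative, not part of any shipped API):
```javascript
// Approximate the pixel width of a shape label, then round to the
// nearest 10 so the result stays friendly to the 20px layout grid.
function calcTextWidth(text, fontSize = 16) {
  const raw = text.length * fontSize * 0.6 + 20;
  return Math.round(raw / 10) * 10;
}

calcTextWidth("Process Order"); // 140 at the default fontSize of 16
```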
## Element Grouping Rules
**CRITICAL:** When creating shapes with labels:
1. Generate unique IDs:
   - `shape-id` for the shape
   - `text-id` for the text
   - `group-id` for the group
2. Shape element must have:
   - `groupIds: [group-id]`
   - `boundElements: [{type: "text", id: text-id}]`
3. Text element must have:
   - `containerId: shape-id`
   - `groupIds: [group-id]` (SAME as shape)
   - `textAlign: "center"`
   - `verticalAlign: "middle"`
   - `width: calculated_width`
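Taken together, those rules yield element pairs like this minimal sketch (IDs, coordinates, and text are illustrative):
```javascript
// A rectangle and its label, grouped so they move as one unit.
const shape = {
  id: "shape-1",
  type: "rectangle",
  x: 100, y: 100, width: 160, height: 80,
  groupIds: ["group-1"],
  boundElements: [{ type: "text", id: "text-1" }],
};

const label = {
  id: "text-1",
  type: "text",
  text: "Validate Input",
  fontSize: 16,
  containerId: "shape-1",
  groupIds: ["group-1"], // SAME group as the shape
  textAlign: "center",
  verticalAlign: "middle",
  width: 150, // from the text width formula above
};
```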
## Grid Alignment
- Snap all `x`, `y` coordinates to 20px grid
- Formula: `Math.round(value / 20) * 20`
- Spacing between elements: 60px minimum
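A tiny snap helper, as a sketch:
```javascript
// Snap a coordinate to the 20px grid.
const snapToGrid = (value, grid = 20) => Math.round(value / grid) * grid;

snapToGrid(247); // 240
snapToGrid(252); // 260
```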
## Arrow Creation
### Straight Arrows
Use for forward flow (left-to-right, top-to-bottom):
```json
{
"type": "arrow",
"startBinding": {
"elementId": "source-shape-id",
"focus": 0,
"gap": 10
},
"endBinding": {
"elementId": "target-shape-id",
"focus": 0,
"gap": 10
},
"points": [[0, 0], [distance_x, distance_y]]
}
```
### Elbow Arrows
Use for upward flow, backward flow, or complex routing:
```json
{
"type": "arrow",
"startBinding": {...},
"endBinding": {...},
"points": [
[0, 0],
[intermediate_x, 0],
[intermediate_x, intermediate_y],
[final_x, final_y]
],
"elbowed": true
}
```
### Update Connected Shapes
After creating arrow, update `boundElements` on both connected shapes:
```json
{
"id": "shape-id",
"boundElements": [
{ "type": "text", "id": "text-id" },
{ "type": "arrow", "id": "arrow-id" }
]
}
```
## Theme Application
Theme colors should be applied consistently:
- **Shapes**: `backgroundColor` from theme primary fill
- **Borders**: `strokeColor` from theme accent
- **Text**: `strokeColor` = "#1e1e1e" (dark text)
- **Arrows**: `strokeColor` from theme accent
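As a sketch, a theme could be applied per element type like this (the `accent` and `primaryFill` field names are assumptions about theme.json, not a documented schema):
```javascript
// Apply theme colors by element type; text stays dark for readability.
function applyTheme(element, theme) {
  if (element.type === "text") {
    element.strokeColor = "#1e1e1e";
  } else if (element.type === "arrow") {
    element.strokeColor = theme.accent;
  } else {
    element.backgroundColor = theme.primaryFill; // shape fill
    element.strokeColor = theme.accent; // border
  }
  return element;
}
```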
## Validation Checklist
Before saving, verify:
- [ ] All shapes with labels have matching `groupIds`
- [ ] All text elements have `containerId` pointing to parent shape
- [ ] Text width calculated properly (no cutoff)
- [ ] Text alignment set (`textAlign` + `verticalAlign`)
- [ ] All elements snapped to 20px grid
- [ ] All arrows have `startBinding` and `endBinding`
- [ ] `boundElements` array updated on connected shapes
- [ ] Theme colors applied consistently
- [ ] No metadata or history in final output
- [ ] All IDs are unique
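Some of these checks are mechanical enough to script. A partial sketch covering only the grouping, binding, and ID invariants (`checkInvariants` is an illustrative name):
```javascript
// Check grouping/binding invariants from the checklist above.
// Returns a list of human-readable problems (empty when clean).
function checkInvariants(elements) {
  const problems = [];
  const byId = new Map(elements.map((el) => [el.id, el]));

  const ids = elements.map((el) => el.id);
  if (new Set(ids).size !== ids.length) problems.push('duplicate element IDs');

  for (const el of elements) {
    if (el.type === 'text' && el.containerId) {
      const container = byId.get(el.containerId);
      if (!container) {
        problems.push(`text ${el.id}: containerId points at a missing element`);
      } else if (String(container.groupIds) !== String(el.groupIds)) {
        // crude array comparison, sufficient for arrays of string IDs
        problems.push(`text ${el.id}: groupIds do not match its container`);
      }
    }
    if (el.type === 'arrow' && (!el.startBinding || !el.endBinding)) {
      problems.push(`arrow ${el.id}: missing startBinding/endBinding`);
    }
  }
  return problems;
}
```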
## Optimization
Remove from final output (a cleanup sketch follows this list):
- `appState` object
- `files` object (unless images used)
- All elements with `isDeleted: true`
- Unused library items
- Version history
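A sketch of that cleanup pass, assuming a parsed scene object with an `elements` array (`stripForOutput` is an illustrative name):
```javascript
// Drop editor-only state and deleted elements before writing the final file.
function stripForOutput(scene) {
  const { appState, files, ...rest } = scene; // discard appState entirely
  return {
    ...rest,
    // keep files only when image elements actually reference them
    ...(scene.elements.some((el) => el.type === 'image') ? { files } : {}),
    elements: scene.elements.filter((el) => !el.isDeleted),
  };
}
```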

View File

@@ -1,50 +0,0 @@
# External Library Loader
**Status:** Placeholder for future implementation
## Purpose
Load external .excalidrawlib files from <https://libraries.excalidraw.com> or custom sources.
## Planned Capabilities
- Load libraries by URL
- Load libraries from local files
- Merge multiple libraries
- Filter library components
- Cache loaded libraries
## API Reference
Will document how to use:
- `importLibrary(url)` - Load library from URL
- `loadSceneOrLibraryFromBlob()` - Load from file
- `mergeLibraryItems()` - Combine libraries
## Usage Example
```yaml
# Future workflow.yaml structure
libraries:
- url: 'https://libraries.excalidraw.com/libraries/...'
filter: ['aws', 'cloud']
- path: '{project-root}/_data/custom-library.excalidrawlib'
```
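The core of such a loader could stay small. A sketch, assuming the published `.excalidrawlib` format (a JSON document with a `libraryItems` array) and Node 18+ for global `fetch`; `mergeLibraries` is an illustrative name for the planned capability:
```javascript
// Fetch one or more .excalidrawlib files and merge their items into one library.
async function mergeLibraries(urls) {
  const items = [];
  for (const url of urls) {
    const res = await fetch(url);
    if (!res.ok) throw new Error(`Failed to load library: ${url} (${res.status})`);
    const lib = await res.json();
    items.push(...(lib.libraryItems || []));
  }
  return { type: 'excalidrawlib', version: 2, libraryItems: items };
}
```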
## Implementation Notes
This will be developed when agents need to leverage the extensive library ecosystem available at <https://libraries.excalidraw.com>.
Hundreds of pre-built component libraries exist for:
- AWS/Cloud icons
- UI/UX components
- Business diagrams
- Mind map shapes
- Floor plans
- And much more...
## User Configuration
Future: Users will be able to configure favorite libraries in their BMAD config for automatic loading.

View File

@@ -1,79 +0,0 @@
# JSON Validation Instructions
## Purpose
Validate Excalidraw JSON files after saving to catch syntax errors (missing commas, brackets, quotes).
## How to Validate
Use Node.js built-in JSON parsing to validate the file:
```bash
node -e "JSON.parse(require('fs').readFileSync('FILE_PATH', 'utf8')); console.log('✓ Valid JSON')"
```
Replace `FILE_PATH` with the actual file path.
## Exit Codes
- Exit code 0 = Valid JSON
- Exit code 1 = Invalid JSON (syntax error)
## Error Output
If invalid, Node.js will output:
- Error message with description
- Position in file where error occurred
- Line and column information (if available)
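When only the absolute character offset is reported (e.g. `... at position 1234`), a small helper can convert it into a line and column (`locateJsonError` is an illustrative name):
```javascript
const fs = require('node:fs');

// Re-parse the file and turn "... at position N" into a 1-based line/column.
function locateJsonError(filePath) {
  const text = fs.readFileSync(filePath, 'utf8');
  try {
    JSON.parse(text);
    return null; // valid JSON
  } catch (error) {
    const match = /position (\d+)/.exec(error.message);
    if (!match) return { message: error.message };
    const pos = Number(match[1]);
    const before = text.slice(0, pos);
    const line = before.split('\n').length;
    const column = pos - before.lastIndexOf('\n');
    return { message: error.message, line, column };
  }
}
```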
## Common Errors and Fixes
### Missing Comma
```
SyntaxError: Expected ',' or '}' after property value
```
**Fix:** Add comma after the property value
### Missing Bracket/Brace
```
SyntaxError: Unexpected end of JSON input
```
**Fix:** Add missing closing bracket `]` or brace `}`
### Extra Comma (Trailing)
```
SyntaxError: Unexpected token ,
```
**Fix:** Remove the trailing comma before `]` or `}`
### Missing Quote
```
SyntaxError: Unexpected token
```
**Fix:** Add missing quote around string value
## Workflow Integration
After saving an Excalidraw file, run validation:
1. Save the file
2. Run: `node -e "JSON.parse(require('fs').readFileSync('{{save_location}}', 'utf8')); console.log('✓ Valid JSON')"`
3. If validation fails:
- Read the error message for line/position
- Open the file at that location
- Fix the syntax error
- Save and re-validate
4. Repeat until validation passes
## Critical Rule
**NEVER delete the file due to validation errors - always fix the syntax error at the reported location.**

View File

@@ -1,7 +1,6 @@
<task id="_bmad/core/tasks/editorial-review-prose.xml"
name="Editorial Review - Prose"
description="Clinical copy-editor that reviews text for communication issues"
standalone="true">
description="Clinical copy-editor that reviews text for communication issues">
<objective>Review text for communication issues that impede comprehension and output suggested fixes in a three-column table</objective>
@@ -10,7 +9,7 @@
<input name="style_guide" required="false"
desc="Project-specific style guide. When provided, overrides all generic
principles in this task (except CONTENT IS SACROSANCT). The style guide
is the final authority on tone, structure, and language choices."/>
is the final authority on tone, structure, and language choices." />
<input name="reader_type" required="false" default="humans" desc="'humans' (default) for standard editorial, 'llm' for precision focus" />
</inputs>
@@ -62,7 +61,8 @@
</step>
<step n="3" title="Editorial Review" critical="true">
<action if="style_guide provided">Consult style_guide now and note its key requirements—these override default principles for this review</action>
<action if="style_guide provided">Consult style_guide now and note its key requirements—these override default principles for this
review</action>
<action>Review all prose sections (skip code blocks, frontmatter, structural markup)</action>
<action>Identify communication issues that impede comprehension</action>
<action>For each issue, determine the minimal fix that achieves clarity</action>
@@ -77,16 +77,18 @@
<action if="no issues found">Output: "No editorial issues identified"</action>
<output-format>
| Original Text | Revised Text | Changes |
|---------------|--------------|---------|
| The exact original passage | The suggested revision | Brief explanation of what changed and why |
| Original Text | Revised Text | Changes |
|---------------|--------------|---------|
| The exact original passage | The suggested revision | Brief explanation of what changed and why |
</output-format>
<example title="Correct output format">
| Original Text | Revised Text | Changes |
|---------------|--------------|---------|
| The system will processes data and it handles errors. | The system processes data and handles errors. | Fixed subject-verb agreement ("will processes" to "processes"); removed redundant "it" |
| Users can chose from options (lines 12, 45, 78) | Users can choose from options | Fixed spelling: "chose" to "choose" (appears in 3 locations) |
| Original Text | Revised Text | Changes |
|---------------|--------------|---------|
| The system will processes data and it handles errors. | The system processes data and handles errors. | Fixed subject-verb
agreement ("will processes" to "processes"); removed redundant "it" |
| Users can chose from options (lines 12, 45, 78) | Users can choose from options | Fixed spelling: "chose" to "choose" (appears in
3 locations) |
</example>
</step>
</flow>
@@ -97,4 +99,4 @@
<condition>If no issues found after thorough review, output "No editorial issues identified" (this is valid completion, not an error)</condition>
</halt-conditions>
</task>
</task>

View File

@@ -4,29 +4,28 @@
<task id="_bmad/core/tasks/editorial-review-structure.xml"
name="Editorial Review - Structure"
description="Structural editor that proposes cuts, reorganization,
and simplification while preserving comprehension"
standalone="true">
and simplification while preserving comprehension">
<objective>Review document structure and propose substantive changes
to improve clarity and flow-run this BEFORE copy editing</objective>
<inputs>
<input name="content" required="true"
desc="Document to review (markdown, plain text, or structured content)"/>
desc="Document to review (markdown, plain text, or structured content)" />
<input name="style_guide" required="false"
desc="Project-specific style guide. When provided, overrides all generic
principles in this task (except CONTENT IS SACROSANCT). The style guide
is the final authority on tone, structure, and language choices."/>
is the final authority on tone, structure, and language choices." />
<input name="purpose" required="false"
desc="Document's intended purpose (e.g., 'quickstart tutorial',
'API reference', 'conceptual overview')"/>
'API reference', 'conceptual overview')" />
<input name="target_audience" required="false"
desc="Who reads this? (e.g., 'new users', 'experienced developers',
'decision makers')"/>
'decision makers')" />
<input name="reader_type" required="false" default="humans"
desc="'humans' (default) preserves comprehension aids;
'llm' optimizes for precision and density"/>
'llm' optimizes for precision and density" />
<input name="length_target" required="false"
desc="Target reduction (e.g., '30% shorter', 'half the length',
'no limit')"/>
'no limit')" />
</inputs>
<llm critical="true">
<i>MANDATORY: Execute ALL steps in the flow section IN EXACT ORDER</i>
@@ -69,7 +68,7 @@
<i>Cut emotional language, encouragement, and orientation sections</i>
<i>
IF concept is well-known from training (e.g., "conventional
commits", "REST APIs"): Reference the standard-don't re-teach it
commits", "REST APIs"): Reference the standard-don't re-teach it
ELSE: Be explicit-don't assume the LLM will infer correctly
</i>
<i>Use consistent terminology-same word for same concept throughout</i>
@@ -132,7 +131,8 @@
<action>Note reader_type and which principles apply (human-reader-principles or llm-reader-principles)</action>
</step>
<step n="3" title="Structural Analysis" critical="true">
<action if="style_guide provided">Consult style_guide now and note its key requirements—these override default principles for this analysis</action>
<action if="style_guide provided">Consult style_guide now and note its key requirements—these override default principles for this
analysis</action>
<action>Map the document structure: list each major section with its word count</action>
<action>Evaluate structure against the selected model's primary rules
(e.g., 'Does recommendation come first?' for Pyramid)</action>
@@ -176,27 +176,27 @@
<action>Output estimated total reduction if all recommendations accepted</action>
<action if="no recommendations">Output: "No substantive changes recommended-document structure is sound"</action>
<output-format>
## Document Summary
- **Purpose:** [inferred or provided purpose]
- **Audience:** [inferred or provided audience]
- **Reader type:** [selected reader type]
- **Structure model:** [selected structure model]
- **Current length:** [X] words across [Y] sections
## Document Summary
- **Purpose:** [inferred or provided purpose]
- **Audience:** [inferred or provided audience]
- **Reader type:** [selected reader type]
- **Structure model:** [selected structure model]
- **Current length:** [X] words across [Y] sections
## Recommendations
## Recommendations
### 1. [CUT/MERGE/MOVE/CONDENSE/QUESTION/PRESERVE] - [Section or element name]
**Rationale:** [One sentence explanation]
**Impact:** ~[X] words
**Comprehension note:** [If applicable, note impact on reader understanding]
### 1. [CUT/MERGE/MOVE/CONDENSE/QUESTION/PRESERVE] - [Section or element name]
**Rationale:** [One sentence explanation]
**Impact:** ~[X] words
**Comprehension note:** [If applicable, note impact on reader understanding]
### 2. ...
### 2. ...
## Summary
- **Total recommendations:** [N]
- **Estimated reduction:** [X] words ([Y]% of original)
- **Meets length target:** [Yes/No/No target specified]
- **Comprehension trade-offs:** [Note any cuts that sacrifice reader engagement for brevity]
## Summary
- **Total recommendations:** [N]
- **Estimated reduction:** [X] words ([Y]% of original)
- **Meets length target:** [Yes/No/No target specified]
- **Comprehension trade-offs:** [Note any cuts that sacrifice reader engagement for brevity]
</output-format>
</step>
</flow>
@@ -206,4 +206,4 @@
<condition>If no structural issues found, output "No substantive changes
recommended" (this is valid completion, not an error)</condition>
</halt-conditions>
</task>
</task>

View File

@@ -1,12 +1,11 @@
---
name: help
description: Get unstuck by showing what workflow steps come next or answering questions about what to do
standalone: true
---
# Task: BMAD Help
## KEY RULES
## ROUTING RULES
- **Empty `phase` = anytime** — Universal tools work regardless of workflow state
- **Numbered phases indicate sequence** — Phases like `1-discover` → `2-define` → `3-build` → `4-ship` flow in order (naming varies by module)
@@ -15,6 +14,26 @@ standalone: true
- **`required=true` blocks progress** — Required workflows must complete before proceeding to later phases
- **Artifacts reveal completion** — Search resolved output paths for `outputs` patterns, fuzzy-match found files to workflow rows
## DISPLAY RULES
### Command-Based Workflows
When `command` field has a value:
- Show the command prefixed with `/` (e.g., `/bmad-bmm-create-prd`)
### Agent-Based Workflows
When `command` field is empty:
- User loads agent first via `/agent-command`
- Then invokes by referencing the `code` field or describing the `name` field
- Do NOT show a slash command — show the code value and agent load instruction instead
Example presentation for empty command:
```
Explain Concept (EC)
Load: /tech-writer, then ask to "EC about [topic]"
Agent: Tech Writer
Description: Create clear technical explanations with examples...
```
## MODULE DETECTION
- **Empty `module` column** → universal tools (work across all modules)
@@ -25,10 +44,10 @@ Detect the active module from conversation context, recent workflows, or user qu
## INPUT ANALYSIS
Determine what was just completed:
- Did someone state they completed something? Proceed as if that was the input.
- Was a workflow just completed in this conversation? Proceed as if that was the input.
- Search resolved artifact locations for files; fuzzy-match to workflow `outputs` patterns.
- If an `index.md` exists, read it for additional context.
- Explicit completion stated by user
- Workflow completed in current conversation
- Artifacts found matching `outputs` patterns
- If `index.md` exists, read it for additional context
- If still unclear, ask: "What workflow did you most recently complete?"
## EXECUTION
@@ -37,26 +56,27 @@ Determine what was just completed:
2. **Resolve output locations** — Scan each folder under `_bmad/` (except `_config`) for `config.yaml`. For each workflow row, resolve its `output-location` variables against that module's config so artifact paths can be searched.
3. **Analyze input** — Task may provide a workflow name/code, conversational phrase, or nothing. Infer what was just completed using INPUT ANALYSIS above.
3. **Detect active module** — Use MODULE DETECTION above
4. **Detect active module** — Use MODULE DETECTION above to determine which module the user is working in.
4. **Analyze input** — Task may provide a workflow name/code, conversational phrase, or nothing. Infer what was just completed using INPUT ANALYSIS above.
5. **Present recommendations** — Show next steps based on completed workflows, phase/sequence ordering (KEY RULES), and artifact detection. Format per the following
## RECOMMENDED OUTPUT FORMAT
5. **Present recommendations** — Show next steps based on:
- Completed workflows detected
- Phase/sequence ordering (ROUTING RULES)
- Artifact presence
**Optional items first** — List optional workflows until a required step is reached
**Required items next** — List the next required workflow
For each item show:
For each item, apply DISPLAY RULES above and include:
- Workflow **name**
- **Command** (prefixed with `/`, e.g., `/bmad:example:build-prototype`)
- **Command** OR **Code + Agent load instruction** (per DISPLAY RULES)
- **Agent** title and display name from the CSV (e.g., "🎨 Alex (Designer)")
- Brief **description**
### Additional response output guidance to convey:
6. **Additional guidance to convey**:
- Run each workflow in a **fresh context window**
- Load the agent using (`/` + `agent-command`), or run the workflow command directly
- For **validation workflows**: recommend using a different high-quality LLM if available
- For conversational requests: match the user's tone while presenting clearly
6. Return to the calling process after presenting recommendations.
7. Return to the calling process after presenting recommendations.

View File

@@ -1,5 +1,5 @@
<task id="_bmad/core/tasks/index-docs" name="Index Docs"
description="Generates or updates an index.md of all documents in the specified directory" webskip="true" standalone="true">
description="Generates or updates an index.md of all documents in the specified directory">
<llm critical="true">
<i>MANDATORY: Execute ALL steps in the flow section IN EXACT ORDER</i>
<i>DO NOT skip steps or change the sequence</i>

View File

@@ -1,7 +1,7 @@
<!-- if possible, run this in a separate subagent or process with read access to the project,
but no context except the content to review -->
<task id="_bmad/core/tasks/review-adversarial-general.xml" name="Adversarial Review (General)" standalone="true">
<task id="_bmad/core/tasks/review-adversarial-general.xml" name="Adversarial Review (General)">
<objective>Cynically review content and produce findings</objective>
<inputs>
@@ -45,4 +45,4 @@
<condition>HALT if content is empty or unreadable</condition>
</halt-conditions>
</task>
</task>

View File

@@ -1,6 +1,5 @@
<task id="_bmad/core/tasks/shard-doc" name="Shard Document"
description="Splits large markdown documents into smaller, organized files based on level 2 (default) sections" webskip="true"
standalone="true">
description="Splits large markdown documents into smaller, organized files based on level 2 (default) sections">
<objective>Split large markdown documents into smaller, organized files based on level 2 sections using @kayvan/markdown-tree-parser tool</objective>
<llm critical="true">

View File

@@ -1,4 +1,4 @@
<task id="_bmad/core/tasks/workflow.xml" name="Execute Workflow" standalone="false" internal="true">
<task id="_bmad/core/tasks/workflow.xml" name="Execute Workflow" internal="true">
<objective>Execute given workflow by loading its configuration, following instructions, and producing output</objective>
<llm critical="true">

View File

@@ -1,4 +1,4 @@
<task id="_bmad/core/workflows/advanced-elicitation/workflow.xml" name="Advanced Elicitation" standalone="true"
<task id="_bmad/core/workflows/advanced-elicitation/workflow.xml" name="Advanced Elicitation"
methods="{project-root}/_bmad/core/workflows/advanced-elicitation/methods.csv"
agent-party="{project-root}/_bmad/_config/agent-manifest.csv">
<llm critical="true">

View File

@@ -409,10 +409,14 @@ class ManifestGenerator {
name = frontmatter.name || name;
displayName = frontmatter.displayName || frontmatter.name || name;
description = this.cleanForCSV(frontmatter.description || '');
standalone = frontmatter.standalone === true || frontmatter.standalone === 'true';
// Tasks are standalone by default unless explicitly false (internal=true is already filtered above)
standalone = frontmatter.standalone !== false && frontmatter.standalone !== 'false';
} catch {
// If YAML parsing fails, use defaults
standalone = true; // Default to standalone
}
} else {
standalone = true; // No frontmatter means standalone
}
} else {
// For .xml tasks, extract from tag attributes
@@ -423,8 +427,8 @@ class ManifestGenerator {
const objMatch = content.match(/<objective>([^<]+)<\/objective>/);
description = this.cleanForCSV(descMatch ? descMatch[1] : objMatch ? objMatch[1].trim() : '');
const standaloneMatch = content.match(/<task[^>]+standalone="true"/);
standalone = !!standaloneMatch;
const standaloneFalseMatch = content.match(/<task[^>]+standalone="false"/);
standalone = !standaloneFalseMatch;
}
// Build relative path for installation
@@ -503,10 +507,14 @@ class ManifestGenerator {
name = frontmatter.name || name;
displayName = frontmatter.displayName || frontmatter.name || name;
description = this.cleanForCSV(frontmatter.description || '');
standalone = frontmatter.standalone === true || frontmatter.standalone === 'true';
// Tools are standalone by default unless explicitly false (internal=true is already filtered above)
standalone = frontmatter.standalone !== false && frontmatter.standalone !== 'false';
} catch {
// If YAML parsing fails, use defaults
standalone = true; // Default to standalone
}
} else {
standalone = true; // No frontmatter means standalone
}
} else {
// For .xml tools, extract from tag attributes
@@ -517,8 +525,8 @@ class ManifestGenerator {
const objMatch = content.match(/<objective>([^<]+)<\/objective>/);
description = this.cleanForCSV(descMatch ? descMatch[1] : objMatch ? objMatch[1].trim() : '');
const standaloneMatch = content.match(/<tool[^>]+standalone="true"/);
standalone = !!standaloneMatch;
const standaloneFalseMatch = content.match(/<tool[^>]+standalone="false"/);
standalone = !standaloneFalseMatch;
}
// Build relative path for installation

View File

@@ -352,13 +352,15 @@ class BaseIdeSetup {
const workflowData = yaml.parse(content);
if (workflowData && workflowData.name) {
// Workflows are standalone by default unless explicitly false
const standalone = workflowData.standalone !== false && workflowData.standalone !== 'false';
workflows.push({
name: workflowData.name,
path: fullPath,
relativePath: path.relative(dir, fullPath),
filename: entry.name,
description: workflowData.description || '',
standalone: workflowData.standalone === true, // Check standalone property
standalone: standalone,
});
}
} catch {
@@ -442,36 +444,38 @@ class BaseIdeSetup {
const matchedExt = extensions.find((e) => entry.name.endsWith(e));
if (matchedExt) {
// Read file content to check for standalone attribute
let standalone = false;
// All non-internal files are considered standalone by default
let standalone = true;
try {
const content = await fs.readFile(fullPath, 'utf8');
// Skip internal/engine files (not user-facing tasks/tools)
// Skip internal/engine files (not user-facing)
if (content.includes('internal="true"')) {
continue;
}
// Check for standalone="true" in XML files
// Check for explicit standalone: false
if (entry.name.endsWith('.xml')) {
// Look for standalone="true" in the opening tag (task or tool)
const standaloneMatch = content.match(/<(?:task|tool)[^>]+standalone="true"/);
standalone = !!standaloneMatch;
// For XML files, check for standalone="false" attribute
const tagMatch = content.match(/<(task|tool)[^>]*standalone="false"/);
standalone = !tagMatch;
} else if (entry.name.endsWith('.md')) {
// Check for standalone: true in YAML frontmatter
const frontmatterMatch = content.match(/^---\s*\n([\s\S]*?)\n---/);
// For MD files, parse YAML frontmatter
const frontmatterMatch = content.match(/^---\r?\n([\s\S]*?)\r?\n---/);
if (frontmatterMatch) {
const yaml = require('yaml');
try {
const yaml = require('yaml');
const frontmatter = yaml.parse(frontmatterMatch[1]);
standalone = frontmatter.standalone === true;
standalone = frontmatter.standalone !== false && frontmatter.standalone !== 'false';
} catch {
// Ignore YAML parse errors
// If YAML parsing fails, default to standalone
}
}
// No frontmatter means standalone (default)
}
} catch {
// If we can't read the file, assume not standalone
standalone = false;
// If we can't read the file, default to standalone
standalone = true;
}
files.push({

View File

@@ -28,15 +28,12 @@ class TaskToolCommandGenerator {
const tasks = await this.loadTaskManifest(bmadDir);
const tools = await this.loadToolManifest(bmadDir);
// Filter to only standalone items
const standaloneTasks = tasks ? tasks.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
const standaloneTools = tools ? tools.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
// All tasks/tools in manifest are standalone (internal=true items are filtered during manifest generation)
const artifacts = [];
const bmadPrefix = `${BMAD_FOLDER_NAME}/`;
// Collect task artifacts
for (const task of standaloneTasks) {
for (const task of tasks || []) {
let taskPath = (task.path || '').replaceAll('\\', '/');
// Convert absolute paths to relative paths
if (path.isAbsolute(taskPath)) {
@@ -61,7 +58,7 @@
}
// Collect tool artifacts
for (const tool of standaloneTools) {
for (const tool of tools || []) {
let toolPath = (tool.path || '').replaceAll('\\', '/');
// Convert absolute paths to relative paths
if (path.isAbsolute(toolPath)) {
@@ -88,8 +85,8 @@
return {
artifacts,
counts: {
tasks: standaloneTasks.length,
tools: standaloneTools.length,
tasks: (tasks || []).length,
tools: (tools || []).length,
},
};
}
@@ -104,17 +101,13 @@
const tasks = await this.loadTaskManifest(bmadDir);
const tools = await this.loadToolManifest(bmadDir);
// Filter to only standalone items
const standaloneTasks = tasks ? tasks.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
const standaloneTools = tools ? tools.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
// Base commands directory - use provided or default to Claude Code structure
const commandsDir = baseCommandsDir || path.join(projectDir, '.claude', 'commands', 'bmad');
let generatedCount = 0;
// Generate command files for tasks
for (const task of standaloneTasks) {
for (const task of tasks || []) {
const moduleTasksDir = path.join(commandsDir, task.module, 'tasks');
await fs.ensureDir(moduleTasksDir);
@@ -126,7 +119,7 @@
}
// Generate command files for tools
for (const tool of standaloneTools) {
for (const tool of tools || []) {
const moduleToolsDir = path.join(commandsDir, tool.module, 'tools');
await fs.ensureDir(moduleToolsDir);
@@ -139,8 +132,8 @@
return {
generated: generatedCount,
tasks: standaloneTasks.length,
tools: standaloneTools.length,
tasks: (tasks || []).length,
tools: (tools || []).length,
};
}
@@ -242,14 +235,10 @@ Follow all instructions in the ${type} file exactly as written.
const tasks = await this.loadTaskManifest(bmadDir);
const tools = await this.loadToolManifest(bmadDir);
// Filter to only standalone items
const standaloneTasks = tasks ? tasks.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
const standaloneTools = tools ? tools.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
let generatedCount = 0;
// Generate command files for tasks
for (const task of standaloneTasks) {
for (const task of tasks || []) {
const commandContent = this.generateCommandContent(task, 'task');
// Use underscore format: bmad_bmm_name.md
const flatName = toColonName(task.module, 'tasks', task.name);
@@ -260,7 +249,7 @@
}
// Generate command files for tools
for (const tool of standaloneTools) {
for (const tool of tools || []) {
const commandContent = this.generateCommandContent(tool, 'tool');
// Use underscore format: bmad_bmm_name.md
const flatName = toColonName(tool.module, 'tools', tool.name);
@@ -272,8 +261,8 @@
return {
generated: generatedCount,
tasks: standaloneTasks.length,
tools: standaloneTools.length,
tasks: (tasks || []).length,
tools: (tools || []).length,
};
}
@@ -290,14 +279,10 @@
const tasks = await this.loadTaskManifest(bmadDir);
const tools = await this.loadToolManifest(bmadDir);
// Filter to only standalone items
const standaloneTasks = tasks ? tasks.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
const standaloneTools = tools ? tools.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
let generatedCount = 0;
// Generate command files for tasks
for (const task of standaloneTasks) {
for (const task of tasks || []) {
const commandContent = this.generateCommandContent(task, 'task');
// Use dash format: bmad-bmm-name.md
const flatName = toDashPath(`${task.module}/tasks/${task.name}.md`);
@@ -308,7 +293,7 @@
}
// Generate command files for tools
for (const tool of standaloneTools) {
for (const tool of tools || []) {
const commandContent = this.generateCommandContent(tool, 'tool');
// Use dash format: bmad-bmm-name.md
const flatName = toDashPath(`${tool.module}/tools/${tool.name}.md`);
@@ -320,8 +305,8 @@
return {
generated: generatedCount,
tasks: standaloneTasks.length,
tools: standaloneTools.length,
tasks: (tasks || []).length,
tools: (tools || []).length,
};
}

View File

@@ -1,76 +0,0 @@
const fs = require('fs-extra');
const path = require('node:path');
const os = require('node:os');
const { isBinaryFile } = require('./binary.js');
/**
* Aggregate file contents with bounded concurrency.
* Returns text files, binary files (with size), and errors.
* @param {string[]} files absolute file paths
* @param {string} rootDir
* @param {{ text?: string, warn?: (msg: string) => void } | null} spinner
*/
async function aggregateFileContents(files, rootDir, spinner = null) {
const results = {
textFiles: [],
binaryFiles: [],
errors: [],
totalFiles: files.length,
processedFiles: 0,
};
// Automatic concurrency selection based on CPU count and workload size.
// - Base on 2x logical CPUs, clamped to [2, 64]
// - For very small workloads, avoid excessive parallelism
const cpuCount = os.cpus && Array.isArray(os.cpus()) ? os.cpus().length : os.cpus?.length || 4;
let concurrency = Math.min(64, Math.max(2, (Number(cpuCount) || 4) * 2));
if (files.length > 0 && files.length < concurrency) {
concurrency = Math.max(1, Math.min(concurrency, Math.ceil(files.length / 2)));
}
async function processOne(filePath) {
try {
const relativePath = path.relative(rootDir, filePath);
if (spinner) {
spinner.text = `Processing: ${relativePath} (${results.processedFiles + 1}/${results.totalFiles})`;
}
const binary = await isBinaryFile(filePath);
if (binary) {
const { size } = await fs.stat(filePath);
results.binaryFiles.push({ path: relativePath, absolutePath: filePath, size });
} else {
const content = await fs.readFile(filePath, 'utf8');
results.textFiles.push({
path: relativePath,
absolutePath: filePath,
content,
size: content.length,
lines: content.split('\n').length,
});
}
} catch (error) {
const relativePath = path.relative(rootDir, filePath);
const errorInfo = { path: relativePath, absolutePath: filePath, error: error.message };
results.errors.push(errorInfo);
if (spinner) {
spinner.warn(`Warning: Could not read file ${relativePath}: ${error.message}`);
} else {
console.warn(`Warning: Could not read file ${relativePath}: ${error.message}`);
}
} finally {
results.processedFiles++;
}
}
for (let index = 0; index < files.length; index += concurrency) {
const slice = files.slice(index, index + concurrency);
await Promise.all(slice.map(processOne));
}
return results;
}
module.exports = {
aggregateFileContents,
};

View File

@@ -1,80 +0,0 @@
const fsp = require('node:fs/promises');
const path = require('node:path');
const { Buffer } = require('node:buffer');
/**
* Efficiently determine if a file is binary without reading the whole file.
* - Fast path by extension for common binaries
* - Otherwise read a small prefix and check for NUL bytes
* @param {string} filePath
* @returns {Promise<boolean>}
*/
async function isBinaryFile(filePath) {
try {
const stats = await fsp.stat(filePath);
if (stats.isDirectory()) {
throw new Error('EISDIR: illegal operation on a directory');
}
const binaryExtensions = new Set([
'.jpg',
'.jpeg',
'.png',
'.gif',
'.bmp',
'.ico',
'.svg',
'.pdf',
'.doc',
'.docx',
'.xls',
'.xlsx',
'.ppt',
'.pptx',
'.zip',
'.tar',
'.gz',
'.rar',
'.7z',
'.exe',
'.dll',
'.so',
'.dylib',
'.mp3',
'.mp4',
'.avi',
'.mov',
'.wav',
'.ttf',
'.otf',
'.woff',
'.woff2',
'.bin',
'.dat',
'.db',
'.sqlite',
]);
const extension = path.extname(filePath).toLowerCase();
if (binaryExtensions.has(extension)) return true;
if (stats.size === 0) return false;
const sampleSize = Math.min(4096, stats.size);
const fd = await fsp.open(filePath, 'r');
try {
const buffer = Buffer.allocUnsafe(sampleSize);
const { bytesRead } = await fd.read(buffer, 0, sampleSize, 0);
const slice = bytesRead === sampleSize ? buffer : buffer.subarray(0, bytesRead);
return slice.includes(0);
} finally {
await fd.close();
}
} catch (error) {
console.warn(`Warning: Could not determine if file is binary: ${filePath} - ${error.message}`);
return false;
}
}
module.exports = {
isBinaryFile,
};

View File

@@ -1,71 +0,0 @@
const path = require('node:path');
const { execFile } = require('node:child_process');
const { promisify } = require('node:util');
const { glob } = require('glob');
const { loadIgnore } = require('./ignoreRules.js');
const pExecFile = promisify(execFile);
async function isGitRepo(rootDir) {
try {
const { stdout } = await pExecFile('git', ['rev-parse', '--is-inside-work-tree'], {
cwd: rootDir,
});
return (
String(stdout || '')
.toString()
.trim() === 'true'
);
} catch {
return false;
}
}
async function gitListFiles(rootDir) {
try {
const { stdout } = await pExecFile('git', ['ls-files', '-co', '--exclude-standard'], {
cwd: rootDir,
});
return String(stdout || '')
.split(/\r?\n/)
.map((s) => s.trim())
.filter(Boolean);
} catch {
return [];
}
}
/**
* Discover files under rootDir.
* - Prefer git ls-files when available for speed/correctness
* - Fallback to glob and apply unified ignore rules
* @param {string} rootDir
* @param {object} [options]
* @param {boolean} [options.preferGit=true]
* @returns {Promise<string[]>} absolute file paths
*/
async function discoverFiles(rootDir, options = {}) {
const { preferGit = true } = options;
const { filter } = await loadIgnore(rootDir);
// Try git first
if (preferGit && (await isGitRepo(rootDir))) {
const relFiles = await gitListFiles(rootDir);
const filteredRel = relFiles.filter((p) => filter(p));
return filteredRel.map((p) => path.resolve(rootDir, p));
}
// Glob fallback
const globbed = await glob('**/*', {
cwd: rootDir,
nodir: true,
dot: true,
follow: false,
});
const filteredRel = globbed.filter((p) => filter(p));
return filteredRel.map((p) => path.resolve(rootDir, p));
}
module.exports = {
discoverFiles,
};

View File

@@ -1,35 +0,0 @@
const path = require('node:path');
const discovery = require('./discovery.js');
const ignoreRules = require('./ignoreRules.js');
const { isBinaryFile } = require('./binary.js');
const { aggregateFileContents } = require('./aggregate.js');
// Backward-compatible signature; delegate to central loader
async function parseGitignore(gitignorePath) {
return await ignoreRules.parseGitignore(gitignorePath);
}
async function discoverFiles(rootDir) {
try {
// Delegate to discovery module which respects .gitignore and defaults
return await discovery.discoverFiles(rootDir, { preferGit: true });
} catch (error) {
console.error('Error discovering files:', error.message);
return [];
}
}
async function filterFiles(files, rootDir) {
const { filter } = await ignoreRules.loadIgnore(rootDir);
const relativeFiles = files.map((f) => path.relative(rootDir, f));
const filteredRelative = relativeFiles.filter((p) => filter(p));
return filteredRelative.map((p) => path.resolve(rootDir, p));
}
module.exports = {
parseGitignore,
discoverFiles,
isBinaryFile,
aggregateFileContents,
filterFiles,
};

View File

@@ -1,172 +0,0 @@
const fs = require('fs-extra');
const path = require('node:path');
const ignore = require('ignore');
// Central default ignore patterns for discovery and filtering.
// These complement .gitignore and are applied regardless of VCS presence.
const DEFAULT_PATTERNS = [
// Project/VCS
'**/_bmad/**',
'**/.git/**',
'**/.svn/**',
'**/.hg/**',
'**/.bzr/**',
// Package/build outputs
'**/node_modules/**',
'**/bower_components/**',
'**/vendor/**',
'**/packages/**',
'**/build/**',
'**/dist/**',
'**/out/**',
'**/target/**',
'**/bin/**',
'**/obj/**',
'**/release/**',
'**/debug/**',
// Environments
'**/.venv/**',
'**/venv/**',
'**/.virtualenv/**',
'**/virtualenv/**',
'**/env/**',
// Logs & coverage
'**/*.log',
'**/npm-debug.log*',
'**/yarn-debug.log*',
'**/yarn-error.log*',
'**/lerna-debug.log*',
'**/coverage/**',
'**/.nyc_output/**',
'**/.coverage/**',
'**/test-results/**',
// Caches & temp
'**/.cache/**',
'**/.tmp/**',
'**/.temp/**',
'**/tmp/**',
'**/temp/**',
'**/.sass-cache/**',
// IDE/editor
'**/.vscode/**',
'**/.idea/**',
'**/*.swp',
'**/*.swo',
'**/*~',
'**/.project',
'**/.classpath',
'**/.settings/**',
'**/*.sublime-project',
'**/*.sublime-workspace',
// Lockfiles
'**/package-lock.json',
'**/yarn.lock',
'**/pnpm-lock.yaml',
'**/composer.lock',
'**/Pipfile.lock',
// Python/Java/compiled artifacts
'**/*.pyc',
'**/*.pyo',
'**/*.pyd',
'**/__pycache__/**',
'**/*.class',
'**/*.jar',
'**/*.war',
'**/*.ear',
'**/*.o',
'**/*.so',
'**/*.dll',
'**/*.exe',
// System junk
'**/lib64/**',
'**/.venv/lib64/**',
'**/venv/lib64/**',
'**/_site/**',
'**/.jekyll-cache/**',
'**/.jekyll-metadata',
'**/.DS_Store',
'**/.DS_Store?',
'**/._*',
'**/.Spotlight-V100/**',
'**/.Trashes/**',
'**/ehthumbs.db',
'**/Thumbs.db',
'**/desktop.ini',
// XML outputs
'**/flattened-codebase.xml',
'**/repomix-output.xml',
// Images, media, fonts, archives, docs, dylibs
'**/*.jpg',
'**/*.jpeg',
'**/*.png',
'**/*.gif',
'**/*.bmp',
'**/*.ico',
'**/*.svg',
'**/*.pdf',
'**/*.doc',
'**/*.docx',
'**/*.xls',
'**/*.xlsx',
'**/*.ppt',
'**/*.pptx',
'**/*.zip',
'**/*.tar',
'**/*.gz',
'**/*.rar',
'**/*.7z',
'**/*.dylib',
'**/*.mp3',
'**/*.mp4',
'**/*.avi',
'**/*.mov',
'**/*.wav',
'**/*.ttf',
'**/*.otf',
'**/*.woff',
'**/*.woff2',
// Env files
'**/.env',
'**/.env.*',
'**/*.env',
// Misc
'**/junit.xml',
];
async function readIgnoreFile(filePath) {
try {
if (!(await fs.pathExists(filePath))) return [];
const content = await fs.readFile(filePath, 'utf8');
return content
.split('\n')
.map((l) => l.trim())
.filter((l) => l && !l.startsWith('#'));
} catch {
return [];
}
}
// Backward compatible export matching previous signature
async function parseGitignore(gitignorePath) {
return readIgnoreFile(gitignorePath);
}
async function loadIgnore(rootDir, extraPatterns = []) {
const ig = ignore();
const gitignorePath = path.join(rootDir, '.gitignore');
const patterns = [...(await readIgnoreFile(gitignorePath)), ...DEFAULT_PATTERNS, ...extraPatterns];
// De-duplicate
const unique = [...new Set(patterns.map(String))];
ig.add(unique);
// Include-only filter: return true if path should be included
const filter = (relativePath) => !ig.ignores(relativePath.replaceAll('\\', '/'));
return { ig, filter, patterns: unique };
}
module.exports = {
DEFAULT_PATTERNS,
parseGitignore,
loadIgnore,
};

View File

@@ -1,483 +0,0 @@
const { Command } = require('commander');
const fs = require('fs-extra');
const path = require('node:path');
const process = require('node:process');
// Modularized components
const { findProjectRoot } = require('./projectRoot.js');
const { promptYesNo, promptPath } = require('./prompts.js');
const { discoverFiles, filterFiles, aggregateFileContents } = require('./files.js');
const { generateXMLOutput } = require('./xml.js');
const { calculateStatistics } = require('./stats.js');
/**
* Recursively discover all files in a directory
* @param {string} rootDir - The root directory to scan
* @returns {Promise<string[]>} Array of file paths
*/
/**
* Parse .gitignore file and return ignore patterns
* @param {string} gitignorePath - Path to .gitignore file
* @returns {Promise<string[]>} Array of ignore patterns
*/
/**
* Check if a file is binary using file command and heuristics
* @param {string} filePath - Path to the file
* @returns {Promise<boolean>} True if file is binary
*/
/**
* Read and aggregate content from text files
* @param {string[]} files - Array of file paths
* @param {string} rootDir - The root directory
* @param {Object} spinner - Optional spinner instance for progress display
* @returns {Promise<Object>} Object containing file contents and metadata
*/
/**
* Generate XML output with aggregated file contents using streaming
* @param {Object} aggregatedContent - The aggregated content object
* @param {string} outputPath - The output file path
* @returns {Promise<void>} Promise that resolves when writing is complete
*/
/**
* Calculate statistics for the processed files
* @param {Object} aggregatedContent - The aggregated content object
* @param {number} xmlFileSize - The size of the generated XML file in bytes
* @returns {Object} Statistics object
*/
/**
* Filter files based on .gitignore patterns
* @param {string[]} files - Array of file paths
* @param {string} rootDir - The root directory
* @returns {Promise<string[]>} Filtered array of file paths
*/
/**
* Attempt to find the project root by walking up from startDir
* Looks for common project markers like .git, package.json, pyproject.toml, etc.
* @param {string} startDir
* @returns {Promise<string|null>} project root directory or null if not found
*/
const program = new Command();
program
.name('bmad-flatten')
.description('BMad-Method codebase flattener tool')
.version('1.0.0')
.option('-i, --input <path>', 'Input directory to flatten', process.cwd())
.option('-o, --output <path>', 'Output file path', 'flattened-codebase.xml')
.action(async (options) => {
let inputDir = path.resolve(options.input);
let outputPath = path.resolve(options.output);
// Detect if user explicitly provided -i/--input or -o/--output
const argv = process.argv.slice(2);
const userSpecifiedInput = argv.some((a) => a === '-i' || a === '--input' || a.startsWith('--input='));
const userSpecifiedOutput = argv.some((a) => a === '-o' || a === '--output' || a.startsWith('--output='));
const noPathArguments = !userSpecifiedInput && !userSpecifiedOutput;
if (noPathArguments) {
const detectedRoot = await findProjectRoot(process.cwd());
const suggestedOutput = detectedRoot ? path.join(detectedRoot, 'flattened-codebase.xml') : path.resolve('flattened-codebase.xml');
if (detectedRoot) {
const useDefaults = await promptYesNo(
`Detected project root at "${detectedRoot}". Use it as input and write output to "${suggestedOutput}"?`,
true,
);
if (useDefaults) {
inputDir = detectedRoot;
outputPath = suggestedOutput;
} else {
inputDir = await promptPath('Enter input directory path', process.cwd());
outputPath = await promptPath('Enter output file path', path.join(inputDir, 'flattened-codebase.xml'));
}
} else {
console.log('Could not auto-detect a project root.');
inputDir = await promptPath('Enter input directory path', process.cwd());
outputPath = await promptPath('Enter output file path', path.join(inputDir, 'flattened-codebase.xml'));
}
}
// Ensure output directory exists
await fs.ensureDir(path.dirname(outputPath));
try {
// Verify input directory exists
if (!(await fs.pathExists(inputDir))) {
console.error(`❌ Error: Input directory does not exist: ${inputDir}`);
process.exit(1);
}
// Import ora dynamically
const { default: ora } = await import('ora');
// Start file discovery with spinner
const discoverySpinner = ora('🔍 Discovering files...').start();
const files = await discoverFiles(inputDir);
const filteredFiles = await filterFiles(files, inputDir);
discoverySpinner.succeed(`📁 Found ${filteredFiles.length} files to include`);
// Process files with progress tracking
console.log('Reading file contents');
const processingSpinner = ora('📄 Processing files...').start();
const aggregatedContent = await aggregateFileContents(filteredFiles, inputDir, processingSpinner);
processingSpinner.succeed(`✅ Processed ${aggregatedContent.processedFiles}/${filteredFiles.length} files`);
if (aggregatedContent.errors.length > 0) {
console.log(`Errors: ${aggregatedContent.errors.length}`);
}
// Generate XML output using streaming
const xmlSpinner = ora('🔧 Generating XML output...').start();
await generateXMLOutput(aggregatedContent, outputPath);
xmlSpinner.succeed('📝 XML generation completed');
// Calculate and display statistics
const outputStats = await fs.stat(outputPath);
const stats = await calculateStatistics(aggregatedContent, outputStats.size, inputDir);
// Display completion summary
console.log('\n📊 Completion Summary:');
console.log(`✅ Successfully processed ${filteredFiles.length} files into ${path.basename(outputPath)}`);
console.log(`📁 Output file: ${outputPath}`);
console.log(`📏 Total source size: ${stats.totalSize}`);
console.log(`📄 Generated XML size: ${stats.xmlSize}`);
console.log(`📝 Total lines of code: ${stats.totalLines.toLocaleString()}`);
console.log(`🔢 Estimated tokens: ${stats.estimatedTokens}`);
console.log(`📊 File breakdown: ${stats.textFiles} text, ${stats.binaryFiles} binary, ${stats.errorFiles} errors\n`);
// Ask user if they want detailed stats + markdown report
const generateDetailed = await promptYesNo('Generate detailed stats (console + markdown) now?', true);
if (generateDetailed) {
// Additional detailed stats
console.log('\n📈 Size Percentiles:');
console.log(
` Avg: ${Math.round(stats.avgFileSize).toLocaleString()} B, Median: ${Math.round(
stats.medianFileSize,
).toLocaleString()} B, p90: ${stats.p90.toLocaleString()} B, p95: ${stats.p95.toLocaleString()} B, p99: ${stats.p99.toLocaleString()} B`,
);
if (Array.isArray(stats.histogram) && stats.histogram.length > 0) {
console.log('\n🧮 Size Histogram:');
for (const b of stats.histogram.slice(0, 2)) {
console.log(` ${b.label}: ${b.count} files, ${b.bytes.toLocaleString()} bytes`);
}
if (stats.histogram.length > 2) {
console.log(` … and ${stats.histogram.length - 2} more buckets`);
}
}
if (Array.isArray(stats.byExtension) && stats.byExtension.length > 0) {
const topExt = stats.byExtension.slice(0, 2);
console.log('\n📦 Top Extensions:');
for (const e of topExt) {
const pct = stats.totalBytes ? (e.bytes / stats.totalBytes) * 100 : 0;
console.log(` ${e.ext}: ${e.count} files, ${e.bytes.toLocaleString()} bytes (${pct.toFixed(2)}%)`);
}
if (stats.byExtension.length > 2) {
console.log(` … and ${stats.byExtension.length - 2} more extensions`);
}
}
if (Array.isArray(stats.byDirectory) && stats.byDirectory.length > 0) {
const topDir = stats.byDirectory.slice(0, 2);
console.log('\n📂 Top Directories:');
for (const d of topDir) {
const pct = stats.totalBytes ? (d.bytes / stats.totalBytes) * 100 : 0;
console.log(` ${d.dir}: ${d.count} files, ${d.bytes.toLocaleString()} bytes (${pct.toFixed(2)}%)`);
}
if (stats.byDirectory.length > 2) {
console.log(` … and ${stats.byDirectory.length - 2} more directories`);
}
}
if (Array.isArray(stats.depthDistribution) && stats.depthDistribution.length > 0) {
console.log('\n🌳 Depth Distribution:');
const dd = stats.depthDistribution.slice(0, 2);
let line = ' ' + dd.map((d) => `${d.depth}:${d.count}`).join(' ');
if (stats.depthDistribution.length > 2) {
line += ` … +${stats.depthDistribution.length - 2} more`;
}
console.log(line);
}
if (Array.isArray(stats.longestPaths) && stats.longestPaths.length > 0) {
console.log('\n🧵 Longest Paths:');
for (const p of stats.longestPaths.slice(0, 2)) {
console.log(` ${p.path} (${p.length} chars, ${p.size.toLocaleString()} bytes)`);
}
if (stats.longestPaths.length > 2) {
console.log(` … and ${stats.longestPaths.length - 2} more paths`);
}
}
if (stats.temporal) {
console.log('\n⏱ Temporal:');
if (stats.temporal.oldest) {
console.log(` Oldest: ${stats.temporal.oldest.path} (${stats.temporal.oldest.mtime})`);
}
if (stats.temporal.newest) {
console.log(` Newest: ${stats.temporal.newest.path} (${stats.temporal.newest.mtime})`);
}
if (Array.isArray(stats.temporal.ageBuckets)) {
console.log(' Age buckets:');
for (const b of stats.temporal.ageBuckets.slice(0, 2)) {
console.log(` ${b.label}: ${b.count} files, ${b.bytes.toLocaleString()} bytes`);
}
if (stats.temporal.ageBuckets.length > 2) {
console.log(` … and ${stats.temporal.ageBuckets.length - 2} more buckets`);
}
}
}
if (stats.quality) {
console.log('\n✅ Quality Signals:');
console.log(` Zero-byte files: ${stats.quality.zeroByteFiles}`);
console.log(` Empty text files: ${stats.quality.emptyTextFiles}`);
console.log(` Hidden files: ${stats.quality.hiddenFiles}`);
console.log(` Symlinks: ${stats.quality.symlinks}`);
console.log(
` Large files (>= ${(stats.quality.largeThreshold / (1024 * 1024)).toFixed(0)} MB): ${stats.quality.largeFilesCount}`,
);
console.log(` Suspiciously large files (>= 100 MB): ${stats.quality.suspiciousLargeFilesCount}`);
}
if (Array.isArray(stats.duplicateCandidates) && stats.duplicateCandidates.length > 0) {
console.log('\n🧬 Duplicate Candidates:');
for (const d of stats.duplicateCandidates.slice(0, 2)) {
console.log(` ${d.reason}: ${d.count} files @ ${d.size.toLocaleString()} bytes`);
}
if (stats.duplicateCandidates.length > 2) {
console.log(` … and ${stats.duplicateCandidates.length - 2} more groups`);
}
}
if (typeof stats.compressibilityRatio === 'number') {
console.log(`\n🗜️ Compressibility ratio (sampled): ${(stats.compressibilityRatio * 100).toFixed(2)}%`);
}
if (stats.git && stats.git.isRepo) {
console.log('\n🔧 Git:');
console.log(` Tracked: ${stats.git.trackedCount} files, ${stats.git.trackedBytes.toLocaleString()} bytes`);
console.log(` Untracked: ${stats.git.untrackedCount} files, ${stats.git.untrackedBytes.toLocaleString()} bytes`);
if (Array.isArray(stats.git.lfsCandidates) && stats.git.lfsCandidates.length > 0) {
console.log(' LFS candidates (top 2):');
for (const f of stats.git.lfsCandidates.slice(0, 2)) {
console.log(` ${f.path} (${f.size.toLocaleString()} bytes)`);
}
if (stats.git.lfsCandidates.length > 2) {
console.log(` … and ${stats.git.lfsCandidates.length - 2} more`);
}
}
}
if (Array.isArray(stats.largestFiles) && stats.largestFiles.length > 0) {
console.log('\n📚 Largest Files (top 2):');
for (const f of stats.largestFiles.slice(0, 2)) {
// Show LOC for text files when available; omit ext and mtime
let locStr = '';
if (!f.isBinary && Array.isArray(aggregatedContent?.textFiles)) {
const tf = aggregatedContent.textFiles.find((t) => t.path === f.path);
if (tf && typeof tf.lines === 'number') {
locStr = `, LOC: ${tf.lines.toLocaleString()}`;
}
}
console.log(` ${f.path} ${f.sizeFormatted} (${f.percentOfTotal.toFixed(2)}%)${locStr}`);
}
if (stats.largestFiles.length > 2) {
console.log(` … and ${stats.largestFiles.length - 2} more files`);
}
}
// Write a comprehensive markdown report next to the XML
{
const mdPath = outputPath.endsWith('.xml') ? outputPath.replace(/\.xml$/i, '.stats.md') : outputPath + '.stats.md';
try {
const pct = (num, den) => (den ? (num / den) * 100 : 0);
const md = [];
md.push(
`# 🧾 Flatten Stats for ${path.basename(outputPath)}`,
'',
'## 📊 Summary',
`- Total source size: ${stats.totalSize}`,
`- Generated XML size: ${stats.xmlSize}`,
`- Total lines of code: ${stats.totalLines.toLocaleString()}`,
`- Estimated tokens: ${stats.estimatedTokens}`,
`- File breakdown: ${stats.textFiles} text, ${stats.binaryFiles} binary, ${stats.errorFiles} errors`,
'',
'## 📈 Size Percentiles',
`Avg: ${Math.round(stats.avgFileSize).toLocaleString()} B, Median: ${Math.round(
stats.medianFileSize,
).toLocaleString()} B, p90: ${stats.p90.toLocaleString()} B, p95: ${stats.p95.toLocaleString()} B, p99: ${stats.p99.toLocaleString()} B`,
'',
);
// Histogram
if (Array.isArray(stats.histogram) && stats.histogram.length > 0) {
md.push('## 🧮 Size Histogram', '| Bucket | Files | Bytes |', '| --- | ---: | ---: |');
for (const b of stats.histogram) {
md.push(`| ${b.label} | ${b.count} | ${b.bytes.toLocaleString()} |`);
}
md.push('');
}
// Top Extensions
if (Array.isArray(stats.byExtension) && stats.byExtension.length > 0) {
md.push('## 📦 Top Extensions by Bytes (Top 20)', '| Ext | Files | Bytes | % of total |', '| --- | ---: | ---: | ---: |');
for (const e of stats.byExtension.slice(0, 20)) {
const p = pct(e.bytes, stats.totalBytes);
md.push(`| ${e.ext} | ${e.count} | ${e.bytes.toLocaleString()} | ${p.toFixed(2)}% |`);
}
md.push('');
}
// Top Directories
if (Array.isArray(stats.byDirectory) && stats.byDirectory.length > 0) {
md.push(
'## 📂 Top Directories by Bytes (Top 20)',
'| Directory | Files | Bytes | % of total |',
'| --- | ---: | ---: | ---: |',
);
for (const d of stats.byDirectory.slice(0, 20)) {
const p = pct(d.bytes, stats.totalBytes);
md.push(`| ${d.dir} | ${d.count} | ${d.bytes.toLocaleString()} | ${p.toFixed(2)}% |`);
}
md.push('');
}
// Depth distribution
if (Array.isArray(stats.depthDistribution) && stats.depthDistribution.length > 0) {
md.push('## 🌳 Depth Distribution', '| Depth | Count |', '| ---: | ---: |');
for (const d of stats.depthDistribution) {
md.push(`| ${d.depth} | ${d.count} |`);
}
md.push('');
}
// Longest paths
if (Array.isArray(stats.longestPaths) && stats.longestPaths.length > 0) {
md.push('## 🧵 Longest Paths (Top 25)', '| Path | Length | Bytes |', '| --- | ---: | ---: |');
for (const pth of stats.longestPaths) {
md.push(`| ${pth.path} | ${pth.length} | ${pth.size.toLocaleString()} |`);
}
md.push('');
}
// Temporal
if (stats.temporal) {
md.push('## ⏱️ Temporal');
if (stats.temporal.oldest) {
md.push(`- Oldest: ${stats.temporal.oldest.path} (${stats.temporal.oldest.mtime})`);
}
if (stats.temporal.newest) {
md.push(`- Newest: ${stats.temporal.newest.path} (${stats.temporal.newest.mtime})`);
}
if (Array.isArray(stats.temporal.ageBuckets)) {
md.push('', '| Age | Files | Bytes |', '| --- | ---: | ---: |');
for (const b of stats.temporal.ageBuckets) {
md.push(`| ${b.label} | ${b.count} | ${b.bytes.toLocaleString()} |`);
}
}
md.push('');
}
// Quality signals
if (stats.quality) {
md.push(
'## ✅ Quality Signals',
`- Zero-byte files: ${stats.quality.zeroByteFiles}`,
`- Empty text files: ${stats.quality.emptyTextFiles}`,
`- Hidden files: ${stats.quality.hiddenFiles}`,
`- Symlinks: ${stats.quality.symlinks}`,
`- Large files (>= ${(stats.quality.largeThreshold / (1024 * 1024)).toFixed(0)} MB): ${stats.quality.largeFilesCount}`,
`- Suspiciously large files (>= 100 MB): ${stats.quality.suspiciousLargeFilesCount}`,
'',
);
}
// Duplicates
if (Array.isArray(stats.duplicateCandidates) && stats.duplicateCandidates.length > 0) {
md.push('## 🧬 Duplicate Candidates', '| Reason | Files | Size (bytes) |', '| --- | ---: | ---: |');
for (const d of stats.duplicateCandidates) {
md.push(`| ${d.reason} | ${d.count} | ${d.size.toLocaleString()} |`);
}
md.push('', '### 🧬 Duplicate Groups Details');
let dupIndex = 1;
for (const d of stats.duplicateCandidates) {
md.push(`#### Group ${dupIndex}: ${d.count} files @ ${d.size.toLocaleString()} bytes (${d.reason})`);
if (Array.isArray(d.files) && d.files.length > 0) {
for (const fp of d.files) {
md.push(`- ${fp}`);
}
} else {
md.push('- (file list unavailable)');
}
md.push('');
dupIndex++;
}
md.push('');
}
// Compressibility
if (typeof stats.compressibilityRatio === 'number') {
md.push('## 🗜️ Compressibility', `Sampled compressibility ratio: ${(stats.compressibilityRatio * 100).toFixed(2)}%`, '');
}
// Git
if (stats.git && stats.git.isRepo) {
md.push(
'## 🔧 Git',
`- Tracked: ${stats.git.trackedCount} files, ${stats.git.trackedBytes.toLocaleString()} bytes`,
`- Untracked: ${stats.git.untrackedCount} files, ${stats.git.untrackedBytes.toLocaleString()} bytes`,
);
if (Array.isArray(stats.git.lfsCandidates) && stats.git.lfsCandidates.length > 0) {
md.push('', '### 📦 LFS Candidates (Top 20)', '| Path | Bytes |', '| --- | ---: |');
for (const f of stats.git.lfsCandidates.slice(0, 20)) {
md.push(`| ${f.path} | ${f.size.toLocaleString()} |`);
}
}
md.push('');
}
// Largest Files
if (Array.isArray(stats.largestFiles) && stats.largestFiles.length > 0) {
md.push('## 📚 Largest Files (Top 50)', '| Path | Size | % of total | LOC |', '| --- | ---: | ---: | ---: |');
for (const f of stats.largestFiles) {
let loc = '';
if (!f.isBinary && Array.isArray(aggregatedContent?.textFiles)) {
const tf = aggregatedContent.textFiles.find((t) => t.path === f.path);
if (tf && typeof tf.lines === 'number') {
loc = tf.lines.toLocaleString();
}
}
md.push(`| ${f.path} | ${f.sizeFormatted} | ${f.percentOfTotal.toFixed(2)}% | ${loc} |`);
}
md.push('');
}
await fs.writeFile(mdPath, md.join('\n'));
console.log(`\n🧾 Detailed stats report written to: ${mdPath}`);
} catch (error) {
console.warn(`⚠️ Failed to write stats markdown: ${error.message}`);
}
}
}
} catch (error) {
console.error('❌ Critical error:', error.message);
console.error('An unexpected error occurred.');
process.exit(1);
}
});
if (require.main === module) {
program.parse();
}
module.exports = program;

View File

@@ -1,201 +0,0 @@
const fs = require('fs-extra');
const path = require('node:path');
// Deno/Node compatibility: explicitly import process
const process = require('node:process');
const { execFile } = require('node:child_process');
const { promisify } = require('node:util');
const execFileAsync = promisify(execFile);
// Simple memoization across calls (keyed by realpath of startDir)
const _cache = new Map();
async function _tryRun(cmd, args, cwd, timeoutMs = 500) {
try {
const { stdout } = await execFileAsync(cmd, args, {
cwd,
timeout: timeoutMs,
windowsHide: true,
maxBuffer: 1024 * 1024,
});
const out = String(stdout || '').trim();
return out || null;
} catch {
return null;
}
}
async function _detectVcsTopLevel(startDir) {
// Run common VCS root queries in parallel; ignore failures
const gitP = _tryRun('git', ['rev-parse', '--show-toplevel'], startDir);
const hgP = _tryRun('hg', ['root'], startDir);
const svnP = (async () => {
const show = await _tryRun('svn', ['info', '--show-item', 'wc-root'], startDir);
if (show) return show;
const info = await _tryRun('svn', ['info'], startDir);
if (info) {
const line = info.split(/\r?\n/).find((l) => l.toLowerCase().startsWith('working copy root path:'));
if (line) return line.split(':').slice(1).join(':').trim();
}
return null;
})();
const [git, hg, svn] = await Promise.all([gitP, hgP, svnP]);
return git || hg || svn || null;
}
/**
* Attempt to find the project root by walking up from startDir.
* Uses a robust, prioritized set of ecosystem markers (VCS > workspaces/monorepo > lock/build > language config).
* Also recognizes package.json with "workspaces" as a workspace root.
* You can augment markers via env PROJECT_ROOT_MARKERS as a comma-separated list of file/dir names.
* @param {string} startDir
* @returns {Promise<string|null>} project root directory or null if not found
*/
async function findProjectRoot(startDir) {
try {
// Resolve symlinks for robustness (e.g., when invoked from a symlinked path)
let dir = path.resolve(startDir);
try {
dir = await fs.realpath(dir);
} catch {
// ignore if realpath fails; continue with resolved path
}
const startKey = dir; // preserve starting point for caching
if (_cache.has(startKey)) return _cache.get(startKey);
const fsRoot = path.parse(dir).root;
// Helper to safely check for existence
const exists = (p) => fs.pathExists(p);
// Build checks: an array of { makePath: (dir) => string, weight }
const checks = [];
const add = (rel, weight) => {
const makePath = (d) => (Array.isArray(rel) ? path.join(d, ...rel) : path.join(d, rel));
checks.push({ makePath, weight });
};
// Highest priority: explicit sentinel markers
add('.project-root', 110);
add('.workspace-root', 110);
add('.repo-root', 110);
// Highest priority: VCS roots
add('.git', 100);
add('.hg', 95);
add('.svn', 95);
// Monorepo/workspace indicators
add('pnpm-workspace.yaml', 90);
add('lerna.json', 90);
add('turbo.json', 90);
add('nx.json', 90);
add('rush.json', 90);
add('go.work', 90);
add('WORKSPACE', 90);
add('WORKSPACE.bazel', 90);
add('MODULE.bazel', 90);
add('pants.toml', 90);
// Lockfiles and package-manager/top-level locks
add('yarn.lock', 85);
add('pnpm-lock.yaml', 85);
add('package-lock.json', 85);
add('bun.lockb', 85);
add('Cargo.lock', 85);
add('composer.lock', 85);
add('poetry.lock', 85);
add('Pipfile.lock', 85);
add('Gemfile.lock', 85);
// Build-system root indicators
add('settings.gradle', 80);
add('settings.gradle.kts', 80);
add('gradlew', 80);
add('pom.xml', 80);
add('build.sbt', 80);
add(['project', 'build.properties'], 80);
// Language/project config markers
add('deno.json', 75);
add('deno.jsonc', 75);
add('pyproject.toml', 75);
add('Pipfile', 75);
add('requirements.txt', 75);
add('go.mod', 75);
add('Cargo.toml', 75);
add('composer.json', 75);
add('mix.exs', 75);
add('Gemfile', 75);
add('CMakeLists.txt', 75);
add('stack.yaml', 75);
add('cabal.project', 75);
add('rebar.config', 75);
add('pubspec.yaml', 75);
add('flake.nix', 75);
add('shell.nix', 75);
add('default.nix', 75);
add('.tool-versions', 75);
add('package.json', 74); // generic Node project (lower than lockfiles/workspaces)
// Changesets
add(['.changeset', 'config.json'], 70);
add('.changeset', 70);
// Custom markers via env (comma-separated names)
if (process.env.PROJECT_ROOT_MARKERS) {
for (const name of process.env.PROJECT_ROOT_MARKERS.split(',')
.map((s) => s.trim())
.filter(Boolean)) {
add(name, 72);
}
}
/** Check for package.json with "workspaces" */
const hasWorkspacePackageJson = async (d) => {
const pkgPath = path.join(d, 'package.json');
if (!(await exists(pkgPath))) return false;
try {
const raw = await fs.readFile(pkgPath, 'utf8');
const pkg = JSON.parse(raw);
return Boolean(pkg && pkg.workspaces);
} catch {
return false;
}
};
let best = null; // { dir, weight }
// Try to detect VCS toplevel once up-front; treat as authoritative slightly above .git marker
const vcsTop = await _detectVcsTopLevel(dir);
if (vcsTop) {
best = { dir: vcsTop, weight: 101 };
}
while (true) {
// Special check: package.json with "workspaces"
if ((await hasWorkspacePackageJson(dir)) && (!best || 90 >= best.weight)) best = { dir, weight: 90 };
// Evaluate all other checks in parallel
const results = await Promise.all(checks.map(async (c) => ({ c, ok: await exists(c.makePath(dir)) })));
for (const { c, ok } of results) {
if (!ok) continue;
if (!best || c.weight >= best.weight) {
best = { dir, weight: c.weight };
}
}
if (dir === fsRoot) break;
dir = path.dirname(dir);
}
const out = best ? best.dir : null;
_cache.set(startKey, out);
return out;
} catch {
return null;
}
}
module.exports = { findProjectRoot };
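A minimal usage sketch (hypothetical caller; the require path and the custom marker name are illustrative, and findProjectRoot reads PROJECT_ROOT_MARKERS at call time):

const process = require('node:process');
const { findProjectRoot } = require('./projectRoot.js');

(async () => {
  // Optional: honor a project-specific sentinel alongside the built-in markers.
  process.env.PROJECT_ROOT_MARKERS = 'MY_ROOT';
  const root = await findProjectRoot(process.cwd());
  console.log(root || 'no project root found');
})();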

View File

@ -1,44 +0,0 @@
const os = require('node:os');
const path = require('node:path');
const readline = require('node:readline');
const process = require('node:process');
function expandHome(p) {
if (!p) return p;
if (p.startsWith('~')) return path.join(os.homedir(), p.slice(1));
return p;
}
function createRl() {
return readline.createInterface({
input: process.stdin,
output: process.stdout,
});
}
function promptQuestion(question) {
return new Promise((resolve) => {
const rl = createRl();
rl.question(question, (answer) => {
rl.close();
resolve(answer);
});
});
}
async function promptYesNo(question, defaultYes = true) {
const suffix = defaultYes ? ' [Y/n] ' : ' [y/N] ';
const ans = (await promptQuestion(`${question}${suffix}`)).trim().toLowerCase();
if (!ans) return defaultYes;
if (['y', 'yes'].includes(ans)) return true;
if (['n', 'no'].includes(ans)) return false;
return promptYesNo(question, defaultYes);
}
async function promptPath(question, defaultValue) {
const prompt = `${question}${defaultValue ? ` (default: ${defaultValue})` : ''}: `;
const ans = (await promptQuestion(prompt)).trim();
return expandHome(ans || defaultValue);
}
module.exports = { promptYesNo, promptPath, promptQuestion, expandHome };
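A short sketch of how these helpers compose (hypothetical flow; the require path, prompt text, and default path are illustrative):

const { promptYesNo, promptPath } = require('./prompts.js');

(async () => {
  const proceed = await promptYesNo('Flatten this project?', true);
  if (!proceed) return;
  // expandHome is applied internally, so a "~/..." answer resolves to the home directory.
  const out = await promptPath('Output file', '~/flattened.xml');
  console.log(`Writing to ${out}`);
})();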

View File

@ -1,368 +0,0 @@
'use strict';
const fs = require('node:fs/promises');
const path = require('node:path');
const zlib = require('node:zlib');
const { Buffer } = require('node:buffer');
const crypto = require('node:crypto');
const cp = require('node:child_process');
const KB = 1024;
const MB = 1024 * KB;
const formatSize = (bytes) => {
if (bytes < 1024) return `${bytes} B`;
if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`;
if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)} MB`;
return `${(bytes / (1024 * 1024 * 1024)).toFixed(2)} GB`;
};
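// Nearest-rank percentile over an ascending-sorted array: take the value at
// index ceil(p/100 * n) - 1, clamped to [0, n - 1]; empty input yields 0.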
const percentile = (sorted, p) => {
if (sorted.length === 0) return 0;
const idx = Math.min(sorted.length - 1, Math.max(0, Math.ceil((p / 100) * sorted.length) - 1));
return sorted[idx];
};
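// Bounded concurrency via fixed batches: start `concurrency` tasks, await the
// whole batch, then begin the next (simple chunking, not a sliding-window pool).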
async function processWithLimit(items, fn, concurrency = 64) {
for (let i = 0; i < items.length; i += concurrency) {
await Promise.all(items.slice(i, i + concurrency).map(fn));
}
}
async function enrichAllFiles(textFiles, binaryFiles) {
/** @type {Array<{ path: string; absolutePath: string; size: number; lines?: number; isBinary: boolean; ext: string; dir: string; depth: number; hidden: boolean; mtimeMs: number; isSymlink: boolean; }>} */
const allFiles = [];
async function enrich(file, isBinary) {
const ext = (path.extname(file.path) || '').toLowerCase();
const dir = path.dirname(file.path) || '.';
const depth = file.path.split(path.sep).filter(Boolean).length;
const hidden = file.path.split(path.sep).some((seg) => seg.startsWith('.'));
let mtimeMs = 0;
let isSymlink = false;
try {
const lst = await fs.lstat(file.absolutePath);
mtimeMs = lst.mtimeMs;
isSymlink = lst.isSymbolicLink();
} catch {
/* ignore lstat errors during enrichment */
}
allFiles.push({
path: file.path,
absolutePath: file.absolutePath,
size: file.size || 0,
lines: file.lines,
isBinary,
ext,
dir,
depth,
hidden,
mtimeMs,
isSymlink,
});
}
await processWithLimit(textFiles, (f) => enrich(f, false));
await processWithLimit(binaryFiles, (f) => enrich(f, true));
return allFiles;
}
function buildHistogram(allFiles) {
const buckets = [
[1 * KB, '01KB'],
[10 * KB, '110KB'],
[100 * KB, '10100KB'],
[1 * MB, '100KB1MB'],
[10 * MB, '110MB'],
[100 * MB, '10100MB'],
[Infinity, '>=100MB'],
];
const histogram = buckets.map(([_, label]) => ({ label, count: 0, bytes: 0 }));
for (const f of allFiles) {
for (const [i, bucket] of buckets.entries()) {
if (f.size < bucket[0]) {
histogram[i].count++;
histogram[i].bytes += f.size;
break;
}
}
}
return histogram;
}
function aggregateByExtension(allFiles) {
const byExtension = new Map();
for (const f of allFiles) {
const key = f.ext || '<none>';
const v = byExtension.get(key) || { ext: key, count: 0, bytes: 0 };
v.count++;
v.bytes += f.size;
byExtension.set(key, v);
}
return [...byExtension.values()].sort((a, b) => b.bytes - a.bytes);
}
function aggregateByDirectory(allFiles) {
const byDirectory = new Map();
function addDirBytes(dir, bytes) {
const v = byDirectory.get(dir) || { dir, count: 0, bytes: 0 };
v.count++;
v.bytes += bytes;
byDirectory.set(dir, v);
}
for (const f of allFiles) {
const parts = f.dir === '.' ? [] : f.dir.split(path.sep);
let acc = '';
for (let i = 0; i < parts.length; i++) {
acc = i === 0 ? parts[0] : acc + path.sep + parts[i];
addDirBytes(acc, f.size);
}
if (parts.length === 0) addDirBytes('.', f.size);
}
return [...byDirectory.values()].sort((a, b) => b.bytes - a.bytes);
}
function computeDepthAndLongest(allFiles) {
const depthDistribution = new Map();
for (const f of allFiles) {
depthDistribution.set(f.depth, (depthDistribution.get(f.depth) || 0) + 1);
}
const longestPaths = [...allFiles]
.sort((a, b) => b.path.length - a.path.length)
.slice(0, 25)
.map((f) => ({ path: f.path, length: f.path.length, size: f.size }));
const depthDist = [...depthDistribution.entries()].sort((a, b) => a[0] - b[0]).map(([depth, count]) => ({ depth, count }));
return { depthDist, longestPaths };
}
function computeTemporal(allFiles, nowMs) {
let oldest = null,
newest = null;
const ageBuckets = [
{ label: '> 1 year', minDays: 365, maxDays: Infinity, count: 0, bytes: 0 },
{ label: '6-12 months', minDays: 180, maxDays: 365, count: 0, bytes: 0 },
{ label: '1-6 months', minDays: 30, maxDays: 180, count: 0, bytes: 0 },
{ label: '7-30 days', minDays: 7, maxDays: 30, count: 0, bytes: 0 },
{ label: '1-7 days', minDays: 1, maxDays: 7, count: 0, bytes: 0 },
{ label: '< 1 day', minDays: 0, maxDays: 1, count: 0, bytes: 0 },
];
for (const f of allFiles) {
const ageDays = Math.max(0, (nowMs - (f.mtimeMs || nowMs)) / (24 * 60 * 60 * 1000));
for (const b of ageBuckets) {
if (ageDays >= b.minDays && ageDays < b.maxDays) {
b.count++;
b.bytes += f.size;
break;
}
}
if (!oldest || f.mtimeMs < oldest.mtimeMs) oldest = f;
if (!newest || f.mtimeMs > newest.mtimeMs) newest = f;
}
return {
oldest: oldest ? { path: oldest.path, mtime: oldest.mtimeMs ? new Date(oldest.mtimeMs).toISOString() : null } : null,
newest: newest ? { path: newest.path, mtime: newest.mtimeMs ? new Date(newest.mtimeMs).toISOString() : null } : null,
ageBuckets,
};
}
function computeQuality(allFiles, textFiles) {
const zeroByteFiles = allFiles.filter((f) => f.size === 0).length;
const emptyTextFiles = textFiles.filter((f) => (f.size || 0) === 0 || (f.lines || 0) === 0).length;
const hiddenFiles = allFiles.filter((f) => f.hidden).length;
const symlinks = allFiles.filter((f) => f.isSymlink).length;
const largeThreshold = 50 * MB;
const suspiciousThreshold = 100 * MB;
const largeFilesCount = allFiles.filter((f) => f.size >= largeThreshold).length;
const suspiciousLargeFilesCount = allFiles.filter((f) => f.size >= suspiciousThreshold).length;
return {
zeroByteFiles,
emptyTextFiles,
hiddenFiles,
symlinks,
largeFilesCount,
suspiciousLargeFilesCount,
largeThreshold,
};
}
function computeDuplicates(allFiles, textFiles) {
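// Strategy: bucket files by byte size first; text files in a bucket are confirmed
// duplicates via a SHA-1 of their already-loaded content, while same-size binary
// files are only flagged as candidates (their bytes are not read or hashed here).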
const duplicatesBySize = new Map();
for (const f of allFiles) {
const key = String(f.size);
const arr = duplicatesBySize.get(key) || [];
arr.push(f);
duplicatesBySize.set(key, arr);
}
const duplicateCandidates = [];
for (const [sizeKey, arr] of duplicatesBySize.entries()) {
if (arr.length < 2) continue;
const textGroup = arr.filter((f) => !f.isBinary);
const otherGroup = arr.filter((f) => f.isBinary);
const contentHashGroups = new Map();
for (const tf of textGroup) {
try {
const src = textFiles.find((x) => x.absolutePath === tf.absolutePath);
const content = src ? src.content : '';
const h = crypto.createHash('sha1').update(content).digest('hex');
const g = contentHashGroups.get(h) || [];
g.push(tf);
contentHashGroups.set(h, g);
} catch {
/* ignore hashing errors for duplicate detection */
}
}
for (const [_h, g] of contentHashGroups.entries()) {
if (g.length > 1)
duplicateCandidates.push({
reason: 'same-size+text-hash',
size: Number(sizeKey),
count: g.length,
files: g.map((f) => f.path),
});
}
if (otherGroup.length > 1) {
duplicateCandidates.push({
reason: 'same-size',
size: Number(sizeKey),
count: otherGroup.length,
files: otherGroup.map((f) => f.path),
});
}
}
return duplicateCandidates;
}
function estimateCompressibility(textFiles) {
let compSampleBytes = 0;
let compCompressedBytes = 0;
for (const tf of textFiles) {
try {
const sampleLen = Math.min(256 * 1024, tf.size || 0);
if (sampleLen <= 0) continue;
const sample = tf.content.slice(0, sampleLen);
const gz = zlib.gzipSync(Buffer.from(sample, 'utf8'));
compSampleBytes += sampleLen;
compCompressedBytes += gz.length;
} catch {
/* ignore compression errors during sampling */
}
}
return compSampleBytes > 0 ? compCompressedBytes / compSampleBytes : null;
}
function computeGitInfo(allFiles, rootDir, largeThreshold) {
const info = {
isRepo: false,
trackedCount: 0,
trackedBytes: 0,
untrackedCount: 0,
untrackedBytes: 0,
lfsCandidates: [],
};
try {
if (!rootDir) return info;
const top = cp
.execFileSync('git', ['rev-parse', '--show-toplevel'], {
cwd: rootDir,
stdio: ['ignore', 'pipe', 'ignore'],
})
.toString()
.trim();
if (!top) return info;
info.isRepo = true;
const out = cp.execFileSync('git', ['ls-files', '-z'], {
cwd: rootDir,
stdio: ['ignore', 'pipe', 'ignore'],
});
const tracked = new Set(out.toString().split('\0').filter(Boolean));
let trackedBytes = 0,
trackedCount = 0,
untrackedBytes = 0,
untrackedCount = 0;
const lfsCandidates = [];
for (const f of allFiles) {
const isTracked = tracked.has(f.path);
if (isTracked) {
trackedCount++;
trackedBytes += f.size;
if (f.size >= largeThreshold) lfsCandidates.push({ path: f.path, size: f.size });
} else {
untrackedCount++;
untrackedBytes += f.size;
}
}
info.trackedCount = trackedCount;
info.trackedBytes = trackedBytes;
info.untrackedCount = untrackedCount;
info.untrackedBytes = untrackedBytes;
info.lfsCandidates = lfsCandidates.sort((a, b) => b.size - a.size).slice(0, 50);
} catch {
/* git not available or not a repo, ignore */
}
return info;
}
function computeLargestFiles(allFiles, totalBytes) {
const toPct = (num, den) => (den === 0 ? 0 : (num / den) * 100);
return [...allFiles]
.sort((a, b) => b.size - a.size)
.slice(0, 50)
.map((f) => ({
path: f.path,
size: f.size,
sizeFormatted: formatSize(f.size),
percentOfTotal: toPct(f.size, totalBytes),
ext: f.ext || '',
isBinary: f.isBinary,
mtime: f.mtimeMs ? new Date(f.mtimeMs).toISOString() : null,
}));
}
function mdTable(rows, headers) {
const header = `| ${headers.join(' | ')} |`;
const sep = `| ${headers.map(() => '---').join(' | ')} |`;
const body = rows.map((r) => `| ${r.join(' | ')} |`).join('\n');
return `${header}\n${sep}\n${body}`;
}
function buildMarkdownReport(largestFiles, byExtensionArr, byDirectoryArr, totalBytes) {
const toPct = (num, den) => (den === 0 ? 0 : (num / den) * 100);
const md = [];
md.push(
'\n### Top Largest Files (Top 50)\n',
mdTable(
largestFiles.map((f) => [f.path, f.sizeFormatted, `${f.percentOfTotal.toFixed(2)}%`, f.ext || '', f.isBinary ? 'binary' : 'text']),
['Path', 'Size', '% of total', 'Ext', 'Type'],
),
'\n\n### Top Extensions by Bytes (Top 20)\n',
);
const topExtRows = byExtensionArr
.slice(0, 20)
.map((e) => [e.ext, String(e.count), formatSize(e.bytes), `${toPct(e.bytes, totalBytes).toFixed(2)}%`]);
md.push(mdTable(topExtRows, ['Ext', 'Count', 'Bytes', '% of total']), '\n\n### Top Directories by Bytes (Top 20)\n');
const topDirRows = byDirectoryArr
.slice(0, 20)
.map((d) => [d.dir, String(d.count), formatSize(d.bytes), `${toPct(d.bytes, totalBytes).toFixed(2)}%`]);
md.push(mdTable(topDirRows, ['Directory', 'Files', 'Bytes', '% of total']));
return md.join('\n');
}
module.exports = {
KB,
MB,
formatSize,
percentile,
processWithLimit,
enrichAllFiles,
buildHistogram,
aggregateByExtension,
aggregateByDirectory,
computeDepthAndLongest,
computeTemporal,
computeQuality,
computeDuplicates,
estimateCompressibility,
computeGitInfo,
computeLargestFiles,
buildMarkdownReport,
};

View File

@ -1,75 +0,0 @@
const H = require('./stats.helpers.js');
async function calculateStatistics(aggregatedContent, xmlFileSize, rootDir) {
const { textFiles, binaryFiles, errors } = aggregatedContent;
const totalLines = textFiles.reduce((sum, f) => sum + (f.lines || 0), 0);
const estimatedTokens = Math.ceil(xmlFileSize / 4);
// Build enriched file list
const allFiles = await H.enrichAllFiles(textFiles, binaryFiles);
const totalBytes = allFiles.reduce((s, f) => s + f.size, 0);
const sizes = allFiles.map((f) => f.size).sort((a, b) => a - b);
const avgSize = sizes.length > 0 ? totalBytes / sizes.length : 0;
const medianSize = sizes.length > 0 ? H.percentile(sizes, 50) : 0;
const p90 = H.percentile(sizes, 90);
const p95 = H.percentile(sizes, 95);
const p99 = H.percentile(sizes, 99);
const histogram = H.buildHistogram(allFiles);
const byExtensionArr = H.aggregateByExtension(allFiles);
const byDirectoryArr = H.aggregateByDirectory(allFiles);
const { depthDist, longestPaths } = H.computeDepthAndLongest(allFiles);
const temporal = H.computeTemporal(allFiles, Date.now());
const quality = H.computeQuality(allFiles, textFiles);
const duplicateCandidates = H.computeDuplicates(allFiles, textFiles);
const compressibilityRatio = H.estimateCompressibility(textFiles);
const git = H.computeGitInfo(allFiles, rootDir, quality.largeThreshold);
const largestFiles = H.computeLargestFiles(allFiles, totalBytes);
const markdownReport = H.buildMarkdownReport(largestFiles, byExtensionArr, byDirectoryArr, totalBytes);
return {
// Back-compat summary
totalFiles: textFiles.length + binaryFiles.length,
textFiles: textFiles.length,
binaryFiles: binaryFiles.length,
errorFiles: errors.length,
totalSize: H.formatSize(totalBytes),
totalBytes,
xmlSize: H.formatSize(xmlFileSize),
totalLines,
estimatedTokens: estimatedTokens.toLocaleString(),
// Distributions and percentiles
avgFileSize: avgSize,
medianFileSize: medianSize,
p90,
p95,
p99,
histogram,
// Extensions and directories
byExtension: byExtensionArr,
byDirectory: byDirectoryArr,
depthDistribution: depthDist,
longestPaths,
// Temporal
temporal,
// Quality signals
quality,
// Duplicates and compressibility
duplicateCandidates,
compressibilityRatio,
// Git-aware
git,
largestFiles,
markdownReport,
};
}
module.exports = { calculateStatistics };
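A minimal sketch of the expected call shape (the aggregatedContent layout is inferred from the destructuring above; the require path, file entries, and sizes are illustrative):

const process = require('node:process');
const { calculateStatistics } = require('./stats.js');

(async () => {
  const aggregatedContent = {
    textFiles: [{ path: 'src/a.js', absolutePath: '/tmp/proj/src/a.js', size: 12, lines: 1, content: 'hello world\n' }],
    binaryFiles: [],
    errors: [],
  };
  const stats = await calculateStatistics(aggregatedContent, 1024, process.cwd());
  console.log(stats.totalFiles, stats.totalSize, stats.estimatedTokens);
})();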

View File

@ -1,409 +0,0 @@
/* deno-lint-ignore-file */
/*
Automatic test matrix for project root detection.
Creates temporary fixtures for various ecosystems and validates findProjectRoot().
No external options or flags required. Safe to run multiple times.
*/
const os = require('node:os');
const path = require('node:path');
const fs = require('fs-extra');
const { promisify } = require('node:util');
const { execFile } = require('node:child_process');
const process = require('node:process');
const execFileAsync = promisify(execFile);
const { findProjectRoot } = require('./projectRoot.js');
async function cmdAvailable(cmd) {
try {
await execFileAsync(cmd, ['--version'], { timeout: 500, windowsHide: true });
return true;
} catch {
return false;
}
}
async function testSvnMarker() {
const root = await mkTmpDir('svn');
const nested = path.join(root, 'proj', 'code');
await fs.ensureDir(nested);
await fs.ensureDir(path.join(root, '.svn'));
const found = await findProjectRoot(nested);
assertEqual(found, root, '.svn marker should be detected');
return { name: 'svn-marker', ok: true };
}
async function testSymlinkStart() {
const root = await mkTmpDir('symlink-start');
const nested = path.join(root, 'a', 'b');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, '.project-root'), '\n');
const tmp = await mkTmpDir('symlink-tmp');
const link = path.join(tmp, 'link-to-b');
try {
await fs.symlink(nested, link);
} catch {
// symlink may not be permitted on some systems; skip
return { name: 'symlink-start', ok: true, skipped: true };
}
const found = await findProjectRoot(link);
assertEqual(found, root, 'should resolve symlinked start to real root');
return { name: 'symlink-start', ok: true };
}
async function testSubmoduleLikeInnerGitFile() {
const root = await mkTmpDir('submodule-like');
const mid = path.join(root, 'mid');
const leaf = path.join(mid, 'leaf');
await fs.ensureDir(leaf);
// outer repo
await fs.ensureDir(path.join(root, '.git'));
// inner submodule-like .git file
await fs.writeFile(path.join(mid, '.git'), 'gitdir: ../.git/modules/mid\n');
const found = await findProjectRoot(leaf);
assertEqual(found, root, 'outermost .git should win on tie weight');
return { name: 'submodule-like-gitfile', ok: true };
}
async function mkTmpDir(name) {
const base = await fs.realpath(os.tmpdir());
const dir = await fs.mkdtemp(path.join(base, `flattener-${name}-`));
return dir;
}
function assertEqual(actual, expected, msg) {
if (actual !== expected) {
throw new Error(`${msg}: expected="${expected}" actual="${actual}"`);
}
}
async function testSentinel() {
const root = await mkTmpDir('sentinel');
const nested = path.join(root, 'a', 'b', 'c');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, '.project-root'), '\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'sentinel .project-root should win');
return { name: 'sentinel', ok: true };
}
async function testOtherSentinels() {
const root = await mkTmpDir('other-sentinels');
const nested = path.join(root, 'x', 'y');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, '.workspace-root'), '\n');
const found1 = await findProjectRoot(nested);
assertEqual(found1, root, 'sentinel .workspace-root should win');
await fs.remove(path.join(root, '.workspace-root'));
await fs.writeFile(path.join(root, '.repo-root'), '\n');
const found2 = await findProjectRoot(nested);
assertEqual(found2, root, 'sentinel .repo-root should win');
return { name: 'other-sentinels', ok: true };
}
async function testGitCliAndMarker() {
const hasGit = await cmdAvailable('git');
if (!hasGit) return { name: 'git-cli', ok: true, skipped: true };
const root = await mkTmpDir('git');
const nested = path.join(root, 'pkg', 'src');
await fs.ensureDir(nested);
await execFileAsync('git', ['init'], { cwd: root, timeout: 2000 });
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'git toplevel should be detected');
return { name: 'git-cli', ok: true };
}
async function testHgMarkerOrCli() {
// Prefer simple marker test to avoid requiring Mercurial install
const root = await mkTmpDir('hg');
const nested = path.join(root, 'lib');
await fs.ensureDir(nested);
await fs.ensureDir(path.join(root, '.hg'));
const found = await findProjectRoot(nested);
await assertEqual(found, root, '.hg marker should be detected');
return { name: 'hg-marker', ok: true };
}
async function testWorkspacePnpm() {
const root = await mkTmpDir('pnpm-workspace');
const pkgA = path.join(root, 'packages', 'a');
await fs.ensureDir(pkgA);
await fs.writeFile(path.join(root, 'pnpm-workspace.yaml'), 'packages:\n - packages/*\n');
const found = await findProjectRoot(pkgA);
await assertEqual(found, root, 'pnpm-workspace.yaml should be detected');
return { name: 'pnpm-workspace', ok: true };
}
async function testPackageJsonWorkspaces() {
const root = await mkTmpDir('package-workspaces');
const pkgA = path.join(root, 'packages', 'a');
await fs.ensureDir(pkgA);
await fs.writeJson(path.join(root, 'package.json'), { private: true, workspaces: ['packages/*'] }, { spaces: 2 });
const found = await findProjectRoot(pkgA);
await assertEqual(found, root, 'package.json workspaces should be detected');
return { name: 'package.json-workspaces', ok: true };
}
async function testLockfiles() {
const root = await mkTmpDir('lockfiles');
const nested = path.join(root, 'src');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'yarn.lock'), '\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'yarn.lock should be detected');
return { name: 'lockfiles', ok: true };
}
async function testLanguageConfigs() {
const root = await mkTmpDir('lang-configs');
const nested = path.join(root, 'x', 'y');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'pyproject.toml'), "[tool.poetry]\nname='tmp'\n");
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'pyproject.toml should be detected');
return { name: 'language-configs', ok: true };
}
async function testPreferOuterOnTie() {
const root = await mkTmpDir('tie');
const mid = path.join(root, 'mid');
const leaf = path.join(mid, 'leaf');
await fs.ensureDir(leaf);
// same weight marker at two levels
await fs.writeFile(path.join(root, 'requirements.txt'), '\n');
await fs.writeFile(path.join(mid, 'requirements.txt'), '\n');
const found = await findProjectRoot(leaf);
await assertEqual(found, root, 'outermost directory should win on equal weight');
return { name: 'prefer-outermost-tie', ok: true };
}
// Additional coverage: Bazel, Nx/Turbo/Rush, Go workspaces, Deno, Java/Scala, PHP, Rust, Nix, Changesets, env markers,
// and priority interaction between package.json and lockfiles.
async function testBazelWorkspace() {
const root = await mkTmpDir('bazel');
const nested = path.join(root, 'apps', 'svc');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'WORKSPACE'), 'workspace(name="tmp")\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'Bazel WORKSPACE should be detected');
return { name: 'bazel-workspace', ok: true };
}
async function testNx() {
const root = await mkTmpDir('nx');
const nested = path.join(root, 'apps', 'web');
await fs.ensureDir(nested);
await fs.writeJson(path.join(root, 'nx.json'), { npmScope: 'tmp' }, { spaces: 2 });
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'nx.json should be detected');
return { name: 'nx', ok: true };
}
async function testTurbo() {
const root = await mkTmpDir('turbo');
const nested = path.join(root, 'packages', 'x');
await fs.ensureDir(nested);
await fs.writeJson(path.join(root, 'turbo.json'), { pipeline: {} }, { spaces: 2 });
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'turbo.json should be detected');
return { name: 'turbo', ok: true };
}
async function testRush() {
const root = await mkTmpDir('rush');
const nested = path.join(root, 'apps', 'a');
await fs.ensureDir(nested);
await fs.writeJson(path.join(root, 'rush.json'), { projectFolderMinDepth: 1 }, { spaces: 2 });
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'rush.json should be detected');
return { name: 'rush', ok: true };
}
async function testGoWorkAndMod() {
const root = await mkTmpDir('gowork');
const mod = path.join(root, 'modA');
const nested = path.join(mod, 'pkg');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'go.work'), 'go 1.22\nuse ./modA\n');
await fs.writeFile(path.join(mod, 'go.mod'), 'module example.com/a\ngo 1.22\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'go.work should define the workspace root');
return { name: 'go-work', ok: true };
}
async function testDenoJson() {
const root = await mkTmpDir('deno');
const nested = path.join(root, 'src');
await fs.ensureDir(nested);
await fs.writeJson(path.join(root, 'deno.json'), { tasks: {} }, { spaces: 2 });
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'deno.json should be detected');
return { name: 'deno-json', ok: true };
}
async function testGradleSettings() {
const root = await mkTmpDir('gradle');
const nested = path.join(root, 'app');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'settings.gradle'), "rootProject.name='tmp'\n");
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'settings.gradle should be detected');
return { name: 'gradle-settings', ok: true };
}
async function testMavenPom() {
const root = await mkTmpDir('maven');
const nested = path.join(root, 'module');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'pom.xml'), '<project></project>\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'pom.xml should be detected');
return { name: 'maven-pom', ok: true };
}
async function testSbtBuild() {
const root = await mkTmpDir('sbt');
const nested = path.join(root, 'sub');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'build.sbt'), 'name := "tmp"\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'build.sbt should be detected');
return { name: 'sbt-build', ok: true };
}
async function testComposer() {
const root = await mkTmpDir('composer');
const nested = path.join(root, 'src');
await fs.ensureDir(nested);
await fs.writeJson(path.join(root, 'composer.json'), { name: 'tmp/pkg' }, { spaces: 2 });
await fs.writeFile(path.join(root, 'composer.lock'), '{}\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'composer.{json,lock} should be detected');
return { name: 'composer', ok: true };
}
async function testCargo() {
const root = await mkTmpDir('cargo');
const nested = path.join(root, 'src');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'Cargo.toml'), "[package]\nname='tmp'\nversion='0.0.0'\n");
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'Cargo.toml should be detected');
return { name: 'cargo', ok: true };
}
async function testNixFlake() {
const root = await mkTmpDir('nix');
const nested = path.join(root, 'work');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'flake.nix'), '{ }\n');
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'flake.nix should be detected');
return { name: 'nix-flake', ok: true };
}
async function testChangesetConfig() {
const root = await mkTmpDir('changeset');
const nested = path.join(root, 'pkg');
await fs.ensureDir(nested);
await fs.ensureDir(path.join(root, '.changeset'));
await fs.writeJson(
path.join(root, '.changeset', 'config.json'),
{ $schema: 'https://unpkg.com/@changesets/config@2.3.1/schema.json' },
{ spaces: 2 },
);
const found = await findProjectRoot(nested);
await assertEqual(found, root, '.changeset/config.json should be detected');
return { name: 'changesets', ok: true };
}
async function testEnvCustomMarker() {
const root = await mkTmpDir('env-marker');
const nested = path.join(root, 'dir');
await fs.ensureDir(nested);
await fs.writeFile(path.join(root, 'MY_ROOT'), '\n');
const prev = process.env.PROJECT_ROOT_MARKERS;
process.env.PROJECT_ROOT_MARKERS = 'MY_ROOT';
try {
const found = await findProjectRoot(nested);
await assertEqual(found, root, 'custom env marker should be honored');
} finally {
if (prev === undefined) delete process.env.PROJECT_ROOT_MARKERS;
else process.env.PROJECT_ROOT_MARKERS = prev;
}
return { name: 'env-custom-marker', ok: true };
}
async function testPackageLowPriorityVsLock() {
const root = await mkTmpDir('pkg-vs-lock');
const nested = path.join(root, 'nested');
await fs.ensureDir(path.join(nested, 'deep'));
await fs.writeJson(path.join(nested, 'package.json'), { name: 'nested' }, { spaces: 2 });
await fs.writeFile(path.join(root, 'yarn.lock'), '\n');
const found = await findProjectRoot(path.join(nested, 'deep'));
await assertEqual(found, root, 'lockfile at root should outrank nested package.json');
return { name: 'package-vs-lock-priority', ok: true };
}
async function run() {
const tests = [
testSentinel,
testOtherSentinels,
testGitCliAndMarker,
testHgMarkerOrCli,
testWorkspacePnpm,
testPackageJsonWorkspaces,
testLockfiles,
testLanguageConfigs,
testPreferOuterOnTie,
testBazelWorkspace,
testNx,
testTurbo,
testRush,
testGoWorkAndMod,
testDenoJson,
testGradleSettings,
testMavenPom,
testSbtBuild,
testComposer,
testCargo,
testNixFlake,
testChangesetConfig,
testEnvCustomMarker,
testPackageLowPriorityVsLock,
testSvnMarker,
testSymlinkStart,
testSubmoduleLikeInnerGitFile,
];
const results = [];
for (const t of tests) {
try {
const r = await t();
results.push({ ...r, ok: true });
console.log(`${r.name}${r.skipped ? ' (skipped)' : ''}`);
} catch (error) {
console.error(`${t.name}:`, error && error.message ? error.message : error);
results.push({ name: t.name, ok: false, error: String(error) });
}
}
const failed = results.filter((r) => !r.ok);
console.log('\nSummary:');
for (const r of results) {
console.log(`- ${r.name}: ${r.ok ? 'ok' : 'FAIL'}${r.skipped ? ' (skipped)' : ''}`);
}
if (failed.length > 0) {
process.exitCode = 1;
}
}
run().catch((error) => {
console.error('Fatal error:', error);
process.exit(1);
});

View File

@ -1,82 +0,0 @@
const fs = require('fs-extra');
const { escapeXml } = require('../lib/xml-utils');
function indentFileContent(content) {
if (typeof content !== 'string') {
return String(content);
}
return content.split('\n').map((line) => ` ${line}`).join('\n');
}
function generateXMLOutput(aggregatedContent, outputPath) {
const { textFiles } = aggregatedContent;
const writeStream = fs.createWriteStream(outputPath, { encoding: 'utf8' });
return new Promise((resolve, reject) => {
writeStream.on('error', reject);
writeStream.on('finish', resolve);
writeStream.write('<?xml version="1.0" encoding="UTF-8"?>\n');
writeStream.write('<files>\n');
// Sort files by path for deterministic order
const filesSorted = [...textFiles].sort((a, b) => a.path.localeCompare(b.path));
let index = 0;
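// Write one file per turn of the event loop: each step schedules the next with
// setTimeout(0), which avoids unbounded recursion and lets the stream drain.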
const writeNext = () => {
if (index >= filesSorted.length) {
writeStream.write('</files>\n');
writeStream.end();
return;
}
const file = filesSorted[index++];
const p = escapeXml(file.path);
const content = typeof file.content === 'string' ? file.content : '';
if (content.length === 0) {
writeStream.write(`\t<file path='${p}'/>\n`);
setTimeout(writeNext, 0);
return;
}
const needsCdata = content.includes('<') || content.includes('&') || content.includes(']]>');
if (needsCdata) {
// Open tag and CDATA on their own line with tab indent; content lines indented with two tabs
writeStream.write(`\t<file path='${p}'><![CDATA[\n`);
// Safely split any occurrences of "]]>" inside content, trim trailing newlines, indent each line with two tabs
const safe = content.replaceAll(']]>', ']]]]><![CDATA[>');
const trimmed = safe.replace(/[\r\n]+$/, '');
const indented =
trimmed.length > 0
? trimmed
.split('\n')
.map((line) => `\t\t${line}`)
.join('\n')
: '';
writeStream.write(indented);
// Close CDATA and attach closing tag directly after the last content line
writeStream.write(']]></file>\n');
} else {
// Write opening tag then newline; indent content with two tabs; attach closing tag directly after last content char
writeStream.write(`\t<file path='${p}'>\n`);
const trimmed = content.replace(/[\r\n]+$/, '');
const indented =
trimmed.length > 0
? trimmed
.split('\n')
.map((line) => `\t\t${line}`)
.join('\n')
: '';
writeStream.write(indented);
writeStream.write(`</file>\n`);
}
setTimeout(writeNext, 0);
};
writeNext();
});
}
module.exports = { generateXMLOutput };
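A small usage sketch (hypothetical require path and inputs; the returned promise settles when the output stream finishes):

const { generateXMLOutput } = require('./xml-output.js');

(async () => {
  const aggregatedContent = {
    textFiles: [{ path: 'src/a.js', content: 'console.log("hi");\n' }],
  };
  await generateXMLOutput(aggregatedContent, '/tmp/flattened.xml');
  console.log('XML written');
})();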

View File

@ -210,7 +210,6 @@ function buildAgentSchema(expectedModule) {
critical_actions: z.array(createNonEmptyString('agent.critical_actions[]')).optional(),
menu: z.array(buildMenuItemSchema()).min(1, { message: 'agent.menu must include at least one entry' }),
prompts: z.array(buildPromptSchema()).optional(),
webskip: z.boolean().optional(),
discussion: z.boolean().optional(),
conversational_knowledge: z.array(z.object({}).passthrough()).min(1).optional(),
})