Compare commits
11 Commits
be5e9b193c
...
68a8f5569e
| Author | SHA1 | Date |
|---|---|---|
|
|
68a8f5569e | |
|
|
9b9f43fcb9 | |
|
|
77a53a20ed | |
|
|
5d89298fe8 | |
|
|
421a811e87 | |
|
|
c9c3d31d3a | |
|
|
ec8ab0c638 | |
|
|
aae7923d5d | |
|
|
3734607994 | |
|
|
e29a1273e1 | |
|
|
5e841f9cac |
|
|
@ -81,6 +81,21 @@ export default [
|
|||
},
|
||||
},
|
||||
|
||||
// Test files using Vitest (ES modules)
|
||||
{
|
||||
files: ['test/unit/**/*.js', 'test/integration/**/*.js', 'test/helpers/**/*.js', 'test/setup.js', 'vitest.config.js'],
|
||||
languageOptions: {
|
||||
sourceType: 'module',
|
||||
ecmaVersion: 'latest',
|
||||
},
|
||||
rules: {
|
||||
// Allow dev dependencies in test files
|
||||
'n/no-unpublished-import': 'off',
|
||||
'unicorn/prefer-module': 'off',
|
||||
'no-unused-vars': 'off',
|
||||
},
|
||||
},
|
||||
|
||||
// CLI scripts under tools/** and test/**
|
||||
{
|
||||
files: ['tools/**/*.js', 'tools/**/*.mjs', 'test/**/*.js'],
|
||||
|
|
|
|||
|
|
@ -35,6 +35,8 @@
|
|||
"@astrojs/sitemap": "^3.6.0",
|
||||
"@astrojs/starlight": "^0.37.0",
|
||||
"@eslint/js": "^9.33.0",
|
||||
"@vitest/coverage-v8": "^4.0.16",
|
||||
"@vitest/ui": "^4.0.16",
|
||||
"archiver": "^7.0.1",
|
||||
"astro": "^5.16.0",
|
||||
"c8": "^10.1.3",
|
||||
|
|
@ -50,6 +52,7 @@
|
|||
"prettier": "^3.7.4",
|
||||
"prettier-plugin-packagejson": "^2.5.19",
|
||||
"sharp": "^0.33.5",
|
||||
"vitest": "^4.0.16",
|
||||
"yaml-eslint-parser": "^1.2.3",
|
||||
"yaml-lint": "^1.7.0"
|
||||
},
|
||||
|
|
@ -2983,6 +2986,13 @@
|
|||
"url": "https://opencollective.com/pkgr"
|
||||
}
|
||||
},
|
||||
"node_modules/@polka/url": {
|
||||
"version": "1.0.0-next.29",
|
||||
"resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.29.tgz",
|
||||
"integrity": "sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@rollup/pluginutils": {
|
||||
"version": "5.3.0",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.3.0.tgz",
|
||||
|
|
@ -3435,6 +3445,13 @@
|
|||
"@sinonjs/commons": "^3.0.1"
|
||||
}
|
||||
},
|
||||
"node_modules/@standard-schema/spec": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz",
|
||||
"integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@swc/helpers": {
|
||||
"version": "0.5.18",
|
||||
"resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.18.tgz",
|
||||
|
|
@ -3501,6 +3518,17 @@
|
|||
"@babel/types": "^7.28.2"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/chai": {
|
||||
"version": "5.2.3",
|
||||
"resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz",
|
||||
"integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/deep-eql": "*",
|
||||
"assertion-error": "^2.0.1"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/debug": {
|
||||
"version": "4.1.12",
|
||||
"resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
|
||||
|
|
@ -3510,6 +3538,13 @@
|
|||
"@types/ms": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/deep-eql": {
|
||||
"version": "4.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz",
|
||||
"integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/estree": {
|
||||
"version": "1.0.8",
|
||||
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
|
||||
|
|
@ -3953,6 +3988,171 @@
|
|||
"win32"
|
||||
]
|
||||
},
|
||||
"node_modules/@vitest/coverage-v8": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-4.0.16.tgz",
|
||||
"integrity": "sha512-2rNdjEIsPRzsdu6/9Eq0AYAzYdpP6Bx9cje9tL3FE5XzXRQF1fNU9pe/1yE8fCrS0HD+fBtt6gLPh6LI57tX7A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@bcoe/v8-coverage": "^1.0.2",
|
||||
"@vitest/utils": "4.0.16",
|
||||
"ast-v8-to-istanbul": "^0.3.8",
|
||||
"istanbul-lib-coverage": "^3.2.2",
|
||||
"istanbul-lib-report": "^3.0.1",
|
||||
"istanbul-lib-source-maps": "^5.0.6",
|
||||
"istanbul-reports": "^3.2.0",
|
||||
"magicast": "^0.5.1",
|
||||
"obug": "^2.1.1",
|
||||
"std-env": "^3.10.0",
|
||||
"tinyrainbow": "^3.0.3"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@vitest/browser": "4.0.16",
|
||||
"vitest": "4.0.16"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@vitest/browser": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@vitest/expect": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.16.tgz",
|
||||
"integrity": "sha512-eshqULT2It7McaJkQGLkPjPjNph+uevROGuIMJdG3V+0BSR2w9u6J9Lwu+E8cK5TETlfou8GRijhafIMhXsimA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@standard-schema/spec": "^1.0.0",
|
||||
"@types/chai": "^5.2.2",
|
||||
"@vitest/spy": "4.0.16",
|
||||
"@vitest/utils": "4.0.16",
|
||||
"chai": "^6.2.1",
|
||||
"tinyrainbow": "^3.0.3"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
}
|
||||
},
|
||||
"node_modules/@vitest/mocker": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.16.tgz",
|
||||
"integrity": "sha512-yb6k4AZxJTB+q9ycAvsoxGn+j/po0UaPgajllBgt1PzoMAAmJGYFdDk0uCcRcxb3BrME34I6u8gHZTQlkqSZpg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@vitest/spy": "4.0.16",
|
||||
"estree-walker": "^3.0.3",
|
||||
"magic-string": "^0.30.21"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"msw": "^2.4.9",
|
||||
"vite": "^6.0.0 || ^7.0.0-0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"msw": {
|
||||
"optional": true
|
||||
},
|
||||
"vite": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@vitest/pretty-format": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.16.tgz",
|
||||
"integrity": "sha512-eNCYNsSty9xJKi/UdVD8Ou16alu7AYiS2fCPRs0b1OdhJiV89buAXQLpTbe+X8V9L6qrs9CqyvU7OaAopJYPsA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"tinyrainbow": "^3.0.3"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
}
|
||||
},
|
||||
"node_modules/@vitest/runner": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.16.tgz",
|
||||
"integrity": "sha512-VWEDm5Wv9xEo80ctjORcTQRJ539EGPB3Pb9ApvVRAY1U/WkHXmmYISqU5E79uCwcW7xYUV38gwZD+RV755fu3Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@vitest/utils": "4.0.16",
|
||||
"pathe": "^2.0.3"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
}
|
||||
},
|
||||
"node_modules/@vitest/snapshot": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.16.tgz",
|
||||
"integrity": "sha512-sf6NcrYhYBsSYefxnry+DR8n3UV4xWZwWxYbCJUt2YdvtqzSPR7VfGrY0zsv090DAbjFZsi7ZaMi1KnSRyK1XA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@vitest/pretty-format": "4.0.16",
|
||||
"magic-string": "^0.30.21",
|
||||
"pathe": "^2.0.3"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
}
|
||||
},
|
||||
"node_modules/@vitest/spy": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.16.tgz",
|
||||
"integrity": "sha512-4jIOWjKP0ZUaEmJm00E0cOBLU+5WE0BpeNr3XN6TEF05ltro6NJqHWxXD0kA8/Zc8Nh23AT8WQxwNG+WeROupw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
}
|
||||
},
|
||||
"node_modules/@vitest/ui": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@vitest/ui/-/ui-4.0.16.tgz",
|
||||
"integrity": "sha512-rkoPH+RqWopVxDnCBE/ysIdfQ2A7j1eDmW8tCxxrR9nnFBa9jKf86VgsSAzxBd1x+ny0GC4JgiD3SNfRHv3pOg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@vitest/utils": "4.0.16",
|
||||
"fflate": "^0.8.2",
|
||||
"flatted": "^3.3.3",
|
||||
"pathe": "^2.0.3",
|
||||
"sirv": "^3.0.2",
|
||||
"tinyglobby": "^0.2.15",
|
||||
"tinyrainbow": "^3.0.3"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"vitest": "4.0.16"
|
||||
}
|
||||
},
|
||||
"node_modules/@vitest/utils": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.16.tgz",
|
||||
"integrity": "sha512-h8z9yYhV3e1LEfaQ3zdypIrnAg/9hguReGZoS7Gl0aBG5xgA410zBqECqmaF/+RkTggRsfnzc1XaAHA6bmUufA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@vitest/pretty-format": "4.0.16",
|
||||
"tinyrainbow": "^3.0.3"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
}
|
||||
},
|
||||
"node_modules/abort-controller": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
|
||||
|
|
@ -4264,6 +4464,35 @@
|
|||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/assertion-error": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz",
|
||||
"integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/ast-v8-to-istanbul": {
|
||||
"version": "0.3.10",
|
||||
"resolved": "https://registry.npmjs.org/ast-v8-to-istanbul/-/ast-v8-to-istanbul-0.3.10.tgz",
|
||||
"integrity": "sha512-p4K7vMz2ZSk3wN8l5o3y2bJAoZXT3VuJI5OLTATY/01CYWumWvwkUw0SqDBnNq6IiTO3qDa1eSQDibAV8g7XOQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@jridgewell/trace-mapping": "^0.3.31",
|
||||
"estree-walker": "^3.0.3",
|
||||
"js-tokens": "^9.0.1"
|
||||
}
|
||||
},
|
||||
"node_modules/ast-v8-to-istanbul/node_modules/js-tokens": {
|
||||
"version": "9.0.1",
|
||||
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz",
|
||||
"integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/astring": {
|
||||
"version": "1.9.0",
|
||||
"resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz",
|
||||
|
|
@ -5513,6 +5742,16 @@
|
|||
"url": "https://github.com/sponsors/wooorm"
|
||||
}
|
||||
},
|
||||
"node_modules/chai": {
|
||||
"version": "6.2.2",
|
||||
"resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz",
|
||||
"integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/chalk": {
|
||||
"version": "4.1.2",
|
||||
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
|
||||
|
|
@ -7248,6 +7487,16 @@
|
|||
"node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/expect-type": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz",
|
||||
"integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==",
|
||||
"dev": true,
|
||||
"license": "Apache-2.0",
|
||||
"engines": {
|
||||
"node": ">=12.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/expressive-code": {
|
||||
"version": "0.41.5",
|
||||
"resolved": "https://registry.npmjs.org/expressive-code/-/expressive-code-0.41.5.tgz",
|
||||
|
|
@ -7363,6 +7612,13 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"node_modules/fflate": {
|
||||
"version": "0.8.2",
|
||||
"resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz",
|
||||
"integrity": "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/figlet": {
|
||||
"version": "1.9.4",
|
||||
"resolved": "https://registry.npmjs.org/figlet/-/figlet-1.9.4.tgz",
|
||||
|
|
@ -11693,6 +11949,17 @@
|
|||
"url": "https://github.com/fb55/nth-check?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/obug": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz",
|
||||
"integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==",
|
||||
"dev": true,
|
||||
"funding": [
|
||||
"https://github.com/sponsors/sxzz",
|
||||
"https://opencollective.com/debug"
|
||||
],
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/ofetch": {
|
||||
"version": "1.5.1",
|
||||
"resolved": "https://registry.npmjs.org/ofetch/-/ofetch-1.5.1.tgz",
|
||||
|
|
@ -12138,6 +12405,13 @@
|
|||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/pathe": {
|
||||
"version": "2.0.3",
|
||||
"resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
|
||||
"integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/piccolore": {
|
||||
"version": "0.1.3",
|
||||
"resolved": "https://registry.npmjs.org/piccolore/-/piccolore-0.1.3.tgz",
|
||||
|
|
@ -13362,6 +13636,13 @@
|
|||
"@types/hast": "^3.0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/siginfo": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz",
|
||||
"integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==",
|
||||
"dev": true,
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/signal-exit": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
|
||||
|
|
@ -13391,6 +13672,21 @@
|
|||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/sirv": {
|
||||
"version": "3.0.2",
|
||||
"resolved": "https://registry.npmjs.org/sirv/-/sirv-3.0.2.tgz",
|
||||
"integrity": "sha512-2wcC/oGxHis/BoHkkPwldgiPSYcpZK3JU28WoMVv55yHJgcZ8rlXvuG9iZggz+sU1d4bRgIGASwyWqjxu3FM0g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@polka/url": "^1.0.0-next.24",
|
||||
"mrmime": "^2.0.0",
|
||||
"totalist": "^3.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/sisteransi": {
|
||||
"version": "1.0.5",
|
||||
"resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
|
||||
|
|
@ -13601,6 +13897,20 @@
|
|||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/stackback": {
|
||||
"version": "0.0.2",
|
||||
"resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz",
|
||||
"integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/std-env": {
|
||||
"version": "3.10.0",
|
||||
"resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz",
|
||||
"integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/stream-replace-string": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/stream-replace-string/-/stream-replace-string-2.0.0.tgz",
|
||||
|
|
@ -14015,6 +14325,13 @@
|
|||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/tinybench": {
|
||||
"version": "2.9.0",
|
||||
"resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz",
|
||||
"integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/tinyexec": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz",
|
||||
|
|
@ -14042,6 +14359,16 @@
|
|||
"url": "https://github.com/sponsors/SuperchupuDev"
|
||||
}
|
||||
},
|
||||
"node_modules/tinyrainbow": {
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz",
|
||||
"integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=14.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/tmpl": {
|
||||
"version": "1.0.5",
|
||||
"resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz",
|
||||
|
|
@ -14062,6 +14389,16 @@
|
|||
"node": ">=8.0"
|
||||
}
|
||||
},
|
||||
"node_modules/totalist": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz",
|
||||
"integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
}
|
||||
},
|
||||
"node_modules/trim-lines": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz",
|
||||
|
|
@ -14807,6 +15144,84 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"node_modules/vitest": {
|
||||
"version": "4.0.16",
|
||||
"resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.16.tgz",
|
||||
"integrity": "sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@vitest/expect": "4.0.16",
|
||||
"@vitest/mocker": "4.0.16",
|
||||
"@vitest/pretty-format": "4.0.16",
|
||||
"@vitest/runner": "4.0.16",
|
||||
"@vitest/snapshot": "4.0.16",
|
||||
"@vitest/spy": "4.0.16",
|
||||
"@vitest/utils": "4.0.16",
|
||||
"es-module-lexer": "^1.7.0",
|
||||
"expect-type": "^1.2.2",
|
||||
"magic-string": "^0.30.21",
|
||||
"obug": "^2.1.1",
|
||||
"pathe": "^2.0.3",
|
||||
"picomatch": "^4.0.3",
|
||||
"std-env": "^3.10.0",
|
||||
"tinybench": "^2.9.0",
|
||||
"tinyexec": "^1.0.2",
|
||||
"tinyglobby": "^0.2.15",
|
||||
"tinyrainbow": "^3.0.3",
|
||||
"vite": "^6.0.0 || ^7.0.0",
|
||||
"why-is-node-running": "^2.3.0"
|
||||
},
|
||||
"bin": {
|
||||
"vitest": "vitest.mjs"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^20.0.0 || ^22.0.0 || >=24.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/vitest"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@edge-runtime/vm": "*",
|
||||
"@opentelemetry/api": "^1.9.0",
|
||||
"@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0",
|
||||
"@vitest/browser-playwright": "4.0.16",
|
||||
"@vitest/browser-preview": "4.0.16",
|
||||
"@vitest/browser-webdriverio": "4.0.16",
|
||||
"@vitest/ui": "4.0.16",
|
||||
"happy-dom": "*",
|
||||
"jsdom": "*"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@edge-runtime/vm": {
|
||||
"optional": true
|
||||
},
|
||||
"@opentelemetry/api": {
|
||||
"optional": true
|
||||
},
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
},
|
||||
"@vitest/browser-playwright": {
|
||||
"optional": true
|
||||
},
|
||||
"@vitest/browser-preview": {
|
||||
"optional": true
|
||||
},
|
||||
"@vitest/browser-webdriverio": {
|
||||
"optional": true
|
||||
},
|
||||
"@vitest/ui": {
|
||||
"optional": true
|
||||
},
|
||||
"happy-dom": {
|
||||
"optional": true
|
||||
},
|
||||
"jsdom": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/walker": {
|
||||
"version": "1.0.8",
|
||||
"resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz",
|
||||
|
|
@ -14862,6 +15277,23 @@
|
|||
"node": ">=4"
|
||||
}
|
||||
},
|
||||
"node_modules/why-is-node-running": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz",
|
||||
"integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"siginfo": "^2.0.0",
|
||||
"stackback": "0.0.2"
|
||||
},
|
||||
"bin": {
|
||||
"why-is-node-running": "cli.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/widest-line": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz",
|
||||
|
|
|
|||
12
package.json
12
package.json
|
|
@ -45,10 +45,15 @@
|
|||
"release:minor": "gh workflow run \"Manual Release\" -f version_bump=minor",
|
||||
"release:patch": "gh workflow run \"Manual Release\" -f version_bump=patch",
|
||||
"release:watch": "gh run watch",
|
||||
"test": "npm run test:schemas && npm run test:install && npm run validate:schemas && npm run lint && npm run lint:md && npm run format:check",
|
||||
"test:coverage": "c8 --reporter=text --reporter=html npm run test:schemas",
|
||||
"test": "npm run test:schemas && npm run test:install && npm run test:unit && npm run validate:schemas && npm run lint && npm run lint:md && npm run format:check",
|
||||
"test:coverage": "vitest run --coverage",
|
||||
"test:install": "node test/test-installation-components.js",
|
||||
"test:integration": "vitest run test/integration",
|
||||
"test:quick": "vitest run --changed",
|
||||
"test:schemas": "node test/test-agent-schema.js",
|
||||
"test:ui": "vitest --ui",
|
||||
"test:unit": "vitest run",
|
||||
"test:unit:watch": "vitest",
|
||||
"validate:schemas": "node tools/validate-agent-schema.js"
|
||||
},
|
||||
"lint-staged": {
|
||||
|
|
@ -90,6 +95,8 @@
|
|||
"@astrojs/sitemap": "^3.6.0",
|
||||
"@astrojs/starlight": "^0.37.0",
|
||||
"@eslint/js": "^9.33.0",
|
||||
"@vitest/coverage-v8": "^4.0.16",
|
||||
"@vitest/ui": "^4.0.16",
|
||||
"archiver": "^7.0.1",
|
||||
"astro": "^5.16.0",
|
||||
"c8": "^10.1.3",
|
||||
|
|
@ -105,6 +112,7 @@
|
|||
"prettier": "^3.7.4",
|
||||
"prettier-plugin-packagejson": "^2.5.19",
|
||||
"sharp": "^0.33.5",
|
||||
"vitest": "^4.0.16",
|
||||
"yaml-eslint-parser": "^1.2.3",
|
||||
"yaml-lint": "^1.7.0"
|
||||
},
|
||||
|
|
|
|||
|
|
@ -16,25 +16,20 @@ agent:
|
|||
principles: |
|
||||
- Channel expert business analysis frameworks: draw upon Porter's Five Forces, SWOT analysis, root cause analysis, and competitive intelligence methodologies to uncover what others miss. Every business challenge has root causes waiting to be discovered. Ground findings in verifiable evidence.
|
||||
- Articulate requirements with absolute precision. Ensure all stakeholder voices heard.
|
||||
- Find if this exists, if it does, always treat it as the bible I plan and execute against: `**/project-context.md`
|
||||
|
||||
menu:
|
||||
- trigger: WS or fuzzy match on workflow-status
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/workflow-status/workflow.yaml"
|
||||
description: "[WS] Workflow Status: Initialize, Get or Update the Project Workflow"
|
||||
|
||||
- trigger: BP or fuzzy match on brainstorm-project
|
||||
exec: "{project-root}/_bmad/core/workflows/brainstorming/workflow.md"
|
||||
data: "{project-root}/_bmad/bmm/data/project-context-template.md"
|
||||
description: "[BP] Project Brainstorming: Expert Guided Facilitation through a single or multiple techniques with a final report"
|
||||
description: "[BP] Brainstorm Project: Expert Guided Facilitation through a single or multiple techniques with a final report"
|
||||
|
||||
- trigger: RS or fuzzy match on research
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/research/workflow.md"
|
||||
description: "[RS] Research: Choose from or specify market, domain, competitive analysis, or technical research"
|
||||
|
||||
- trigger: PB or fuzzy match on product-brief
|
||||
- trigger: CB or fuzzy match on product-brief
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md"
|
||||
description: "[PB] Product Brief: A guided experience to nail down your product idea and use it as an input to define the requirements later"
|
||||
description: "[CB] Create Brief: A guided experience to nail down your product idea into an executive brief"
|
||||
|
||||
- trigger: DP or fuzzy match on document-project
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/document-project/workflow.yaml"
|
||||
|
|
|
|||
|
|
@ -17,13 +17,8 @@ agent:
|
|||
- Channel expert lean architecture wisdom: draw upon deep knowledge of distributed systems, cloud patterns, scalability trade-offs, and what actually ships successfully
|
||||
- User journeys drive technical decisions. Embrace boring technology for stability.
|
||||
- Design simple solutions that scale when needed. Developer productivity is architecture. Connect every decision to business value and user impact.
|
||||
- Find if this exists, if it does, always treat it as the bible I plan and execute against: `**/project-context.md`
|
||||
|
||||
menu:
|
||||
- trigger: WS or fuzzy match on workflow-status
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/workflow-status/workflow.yaml"
|
||||
description: "[WS] Workflow Status: Initialize, Get or Update the Project Workflow"
|
||||
|
||||
- trigger: CA or fuzzy match on create-architecture
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md"
|
||||
description: "[CA] Create Architecture: Guided Workflow to document technical decisions to keep implementation on track"
|
||||
|
|
|
|||
|
|
@ -19,13 +19,8 @@ agent:
|
|||
- PRDs emerge from user interviews, not template filling - discover what users actually need
|
||||
- Ship the smallest thing that validates the assumption - iteration over perfection
|
||||
- Technical feasibility is a constraint, not the driver - user value first
|
||||
- Find if this exists, if it does, always treat it as the bible I plan and execute against: `**/project-context.md`
|
||||
|
||||
menu:
|
||||
- trigger: WS or fuzzy match on workflow-status
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/workflow-status/workflow.yaml"
|
||||
description: "[WS] Workflow Status: Initialize, Get or Update the Project Workflow"
|
||||
|
||||
- trigger: CP or fuzzy match on create-prd
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md"
|
||||
description: "[CP] Create PRD: Expert led facilitation to produce your Product Requirements Document"
|
||||
|
|
@ -38,9 +33,9 @@ agent:
|
|||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md"
|
||||
description: "[EP] Edit PRD: Update an existing Product Requirements Document"
|
||||
|
||||
- trigger: ES or fuzzy match on epics-stories
|
||||
- trigger: CE or fuzzy match on epics-stories
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md"
|
||||
description: "[ES] Epics Stories: Create the Epics and Stories Listing, these are the specs that will drive development"
|
||||
description: "[CE] Create Epics and Stories: Create the Epics and Stories Listing, these are the specs that will drive development"
|
||||
|
||||
- trigger: IR or fuzzy match on implementation-readiness
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md"
|
||||
|
|
|
|||
|
|
@ -14,21 +14,10 @@ agent:
|
|||
identity: Certified Scrum Master with deep technical background. Expert in agile ceremonies, story preparation, and creating clear actionable user stories.
|
||||
communication_style: "Crisp and checklist-driven. Every word has a purpose, every requirement crystal clear. Zero tolerance for ambiguity."
|
||||
principles: |
|
||||
- Strict boundaries between story prep and implementation
|
||||
- Stories are single source of truth
|
||||
- Perfect alignment between PRD and dev execution
|
||||
- Enable efficient sprints
|
||||
- Deliver developer-ready specs with precise handoffs
|
||||
|
||||
critical_actions:
|
||||
- "When running *create-story, always run as *yolo. Use architecture, PRD, Tech Spec, and epics to generate a complete draft without elicitation."
|
||||
- "Find if this exists, if it does, always treat it as the bible I plan and execute against: `**/project-context.md`"
|
||||
- I strive to be a servant leader and conduct myself accordingly, helping with any task and offering suggestions
|
||||
- I love to talk about Agile process and theory whenever anyone wants to talk about it
|
||||
|
||||
menu:
|
||||
- trigger: WS or fuzzy match on workflow-status
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/workflow-status/workflow.yaml"
|
||||
description: "[WS] Workflow Status: Initialize, Get or Update the Project Workflow"
|
||||
|
||||
- trigger: SP or fuzzy match on sprint-planning
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml"
|
||||
description: "[SP] Sprint Planning: Generate or update the record that will sequence the tasks to complete the full project that the dev agent will follow"
|
||||
|
|
|
|||
|
|
@ -28,13 +28,8 @@ agent:
|
|||
- "Consult {project-root}/_bmad/bmm/testarch/tea-index.csv to select knowledge fragments under knowledge/ and load only the files needed for the current task"
|
||||
- "Load the referenced fragment(s) from {project-root}/_bmad/bmm/testarch/knowledge/ before giving recommendations"
|
||||
- "Cross-check recommendations with the current official Playwright, Cypress, Pact, and CI platform documentation"
|
||||
- "Find if this exists, if it does, always treat it as the bible I plan and execute against: `**/project-context.md`"
|
||||
|
||||
menu:
|
||||
- trigger: WS or fuzzy match on workflow-status
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/workflow-status/workflow.yaml"
|
||||
description: "[WS] Workflow Status: Initialize, Get or Update the Project Workflow"
|
||||
|
||||
- trigger: TF or fuzzy match on test-framework
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/testarch/framework/workflow.yaml"
|
||||
description: "[TF] Test Framework: Initialize production-ready test framework architecture"
|
||||
|
|
|
|||
|
|
@ -20,10 +20,6 @@ agent:
|
|||
- I will always strive to follow `_bmad/_memory/tech-writer-sidecar/documentation-standards.md` best practices.
|
||||
|
||||
menu:
|
||||
- trigger: WS or fuzzy match on workflow-status
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/workflow-status/workflow.yaml"
|
||||
description: "[WS] Workflow Status: Initialize, Get or Update the Project Workflow"
|
||||
|
||||
- trigger: DP or fuzzy match on document-project
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/document-project/workflow.yaml"
|
||||
description: "[DP] Document Project: Generate comprehensive project documentation (brownfield analysis, architecture scanning)"
|
||||
|
|
|
|||
|
|
@ -21,10 +21,6 @@ agent:
|
|||
- Data-informed but always creative
|
||||
|
||||
menu:
|
||||
- trigger: WS or fuzzy match on workflow-status
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/workflow-status/workflow.yaml"
|
||||
description: "[WS] Workflow Status: Initialize, Get or Update the Project Workflow"
|
||||
|
||||
- trigger: UX or fuzzy match on ux-design
|
||||
- trigger: CU or fuzzy match on ux-design
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md"
|
||||
description: "[UX] UX: Guidance through realizing the plan for your UX to inform architecture and implementation. PRovides more details that what was discovered in the PRD"
|
||||
description: "[CU] Create UX: Guidance through realizing the plan for your UX to inform architecture and implementation. PRovides more details that what was discovered in the PRD"
|
||||
|
|
|
|||
|
|
@ -1,29 +0,0 @@
|
|||
# BMM Module Data
|
||||
|
||||
This directory contains module-specific data files used by BMM agents and workflows.
|
||||
|
||||
## Files
|
||||
|
||||
### `project-context-template.md`
|
||||
|
||||
Template for project-specific brainstorming context. Used by:
|
||||
|
||||
- Analyst agent `brainstorm-project` command
|
||||
- Core brainstorming workflow when called with context
|
||||
|
||||
### `documentation-standards.md`
|
||||
|
||||
BMAD documentation standards and guidelines. Used by:
|
||||
|
||||
- Tech Writer agent (critical action loading)
|
||||
- Various documentation workflows
|
||||
- Standards validation and review processes
|
||||
|
||||
## Purpose
|
||||
|
||||
Separates module-specific data from core workflow implementations, maintaining clean architecture:
|
||||
|
||||
- Core workflows remain generic and reusable
|
||||
- Module-specific templates and standards are properly scoped
|
||||
- Data files can be easily maintained and updated
|
||||
- Clear separation of concerns between core and module functionality
|
||||
|
|
@ -17,24 +17,10 @@ This brainstorming session focuses on software and product development considera
|
|||
|
||||
### Integration with Project Workflow
|
||||
|
||||
Brainstorming results will feed into:
|
||||
Brainstorming results might feed into:
|
||||
|
||||
- Product Briefs for initial product vision
|
||||
- PRDs for detailed requirements
|
||||
- Technical Specifications for architecture plans
|
||||
- Research Activities for validation needs
|
||||
|
||||
### Expected Outcomes
|
||||
|
||||
Capture:
|
||||
|
||||
1. Problem Statements - Clearly defined user challenges
|
||||
2. Solution Concepts - High-level approach descriptions
|
||||
3. Feature Priorities - Categorized by importance and feasibility
|
||||
4. Technical Considerations - Architecture and implementation thoughts
|
||||
5. Next Steps - Actions needed to advance concepts
|
||||
6. Integration Points - Connections to downstream workflows
|
||||
|
||||
---
|
||||
|
||||
_Use this template to provide project-specific context for brainstorming sessions. Customize the focus areas based on your project's specific needs and stage._
|
||||
|
|
|
|||
|
|
@ -0,0 +1,33 @@
|
|||
module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs,
|
||||
bmm,anytime,Document Project,DP,10,_bmad/bmm/workflows/document-project/workflow.yaml,bmad:bmm:document-project,false,analyst,Create Mode,"Analyze an existing project to produce useful documentation",project-knowledge,*,
|
||||
bmm,anytime,Tech Spec,TS,20,_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md,bmad:bmm:tech-spec,false,quick-flow-solo-dev,Create Mode,"Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method. Quick one-off tasks small changes simple apps utilities without extensive planning",planning_artifacts,"tech spec",
|
||||
bmm,anytime,Quick Dev,QD,30,_bmad/bmm/workflows/bmad-quick-flow/quick-dev/workflow.md,bmad:bmm:quick-dev,false,quick-flow-solo-dev,Create Mode,"Quick one-off tasks small changes simple apps utilities without extensive planning - Do not suggest for potentially very complex things unless requested or if the user complains that they do not want to follow the extensive planning of the bmad method, unless the user is already working through the implementation phase and just requests a 1 off things not already in the plan",,,
|
||||
bmm,anytime,Correct Course,CC,40,_bmad/bmm/workflows/4-implementation/correct-course/workflow.yaml,bmad:bmm:correct-course,false,sm,Create Mode,"Anytime: Navigate significant changes. May recommend start over update PRD redo architecture sprint planning or correct epics and stories",planning_artifacts,"change proposal",
|
||||
bmm,1-analysis,Brainstorm Project,BP,10,_bmad/core/workflows/brainstorming/workflow.md,bmad:bmm:brainstorming,false,analyst,"data=_bmad/bmm/data/project-context-template.md","Expert Guided Facilitation through a single or multiple techniques",planning_artifacts,"brainstorming session",
|
||||
bmm,1-analysis,Market Research,MR,20,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad:bmm:research,false,analyst,Create Mode,"research_type=""market""","Market analysis competitive landscape customer needs and trends","planning_artifacts|project-knowledge","research documents"
|
||||
bmm,1-analysis,Domain Research,DR,21,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad:bmm:research,false,analyst,Create Mode,"research_type=""domain""","Industry domain deep dive subject matter expertise and terminology","planning_artifacts|project-knowledge","research documents"
|
||||
bmm,1-analysis,Technical Research,TR,22,_bmad/bmm/workflows/1-analysis/research/workflow.md,bmad:bmm:research,false,analyst,Create Mode,"research_type=""technical""","Technical feasibility architecture options and implementation approaches","planning_artifacts|project-knowledge","research documents"
|
||||
bmm,1-analysis,Create Brief,CB,30,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad:bmm:create-brief,false,analyst,Create Mode,"A guided experience to nail down your product idea",planning_artifacts,"product brief",
|
||||
bmm,1-analysis,Validate Brief,VB,40,_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md,bmad:bmm:validate-brief,false,analyst,Validate Mode,"Validates product brief completeness",planning_artifacts,"brief validation report",
|
||||
bmm,2-planning,Create PRD,CP,10,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad:bmm:create-prd,true,pm,Create Mode,"Expert led facilitation to produce your Product Requirements Document",planning_artifacts,prd,
|
||||
bmm,2-planning,Validate PRD,VP,20,_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md,bmad:bmm:validate-prd,false,pm,Validate Mode,"Validate PRD is comprehensive lean well organized and cohesive",planning_artifacts,"prd validation report",
|
||||
bmm,2-planning,Create UX,CU,30,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad:bmm:create-ux,false,ux-designer,Create Mode,"Guidance through realizing the plan for your UX, strongly recommended if a UI is a primary piece of the proposed project",planning_artifacts,"ux design",
|
||||
bmm,2-planning,Validate UX,VU,40,_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md,bmad:bmm:validate-ux,false,ux-designer,Validate Mode,"Validates UX design deliverables",planning_artifacts,"ux validation report",
|
||||
,,Create Dataflow,CDF,50,_bmad/bmm/workflows/excalidraw-diagrams/create-dataflow/workflow.yaml,bmad:bmm:create-dataflow,false,ux-designer,Create Mode,"Create data flow diagrams (DFD) in Excalidraw format - can be called standalone or during any workflow to add visual documentation",planning_artifacts,"dataflow diagram",
|
||||
,,Create Diagram,CED,51,_bmad/bmm/workflows/excalidraw-diagrams/create-diagram/workflow.yaml,bmad:bmm:create-diagram,false,ux-designer,Create Mode,"Create system architecture diagrams ERDs UML diagrams or general technical diagrams in Excalidraw format - use anytime or call from architecture workflow to add visual documentation",planning_artifacts,"diagram",
|
||||
,,Create Flowchart,CFC,52,_bmad/bmm/workflows/excalidraw-diagrams/create-flowchart/workflow.yaml,bmad:bmm:create-flowchart,false,ux-designer,Create Mode,"Create a flowchart visualization in Excalidraw format for processes pipelines or logic flows - use anytime or during architecture to add process documentation",planning_artifacts,"flowchart",
|
||||
,,Create Wireframe,CEW,53,_bmad/bmm/workflows/excalidraw-diagrams/create-wireframe/workflow.yaml,bmad:bmm:create-wireframe,false,ux-designer,Create Mode,"Create website or app wireframes in Excalidraw format - use anytime standalone or call from UX workflow to add UI mockups",planning_artifacts,"wireframe",
|
||||
bmm,3-solutioning,Create Architecture,CA,10,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad:bmm:create-architecture,true,architect,Create Mode,"Guided Workflow to document technical decisions",planning_artifacts,architecture,
|
||||
bmm,3-solutioning,Validate Architecture,VA,20,_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md,bmad:bmm:validate-architecture,false,architect,Validate Mode,"Validates architecture completeness",planning_artifacts,"architecture validation report",
|
||||
bmm,3-solutioning,Create Epics and Stories,CE,30,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad:bmm:create-epics-and-stories,true,pm,Create Mode,"Create the Epics and Stories Listing",planning_artifacts,"epics and stories",
|
||||
bmm,3-solutioning,Validate Epics and Stories,VE,40,_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md,bmad:bmm:validate-epics-and-stories,false,pm,Validate Mode,"Validates epics and stories completeness",planning_artifacts,"epics validation report",
|
||||
bmm,3-solutioning,Test Design,TD,50,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad:bmm:test-design,false,tea,Create Mode,"Create comprehensive test scenarios ahead of development, recommended if strong test compliance or assurance is needed. Very critical for distributed applications with separate front ends and backends outside of a monorepo.",planning_artifacts,"test design",
|
||||
bmm,3-solutioning,Validate Test Design,VT,60,_bmad/bmm/workflows/testarch/test-design/workflow.yaml,bmad:bmm:validate-test-design,false,tea,Validate Mode,"Validates test design coverage",planning_artifacts,"test design validation report",
|
||||
bmm,3-solutioning,Implementation Readiness,IR,70,_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md,bmad:bmm:implementation-readiness,true,architect,Validate Mode,"Ensure PRD UX Architecture and Epics Stories are aligned",planning_artifacts,"readiness report",
|
||||
bmm,4-implementation,Sprint Planning,SP,10,_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml,bmad:bmm:sprint-planning,true,sm,Create Mode,"Generate sprint plan for development tasks - this kicks off the implementation phase by producing a plan the implementation agents will follow in sequence for every story in the plan.",implementation_artifacts,"sprint status",
|
||||
bmm,4-implementation,Sprint Status,SS,20,_bmad/bmm/workflows/4-implementation/sprint-status/workflow.yaml,bmad:bmm:sprint-status,false,sm,Create Mode,"Anytime: Summarize sprint status and route to next workflow",,,
|
||||
bmm,4-implementation,Create Story,CS,30,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad:bmm:create-story,true,sm,Create Mode,"Story cycle start: Prepare first found story in the sprint plan that is next, or if the command is run with a specific epic and story designation with context. Once complete, then VS then DS then CR then back to DS if needed or next CS or ER",implementation_artifacts,story,
|
||||
bmm,4-implementation,Validate Story,VS,35,_bmad/bmm/workflows/4-implementation/create-story/workflow.yaml,bmad:bmm:validate-story,false,sm,Validate Mode,"Validates story readiness and completeness before development work begins",implementation_artifacts,"story validation report",
|
||||
bmm,4-implementation,Dev Story,DS,40,_bmad/bmm/workflows/4-implementation/dev-story/workflow.yaml,bmad:bmm:dev-story,true,dev,Create Mode,"Story cycle: Execute story implementation tasks and tests then CR then back to DS if fixes needed",,,
|
||||
bmm,4-implementation,Code Review,CR,50,_bmad/bmm/workflows/4-implementation/code-review/workflow.yaml,bmad:bmm:code-review,false,dev,Create Mode,"Story cycle: If issues back to DS if approved then next CS or ER if epic complete",,,
|
||||
bmm,4-implementation,Retrospective,ER,60,_bmad/bmm/workflows/4-implementation/retrospective/workflow.yaml,bmad:bmm:retrospective,false,sm,Create Mode,"Optional at epic end: Review completed work lessons learned and next epic or if major issues consider CC",implementation_artifacts,retrospective,
|
||||
|
|
|
@ -73,17 +73,7 @@ I've successfully collaborated with you to create a comprehensive Product Brief
|
|||
|
||||
This brief serves as the foundation for all subsequent product development activities and strategic decisions."
|
||||
|
||||
### 2. Workflow Status Update
|
||||
|
||||
**Status File Management:**
|
||||
Update the main workflow status file:
|
||||
|
||||
- Check if `{output_folder} or {planning_artifacts}/bmm-workflow-status.yaml` exists
|
||||
- If so, update workflow_status["product-brief"] = `{outputFile}`
|
||||
- Add completion timestamp and metadata
|
||||
- Save file, preserving all comments and structure
|
||||
|
||||
### 3. Document Quality Check
|
||||
### 2. Document Quality Check
|
||||
|
||||
**Completeness Validation:**
|
||||
Perform final validation of the product brief:
|
||||
|
|
@ -101,7 +91,7 @@ Perform final validation of the product brief:
|
|||
- Are success criteria traceable to user needs and business goals?
|
||||
- Does MVP scope align with the problem and solution?
|
||||
|
||||
### 4. Suggest Next Steps
|
||||
### 3. Suggest Next Steps
|
||||
|
||||
**Recommended Next Workflow:**
|
||||
Provide guidance on logical next workflows:
|
||||
|
|
@ -124,12 +114,11 @@ Provide guidance on logical next workflows:
|
|||
- Use brief to validate concept before committing to detailed work
|
||||
- Brief can guide early technical feasibility discussions
|
||||
|
||||
### 5. Present MENU OPTIONS
|
||||
### 4. Congrats to the user
|
||||
|
||||
**Completion Confirmation:**
|
||||
"**Your Product Brief for {{project_name}} is now complete and ready for the next phase!**
|
||||
"**Your Product Brief for {{project_name}} is now complete and ready for the next phase!**"
|
||||
|
||||
The brief captures everything needed to guide subsequent product development:
|
||||
Recap that the brief captures everything needed to guide subsequent product development:
|
||||
|
||||
- Clear vision and problem definition
|
||||
- Deep understanding of target users
|
||||
|
|
@ -137,30 +126,9 @@ The brief captures everything needed to guide subsequent product development:
|
|||
- Focused MVP scope with realistic boundaries
|
||||
- Inspiring long-term vision
|
||||
|
||||
**Suggested Next Steps**
|
||||
### 5. Suggest next steps
|
||||
|
||||
- PRD workflow for detailed requirements?
|
||||
- UX design workflow for user experience planning?
|
||||
|
||||
**Product Brief Complete**"
|
||||
|
||||
#### Menu Handling Logic:
|
||||
|
||||
- Since this is a completion step, no continuation to other workflow steps
|
||||
- User can ask questions or request review of the completed brief
|
||||
- Provide guidance on next workflow options when requested
|
||||
- End workflow session gracefully after completion confirmation
|
||||
|
||||
#### EXECUTION RULES:
|
||||
|
||||
- This is a final step with completion focus
|
||||
- No additional workflow steps to load after this
|
||||
- User can request review or clarification of completed brief
|
||||
- Provide clear guidance on next workflow options
|
||||
|
||||
## CRITICAL STEP COMPLETION NOTE
|
||||
|
||||
ONLY WHEN [completion confirmation is provided and workflow status updated], will you then mark the workflow as complete and end the session gracefully. No additional steps are loaded after this final completion step.
|
||||
Execute task `_bmad/core/tasks/bmad-help.md` with argument `Validate PRD`.
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -82,70 +82,13 @@ Update the main workflow status file:
|
|||
|
||||
### 3. Suggest Next Steps
|
||||
|
||||
Provide guidance on logical next workflows:
|
||||
|
||||
**Typical Next Workflows:**
|
||||
|
||||
**Immediate Next Steps:**
|
||||
|
||||
1. **Wireframe Generation** - Create detailed wireframes based on UX specification
|
||||
2. **Interactive Prototype** - Build clickable prototypes for user testing
|
||||
3. **Solution Architecture** - Technical architecture design with UX context
|
||||
4. **Figma Design** - High-fidelity visual design implementation
|
||||
|
||||
**Visual Design Workflows:**
|
||||
|
||||
- Wireframe Generation → Interactive Prototype → Figma Design
|
||||
- Component Showcase → AI Frontend Prompt → Design System Implementation
|
||||
|
||||
**Development Workflows:**
|
||||
|
||||
- Solution Architecture → Epic Creation → Development Sprints
|
||||
|
||||
**What would be most valuable to tackle next?**
|
||||
|
||||
### 4. Document Quality Check
|
||||
|
||||
Perform final validation of the UX design:
|
||||
|
||||
**Completeness Check:**
|
||||
|
||||
- Does the specification clearly communicate the design vision?
|
||||
- Are user journeys thoroughly documented?
|
||||
- Are all critical components specified?
|
||||
- Are responsive and accessibility requirements comprehensive?
|
||||
- Is there clear guidance for implementation?
|
||||
|
||||
**Consistency Check:**
|
||||
|
||||
- Do all sections align with the emotional goals?
|
||||
- Is design system integration clearly defined?
|
||||
- Are patterns consistent across all user flows?
|
||||
- Does visual direction match established foundation?
|
||||
Execute task `_bmad/core/tasks/bmad-help.md` with argument `Create UX`.
|
||||
|
||||
### 5. Final Completion Confirmation
|
||||
|
||||
Confirm completion with user:
|
||||
"**Your UX Design Specification for {{project_name}} is now complete and ready for implementation!**
|
||||
Congratulate the user on the UX design specification you both completed together.
|
||||
|
||||
**The specification contains everything needed to:**
|
||||
|
||||
- Guide visual designers in creating the final interfaces
|
||||
- Inform developers of all UX requirements and patterns
|
||||
- Ensure consistency across all user interactions
|
||||
- Maintain accessibility and responsive design standards
|
||||
- Provide a foundation for user testing and iteration
|
||||
|
||||
**Ready to continue with:**
|
||||
|
||||
- Wireframe generation for detailed layouts?
|
||||
- Interactive prototype for user testing?
|
||||
- Solution architecture for technical planning?
|
||||
- Visual design implementation?
|
||||
|
||||
**Or would you like to review the complete specification first?**
|
||||
|
||||
[UX Design Workflow Complete]"
|
||||
|
||||
## SUCCESS METRICS:
|
||||
|
||||
|
|
|
|||
|
|
@ -87,39 +87,7 @@ Offer validation workflows to ensure PRD is ready for implementation:
|
|||
|
||||
### 4. Suggest Next Workflows
|
||||
|
||||
Provide guidance on logical next workflows - strongly suggesting any of these chosen are started in a fresh context with the appropriate agent:
|
||||
|
||||
**Typical Next Workflows:**
|
||||
|
||||
**Immediate Next Steps:**
|
||||
|
||||
1. **PRD Quality Validation First (Recommended):**
|
||||
- execute the `{validationFlow}` workflow if selected or start a new chat with me and select the validate PRD menu item
|
||||
- Ensures PRD is complete and ready
|
||||
- Identifies any gaps or issues
|
||||
- Validates before committing to architecture/design
|
||||
|
||||
2. **UX Design:** `workflow create-ux-design` with the UX-Designer Agent (if UI exists)
|
||||
- User journey insights from step-04 inform interaction design
|
||||
- Functional requirements from step-09 define design scope
|
||||
- Polish-optimized document provides clear design requirements
|
||||
|
||||
3. **Technical Architecture:** `workflow create-architecture` with the Architect Agent
|
||||
- Project-type requirements from step-07 guide technical decisions
|
||||
- Non-functional requirements from step-10 inform architecture choices
|
||||
- Functional requirements define system capabilities
|
||||
|
||||
4. **Epic Breakdown:** `workflow create-epics-and-stories` with me again - but really recommend first doing a UX if needed and an architecture!
|
||||
- Functional requirements from step-09 become epics and stories
|
||||
- Scope definition from step-03 guides sprint planning
|
||||
- Richer when created after UX/architecture
|
||||
|
||||
**Strategic Considerations:**
|
||||
|
||||
- Validation adds confidence before architecture/design investment
|
||||
- UX design and architecture can happen in parallel after validation
|
||||
- Epics/stories are richer when created after UX/architecture
|
||||
- Order depends on team preferences and project needs
|
||||
Execute task `_bmad/core/tasks/bmad-help.md` with argument `Create PRD`.
|
||||
|
||||
### 5. Final Completion Confirmation
|
||||
|
||||
|
|
@ -149,30 +117,6 @@ Provide guidance on logical next workflows - strongly suggesting any of these ch
|
|||
❌ **CRITICAL**: Reading only partial step file - leads to incomplete understanding and poor decisions
|
||||
❌ **CRITICAL**: Making decisions without complete understanding of step requirements and protocols
|
||||
|
||||
## WORKFLOW COMPLETION CHECKLIST:
|
||||
|
||||
### Document Structure Complete:
|
||||
|
||||
- [ ] Executive Summary with vision and differentiator
|
||||
- [ ] Success Criteria with measurable outcomes
|
||||
- [ ] Product Scope (MVP, Growth, Vision)
|
||||
- [ ] User Journeys (comprehensive coverage)
|
||||
- [ ] Domain Requirements (if applicable)
|
||||
- [ ] Innovation Analysis (if applicable)
|
||||
- [ ] Project-Type Requirements
|
||||
- [ ] Functional Requirements (capability contract)
|
||||
- [ ] Non-Functional Requirements
|
||||
- [ ] Document polished for flow and coherence
|
||||
|
||||
### Process Complete:
|
||||
|
||||
- [ ] All steps (including polish) completed with user confirmation
|
||||
- [ ] All content saved and optimized
|
||||
- [ ] Frontmatter properly updated
|
||||
- [ ] Workflow status file updated (if exists)
|
||||
- [ ] Validation options presented
|
||||
- [ ] Next steps clearly communicated
|
||||
|
||||
## FINAL REMINDER to give the user:
|
||||
|
||||
The polished PRD serves as the foundation for all subsequent product development activities. All design, architecture, and development work should trace back to the requirements and vision documented in this PRD - update it also as needed as you continue planning.
|
||||
|
|
|
|||
|
|
@ -163,7 +163,7 @@ Display:
|
|||
**[R] Review Detailed Findings** - Walk through validation report section by section
|
||||
**[E] Use Edit Workflow** - Use validation report with Edit workflow for systematic improvements
|
||||
**[F] Fix Simpler Items** - Immediate fixes for simple issues (anti-patterns, leakage, missing headers)
|
||||
**[X] Exit** - Exit and review validation report
|
||||
**[X] Exit** - Exit and Suggest Next Steps.
|
||||
|
||||
#### EXECUTION RULES:
|
||||
|
||||
|
|
@ -197,8 +197,7 @@ Display:
|
|||
- **IF X (Exit):**
|
||||
- Display: "**Validation Report Saved:** {validationReportPath}"
|
||||
- Display: "**Summary:** {overall status} - {recommendation}"
|
||||
- Display: "**Next Steps:** Review the validation report and address findings. For systematic improvements, consider using Edit workflow when available, or manually fix issues identified in this report."
|
||||
- Exit validation
|
||||
- Exit and Execute task `_bmad/core/tasks/bmad-help.md` with argument `Validate PRD`.
|
||||
|
||||
- **IF Any other:** Help user, then redisplay menu
|
||||
|
||||
|
|
|
|||
|
|
@ -115,6 +115,8 @@ The assessment found [number] issues requiring attention. Review the detailed re
|
|||
|
||||
The implementation readiness workflow is now complete. The report contains all findings and recommendations for the user to consider.
|
||||
|
||||
Execute task `_bmad/core/tasks/bmad-help.md` with argument `implementation readiness`.
|
||||
|
||||
---
|
||||
|
||||
## 🚨 SYSTEM SUCCESS/FAILURE METRICS
|
||||
|
|
|
|||
|
|
@ -299,7 +299,7 @@ Show the validation results and present choices:
|
|||
**What would you like to do?**
|
||||
[A] Advanced Elicitation - Address any complex architectural concerns
|
||||
[P] Party Mode - Review validation from different implementation perspectives
|
||||
[C] Continue - Complete the architecture and finish workflow"
|
||||
[C] Continue - Complete the architecture and finish workflow
|
||||
|
||||
### 8. Handle Menu Selection
|
||||
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@
|
|||
- 🛑 NEVER generate content without user input
|
||||
|
||||
- 📖 CRITICAL: ALWAYS read the complete step file before taking any action - partial understanding leads to incomplete decisions
|
||||
- 🔄 CRITICAL: When loading next step with 'C', ensure the entire file is read and understood before proceeding
|
||||
- ✅ ALWAYS treat this as collaborative completion between architectural peers
|
||||
- 📋 YOU ARE A FACILITATOR, not a content generator
|
||||
- 💬 FOCUS on successful workflow completion and implementation handoff
|
||||
|
|
@ -18,14 +17,7 @@
|
|||
- 🎯 Show your analysis before taking any action
|
||||
- 🎯 Present completion summary and implementation guidance
|
||||
- 📖 Update frontmatter with final workflow state
|
||||
- 🚫 NO MORE STEPS - this is the final step
|
||||
|
||||
## CONTEXT BOUNDARIES:
|
||||
|
||||
- Complete architecture document is finished and validated
|
||||
- All architectural decisions, patterns, and structure are documented
|
||||
- Focus on successful completion and implementation preparation
|
||||
- Provide clear guidance for next steps in the development process
|
||||
- 🚫 THIS IS THE FINAL STEP IN THIS WORKFLOW
|
||||
|
||||
## YOUR TASK:
|
||||
|
||||
|
|
@ -33,75 +25,11 @@ Complete the architecture workflow, provide a comprehensive completion summary,
|
|||
|
||||
## COMPLETION SEQUENCE:
|
||||
|
||||
### 1. Present Architecture Completion Summary
|
||||
### 1. Congratulate the User on Completion
|
||||
|
||||
Based on user skill level, present the completion:
|
||||
Both you and the User completed something amazing here - give a summary of what you achieved together and really congratulate the user on a job well done.
|
||||
|
||||
**For Expert Users:**
|
||||
"Architecture workflow complete. {{decision_count}} architectural decisions documented across {{step_count}} steps.
|
||||
|
||||
Your architecture is ready for AI agent implementation. All decisions are documented with specific versions and implementation patterns.
|
||||
|
||||
Key deliverables:
|
||||
|
||||
- Complete architecture decision document
|
||||
- Implementation patterns for agent consistency
|
||||
- Project structure with all files and directories
|
||||
- Validation confirming coherence and completeness
|
||||
|
||||
Ready for implementation phase."
|
||||
|
||||
**For Intermediate Users:**
|
||||
"Excellent! Your architecture for {{project_name}} is now complete and ready for implementation.
|
||||
|
||||
**What we accomplished:**
|
||||
|
||||
- Made {{decision_count}} key architectural decisions together
|
||||
- Established implementation patterns to ensure consistency
|
||||
- Created a complete project structure with {{component_count}} main areas
|
||||
- Validated that all your requirements are fully supported
|
||||
|
||||
**Your architecture document includes:**
|
||||
|
||||
- Technology choices with specific versions
|
||||
- Clear implementation patterns for AI agents to follow
|
||||
- Complete project directory structure
|
||||
- Mapping of your requirements to specific files and folders
|
||||
|
||||
The architecture is comprehensive and ready to guide consistent implementation."
|
||||
|
||||
**For Beginner Users:**
|
||||
"Congratulations! Your architecture for {{project_name}} is complete! 🎉
|
||||
|
||||
**What this means:**
|
||||
Think of this as creating the complete blueprint for your house. We've made all the important decisions about how it will be built, what materials to use, and how everything fits together.
|
||||
|
||||
**What we created together:**
|
||||
|
||||
- {{decision_count}} architectural decisions (like choosing the foundation, framing, and systems)
|
||||
- Clear rules so that multiple builders (AI agents) all work the same way
|
||||
- A complete folder structure showing exactly where every file goes
|
||||
- Confirmation that everything you want to build is supported by these decisions
|
||||
|
||||
**What happens next:**
|
||||
AI agents will read this architecture document before building anything. They'll follow all your decisions exactly, which means your app will be built with consistent patterns throughout.
|
||||
|
||||
You're ready for the implementation phase!"
|
||||
|
||||
### 2. Review Final Document State
|
||||
|
||||
Confirm the architecture document is complete:
|
||||
|
||||
**Document Structure Verification:**
|
||||
|
||||
- Project Context Analysis ✅
|
||||
- Starter Template Evaluation ✅
|
||||
- Core Architectural Decisions ✅
|
||||
- Implementation Patterns & Consistency Rules ✅
|
||||
- Project Structure & Boundaries ✅
|
||||
- Architecture Validation Results ✅
|
||||
|
||||
**Frontmatter Update:**
|
||||
### 2. Update the created document's frontmatter
|
||||
|
||||
```yaml
|
||||
stepsCompleted: [1, 2, 3, 4, 5, 6, 7, 8]
|
||||
|
|
@ -111,216 +39,12 @@ status: 'complete'
|
|||
completedAt: '{{current_date}}'
|
||||
```
|
||||
|
||||
### 3. Implementation Guidance
|
||||
### 3. Next Steps Guidance
|
||||
|
||||
Provide specific next steps for implementation:
|
||||
Execute task `_bmad/core/tasks/bmad-help.md` with argument `Create Architecture`.
|
||||
|
||||
**Immediate Next Steps:**
|
||||
Upon completion of the task output, offer to answer any questions about the Architecture Document.
|
||||
|
||||
1. **Review the complete architecture document** at `{planning_artifacts}/architecture.md`
|
||||
2. **Begin with project initialization** using the starter template command documented
|
||||
3. **Create first implementation story** for project setup
|
||||
4. **Start implementing user stories** following the architectural decisions
|
||||
|
||||
**Development Workflow:**
|
||||
"AI agents will:
|
||||
|
||||
1. Read the architecture document before implementing each story
|
||||
2. Follow your technology choices and patterns exactly
|
||||
3. Use the project structure we defined
|
||||
4. Maintain consistency across all components"
|
||||
|
||||
**Quality Assurance:**
|
||||
"Your architecture includes:
|
||||
|
||||
- Specific technology versions to use
|
||||
- Implementation patterns that prevent conflicts
|
||||
- Clear project structure and boundaries
|
||||
- Validation that all requirements are supported"
|
||||
|
||||
### 4. Generate Completion Content
|
||||
|
||||
Prepare the final content to append to the document:
|
||||
|
||||
#### Content Structure:
|
||||
|
||||
```markdown
|
||||
## Architecture Completion Summary
|
||||
|
||||
### Workflow Completion
|
||||
|
||||
**Architecture Decision Workflow:** COMPLETED ✅
|
||||
**Total Steps Completed:** 8
|
||||
**Date Completed:** {{current_date}}
|
||||
**Document Location:** {planning_artifacts}/architecture.md
|
||||
|
||||
### Final Architecture Deliverables
|
||||
|
||||
**📋 Complete Architecture Document**
|
||||
|
||||
- All architectural decisions documented with specific versions
|
||||
- Implementation patterns ensuring AI agent consistency
|
||||
- Complete project structure with all files and directories
|
||||
- Requirements to architecture mapping
|
||||
- Validation confirming coherence and completeness
|
||||
|
||||
**🏗️ Implementation Ready Foundation**
|
||||
|
||||
- {{decision_count}} architectural decisions made
|
||||
- {{pattern_count}} implementation patterns defined
|
||||
- {{component_count}} architectural components specified
|
||||
- {{requirement_count}} requirements fully supported
|
||||
|
||||
**📚 AI Agent Implementation Guide**
|
||||
|
||||
- Technology stack with verified versions
|
||||
- Consistency rules that prevent implementation conflicts
|
||||
- Project structure with clear boundaries
|
||||
- Integration patterns and communication standards
|
||||
|
||||
### Implementation Handoff
|
||||
|
||||
**For AI Agents:**
|
||||
This architecture document is your complete guide for implementing {{project_name}}. Follow all decisions, patterns, and structures exactly as documented.
|
||||
|
||||
**First Implementation Priority:**
|
||||
{{starter_template_command_or_initialization_step}}
|
||||
|
||||
**Development Sequence:**
|
||||
|
||||
1. Initialize project using documented starter template
|
||||
2. Set up development environment per architecture
|
||||
3. Implement core architectural foundations
|
||||
4. Build features following established patterns
|
||||
5. Maintain consistency with documented rules
|
||||
|
||||
### Quality Assurance Checklist
|
||||
|
||||
**✅ Architecture Coherence**
|
||||
|
||||
- [x] All decisions work together without conflicts
|
||||
- [x] Technology choices are compatible
|
||||
- [x] Patterns support the architectural decisions
|
||||
- [x] Structure aligns with all choices
|
||||
|
||||
**✅ Requirements Coverage**
|
||||
|
||||
- [x] All functional requirements are supported
|
||||
- [x] All non-functional requirements are addressed
|
||||
- [x] Cross-cutting concerns are handled
|
||||
- [x] Integration points are defined
|
||||
|
||||
**✅ Implementation Readiness**
|
||||
|
||||
- [x] Decisions are specific and actionable
|
||||
- [x] Patterns prevent agent conflicts
|
||||
- [x] Structure is complete and unambiguous
|
||||
- [x] Examples are provided for clarity
|
||||
|
||||
### Project Success Factors
|
||||
|
||||
**🎯 Clear Decision Framework**
|
||||
Every technology choice was made collaboratively with clear rationale, ensuring all stakeholders understand the architectural direction.
|
||||
|
||||
**🔧 Consistency Guarantee**
|
||||
Implementation patterns and rules ensure that multiple AI agents will produce compatible, consistent code that works together seamlessly.
|
||||
|
||||
**📋 Complete Coverage**
|
||||
All project requirements are architecturally supported, with clear mapping from business needs to technical implementation.
|
||||
|
||||
**🏗️ Solid Foundation**
|
||||
The chosen starter template and architectural patterns provide a production-ready foundation following current best practices.
|
||||
|
||||
---
|
||||
|
||||
**Architecture Status:** READY FOR IMPLEMENTATION ✅
|
||||
|
||||
**Next Phase:** Begin implementation using the architectural decisions and patterns documented herein.
|
||||
|
||||
**Document Maintenance:** Update this architecture when major technical decisions are made during implementation.
|
||||
```
|
||||
|
||||
### 5. Complete Workflow Finalization
|
||||
|
||||
**Save Final Document:**
|
||||
|
||||
- Ensure all content is properly appended to `{planning_artifacts}/architecture.md`
|
||||
- Update frontmatter with completion status
|
||||
- Verify document is complete and coherent
|
||||
|
||||
**Workflow Status Update:**
|
||||
If not in standalone mode, update workflow status:
|
||||
|
||||
- Load `{planning_artifacts}/bmm-workflow-status.yaml`
|
||||
- Update workflow_status["create-architecture"] = "{planning_artifacts}/architecture.md"
|
||||
- Save file with all structure and comments preserved
|
||||
|
||||
### 6. Present Completion to User
|
||||
|
||||
"🎉 **Architecture Workflow Complete!**
|
||||
|
||||
Your architecture for {{project_name}} is comprehensive, validated, and ready for implementation.
|
||||
|
||||
**✅ What's been delivered:**
|
||||
|
||||
- Complete architecture document with all decisions and patterns
|
||||
- Project structure ready for AI agent implementation
|
||||
- Validation confirming everything works together coherently
|
||||
- Implementation guidance for the development phase
|
||||
|
||||
**📍 Where to find it:**
|
||||
`{planning_artifacts}/architecture.md`
|
||||
|
||||
**🚀 What's next:**
|
||||
|
||||
1. Review your complete architecture document
|
||||
2. Begin implementation using the starter template command
|
||||
3. Create stories for AI agents to implement following your architectural decisions
|
||||
|
||||
Your architecture will ensure consistent, high-quality implementation across all development work. Great job collaborating through these important architectural decisions!
|
||||
|
||||
**💡 Optional Enhancement: Project Context File**
|
||||
|
||||
Would you like to create a `project-context.md` file? This is a concise, optimized guide for AI agents that captures:
|
||||
|
||||
- Critical language and framework rules they might miss
|
||||
- Specific patterns and conventions for your project
|
||||
- Testing and code quality requirements
|
||||
- Anti-patterns and edge cases to avoid
|
||||
|
||||
{if_existing_project_context}
|
||||
I noticed you already have a project context file. Would you like to update it with your new architectural decisions?
|
||||
{else}
|
||||
This file helps ensure AI agents implement code consistently with your project's unique requirements and patterns.
|
||||
{/if_existing_project_context}
|
||||
|
||||
**Create/Update project context?** [Y/N]
|
||||
|
||||
**Ready to move to the next phase of your project development?**"
|
||||
|
||||
### 7. Handle Project Context Creation Choice
|
||||
|
||||
If user responds 'Y' or 'yes' to creating/updating project context:
|
||||
|
||||
"Excellent choice! Let me launch the Generate Project Context workflow to create a comprehensive guide for AI agents.
|
||||
|
||||
This will help ensure consistent implementation by capturing:
|
||||
|
||||
- Language-specific patterns and rules
|
||||
- Framework conventions from your architecture
|
||||
- Testing and quality standards
|
||||
- Anti-patterns to avoid
|
||||
|
||||
The workflow will collaborate with you to create an optimized `project-context.md` file that AI agents will read before implementing any code."
|
||||
|
||||
**Execute the Generate Project Context workflow:**
|
||||
|
||||
- Load and execute: `{project-root}/_bmad/bmm/workflows/generate-project-context/workflow.md`
|
||||
- The workflow will handle discovery, generation, and completion of the project context file
|
||||
- After completion, return here for final handoff
|
||||
|
||||
If user responds 'N' or 'no':
|
||||
"Understood! Your architecture is complete and ready for implementation. You can always create a project context file later using the Generate Project Context workflow if needed."
|
||||
|
||||
## SUCCESS METRICS:
|
||||
|
||||
|
|
|
|||
|
|
@ -143,3 +143,7 @@ If all validations pass:
|
|||
**All validations complete!** [C] Complete Workflow
|
||||
|
||||
When C is selected, the workflow is complete and the epics.md is ready for development.
|
||||
|
||||
Execute task `_bmad/core/tasks/bmad-help.md` with argument `Create Epics and Stories`.
|
||||
|
||||
Upon completion of the task output, offer to answer any questions about the Epics and Stories.
|
||||
|
|
|
|||
|
|
@ -96,7 +96,7 @@ Enter corrections (e.g., "1=in-progress, 2=backlog") or "skip" to continue witho
|
|||
3. Else if any story status == ready-for-dev → recommend `dev-story`
|
||||
4. Else if any story status == backlog → recommend `create-story`
|
||||
5. Else if any retrospective status == optional → recommend `retrospective`
|
||||
6. Else → All implementation items done; suggest `workflow-status` to plan next phase
|
||||
6. Else → All implementation items done; congratulate the user - you both did amazing work together!
|
||||
<action>Store selected recommendation as: next_story_id, next_workflow_id, next_agent (SM/DEV as appropriate)</action>
|
||||
</step>
|
||||
|
||||
|
|
|
|||
|
|
@ -104,12 +104,12 @@ Present choice:
|
|||
This looks like a focused feature with multiple components.
|
||||
|
||||
**[t] Create tech-spec first** (recommended)
|
||||
**[w] Seems bigger than quick-dev** - see what BMad Method recommends
|
||||
**[w] Seems bigger than quick-dev** - Recommend the Full BMad Flow PRD Process
|
||||
**[e] Execute directly**
|
||||
```
|
||||
|
||||
- **[t]:** Direct to `{quick_spec_workflow}`. **EXIT Quick Dev.**
|
||||
- **[w]:** Direct to `{workflow_init}`. **EXIT Quick Dev.**
|
||||
- **[w]:** Direct user to run the PRD workflow instead. **EXIT Quick Dev.**
|
||||
- **[e]:** Ask for guidance, then **NEXT:** Load `step-02-context-gathering.md`
|
||||
|
||||
### Escalation Triggered - Level 3+
|
||||
|
|
@ -122,8 +122,8 @@ This sounds like platform/system work.
|
|||
**[e] Execute directly** - feeling lucky
|
||||
```
|
||||
|
||||
- **[w]:** Direct to `{workflow_init}`. **EXIT Quick Dev.**
|
||||
- **[t]:** Direct to `{quick_spec_workflow}`. **EXIT Quick Dev.**
|
||||
- **[w]:** Direct user to run the PRD workflow instead. **EXIT Quick Dev.**
|
||||
- **[e]:** Ask for guidance, then **NEXT:** Load `step-02-context-gathering.md`
|
||||
|
||||
---
|
||||
|
|
|
|||
|
|
@ -36,12 +36,10 @@ Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
|
|||
|
||||
- `installed_path` = `{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-dev`
|
||||
- `project_context` = `**/project-context.md` (load if exists)
|
||||
- `project_levels` = `{project-root}/_bmad/bmm/workflows/workflow-status/project-levels.yaml`
|
||||
|
||||
### Related Workflows
|
||||
|
||||
- `quick_spec_workflow` = `{project-root}/_bmad/bmm/workflows/bmad-quick-flow/quick-spec/workflow.md`
|
||||
- `workflow_init` = `{project-root}/_bmad/bmm/workflows/workflow-status/init/workflow.yaml`
|
||||
- `party_mode_exec` = `{project-root}/_bmad/core/workflows/party-mode/workflow.md`
|
||||
- `advanced_elicitation` = `{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml`
|
||||
|
||||
|
|
|
|||
|
|
@ -1,21 +0,0 @@
|
|||
---
|
||||
project_name: '{{project_name}}'
|
||||
user_name: '{{user_name}}'
|
||||
date: '{{date}}'
|
||||
sections_completed: ['technology_stack']
|
||||
existing_patterns_found: {{number_of_patterns_discovered}}
|
||||
---
|
||||
|
||||
# Project Context for AI Agents
|
||||
|
||||
_This file contains critical rules and patterns that AI agents must follow when implementing code in this project. Focus on unobvious details that agents might otherwise miss._
|
||||
|
||||
---
|
||||
|
||||
## Technology Stack & Versions
|
||||
|
||||
_Documented after discovery phase_
|
||||
|
||||
## Critical Implementation Rules
|
||||
|
||||
_Documented after discovery phase_
|
||||
|
|
@ -1,184 +0,0 @@
|
|||
# Step 1: Context Discovery & Initialization
|
||||
|
||||
## MANDATORY EXECUTION RULES (READ FIRST):
|
||||
|
||||
- 🛑 NEVER generate content without user input
|
||||
- ✅ ALWAYS treat this as collaborative discovery between technical peers
|
||||
- 📋 YOU ARE A FACILITATOR, not a content generator
|
||||
- 💬 FOCUS on discovering existing project context and technology stack
|
||||
- 🎯 IDENTIFY critical implementation rules that AI agents need
|
||||
- ⚠️ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
|
||||
- ✅ YOU MUST ALWAYS communicate output in your agent communication style, using the configured `{communication_language}`
|
||||
|
||||
## EXECUTION PROTOCOLS:
|
||||
|
||||
- 🎯 Show your analysis before taking any action
|
||||
- 📖 Read existing project files to understand current context
|
||||
- 💾 Initialize document and update frontmatter
|
||||
- 🚫 FORBIDDEN to load next step until discovery is complete
|
||||
|
||||
## CONTEXT BOUNDARIES:
|
||||
|
||||
- Variables from workflow.md are available in memory
|
||||
- Focus on existing project files and architecture decisions
|
||||
- Look for patterns, conventions, and unique requirements
|
||||
- Prioritize rules that prevent implementation mistakes
|
||||
|
||||
## YOUR TASK:
|
||||
|
||||
Discover the project's technology stack, existing patterns, and critical implementation rules that AI agents must follow when writing code.
|
||||
|
||||
## DISCOVERY SEQUENCE:
|
||||
|
||||
### 1. Check for Existing Project Context
|
||||
|
||||
First, check if project context already exists:
|
||||
|
||||
- Look for a file at `{project_knowledge}/project-context.md` or `{project-root}/**/project-context.md`
|
||||
- If exists: Read complete file to understand existing rules
|
||||
- Present to user: "Found existing project context with {number_of_sections} sections. Would you like to update this or create a new one?"
|
||||
|
||||
### 2. Discover Project Technology Stack
|
||||
|
||||
Load and analyze project files to identify technologies:
|
||||
|
||||
**Architecture Document:**
|
||||
|
||||
- Look for `{planning_artifacts}/architecture.md`
|
||||
- Extract technology choices with specific versions
|
||||
- Note architectural decisions that affect implementation
|
||||
|
||||
**Package Files:**
|
||||
|
||||
- Check for `package.json`, `requirements.txt`, `Cargo.toml`, etc.
|
||||
- Extract exact versions of all dependencies
|
||||
- Note development vs production dependencies
|
||||
|
||||
**Configuration Files:**
|
||||
|
||||
- Look for project-language-specific configs (example: `tsconfig.json`)
|
||||
- Build tool configs (webpack, vite, next.config.js, etc.)
|
||||
- Linting and formatting configs (.eslintrc, .prettierrc, etc.)
|
||||
- Testing configurations (jest.config.js, vitest.config.ts, etc.)
|
||||
|
||||
### 3. Identify Existing Code Patterns
|
||||
|
||||
Search through existing codebase for patterns:
|
||||
|
||||
**Naming Conventions:**
|
||||
|
||||
- File naming patterns (PascalCase, kebab-case, etc.)
|
||||
- Component/function naming conventions
|
||||
- Variable naming patterns
|
||||
- Test file naming patterns
|
||||
|
||||
**Code Organization:**
|
||||
|
||||
- How components are structured
|
||||
- Where utilities and helpers are placed
|
||||
- How services are organized
|
||||
- Test organization patterns
|
||||
|
||||
**Documentation Patterns:**
|
||||
|
||||
- Comment styles and conventions
|
||||
- Documentation requirements
|
||||
- README and API doc patterns
|
||||
|
||||
### 4. Extract Critical Implementation Rules
|
||||
|
||||
Look for rules that AI agents might miss:
|
||||
|
||||
**Language-Specific Rules:**
|
||||
|
||||
- TypeScript strict mode requirements
|
||||
- Import/export conventions
|
||||
- Async/await vs Promise usage patterns
|
||||
- Error handling patterns specific to the language
|
||||
|
||||
**Framework-Specific Rules:**
|
||||
|
||||
- React hooks usage patterns
|
||||
- API route conventions
|
||||
- Middleware usage patterns
|
||||
- State management patterns
|
||||
|
||||
**Testing Rules:**
|
||||
|
||||
- Test structure requirements
|
||||
- Mock usage conventions
|
||||
- Integration vs unit test boundaries
|
||||
- Coverage requirements
|
||||
|
||||
**Development Workflow Rules:**
|
||||
|
||||
- Branch naming conventions
|
||||
- Commit message patterns
|
||||
- PR review requirements
|
||||
- Deployment procedures
|
||||
|
||||
### 5. Initialize Project Context Document
|
||||
|
||||
Based on discovery, create or update the context document:
|
||||
|
||||
#### A. Fresh Document Setup (if no existing context)
|
||||
|
||||
Copy template from `{installed_path}/project-context-template.md` to `{output_folder}/project-context.md`
|
||||
Initialize frontmatter fields.
|
||||
|
||||
#### B. Existing Document Update
|
||||
|
||||
Load existing context and prepare for updates
|
||||
Set frontmatter `sections_completed` to track what will be updated
|
||||
|
||||
### 6. Present Discovery Summary
|
||||
|
||||
Report findings to user:
|
||||
|
||||
"Welcome {{user_name}}! I've analyzed your project for {{project_name}} to discover the context that AI agents need.
|
||||
|
||||
**Technology Stack Discovered:**
|
||||
{{list_of_technologies_with_versions}}
|
||||
|
||||
**Existing Patterns Found:**
|
||||
|
||||
- {{number_of_patterns}} implementation patterns
|
||||
- {{number_of_conventions}} coding conventions
|
||||
- {{number_of_rules}} critical rules
|
||||
|
||||
**Key Areas for Context Rules:**
|
||||
|
||||
- {{area_1}} (e.g., TypeScript configuration)
|
||||
- {{area_2}} (e.g., Testing patterns)
|
||||
- {{area_3}} (e.g., Code organization)
|
||||
|
||||
{if_existing_context}
|
||||
**Existing Context:** Found {{sections}} sections already defined. We can update or add to these.
|
||||
{/if_existing_context}
|
||||
|
||||
Ready to create/update your project context. This will help AI agents implement code consistently with your project's standards.
|
||||
|
||||
[C] Continue to context generation"
|
||||
|
||||
## SUCCESS METRICS:
|
||||
|
||||
✅ Existing project context properly detected and handled
|
||||
✅ Technology stack accurately identified with versions
|
||||
✅ Critical implementation patterns discovered
|
||||
✅ Project context document properly initialized
|
||||
✅ Discovery findings clearly presented to user
|
||||
✅ User ready to proceed with context generation
|
||||
|
||||
## FAILURE MODES:
|
||||
|
||||
❌ Not checking for existing project context before creating new one
|
||||
❌ Missing critical technology versions or configurations
|
||||
❌ Overlooking important coding patterns or conventions
|
||||
❌ Not initializing frontmatter properly
|
||||
❌ Not presenting clear discovery summary to user
|
||||
|
||||
## NEXT STEP:
|
||||
|
||||
After user selects [C] to continue, load `./step-02-generate.md` to collaboratively generate the specific project context rules.
|
||||
|
||||
Remember: Do NOT proceed to step-02 until user explicitly selects [C] from the menu and discovery is confirmed and the initial file has been written as directed in this discovery step!
|
||||
|
|
@ -1,318 +0,0 @@
|
|||
# Step 2: Context Rules Generation
|
||||
|
||||
## MANDATORY EXECUTION RULES (READ FIRST):
|
||||
|
||||
- 🛑 NEVER generate content without user input
|
||||
- ✅ ALWAYS treat this as collaborative discovery between technical peers
|
||||
- 📋 YOU ARE A FACILITATOR, not a content generator
|
||||
- 💬 FOCUS on unobvious rules that AI agents need to be reminded of
|
||||
- 🎯 KEEP CONTENT LEAN - optimize for LLM context efficiency
|
||||
- ⚠️ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
|
||||
- ✅ YOU MUST ALWAYS communicate output in your agent communication style, using the configured `{communication_language}`
|
||||
|
||||
## EXECUTION PROTOCOLS:
|
||||
|
||||
- 🎯 Show your analysis before taking any action
|
||||
- 📝 Focus on specific, actionable rules rather than general advice
|
||||
- ⚠️ Present A/P/C menu after each major rule category
|
||||
- 💾 ONLY save when user chooses C (Continue)
|
||||
- 📖 Update frontmatter with completed sections
|
||||
- 🚫 FORBIDDEN to load next step until all sections are complete
|
||||
|
||||
## COLLABORATION MENUS (A/P/C):
|
||||
|
||||
This step will generate content and present choices for each rule category:
|
||||
|
||||
- **A (Advanced Elicitation)**: Use discovery protocols to explore nuanced implementation rules
|
||||
- **P (Party Mode)**: Bring multiple perspectives to identify critical edge cases
|
||||
- **C (Continue)**: Save the current rules and proceed to next category
|
||||
|
||||
## PROTOCOL INTEGRATION:
|
||||
|
||||
- When 'A' selected: Execute {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml
|
||||
- When 'P' selected: Execute {project-root}/_bmad/core/workflows/party-mode
|
||||
- Protocols always return to this step's A/P/C menu after the A or P protocol has completed
|
||||
- User accepts/rejects protocol changes before proceeding
|
||||
|
||||
## CONTEXT BOUNDARIES:
|
||||
|
||||
- Discovery results from step-1 are available
|
||||
- Technology stack and existing patterns are identified
|
||||
- Focus on rules that prevent implementation mistakes
|
||||
- Prioritize unobvious details that AI agents might miss
|
||||
|
||||
## YOUR TASK:
|
||||
|
||||
Collaboratively generate specific, critical rules that AI agents must follow when implementing code in this project.
|
||||
|
||||
## CONTEXT GENERATION SEQUENCE:
|
||||
|
||||
### 1. Technology Stack & Versions
|
||||
|
||||
Document the exact technology stack from discovery:
|
||||
|
||||
**Core Technologies:**
|
||||
Based on user skill level, present findings:
|
||||
|
||||
**Expert Mode:**
|
||||
"Technology stack from your architecture and package files:
|
||||
{{exact_technologies_with_versions}}
|
||||
|
||||
Any critical version constraints I should document for agents?"
|
||||
|
||||
**Intermediate Mode:**
|
||||
"I found your technology stack:
|
||||
|
||||
**Core Technologies:**
|
||||
{{main_technologies_with_versions}}
|
||||
|
||||
**Key Dependencies:**
|
||||
{{important_dependencies_with_versions}}
|
||||
|
||||
Are there any version constraints or compatibility notes agents should know about?"
|
||||
|
||||
**Beginner Mode:**
|
||||
"Here are the technologies you're using:
|
||||
|
||||
**Main Technologies:**
|
||||
{{friendly_description_of_tech_stack}}
|
||||
|
||||
**Important Notes:**
|
||||
{{key_things_agents_need_to_know_about_versions}}
|
||||
|
||||
Should I document any special version rules or compatibility requirements?"
|
||||
|
||||
### 2. Language-Specific Rules
|
||||
|
||||
Focus on unobvious language patterns agents might miss:
|
||||
|
||||
**TypeScript/JavaScript Rules:**
|
||||
"Based on your codebase, I notice some specific patterns:
|
||||
|
||||
**Configuration Requirements:**
|
||||
{{typescript_config_rules}}
|
||||
|
||||
**Import/Export Patterns:**
|
||||
{{import_export_conventions}}
|
||||
|
||||
**Error Handling Patterns:**
|
||||
{{error_handling_requirements}}
|
||||
|
||||
Are these patterns correct? Any other language-specific rules agents should follow?"
|
||||
|
||||
**Python/Ruby/Other Language Rules:**
|
||||
Adapt to the actual language in use with similar focused questions.
|
||||
|
||||
### 3. Framework-Specific Rules
|
||||
|
||||
Document framework-specific patterns:
|
||||
|
||||
**React Rules (if applicable):**
|
||||
"For React development, I see these patterns:
|
||||
|
||||
**Hooks Usage:**
|
||||
{{hooks_usage_patterns}}
|
||||
|
||||
**Component Structure:**
|
||||
{{component_organization_rules}}
|
||||
|
||||
**State Management:**
|
||||
{{state_management_patterns}}
|
||||
|
||||
**Performance Rules:**
|
||||
{{performance_optimization_requirements}}
|
||||
|
||||
Should I add any other React-specific rules?"
|
||||
|
||||
**Other Framework Rules:**
|
||||
Adapt for Vue, Angular, Next.js, Express, etc.
|
||||
|
||||
### 4. Testing Rules
|
||||
|
||||
Focus on testing patterns that ensure consistency:
|
||||
|
||||
**Test Structure Rules:**
|
||||
"Your testing setup shows these patterns:
|
||||
|
||||
**Test Organization:**
|
||||
{{test_file_organization}}
|
||||
|
||||
**Mock Usage:**
|
||||
{{mock_patterns_and_conventions}}
|
||||
|
||||
**Test Coverage Requirements:**
|
||||
{{coverage_expectations}}
|
||||
|
||||
**Integration vs Unit Test Rules:**
|
||||
{{test_boundary_patterns}}
|
||||
|
||||
Are there testing rules agents should always follow?"
|
||||
|
||||
### 5. Code Quality & Style Rules
|
||||
|
||||
Document critical style and quality rules:
|
||||
|
||||
**Linting/Formatting:**
|
||||
"Your code style configuration requires:
|
||||
|
||||
**ESLint/Prettier Rules:**
|
||||
{{specific_linting_rules}}
|
||||
|
||||
**Code Organization:**
|
||||
{{file_and_folder_structure_rules}}
|
||||
|
||||
**Naming Conventions:**
|
||||
{{naming_patterns_agents_must_follow}}
|
||||
|
||||
**Documentation Requirements:**
|
||||
{{comment_and_documentation_patterns}}
|
||||
|
||||
Any additional code quality rules?"
|
||||
|
||||
### 6. Development Workflow Rules
|
||||
|
||||
Document workflow patterns that affect implementation:
|
||||
|
||||
**Git/Repository Rules:**
|
||||
"Your project uses these patterns:
|
||||
|
||||
**Branch Naming:**
|
||||
{{branch_naming_conventions}}
|
||||
|
||||
**Commit Message Format:**
|
||||
{{commit_message_patterns}}
|
||||
|
||||
**PR Requirements:**
|
||||
{{pull_request_checklist}}
|
||||
|
||||
**Deployment Patterns:**
|
||||
{{deployment_considerations}}
|
||||
|
||||
Should I document any other workflow rules?"
|
||||
|
||||
### 7. Critical Don't-Miss Rules
|
||||
|
||||
Identify rules that prevent common mistakes:
|
||||
|
||||
**Anti-Patterns to Avoid:**
|
||||
"Based on your codebase, here are critical things agents must NOT do:
|
||||
|
||||
{{critical_anti_patterns_with_examples}}
|
||||
|
||||
**Edge Cases:**
|
||||
{{specific_edge_cases_agents_should_handle}}
|
||||
|
||||
**Security Rules:**
|
||||
{{security_considerations_agents_must_follow}}
|
||||
|
||||
**Performance Gotchas:**
|
||||
{{performance_patterns_to_avoid}}
|
||||
|
||||
Are there other 'gotchas' agents should know about?"
|
||||
|
||||
### 8. Generate Context Content
|
||||
|
||||
For each category, prepare lean content for the project context file:
|
||||
|
||||
#### Content Structure:
|
||||
|
||||
```markdown
|
||||
## Technology Stack & Versions
|
||||
|
||||
{{concise_technology_list_with_exact_versions}}
|
||||
|
||||
## Critical Implementation Rules
|
||||
|
||||
### Language-Specific Rules
|
||||
|
||||
{{bullet_points_of_critical_language_rules}}
|
||||
|
||||
### Framework-Specific Rules
|
||||
|
||||
{{bullet_points_of_framework_patterns}}
|
||||
|
||||
### Testing Rules
|
||||
|
||||
{{bullet_points_of_testing_requirements}}
|
||||
|
||||
### Code Quality & Style Rules
|
||||
|
||||
{{bullet_points_of_style_and_quality_rules}}
|
||||
|
||||
### Development Workflow Rules
|
||||
|
||||
{{bullet_points_of_workflow_patterns}}
|
||||
|
||||
### Critical Don't-Miss Rules
|
||||
|
||||
{{bullet_points_of_anti_patterns_and_edge_cases}}
|
||||
```
|
||||
|
||||
### 9. Present Content and Menu
|
||||
|
||||
After each category, show the generated rules and present choices:
|
||||
|
||||
"I've drafted the {{category_name}} rules for your project context.
|
||||
|
||||
**Here's what I'll add:**
|
||||
|
||||
[Show the complete markdown content for this category]
|
||||
|
||||
**What would you like to do?**
|
||||
[A] Advanced Elicitation - Explore nuanced rules for this category
|
||||
[P] Party Mode - Review from different implementation perspectives
|
||||
[C] Continue - Save these rules and move to next category"
|
||||
|
||||
### 10. Handle Menu Selection
|
||||
|
||||
#### If 'A' (Advanced Elicitation):
|
||||
|
||||
- Execute advanced-elicitation.xml with current category rules
|
||||
- Process enhanced rules that come back
|
||||
- Ask user: "Accept these enhanced rules for {{category}}? (y/n)"
|
||||
- If yes: Update content, then return to A/P/C menu
|
||||
- If no: Keep original content, then return to A/P/C menu
|
||||
|
||||
#### If 'P' (Party Mode):
|
||||
|
||||
- Execute party-mode workflow with category rules context
|
||||
- Process collaborative insights on implementation patterns
|
||||
- Ask user: "Accept these changes to {{category}} rules? (y/n)"
|
||||
- If yes: Update content, then return to A/P/C menu
|
||||
- If no: Keep original content, then return to A/P/C menu
|
||||
|
||||
#### If 'C' (Continue):
|
||||
|
||||
- Save the current category content to project context file
|
||||
- Update frontmatter: `sections_completed: [...]`
|
||||
- Proceed to next category or step-03 if complete
|
||||
|
||||
## APPEND TO PROJECT CONTEXT:
|
||||
|
||||
When user selects 'C' for a category, append the content directly to `{output_folder}/project-context.md` using the structure from step 8.
|
||||
|
||||
## SUCCESS METRICS:
|
||||
|
||||
✅ All critical technology versions accurately documented
|
||||
✅ Language-specific rules cover unobvious patterns
|
||||
✅ Framework rules capture project-specific conventions
|
||||
✅ Testing rules ensure consistent test quality
|
||||
✅ Code quality rules maintain project standards
|
||||
✅ Workflow rules prevent implementation conflicts
|
||||
✅ Content is lean and optimized for LLM context
|
||||
✅ A/P/C menu presented and handled correctly for each category
|
||||
|
||||
## FAILURE MODES:
|
||||
|
||||
❌ Including obvious rules that agents already know
|
||||
❌ Making content too verbose for LLM context efficiency
|
||||
❌ Missing critical anti-patterns or edge cases
|
||||
❌ Not getting user validation for each rule category
|
||||
❌ Not documenting exact versions and configurations
|
||||
❌ Not presenting A/P/C menu after content generation
|
||||
|
||||
## NEXT STEP:
|
||||
|
||||
After completing all rule categories and user selects 'C' for the final category, load `./step-03-complete.md` to finalize the project context file.
|
||||
|
||||
Remember: Do NOT proceed to step-03 until all categories are complete and user explicitly selects 'C' for each!
|
||||
|
|
@ -1,278 +0,0 @@
|
|||
# Step 3: Context Completion & Finalization
|
||||
|
||||
## MANDATORY EXECUTION RULES (READ FIRST):
|
||||
|
||||
- 🛑 NEVER generate content without user input
|
||||
- ✅ ALWAYS treat this as collaborative completion between technical peers
|
||||
- 📋 YOU ARE A FACILITATOR, not a content generator
|
||||
- 💬 FOCUS on finalizing a lean, LLM-optimized project context
|
||||
- 🎯 ENSURE all critical rules are captured and actionable
|
||||
- ⚠️ ABSOLUTELY NO TIME ESTIMATES - AI development speed has fundamentally changed
|
||||
- ✅ YOU MUST ALWAYS communicate output in your agent communication style, using the configured `{communication_language}`
|
||||
|
||||
## EXECUTION PROTOCOLS:
|
||||
|
||||
- 🎯 Show your analysis before taking any action
|
||||
- 📝 Review and optimize content for LLM context efficiency
|
||||
- 📖 Update frontmatter with completion status
|
||||
- 🚫 NO MORE STEPS - this is the final step
|
||||
|
||||
## CONTEXT BOUNDARIES:
|
||||
|
||||
- All rule categories from step-2 are complete
|
||||
- Technology stack and versions are documented
|
||||
- Focus on final review, optimization, and completion
|
||||
- Ensure the context file is ready for AI agent consumption
|
||||
|
||||
## YOUR TASK:
|
||||
|
||||
Complete the project context file, optimize it for LLM efficiency, and provide guidance for usage and maintenance.
|
||||
|
||||
## COMPLETION SEQUENCE:
|
||||
|
||||
### 1. Review Complete Context File
|
||||
|
||||
Read the entire project context file and analyze:
|
||||
|
||||
**Content Analysis:**
|
||||
|
||||
- Total length and readability for LLMs
|
||||
- Clarity and specificity of rules
|
||||
- Coverage of all critical areas
|
||||
- Actionability of each rule
|
||||
|
||||
**Structure Analysis:**
|
||||
|
||||
- Logical organization of sections
|
||||
- Consistency of formatting
|
||||
- Absence of redundant or obvious information
|
||||
- Optimization for quick scanning
|
||||
|
||||
### 2. Optimize for LLM Context
|
||||
|
||||
Ensure the file is lean and efficient:
|
||||
|
||||
**Content Optimization:**
|
||||
|
||||
- Remove any redundant rules or obvious information
|
||||
- Combine related rules into concise bullet points
|
||||
- Use specific, actionable language
|
||||
- Ensure each rule provides unique value
|
||||
|
||||
**Formatting Optimization:**
|
||||
|
||||
- Use consistent markdown formatting
|
||||
- Implement clear section hierarchy
|
||||
- Ensure scannability with strategic use of bolding
|
||||
- Maintain readability while maximizing information density
|
||||
|
||||
### 3. Final Content Structure
|
||||
|
||||
Ensure the final structure follows this optimized format:
|
||||
|
||||
```markdown
|
||||
# Project Context for AI Agents
|
||||
|
||||
_This file contains critical rules and patterns that AI agents must follow when implementing code in this project. Focus on unobvious details that agents might otherwise miss._
|
||||
|
||||
---
|
||||
|
||||
## Technology Stack & Versions
|
||||
|
||||
{{concise_technology_list}}
|
||||
|
||||
## Critical Implementation Rules
|
||||
|
||||
### Language-Specific Rules
|
||||
|
||||
{{specific_language_rules}}
|
||||
|
||||
### Framework-Specific Rules
|
||||
|
||||
{{framework_patterns}}
|
||||
|
||||
### Testing Rules
|
||||
|
||||
{{testing_requirements}}
|
||||
|
||||
### Code Quality & Style Rules
|
||||
|
||||
{{style_and_quality_patterns}}
|
||||
|
||||
### Development Workflow Rules
|
||||
|
||||
{{workflow_patterns}}
|
||||
|
||||
### Critical Don't-Miss Rules
|
||||
|
||||
{{anti_patterns_and_edge_cases}}
|
||||
|
||||
---
|
||||
|
||||
## Usage Guidelines
|
||||
|
||||
**For AI Agents:**
|
||||
|
||||
- Read this file before implementing any code
|
||||
- Follow ALL rules exactly as documented
|
||||
- When in doubt, prefer the more restrictive option
|
||||
- Update this file if new patterns emerge
|
||||
|
||||
**For Humans:**
|
||||
|
||||
- Keep this file lean and focused on agent needs
|
||||
- Update when technology stack changes
|
||||
- Review quarterly for outdated rules
|
||||
- Remove rules that become obvious over time
|
||||
|
||||
Last Updated: {{date}}
|
||||
```
|
||||
|
||||
### 4. Present Completion Summary
|
||||
|
||||
Based on user skill level, present the completion:
|
||||
|
||||
**Expert Mode:**
|
||||
"Project context complete. Optimized for LLM consumption with {{rule_count}} critical rules across {{section_count}} sections.
|
||||
|
||||
File saved to: `{output_folder}/project-context.md`
|
||||
|
||||
Ready for AI agent integration."
|
||||
|
||||
**Intermediate Mode:**
|
||||
"Your project context is complete and optimized for AI agents!
|
||||
|
||||
**What we created:**
|
||||
|
||||
- {{rule_count}} critical implementation rules
|
||||
- Technology stack with exact versions
|
||||
- Framework-specific patterns and conventions
|
||||
- Testing and quality guidelines
|
||||
- Workflow and anti-pattern rules
|
||||
|
||||
**Key benefits:**
|
||||
|
||||
- AI agents will implement consistently with your standards
|
||||
- Reduced context switching and implementation errors
|
||||
- Clear guidance for unobvious project requirements
|
||||
|
||||
**Next steps:**
|
||||
|
||||
- AI agents should read this file before implementing
|
||||
- Update as your project evolves
|
||||
- Review periodically for optimization"
|
||||
|
||||
**Beginner Mode:**
|
||||
"Excellent! Your project context guide is ready! 🎉
|
||||
|
||||
**What this does:**
|
||||
Think of this as a 'rules of the road' guide for AI agents working on your project. It ensures they all follow the same patterns and avoid common mistakes.
|
||||
|
||||
**What's included:**
|
||||
|
||||
- Exact technology versions to use
|
||||
- Critical coding rules they might miss
|
||||
- Testing and quality standards
|
||||
- Workflow patterns to follow
|
||||
|
||||
**How AI agents use it:**
|
||||
They read this file before writing any code, ensuring everything they create follows your project's standards perfectly.
|
||||
|
||||
Your project context is saved and ready to help agents implement consistently!"
|
||||
|
||||
### 5. Final File Updates
|
||||
|
||||
Update the project context file with completion information:
|
||||
|
||||
**Frontmatter Update:**
|
||||
|
||||
```yaml
|
||||
---
|
||||
project_name: '{{project_name}}'
|
||||
user_name: '{{user_name}}'
|
||||
date: '{{date}}'
|
||||
sections_completed:
|
||||
['technology_stack', 'language_rules', 'framework_rules', 'testing_rules', 'quality_rules', 'workflow_rules', 'anti_patterns']
|
||||
status: 'complete'
|
||||
rule_count: {{total_rules}}
|
||||
optimized_for_llm: true
|
||||
---
|
||||
```
|
||||
|
||||
**Add Usage Section:**
|
||||
Append the usage guidelines from step 3 to complete the document.
|
||||
|
||||
### 6. Completion Validation
|
||||
|
||||
Final checks before completion:
|
||||
|
||||
**Content Validation:**
|
||||
✅ All critical technology versions documented
|
||||
✅ Language-specific rules are specific and actionable
|
||||
✅ Framework rules cover project conventions
|
||||
✅ Testing rules ensure consistency
|
||||
✅ Code quality rules maintain standards
|
||||
✅ Workflow rules prevent conflicts
|
||||
✅ Anti-pattern rules prevent common mistakes
|
||||
|
||||
**Format Validation:**
|
||||
✅ Content is lean and optimized for LLMs
|
||||
✅ Structure is logical and scannable
|
||||
✅ No redundant or obvious information
|
||||
✅ Consistent formatting throughout
|
||||
|
||||
### 7. Completion Message
|
||||
|
||||
Present final completion to user:
|
||||
|
||||
"✅ **Project Context Generation Complete!**
|
||||
|
||||
Your optimized project context file is ready at:
|
||||
`{output_folder}/project-context.md`
|
||||
|
||||
**📊 Context Summary:**
|
||||
|
||||
- {{rule_count}} critical rules for AI agents
|
||||
- {{section_count}} comprehensive sections
|
||||
- Optimized for LLM context efficiency
|
||||
- Ready for immediate agent integration
|
||||
|
||||
**🎯 Key Benefits:**
|
||||
|
||||
- Consistent implementation across all AI agents
|
||||
- Reduced common mistakes and edge cases
|
||||
- Clear guidance for project-specific patterns
|
||||
- Minimal LLM context usage
|
||||
|
||||
**📋 Next Steps:**
|
||||
|
||||
1. AI agents will automatically read this file when implementing
|
||||
2. Update this file when your technology stack or patterns evolve
|
||||
3. Review quarterly to optimize and remove outdated rules
|
||||
|
||||
Your project context will help ensure high-quality, consistent implementation across all development work. Great work capturing your project's critical implementation requirements!"
|
||||
|
||||
## SUCCESS METRICS:
|
||||
|
||||
✅ Complete project context file with all critical rules
|
||||
✅ Content optimized for LLM context efficiency
|
||||
✅ All technology versions and patterns documented
|
||||
✅ File structure is logical and scannable
|
||||
✅ Usage guidelines included for agents and humans
|
||||
✅ Frontmatter properly updated with completion status
|
||||
✅ User provided with clear next steps and benefits
|
||||
|
||||
## FAILURE MODES:
|
||||
|
||||
❌ Final content is too verbose for LLM consumption
|
||||
❌ Missing critical implementation rules or patterns
|
||||
❌ Not optimizing content for agent readability
|
||||
❌ Not providing clear usage guidelines
|
||||
❌ Frontmatter not properly updated
|
||||
❌ Not validating file completion before ending
|
||||
|
||||
## WORKFLOW COMPLETE:
|
||||
|
||||
This is the final step of the Generate Project Context workflow. The user now has a comprehensive, optimized project context file that will ensure consistent, high-quality implementation across all AI agents working on the project.
|
||||
|
||||
The project context file serves as the critical "rules of the road" that agents need to implement code consistently with the project's standards and patterns.
|
||||
|
|
@ -1,49 +0,0 @@
|
|||
---
|
||||
name: generate-project-context
|
||||
description: Creates a concise project-context.md file with critical rules and patterns that AI agents must follow when implementing code. Optimized for LLM context efficiency.
|
||||
---
|
||||
|
||||
# Generate Project Context Workflow
|
||||
|
||||
**Goal:** Create a concise, optimized `project-context.md` file containing critical rules, patterns, and guidelines that AI agents must follow when implementing code. This file focuses on unobvious details that LLMs need to be reminded of.
|
||||
|
||||
**Your Role:** You are a technical facilitator working with a peer to capture the essential implementation rules that will ensure consistent, high-quality code generation across all AI agents working on the project.
|
||||
|
||||
---
|
||||
|
||||
## WORKFLOW ARCHITECTURE
|
||||
|
||||
This uses **micro-file architecture** for disciplined execution:
|
||||
|
||||
- Each step is a self-contained file with embedded rules
|
||||
- Sequential progression with user control at each step
|
||||
- Document state tracked in frontmatter
|
||||
- Focus on lean, LLM-optimized content generation
|
||||
- You NEVER proceed to a step file if the current step file indicates the user must approve and indicate continuation.
|
||||
|
||||
---
|
||||
|
||||
## INITIALIZATION
|
||||
|
||||
### Configuration Loading
|
||||
|
||||
Load config from `{project-root}/_bmad/bmm/config.yaml` and resolve:
|
||||
|
||||
- `project_name`, `output_folder`, `user_name`
|
||||
- `communication_language`, `document_output_language`, `user_skill_level`
|
||||
- `date` as system-generated current datetime
|
||||
- ✅ YOU MUST ALWAYS communicate output in your agent communication style, using the configured `{communication_language}`
|
||||
|
||||
### Paths
|
||||
|
||||
- `installed_path` = `{project-root}/_bmad/bmm/workflows/generate-project-context`
|
||||
- `template_path` = `{installed_path}/project-context-template.md`
|
||||
- `output_file` = `{output_folder}/project-context.md`
|
||||
|
||||
---
|
||||
|
||||
## EXECUTION
|
||||
|
||||
Load and execute `steps/step-01-discover.md` to begin the workflow.
|
||||
|
||||
**Note:** Input document discovery and initialization protocols are handled in step-01-discover.md.
|
||||
|
|
@ -257,7 +257,6 @@ test('should do something', async ({ {fixtureName} }) => {
|
|||
|
||||
- Check off tasks as you complete them
|
||||
- Share progress in daily standup
|
||||
- Mark story as IN PROGRESS in `bmm-workflow-status.md`
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -143,7 +143,6 @@ Note: CI setup is typically a one-time task per repo and can be run any time aft
|
|||
|
||||
### Status File Integration
|
||||
|
||||
- [ ] `bmm-workflow-status.md` exists
|
||||
- [ ] CI setup logged in Quality & Testing Progress section
|
||||
- [ ] Status updated with completion timestamp
|
||||
- [ ] Platform and configuration noted
|
||||
|
|
|
|||
|
|
@ -201,7 +201,6 @@ Before starting the workflow:
|
|||
|
||||
### Status File Integration
|
||||
|
||||
- [ ] `bmm-workflow-status.md` exists
|
||||
- [ ] Framework initialization logged in Quality & Testing Progress section
|
||||
- [ ] Status file updated with completion timestamp
|
||||
- [ ] Status file shows framework: Playwright or Cypress
|
||||
|
|
|
|||
|
|
@ -145,7 +145,6 @@
|
|||
|
||||
### Status File Integration
|
||||
|
||||
- [ ] bmm-workflow-status.md exists
|
||||
- [ ] Test design logged in Quality & Testing Progress
|
||||
- [ ] Epic number and scope documented
|
||||
- [ ] Completion timestamp recorded
|
||||
|
|
|
|||
|
|
@ -28,12 +28,7 @@ The workflow auto-detects which mode to use based on project phase.
|
|||
- If `{implementation_artifacts}/sprint-status.yaml` exists → **Epic-Level Mode** (Phase 4)
|
||||
- If NOT exists → Check workflow status
|
||||
|
||||
2. **Check workflow-status.yaml**
|
||||
- Read `{planning_artifacts}/bmm-workflow-status.yaml`
|
||||
- If `implementation-readiness: required` or `implementation-readiness: recommended` → **System-Level Mode** (Phase 3)
|
||||
- Otherwise → **Epic-Level Mode** (Phase 4 without sprint status yet)
|
||||
|
||||
3. **Mode-Specific Requirements**
|
||||
2. **Mode-Specific Requirements**
|
||||
|
||||
**System-Level Mode (Phase 3 - Testability Review):**
|
||||
- ✅ Architecture document exists (architecture.md or tech-spec)
|
||||
|
|
|
|||
|
|
@ -375,12 +375,6 @@ Knowledge fragments referenced:
|
|||
|
||||
### Step 5: Status Updates and Notifications
|
||||
|
||||
**Status File Updated:**
|
||||
|
||||
- [ ] Gate decision appended to bmm-workflow-status.md (if append_to_history: true)
|
||||
- [ ] Format correct: `[DATE] Gate Decision: DECISION - Target {ID} - {rationale}`
|
||||
- [ ] Status file committed or staged for commit
|
||||
|
||||
**Gate YAML Created:**
|
||||
|
||||
- [ ] Gate YAML snippet generated with decision and criteria
|
||||
|
|
@ -480,13 +474,6 @@ Knowledge fragments referenced:
|
|||
|
||||
## Phase 2 Integration Points
|
||||
|
||||
### BMad Workflow Status
|
||||
|
||||
- [ ] Gate decision added to `bmm-workflow-status.md`
|
||||
- [ ] Format matches existing gate history entries
|
||||
- [ ] Timestamp is accurate
|
||||
- [ ] Decision summary is concise (<80 chars)
|
||||
|
||||
### CI/CD Pipeline
|
||||
|
||||
- [ ] Gate YAML is CI/CD-compatible
|
||||
|
|
|
|||
|
|
@ -292,7 +292,6 @@ This phase uses traceability results to make a quality gate decision (PASS/CONCE
|
|||
4. **Load supporting artifacts**:
|
||||
- `test-design.md` → Risk priorities, DoD checklist
|
||||
- `story-*.md` or `Epics.md` → Requirements context
|
||||
- `bmm-workflow-status.md` → Workflow completion status (if `check_all_workflows_complete: true`)
|
||||
|
||||
5. **Validate evidence freshness** (if `validate_evidence_freshness: true`):
|
||||
- Check timestamps of test-design, traceability, NFR assessments
|
||||
|
|
@ -399,7 +398,7 @@ This phase uses traceability results to make a quality gate decision (PASS/CONCE
|
|||
## Decision Criteria
|
||||
|
||||
| Criterion | Threshold | Actual | Status |
|
||||
| ----------------- | --------- | -------- | ------- |
|
||||
| ----------------- | --------- | -------- | ------ |
|
||||
| P0 Coverage | ≥100% | 100% | ✅ PASS |
|
||||
| P1 Coverage | ≥90% | 88% | ⚠️ FAIL |
|
||||
| Overall Coverage | ≥80% | 92% | ✅ PASS |
|
||||
|
|
@ -506,22 +505,7 @@ This phase uses traceability results to make a quality gate decision (PASS/CONCE
|
|||
|
||||
**Actions:**
|
||||
|
||||
1. **Update workflow status** (if `append_to_history: true`):
|
||||
- Append gate decision to `bmm-workflow-status.md` under "Gate History" section
|
||||
- Format:
|
||||
|
||||
```markdown
|
||||
## Gate History
|
||||
|
||||
### Story 1.3 - User Login (2025-01-15)
|
||||
|
||||
- **Decision**: CONCERNS
|
||||
- **Reason**: P1 coverage 88% (below 90%)
|
||||
- **Document**: [gate-decision-story-1.3.md](_bmad/output/gate-decision-story-1.3.md)
|
||||
- **Action**: Deploy with follow-up story for AC-5
|
||||
```
|
||||
|
||||
2. **Generate stakeholder notification** (if `notify_stakeholders: true`):
|
||||
1. **Generate stakeholder notification** (if `notify_stakeholders: true`):
|
||||
- Create concise summary message for team communication
|
||||
- Include: Decision, key metrics, action items
|
||||
- Format for Slack/email/chat:
|
||||
|
|
@ -541,7 +525,7 @@ This phase uses traceability results to make a quality gate decision (PASS/CONCE
|
|||
Full Report: _bmad/output/gate-decision-story-1.3.md
|
||||
```
|
||||
|
||||
3. **Request sign-off** (if `require_sign_off: true`):
|
||||
2. **Request sign-off** (if `require_sign_off: true`):
|
||||
- Prompt for named approver (tech lead, QA lead, PM)
|
||||
- Document approver name and timestamp in gate decision
|
||||
- Block until sign-off received (interactive prompt)
|
||||
|
|
@ -837,7 +821,7 @@ Use selective testing principles from `selective-testing.md`:
|
|||
## Coverage Summary
|
||||
|
||||
| Priority | Total Criteria | FULL Coverage | Coverage % | Status |
|
||||
| --------- | -------------- | ------------- | ---------- | ------- |
|
||||
| --------- | -------------- | ------------- | ---------- | ------ |
|
||||
| P0 | 3 | 3 | 100% | ✅ PASS |
|
||||
| P1 | 5 | 4 | 80% | ⚠️ WARN |
|
||||
| P2 | 4 | 3 | 75% | ✅ PASS |
|
||||
|
|
@ -968,7 +952,6 @@ Before completing this workflow, verify:
|
|||
- ✅ Decision rules applied consistently (PASS/CONCERNS/FAIL/WAIVED)
|
||||
- ✅ Gate decision document created with evidence
|
||||
- ✅ Waiver documented if decision is WAIVED (approver, justification, mitigation)
|
||||
- ✅ Workflow status updated (bmm-workflow-status.md)
|
||||
- ✅ Stakeholders notified (if enabled)
|
||||
|
||||
---
|
||||
|
|
|
|||
|
|
@ -1,346 +0,0 @@
|
|||
# Workflow Init - Project Setup Instructions
|
||||
|
||||
<critical>The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml</critical>
|
||||
<critical>You MUST have already loaded and processed: workflow-init/workflow.yaml</critical>
|
||||
<critical>Communicate in {communication_language} with {user_name}</critical>
|
||||
<critical>This workflow handles BOTH new projects AND legacy projects following the BMad Method</critical>
|
||||
|
||||
<workflow>
|
||||
|
||||
<step n="1" goal="Scan for existing work">
|
||||
<output>Welcome to BMad Method, {user_name}!</output>
|
||||
|
||||
<action>Perform comprehensive scan for existing work:
|
||||
|
||||
- BMM artifacts: PRD, epics, architecture, UX, brief, research, brainstorm
|
||||
- Implementation: stories, sprint-status, workflow-status
|
||||
- Codebase: source directories, package files, git repo
|
||||
- Check both {planning_artifacts} and {implementation_artifacts} locations
|
||||
</action>
|
||||
|
||||
<action>Categorize into one of these states:
|
||||
|
||||
- CLEAN: No artifacts or code (or scaffold only)
|
||||
- PLANNING: Has PRD/spec but no implementation
|
||||
- ACTIVE: Has stories or sprint status
|
||||
- LEGACY: Has code but no BMM artifacts
|
||||
- UNCLEAR: Mixed state needs clarification
|
||||
</action>
|
||||
|
||||
<ask>What's your project called? {{#if project_name}}(Config shows: {{project_name}}){{/if}}</ask>
|
||||
<action>Store project_name</action>
|
||||
<template-output>project_name</template-output>
|
||||
</step>
|
||||
|
||||
<step n="2" goal="Choose setup path">
|
||||
<check if="state == CLEAN">
|
||||
<output>Perfect! Fresh start detected.</output>
|
||||
<action>Continue to step 3</action>
|
||||
</check>
|
||||
|
||||
<check if="state == ACTIVE AND workflow_status exists">
|
||||
<output>✅ You already have workflow tracking at: {{workflow_status_path}}
|
||||
|
||||
To check progress: Load any BMM agent and run /bmad:bmm:workflows:workflow-status
|
||||
|
||||
Happy building! 🚀</output>
|
||||
<action>Exit workflow (already initialized)</action>
|
||||
</check>
|
||||
|
||||
<check if="state != CLEAN">
|
||||
<output>Found existing work:
|
||||
{{summary_of_findings}}</output>
|
||||
|
||||
<ask>How would you like to proceed?
|
||||
|
||||
1. **Continue** - Work with existing artifacts
|
||||
2. **Archive & Start Fresh** - Move old work to archive
|
||||
3. **Express Setup** - I know exactly what I need
|
||||
4. **Guided Setup** - Walk me through options
|
||||
|
||||
Choice [1-4]</ask>
|
||||
|
||||
<check if="choice == 1">
|
||||
<action>Set continuing_existing = true</action>
|
||||
<action>Store found artifacts</action>
|
||||
<action>Continue to step 7 (detect track from artifacts)</action>
|
||||
</check>
|
||||
|
||||
<check if="choice == 2">
|
||||
<ask>Archive existing work? (y/n)</ask>
|
||||
<action if="y">Move artifacts to {planning_artifacts}/archive/</action>
|
||||
<output>Ready for fresh start!</output>
|
||||
<action>Continue to step 3</action>
|
||||
</check>
|
||||
|
||||
<check if="choice == 3">
|
||||
<action>Jump to step 3 (express path)</action>
|
||||
</check>
|
||||
|
||||
<check if="choice == 4">
|
||||
<action>Continue to step 4 (guided path)</action>
|
||||
</check>
|
||||
</check>
|
||||
|
||||
<check if="state == CLEAN">
|
||||
<ask>Setup approach:
|
||||
|
||||
1. **Express** - I know what I need
|
||||
2. **Guided** - Show me the options
|
||||
|
||||
Choice [1 or 2]:</ask>
|
||||
|
||||
<check if="choice == 1">
|
||||
<action>Continue to step 3 (express)</action>
|
||||
</check>
|
||||
|
||||
<check if="choice == 2">
|
||||
<action>Continue to step 4 (guided)</action>
|
||||
</check>
|
||||
</check>
|
||||
</step>
|
||||
|
||||
<step n="3" goal="Express setup path">
|
||||
<ask>Is this for:
|
||||
1. **New project** (greenfield)
|
||||
2. **Existing codebase** (brownfield)
|
||||
|
||||
Choice [1/2]:</ask>
|
||||
<action>Set field_type based on choice</action>
|
||||
|
||||
<ask>Planning approach:
|
||||
|
||||
1. **BMad Method** - Full planning for complex projects
|
||||
2. **Enterprise Method** - Extended planning with security/DevOps
|
||||
|
||||
Choice [1/2]:</ask>
|
||||
<action>Map to selected_track: method/enterprise</action>
|
||||
|
||||
<output>🚀 **For Quick Flow (minimal planning, straight to code):**
|
||||
Load the **quick-flow-solo-dev** agent instead - use Quick Flow agent for faster development</output>
|
||||
|
||||
<template-output>field_type</template-output>
|
||||
<template-output>selected_track</template-output>
|
||||
<action>Jump to step 6 (discovery options)</action>
|
||||
</step>
|
||||
|
||||
<step n="4" goal="Guided setup - understand project">
|
||||
<ask>Tell me about what you're working on. What's the goal?</ask>
|
||||
<action>Store user_description</action>
|
||||
|
||||
<action>Analyze for field type indicators:
|
||||
|
||||
- Brownfield: "existing", "current", "enhance", "modify"
|
||||
- Greenfield: "new", "build", "create", "from scratch"
|
||||
- If codebase exists, default to brownfield unless user indicates scaffold
|
||||
</action>
|
||||
|
||||
<check if="field_type unclear AND codebase exists">
|
||||
<ask>I see existing code. Are you:
|
||||
1. **Modifying** existing codebase (brownfield)
|
||||
2. **Starting fresh** - code is just scaffold (greenfield)
|
||||
|
||||
Choice [1/2]:</ask>
|
||||
<action>Set field_type based on answer</action>
|
||||
</check>
|
||||
|
||||
<action if="field_type not set">Set based on codebase presence</action>
|
||||
|
||||
<action>Check for game development keywords</action>
|
||||
<check if="game_detected">
|
||||
<output>🎮 **GAME DEVELOPMENT DETECTED**
|
||||
|
||||
For game development, install the BMGD module:
|
||||
|
||||
```bash
|
||||
bmad install bmgd
|
||||
```
|
||||
|
||||
Continue with software workflows? (y/n)</output>
|
||||
<ask>Choice:</ask>
|
||||
<action if="n">Exit workflow</action>
|
||||
</check>
|
||||
|
||||
<template-output>user_description</template-output>
|
||||
<template-output>field_type</template-output>
|
||||
<action>Continue to step 5</action>
|
||||
</step>
|
||||
|
||||
<step n="5" goal="Guided setup - select track">
|
||||
<output>Based on your project, here are your BMad Method planning options:
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
**1. BMad Method** 🎯 {{#if recommended}}(RECOMMENDED){{/if}}
|
||||
|
||||
- Full planning: PRD + UX + Architecture
|
||||
- Best for: Products, platforms, complex features
|
||||
- Benefit: AI agents have complete context for better results
|
||||
|
||||
**2. Enterprise Method** 🏢
|
||||
|
||||
- Extended: Method + Security + DevOps + Testing
|
||||
- Best for: Enterprise, compliance, mission-critical
|
||||
- Benefit: Comprehensive planning for complex systems
|
||||
|
||||
**🚀 For Quick Flow (minimal planning, straight to code):**
|
||||
Load the **quick-flow-solo-dev** agent instead - use Quick Flow agent for faster development
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
{{#if brownfield}}
|
||||
💡 Architecture creates focused solution design from your codebase, keeping AI agents on track.
|
||||
{{/if}}</output>
|
||||
|
||||
<ask>Which BMad Method approach fits best?
|
||||
|
||||
1. BMad Method {{#if recommended}}(recommended){{/if}}
|
||||
2. Enterprise Method
|
||||
3. Help me decide
|
||||
4. Switch to Quick Flow (use quick-flow-solo-dev agent)
|
||||
|
||||
Choice [1/2/3/4]:</ask>
|
||||
|
||||
<check if="choice == 4">
|
||||
<output>🚀 **Switching to Quick Flow!**
|
||||
|
||||
Load the **quick-flow-solo-dev** agent instead:
|
||||
|
||||
- Start a new chat
|
||||
- Load the quick-flow-solo-dev agent
|
||||
- Use Quick Flow for minimal planning and faster development
|
||||
|
||||
Quick Flow is perfect for:
|
||||
|
||||
- Simple features and bug fixes
|
||||
- Rapid prototyping
|
||||
- When you want to get straight to code
|
||||
|
||||
Happy coding! 🚀</output>
|
||||
<action>Exit workflow</action>
|
||||
</check>
|
||||
|
||||
<check if="choice == 3">
|
||||
<ask>What concerns you about choosing?</ask>
|
||||
<action>Provide tailored guidance based on concerns</action>
|
||||
<action>Loop back to choice</action>
|
||||
</check>
|
||||
|
||||
<action>Map choice to selected_track</action>
|
||||
<template-output>selected_track</template-output>
|
||||
</step>
|
||||
|
||||
<step n="6" goal="Discovery workflows selection (unified)">
|
||||
<action>Determine available discovery workflows based on:
|
||||
- field_type (greenfield gets product-brief option)
|
||||
- selected_track (method/enterprise options)
|
||||
</action>
|
||||
|
||||
<check if="field_type == greenfield AND selected_track in [method, enterprise]">
|
||||
<output>Optional discovery workflows can help clarify your vision:</output>
|
||||
<ask>Select any you'd like to include:
|
||||
|
||||
1. 🧠 **Brainstorm** - Creative exploration and ideation
|
||||
2. 🔍 **Research** - Technical/competitive analysis
|
||||
3. 📋 **Product Brief** - Strategic product planning (recommended)
|
||||
|
||||
Enter numbers (e.g., "1,3" or "all" or "none"): </ask>
|
||||
</check>
|
||||
|
||||
<check if="field_type == brownfield AND selected_track in [method, enterprise]">
|
||||
<output>Optional discovery workflows:</output>
|
||||
<ask>Include any of these?
|
||||
|
||||
1. 🧠 **Brainstorm** - Creative exploration
|
||||
2. 🔍 **Research** - Domain analysis
|
||||
|
||||
Enter numbers (e.g., "1,2" or "none"): </ask>
|
||||
</check>
|
||||
|
||||
<action>Parse selections and set:
|
||||
|
||||
- brainstorm_requested
|
||||
- research_requested
|
||||
- product_brief_requested (if applicable)
|
||||
</action>
|
||||
|
||||
<template-output>brainstorm_requested</template-output>
|
||||
<template-output>research_requested</template-output>
|
||||
<template-output>product_brief_requested</template-output>
|
||||
|
||||
<check if="brownfield">
|
||||
<output>💡 **Note:** For brownfield projects, run document-project workflow first to analyze your codebase.</output>
|
||||
</check>
|
||||
</step>
|
||||
|
||||
<step n="7" goal="Detect track from artifacts" if="continuing_existing OR migrating_legacy">
|
||||
<action>Analyze artifacts to detect track:
|
||||
- Has PRD → BMad Method
|
||||
- Has Security/DevOps → Enterprise Method
|
||||
- Has tech-spec only → Suggest switching to quick-flow-solo-dev agent
|
||||
</action>
|
||||
|
||||
<output>Detected: **{{detected_track}}** based on {{found_artifacts}}</output>
|
||||
<ask>Correct? (y/n)</ask>
|
||||
|
||||
<ask if="n">Which BMad Method track instead?
|
||||
|
||||
1. BMad Method
|
||||
2. Enterprise Method
|
||||
3. Switch to Quick Flow (use quick-flow-solo-dev agent)
|
||||
|
||||
Choice:</ask>
|
||||
|
||||
<action>Set selected_track</action>
|
||||
<template-output>selected_track</template-output>
|
||||
</step>
|
||||
|
||||
<step n="8" goal="Generate workflow path">
|
||||
<action>Load path file: {path_files}/{{selected_track}}-{{field_type}}.yaml</action>
|
||||
<action>Build workflow_items from path file</action>
|
||||
<action>Scan for existing completed work and update statuses</action>
|
||||
<action>Set generated date</action>
|
||||
|
||||
<template-output>generated</template-output>
|
||||
<template-output>workflow_path_file</template-output>
|
||||
<template-output>workflow_items</template-output>
|
||||
</step>
|
||||
|
||||
<step n="9" goal="Create tracking file">
|
||||
<output>Your BMad workflow path:
|
||||
|
||||
**Track:** {{selected_track}}
|
||||
**Type:** {{field_type}}
|
||||
**Project:** {{project_name}}
|
||||
|
||||
{{#if brownfield}}Prerequisites: document-project{{/if}}
|
||||
{{#if has_discovery}}Discovery: {{list_selected_discovery}}{{/if}}
|
||||
|
||||
{{workflow_path_summary}}
|
||||
</output>
|
||||
|
||||
<ask>Create workflow tracking file? (y/n)</ask>
|
||||
|
||||
<check if="y">
|
||||
<action>Generate YAML from template with all variables</action>
|
||||
<action>Save to {planning_artifacts}/bmm-workflow-status.yaml</action>
|
||||
<action>Identify next workflow and agent</action>
|
||||
|
||||
<output>✅ **Created:** {planning_artifacts}/bmm-workflow-status.yaml
|
||||
|
||||
**Next:** {{next_workflow_name}}
|
||||
**Agent:** {{next_agent}}
|
||||
**Command:** /bmad:bmm:workflows:{{next_workflow_id}}
|
||||
|
||||
{{#if next_agent not in [analyst, pm]}}
|
||||
💡 Start new chat with **{{next_agent}}** agent first.
|
||||
{{/if}}
|
||||
|
||||
To check progress: /bmad:bmm:workflows:workflow-status
|
||||
|
||||
Happy building! 🚀</output>
|
||||
</check>
|
||||
|
||||
</step>
|
||||
|
||||
</workflow>
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
# Workflow Init - Initial Project Setup
|
||||
name: workflow-init
|
||||
description: "Initialize a new BMM project by determining level, type, and creating workflow path"
|
||||
author: "BMad"
|
||||
|
||||
# Critical variables from config
|
||||
config_source: "{project-root}/_bmad/bmm/config.yaml"
|
||||
output_folder: "{config_source}:output_folder"
|
||||
implementation_artifacts: "{config_source}:implementation_artifacts"
|
||||
planning_artifacts: "{config_source}:planning_artifacts"
|
||||
user_name: "{config_source}:user_name"
|
||||
project_name: "{config_source}:project_name"
|
||||
communication_language: "{config_source}:communication_language"
|
||||
document_output_language: "{config_source}:document_output_language"
|
||||
user_skill_level: "{config_source}:user_skill_level"
|
||||
date: system-generated
|
||||
|
||||
# Workflow components
|
||||
installed_path: "{project-root}/_bmad/bmm/workflows/workflow-status/init"
|
||||
instructions: "{installed_path}/instructions.md"
|
||||
template: "{project-root}/_bmad/bmm/workflows/workflow-status/workflow-status-template.yaml"
|
||||
|
||||
# Path data files
|
||||
path_files: "{project-root}/_bmad/bmm/workflows/workflow-status/paths/"
|
||||
|
||||
# Output configuration
|
||||
default_output_file: "{planning_artifacts}/bmm-workflow-status.yaml"
|
||||
|
||||
standalone: true
|
||||
web_bundle: false
|
||||
|
|
@ -1,397 +0,0 @@
|
|||
# Workflow Status Check - Multi-Mode Service
|
||||
|
||||
<critical>The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml</critical>
|
||||
<critical>You MUST have already loaded and processed: {project-root}/_bmad/bmm/workflows/workflow-status/workflow.yaml</critical>
|
||||
<critical>This workflow operates in multiple modes: interactive (default), validate, data, init-check, update</critical>
|
||||
<critical>Other workflows can call this as a service to avoid duplicating status logic</critical>
|
||||
<critical>⚠️ ABSOLUTELY NO TIME ESTIMATES - NEVER mention hours, days, weeks, months, or ANY time-based predictions. AI has fundamentally changed development speed - what once took teams weeks/months can now be done by one person in hours. DO NOT give ANY time estimates whatsoever.</critical>
|
||||
|
||||
<workflow>
|
||||
|
||||
<step n="0" goal="Determine execution mode">
|
||||
<action>Check for {{mode}} parameter passed by calling workflow</action>
|
||||
<action>Default mode = "interactive" if not specified</action>
|
||||
|
||||
<check if="mode == interactive">
|
||||
<action>Continue to Step 1 for normal status check flow</action>
|
||||
</check>
|
||||
|
||||
<check if="mode == validate">
|
||||
<action>Jump to Step 10 for workflow validation service</action>
|
||||
</check>
|
||||
|
||||
<check if="mode == data">
|
||||
<action>Jump to Step 20 for data extraction service</action>
|
||||
</check>
|
||||
|
||||
<check if="mode == init-check">
|
||||
<action>Jump to Step 30 for simple init check</action>
|
||||
</check>
|
||||
|
||||
<check if="mode == update">
|
||||
<action>Jump to Step 40 for status update service</action>
|
||||
</check>
|
||||
</step>
|
||||
|
||||
<step n="1" goal="Check for status file">
|
||||
<action>Search {planning_artifacts}/ for file: bmm-workflow-status.yaml</action>
|
||||
|
||||
<check if="no status file found">
|
||||
<output>No workflow status found.</output>
|
||||
<ask>Would you like to run Workflow Init now? (y/n)</ask>
|
||||
|
||||
<check if="response == y OR response == yes">
|
||||
<action>Launching workflow-init to set up your project tracking...</action>
|
||||
<invoke-workflow path="{project-root}/_bmad/bmm/workflows/workflow-status/init/workflow.yaml"></invoke-workflow>
|
||||
<action>Exit workflow and let workflow-init take over</action>
|
||||
</check>
|
||||
|
||||
<check if="else">
|
||||
<output>No workflow status file. Run workflow-init when ready to enable progress tracking.</output>
|
||||
<action>Exit workflow</action>
|
||||
</check>
|
||||
</check>
|
||||
|
||||
<check if="status file found">
|
||||
<action>Continue to step 2</action>
|
||||
</check>
|
||||
</step>
|
||||
|
||||
<step n="2" goal="Read and parse status">
|
||||
<action>Read bmm-workflow-status.yaml</action>
|
||||
<action>Parse YAML file and extract metadata from comments and fields:</action>
|
||||
|
||||
Parse these fields from YAML comments and metadata:
|
||||
|
||||
- project (from YAML field)
|
||||
- project_type (from YAML field)
|
||||
- project_level (from YAML field)
|
||||
- field_type (from YAML field)
|
||||
- workflow_path (from YAML field)
|
||||
|
||||
<action>Parse workflow_status section:</action>
|
||||
|
||||
- Extract all workflow entries with their statuses
|
||||
- Identify completed workflows (status = file path)
|
||||
- Identify pending workflows (status = required/optional/recommended/conditional)
|
||||
- Identify skipped workflows (status = skipped)
|
||||
|
||||
<action>Determine current state:</action>
|
||||
|
||||
- Find first workflow with status != file path and != skipped
|
||||
- This is the NEXT workflow to work on
|
||||
- Look up agent and command from workflow path file
|
||||
</step>
|
||||
|
||||
<step n="3" goal="Display current status and options">
|
||||
<action>Load workflow path file based on workflow_path field</action>
|
||||
<action>Identify current phase from next workflow to be done</action>
|
||||
<action>Build list of completed, pending, and optional workflows</action>
|
||||
<action>For each workflow, look up its agent from the path file</action>
|
||||
|
||||
<output>
|
||||
## 📊 Current Status
|
||||
|
||||
**Project:** {{project}} (Level {{project_level}} {{project_type}})
|
||||
|
||||
**Path:** {{workflow_path}}
|
||||
|
||||
**Progress:**
|
||||
|
||||
{{#each phases}}
|
||||
{{phase_name}}:
|
||||
{{#each workflows_in_phase}}
|
||||
|
||||
- {{workflow_name}} ({{agent}}): {{status_display}}
|
||||
{{/each}}
|
||||
{{/each}}
|
||||
|
||||
## 🎯 Next Steps
|
||||
|
||||
**Next Workflow:** {{next_workflow_name}}
|
||||
|
||||
**Agent:** {{next_agent}}
|
||||
|
||||
**Command:** /bmad:bmm:workflows:{{next_workflow_id}}
|
||||
|
||||
{{#if optional_workflows_available}}
|
||||
**Optional Workflows Available:**
|
||||
{{#each optional_workflows}}
|
||||
|
||||
- {{workflow_name}} ({{agent}}) - {{status}}
|
||||
{{/each}}
|
||||
{{/if}}
|
||||
|
||||
**Tip:** For guardrail tests, run TEA `*automate` after `dev-story`. If you lose context, TEA workflows resume from artifacts in `{{output_folder}}`.
|
||||
</output>
|
||||
</step>
|
||||
|
||||
<step n="4" goal="Offer actions">
|
||||
<ask>What would you like to do?
|
||||
|
||||
1. **Start next workflow** - {{next_workflow_name}} ({{next_agent}})
|
||||
{{#if optional_workflows_available}}
|
||||
2. **Run optional workflow** - Choose from available options
|
||||
{{/if}}
|
||||
3. **View full status YAML** - See complete status file
|
||||
4. **Update workflow status** - Mark a workflow as completed or skipped
|
||||
5. **Exit** - Return to agent
|
||||
|
||||
Your choice:</ask>
|
||||
|
||||
<action>Handle user selection based on available options</action>
|
||||
|
||||
<check if="choice == 1">
|
||||
<output>Ready to run {{next_workflow_name}}!
|
||||
|
||||
**Command:** /bmad:bmm:workflows:{{next_workflow_id}}
|
||||
|
||||
**Agent:** Load {{next_agent}} agent first
|
||||
|
||||
{{#if next_agent !== current_agent}}
|
||||
Tip: Start a new chat and load the {{next_agent}} agent before running this workflow.
|
||||
{{/if}}
|
||||
</output>
|
||||
</check>
|
||||
|
||||
<check if="choice == 2 AND optional_workflows_available">
|
||||
<ask>Which optional workflow?
|
||||
{{#each optional_workflows numbered}}
|
||||
{{number}}. {{workflow_name}} ({{agent}})
|
||||
{{/each}}
|
||||
|
||||
Your choice:</ask>
|
||||
<action>Display selected workflow command and agent</action>
|
||||
</check>
|
||||
|
||||
<check if="choice == 3">
|
||||
<action>Display complete bmm-workflow-status.yaml file contents</action>
|
||||
</check>
|
||||
|
||||
<check if="choice == 4">
|
||||
<ask>What would you like to update?
|
||||
|
||||
1. Mark a workflow as **completed** (provide file path)
|
||||
2. Mark a workflow as **skipped**
|
||||
|
||||
Your choice:</ask>
|
||||
|
||||
<check if="update_choice == 1">
|
||||
<ask>Which workflow? (Enter workflow ID like 'prd' or 'create-architecture')</ask>
|
||||
<ask>File path created? (e.g., docs/prd.md)</ask>
|
||||
<critical>ONLY write the file path as the status value - no other text, notes, or metadata</critical>
|
||||
<action>Update workflow_status in YAML file: {{workflow_id}}: {{file_path}}</action>
|
||||
<action>Save updated YAML file preserving ALL structure and comments</action>
|
||||
<output>✅ Updated {{workflow_id}} to completed: {{file_path}}</output>
|
||||
</check>
|
||||
|
||||
<check if="update_choice == 2">
|
||||
<ask>Which workflow to skip? (Enter workflow ID)</ask>
|
||||
<action>Update workflow_status in YAML file: {{workflow_id}}: skipped</action>
|
||||
<action>Save updated YAML file</action>
|
||||
<output>✅ Marked {{workflow_id}} as skipped</output>
|
||||
</check>
|
||||
</check>
|
||||
</step>
|
||||
|
||||
<!-- ============================================= -->
|
||||
<!-- SERVICE MODES - Called by other workflows -->
|
||||
<!-- ============================================= -->
|
||||
|
||||
<step n="10" goal="Validate mode - Check if calling workflow should proceed">
|
||||
<action>Read {planning_artifacts}/bmm-workflow-status.yaml if exists</action>
|
||||
|
||||
<check if="status file not found">
|
||||
<template-output>status_exists = false</template-output>
|
||||
<template-output>should_proceed = true</template-output>
|
||||
<template-output>warning = "No status file found. Running without progress tracking."</template-output>
|
||||
<template-output>suggestion = "Consider running workflow-init first for progress tracking"</template-output>
|
||||
<action>Return to calling workflow</action>
|
||||
</check>
|
||||
|
||||
<check if="status file found">
|
||||
<action>Parse YAML file to extract project metadata and workflow_status</action>
|
||||
<action>Load workflow path file from workflow_path field</action>
|
||||
<action>Find first non-completed workflow in workflow_status (next workflow)</action>
|
||||
<action>Check if {{calling_workflow}} matches next workflow or is in the workflow list</action>
|
||||
|
||||
<template-output>status_exists = true</template-output>
|
||||
<template-output>project_level = {{project_level}}</template-output>
|
||||
<template-output>project_type = {{project_type}}</template-output>
|
||||
<template-output>field_type = {{field_type}}</template-output>
|
||||
<template-output>next_workflow = {{next_workflow_id}}</template-output>
|
||||
|
||||
<check if="calling_workflow == next_workflow">
|
||||
<template-output>should_proceed = true</template-output>
|
||||
<template-output>warning = ""</template-output>
|
||||
<template-output>suggestion = "Proceeding with planned next step"</template-output>
|
||||
</check>
|
||||
|
||||
<check if="calling_workflow in workflow_status list">
|
||||
<action>Check the status of calling_workflow in YAML</action>
|
||||
|
||||
<check if="status is file path">
|
||||
<template-output>should_proceed = true</template-output>
|
||||
<template-output>warning = "⚠️ Workflow already completed: {{calling_workflow}}"</template-output>
|
||||
<template-output>suggestion = "This workflow was already completed. Re-running will overwrite: {{status}}"</template-output>
|
||||
</check>
|
||||
|
||||
<check if="status is optional/recommended">
|
||||
<template-output>should_proceed = true</template-output>
|
||||
<template-output>warning = "Running optional workflow {{calling_workflow}}"</template-output>
|
||||
<template-output>suggestion = "This is optional. Expected next: {{next_workflow}}"</template-output>
|
||||
</check>
|
||||
|
||||
<check if="status is required but not next">
|
||||
<template-output>should_proceed = true</template-output>
|
||||
<template-output>warning = "⚠️ Out of sequence: Expected {{next_workflow}}, running {{calling_workflow}}"</template-output>
|
||||
<template-output>suggestion = "Consider running {{next_workflow}} instead, or continue if intentional"</template-output>
|
||||
</check>
|
||||
|
||||
</check>
|
||||
|
||||
<check if="calling_workflow NOT in workflow_status list">
|
||||
<template-output>should_proceed = true</template-output>
|
||||
<template-output>warning = "⚠️ Unknown workflow: {{calling_workflow}} not in workflow path"</template-output>
|
||||
<template-output>suggestion = "This workflow is not part of the defined path for this project"</template-output>
|
||||
</check>
|
||||
|
||||
<template-output>status_file_path = {{path to bmm-workflow-status.yaml}}</template-output>
|
||||
</check>
|
||||
|
||||
<action>Return control to calling workflow with all template outputs</action>
|
||||
</step>
|
||||
|
||||
<step n="20" goal="Data mode - Extract specific information">
|
||||
<action>Read {planning_artifacts}/bmm-workflow-status.yaml if exists</action>
|
||||
|
||||
<check if="status file not found">
|
||||
<template-output>status_exists = false</template-output>
|
||||
<template-output>error = "No status file to extract data from"</template-output>
|
||||
<action>Return to calling workflow</action>
|
||||
</check>
|
||||
|
||||
<check if="status file found">
|
||||
<action>Parse YAML file completely</action>
|
||||
<template-output>status_exists = true</template-output>
|
||||
|
||||
<check if="data_request == project_config">
|
||||
<template-output>project_name = {{project}}</template-output>
|
||||
<template-output>project_type = {{project_type}}</template-output>
|
||||
<template-output>project_level = {{project_level}}</template-output>
|
||||
<template-output>field_type = {{field_type}}</template-output>
|
||||
<template-output>workflow_path = {{workflow_path}}</template-output>
|
||||
</check>
|
||||
|
||||
<check if="data_request == workflow_status">
|
||||
<action>Parse workflow_status section and return all workflow: status pairs</action>
|
||||
<template-output>workflow_status = {{workflow_status_object}}</template-output>
|
||||
<action>Calculate completion stats:</action>
|
||||
<template-output>total_workflows = {{count all workflows}}</template-output>
|
||||
<template-output>completed_workflows = {{count file path statuses}}</template-output>
|
||||
<template-output>pending_workflows = {{count required/optional/etc}}</template-output>
|
||||
<template-output>skipped_workflows = {{count skipped}}</template-output>
|
||||
</check>
|
||||
|
||||
<check if="data_request == all">
|
||||
<action>Return all parsed fields as template outputs</action>
|
||||
<template-output>project = {{project}}</template-output>
|
||||
<template-output>project_type = {{project_type}}</template-output>
|
||||
<template-output>project_level = {{project_level}}</template-output>
|
||||
<template-output>field_type = {{field_type}}</template-output>
|
||||
<template-output>workflow_path = {{workflow_path}}</template-output>
|
||||
<template-output>workflow_status = {{workflow_status_object}}</template-output>
|
||||
<template-output>generated = {{generated}}</template-output>
|
||||
</check>
|
||||
|
||||
<template-output>status_file_path = {{path to bmm-workflow-status.yaml}}</template-output>
|
||||
</check>
|
||||
|
||||
<action>Return control to calling workflow with requested data</action>
|
||||
</step>
|
||||
|
||||
<step n="30" goal="Init-check mode - Simple existence check">
|
||||
<action>Check if {planning_artifacts}/bmm-workflow-status.yaml exists</action>
|
||||
|
||||
<check if="exists">
|
||||
<template-output>status_exists = true</template-output>
|
||||
<template-output>suggestion = "Status file found. Ready to proceed."</template-output>
|
||||
</check>
|
||||
|
||||
<check if="not exists">
|
||||
<template-output>status_exists = false</template-output>
|
||||
<template-output>suggestion = "No status file. Run workflow-init to create one (optional for progress tracking)"</template-output>
|
||||
</check>
|
||||
|
||||
<action>Return immediately to calling workflow</action>
|
||||
</step>
|
||||
|
||||
<step n="40" goal="Update mode - Centralized status file updates">
|
||||
<action>Read {planning_artifacts}/bmm-workflow-status.yaml</action>
|
||||
|
||||
<check if="status file not found">
|
||||
<template-output>success = false</template-output>
|
||||
<template-output>error = "No status file found. Cannot update."</template-output>
|
||||
<action>Return to calling workflow</action>
|
||||
</check>
|
||||
|
||||
<check if="status file found">
|
||||
<action>Parse YAML file completely</action>
|
||||
<action>Load workflow path file from workflow_path field</action>
|
||||
<action>Check {{action}} parameter to determine update type</action>
|
||||
|
||||
<!-- ============================================= -->
|
||||
<!-- ACTION: complete_workflow -->
|
||||
<!-- ============================================= -->
|
||||
<check if="action == complete_workflow">
|
||||
<action>Get {{workflow_id}} parameter (required)</action>
|
||||
<action>Get {{default_output_file}} parameter (required - path to created file)</action>
|
||||
|
||||
<critical>ONLY write the file path as the status value - no other text, notes, or metadata</critical>
|
||||
<action>Update workflow status in YAML:</action>
|
||||
- In workflow_status section, update: {{workflow_id}}: {{default_output_file}}
|
||||
|
||||
<action>Find {{workflow_id}} in loaded path YAML</action>
|
||||
<action>Determine next workflow from path sequence</action>
|
||||
<action>Find first workflow in workflow_status with status != file path and != skipped</action>
|
||||
|
||||
<action>Save updated YAML file preserving ALL structure and comments</action>
|
||||
|
||||
<template-output>success = true</template-output>
|
||||
<template-output>next_workflow = {{determined next workflow}}</template-output>
|
||||
<template-output>next_agent = {{determined next agent from path file}}</template-output>
|
||||
<template-output>completed_workflow = {{workflow_id}}</template-output>
|
||||
<template-output>default_output_file = {{default_output_file}}</template-output>
|
||||
|
||||
</check>
|
||||
|
||||
<!-- ============================================= -->
|
||||
<!-- ACTION: skip_workflow -->
|
||||
<!-- ============================================= -->
|
||||
<check if="action == skip_workflow">
|
||||
<action>Get {{workflow_id}} parameter (required)</action>
|
||||
|
||||
<action>Update workflow status in YAML:</action>
|
||||
- In workflow_status section, update: {{workflow_id}}: skipped
|
||||
|
||||
<action>Save updated YAML file</action>
|
||||
|
||||
<template-output>success = true</template-output>
|
||||
<template-output>skipped_workflow = {{workflow_id}}</template-output>
|
||||
|
||||
</check>
|
||||
|
||||
<!-- ============================================= -->
|
||||
<!-- Unknown action -->
|
||||
<!-- ============================================= -->
|
||||
<check if="action not recognized">
|
||||
<template-output>success = false</template-output>
|
||||
<template-output>error = "Unknown action: {{action}}. Valid actions: complete_workflow, skip_workflow"</template-output>
|
||||
</check>
|
||||
|
||||
</check>
|
||||
|
||||
<action>Return control to calling workflow with template outputs</action>
|
||||
</step>
|
||||
|
||||
</workflow>
|
||||
|
|
@ -1,103 +0,0 @@
|
|||
# BMad Enterprise Method - Brownfield
|
||||
# Extended enterprise planning for complex brownfield with security/devops/test (30+ stories typically)
|
||||
|
||||
method_name: "BMad Enterprise Method"
|
||||
track: "enterprise-bmad-method"
|
||||
field_type: "brownfield"
|
||||
description: "Enterprise-grade planning for complex brownfield additions with extended requirements"
|
||||
|
||||
phases:
|
||||
- phase: 0
|
||||
name: "Documentation"
|
||||
conditional: "if_undocumented"
|
||||
note: "Prerequisite for brownfield without docs"
|
||||
workflows:
|
||||
- id: "document-project"
|
||||
required: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/document-project/workflow.yaml"
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:document-project"
|
||||
output: "Comprehensive project documentation"
|
||||
purpose: "Understand existing codebase before planning"
|
||||
- phase: 1
|
||||
name: "Analysis (Optional)"
|
||||
optional: true
|
||||
note: "User-selected during workflow-init"
|
||||
workflows:
|
||||
- id: "brainstorm-project"
|
||||
exec: "{project-root}/_bmad/core/workflows/brainstorming/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:brainstorming"
|
||||
included_by: "user_choice"
|
||||
note: "Uses core brainstorming workflow with project context template"
|
||||
- id: "research"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/research/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:research"
|
||||
included_by: "user_choice"
|
||||
note: "Can have multiple research workflows"
|
||||
- id: "product-brief"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:create-product-brief"
|
||||
included_by: "user_choice"
|
||||
note: "Recommended for greenfield Method projects"
|
||||
- phase: 2
|
||||
name: "Planning"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "prd"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md"
|
||||
required: true
|
||||
agent: "pm"
|
||||
command: "/bmad:bmm:workflows:create-prd"
|
||||
output: "Product Requirements Document with FRs and NFRs"
|
||||
- id: "create-ux-design"
|
||||
conditional: "if_has_ui"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md"
|
||||
agent: "ux-designer"
|
||||
command: "/bmad:bmm:workflows:create-ux-design"
|
||||
note: "Determined after PRD - user/agent decides if needed"
|
||||
- phase: 3
|
||||
name: "Solutioning"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "create-architecture"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md"
|
||||
agent: "architect"
|
||||
command: "/bmad:bmm:workflows:create-architecture"
|
||||
output: "System architecture document"
|
||||
note: "Complete system design for greenfield projects"
|
||||
- id: "create-epics-and-stories"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md"
|
||||
agent: "pm"
|
||||
command: "/bmad:bmm:workflows:create-epics-and-stories"
|
||||
note: "Required: Break down PRD into implementable epics and stories with full context (PRD + UX + Architecture)"
|
||||
- id: "test-design"
|
||||
optional: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/testarch/test-design/workflow.yaml"
|
||||
agent: "tea"
|
||||
command: "/bmad:bmm:workflows:test-design"
|
||||
output: "System-level testability review"
|
||||
note: "Testability assessment before gate check - auto-detects system-level mode"
|
||||
- id: "implementation-readiness"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md"
|
||||
agent: "architect"
|
||||
command: "/bmad:bmm:workflows:implementation-readiness"
|
||||
note: "Validates PRD + Architecture + Epics + UX (optional)"
|
||||
- phase: 4
|
||||
name: "Implementation"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "sprint-planning"
|
||||
required: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml"
|
||||
agent: "sm"
|
||||
command: "/bmad:bmm:workflows:sprint-planning"
|
||||
note: "Creates sprint plan - subsequent work tracked there"
|
||||
|
|
@ -1,100 +0,0 @@
|
|||
# BMad Enterprise Method - Greenfield
|
||||
# Extended enterprise planning with security/devops/test for greenfield (30+ stories typically)
|
||||
|
||||
method_name: "Enterprise BMad Method"
|
||||
track: "enterprise-bmad-method"
|
||||
field_type: "greenfield"
|
||||
description: "Complete enterprise-grade planning with security, devops, and test strategy"
|
||||
|
||||
phases:
|
||||
- phase: 1
|
||||
name: "Analysis (Optional)"
|
||||
optional: true
|
||||
note: "User-selected during workflow-init"
|
||||
workflows:
|
||||
- id: "brainstorm-project"
|
||||
exec: "{project-root}/_bmad/core/workflows/brainstorming/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:brainstorming"
|
||||
included_by: "user_choice"
|
||||
note: "Uses core brainstorming workflow with project context template"
|
||||
|
||||
- id: "research"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/research/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:research"
|
||||
included_by: "user_choice"
|
||||
note: "Can have multiple research workflows"
|
||||
|
||||
- id: "product-brief"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:create-product-brief"
|
||||
included_by: "user_choice"
|
||||
note: "Recommended for greenfield Method projects"
|
||||
|
||||
- phase: 2
|
||||
name: "Planning"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "prd"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md"
|
||||
required: true
|
||||
agent: "pm"
|
||||
command: "/bmad:bmm:workflows:create-prd"
|
||||
output: "Product Requirements Document with FRs and NFRs"
|
||||
|
||||
- id: "create-ux-design"
|
||||
conditional: "if_has_ui"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md"
|
||||
agent: "ux-designer"
|
||||
command: "/bmad:bmm:workflows:create-ux-design"
|
||||
note: "Determined after PRD - user/agent decides if needed"
|
||||
|
||||
- phase: 3
|
||||
name: "Solutioning"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "create-architecture"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md"
|
||||
agent: "architect"
|
||||
command: "/bmad:bmm:workflows:create-architecture"
|
||||
output: "System architecture document"
|
||||
note: "Complete system design for greenfield projects"
|
||||
|
||||
- id: "create-epics-and-stories"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md"
|
||||
agent: "pm"
|
||||
command: "/bmad:bmm:workflows:create-epics-and-stories"
|
||||
note: "Required: Break down PRD into implementable epics and stories with full context (PRD + UX + Architecture)"
|
||||
|
||||
- id: "test-design"
|
||||
optional: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/testarch/test-design/workflow.yaml"
|
||||
agent: "tea"
|
||||
command: "/bmad:bmm:workflows:test-design"
|
||||
output: "System-level testability review"
|
||||
note: "Testability assessment before gate check - auto-detects system-level mode"
|
||||
|
||||
- id: "implementation-readiness"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md"
|
||||
agent: "architect"
|
||||
command: "/bmad:bmm:workflows:implementation-readiness"
|
||||
note: "Validates PRD + Architecture + Epics + UX (optional)"
|
||||
|
||||
- phase: 4
|
||||
name: "Implementation"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "sprint-planning"
|
||||
required: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml"
|
||||
agent: "sm"
|
||||
command: "/bmad:bmm:workflows:sprint-planning"
|
||||
note: "Creates sprint plan - subsequent work tracked there"
|
||||
|
|
@ -1,103 +0,0 @@
|
|||
# BMad Method - Brownfield
|
||||
# Full product + architecture planning for complex brownfield additions (10-50+ stories typically)
|
||||
|
||||
method_name: "BMad Method"
|
||||
track: "bmad-method"
|
||||
field_type: "brownfield"
|
||||
description: "Complete product and system design for complex brownfield work"
|
||||
|
||||
phases:
|
||||
- phase: 0
|
||||
name: "Documentation"
|
||||
conditional: "if_undocumented"
|
||||
note: "Prerequisite for brownfield without docs"
|
||||
workflows:
|
||||
- id: "document-project"
|
||||
required: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/document-project/workflow.yaml"
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:document-project"
|
||||
output: "Comprehensive project documentation"
|
||||
purpose: "Understand existing codebase before planning"
|
||||
- phase: 1
|
||||
name: "Analysis (Optional)"
|
||||
optional: true
|
||||
note: "User-selected during workflow-init"
|
||||
workflows:
|
||||
- id: "brainstorm-project"
|
||||
exec: "{project-root}/_bmad/core/workflows/brainstorming/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:brainstorming"
|
||||
included_by: "user_choice"
|
||||
note: "Uses core brainstorming workflow with project context template"
|
||||
- id: "research"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/research/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:research"
|
||||
included_by: "user_choice"
|
||||
note: "Can have multiple research workflows"
|
||||
- id: "product-brief"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:create-product-brief"
|
||||
included_by: "user_choice"
|
||||
note: "Recommended for greenfield Method projects"
|
||||
- phase: 2
|
||||
name: "Planning"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "prd"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md"
|
||||
required: true
|
||||
agent: "pm"
|
||||
command: "/bmad:bmm:workflows:create-prd"
|
||||
output: "Product Requirements Document with FRs and NFRs"
|
||||
- id: "create-ux-design"
|
||||
conditional: "if_has_ui"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md"
|
||||
agent: "ux-designer"
|
||||
command: "/bmad:bmm:workflows:create-ux-design"
|
||||
note: "Determined after PRD - user/agent decides if needed"
|
||||
- phase: 3
|
||||
name: "Solutioning"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "create-architecture"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md"
|
||||
agent: "architect"
|
||||
command: "/bmad:bmm:workflows:create-architecture"
|
||||
output: "System architecture document"
|
||||
note: "Complete system design for greenfield projects"
|
||||
- id: "create-epics-and-stories"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md"
|
||||
agent: "pm"
|
||||
command: "/bmad:bmm:workflows:create-epics-and-stories"
|
||||
note: "Required: Break down PRD into implementable epics and stories with full context (PRD + UX + Architecture)"
|
||||
- id: "test-design"
|
||||
optional: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/testarch/test-design/workflow.yaml"
|
||||
agent: "tea"
|
||||
command: "/bmad:bmm:workflows:test-design"
|
||||
output: "System-level testability review"
|
||||
note: "Testability assessment before gate check - auto-detects system-level mode"
|
||||
- id: "implementation-readiness"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md"
|
||||
agent: "architect"
|
||||
command: "/bmad:bmm:workflows:implementation-readiness"
|
||||
note: "Validates PRD + Architecture + Epics + UX (optional)"
|
||||
- phase: 4
|
||||
name: "Implementation"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "sprint-planning"
|
||||
required: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml"
|
||||
agent: "sm"
|
||||
command: "/bmad:bmm:workflows:sprint-planning"
|
||||
note: "Creates sprint plan - subsequent work tracked there"
|
||||
|
|
@ -1,100 +0,0 @@
|
|||
# BMad Method - Greenfield
|
||||
# Full product + architecture planning for greenfield projects (10-50+ stories typically)
|
||||
|
||||
method_name: "BMad Method"
|
||||
track: "bmad-method"
|
||||
field_type: "greenfield"
|
||||
description: "Complete product and system design methodology for greenfield projects"
|
||||
|
||||
phases:
|
||||
- phase: 1
|
||||
name: "Analysis (Optional)"
|
||||
optional: true
|
||||
note: "User-selected during workflow-init"
|
||||
workflows:
|
||||
- id: "brainstorm-project"
|
||||
exec: "{project-root}/_bmad/core/workflows/brainstorming/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:brainstorming"
|
||||
included_by: "user_choice"
|
||||
note: "Uses core brainstorming workflow with project context template"
|
||||
|
||||
- id: "research"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/research/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:research"
|
||||
included_by: "user_choice"
|
||||
note: "Can have multiple research workflows"
|
||||
|
||||
- id: "product-brief"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/1-analysis/create-product-brief/workflow.md"
|
||||
optional: true
|
||||
agent: "analyst"
|
||||
command: "/bmad:bmm:workflows:create-product-brief"
|
||||
included_by: "user_choice"
|
||||
note: "Recommended for greenfield Method projects"
|
||||
|
||||
- phase: 2
|
||||
name: "Planning"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "prd"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/prd/workflow.md"
|
||||
required: true
|
||||
agent: "pm"
|
||||
command: "/bmad:bmm:workflows:create-prd"
|
||||
output: "Product Requirements Document with FRs and NFRs"
|
||||
|
||||
- id: "create-ux-design"
|
||||
conditional: "if_has_ui"
|
||||
exec: "{project-root}/_bmad/bmm/workflows/2-plan-workflows/create-ux-design/workflow.md"
|
||||
agent: "ux-designer"
|
||||
command: "/bmad:bmm:workflows:create-ux-design"
|
||||
note: "Determined after PRD - user/agent decides if needed"
|
||||
|
||||
- phase: 3
|
||||
name: "Solutioning"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "create-architecture"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-architecture/workflow.md"
|
||||
agent: "architect"
|
||||
command: "/bmad:bmm:workflows:create-architecture"
|
||||
output: "System architecture document"
|
||||
note: "Complete system design for greenfield projects"
|
||||
|
||||
- id: "create-epics-and-stories"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/create-epics-and-stories/workflow.md"
|
||||
agent: "pm"
|
||||
command: "/bmad:bmm:workflows:create-epics-and-stories"
|
||||
note: "Required: Break down PRD into implementable epics and stories with full context (PRD + UX + Architecture)"
|
||||
|
||||
- id: "test-design"
|
||||
optional: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/testarch/test-design/workflow.yaml"
|
||||
agent: "tea"
|
||||
command: "/bmad:bmm:workflows:test-design"
|
||||
output: "System-level testability review"
|
||||
note: "Testability assessment before gate check - auto-detects system-level mode"
|
||||
|
||||
- id: "implementation-readiness"
|
||||
required: true
|
||||
exec: "{project-root}/_bmad/bmm/workflows/3-solutioning/check-implementation-readiness/workflow.md"
|
||||
agent: "architect"
|
||||
command: "/bmad:bmm:workflows:implementation-readiness"
|
||||
note: "Validates PRD + Architecture + Epics + UX (optional)"
|
||||
|
||||
- phase: 4
|
||||
name: "Implementation"
|
||||
required: true
|
||||
workflows:
|
||||
- id: "sprint-planning"
|
||||
required: true
|
||||
workflow: "{project-root}/_bmad/bmm/workflows/4-implementation/sprint-planning/workflow.yaml"
|
||||
agent: "sm"
|
||||
command: "/bmad:bmm:workflows:sprint-planning"
|
||||
note: "Creates sprint plan - subsequent work tracked there"
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
# Workflow Status Template
|
||||
|
||||
# This tracks progress through BMM methodology Analysis, Planning, and Solutioning phases.
|
||||
# Implementation phase is tracked separately in sprint-status.yaml
|
||||
|
||||
# STATUS DEFINITIONS:
|
||||
# ==================
|
||||
# Initial Status (before completion):
|
||||
# - required: Must be completed to progress
|
||||
# - optional: Can be completed but not required
|
||||
# - recommended: Strongly suggested but not required
|
||||
# - conditional: Required only if certain conditions met (e.g., if_has_ui)
|
||||
#
|
||||
# Completion Status:
|
||||
# - {file-path}: File created/found (e.g., "docs/product-brief.md")
|
||||
# - skipped: Optional/conditional workflow that was skipped
|
||||
|
||||
generated: "{{generated}}"
|
||||
project: "{{project_name}}"
|
||||
project_type: "{{project_type}}"
|
||||
selected_track: "{{selected_track}}"
|
||||
field_type: "{{field_type}}"
|
||||
workflow_path: "{{workflow_path_file}}"
|
||||
workflow_status: "{{workflow_items}}"
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
# Workflow Status - Master Router and Status Tracker
|
||||
name: workflow-status
|
||||
description: 'Lightweight status checker - answers "what should I do now?" for any agent. Reads YAML status file for workflow tracking. Use workflow-init for new projects.'
|
||||
author: "BMad"
|
||||
|
||||
# Critical variables from config
|
||||
config_source: "{project-root}/_bmad/bmm/config.yaml"
|
||||
output_folder: "{config_source}:output_folder"
|
||||
planning_artifacts: "{config_source}:planning_artifacts"
|
||||
implementation_artifacts: "{config_source}:implementation_artifacts"
|
||||
user_name: "{config_source}:user_name"
|
||||
communication_language: "{config_source}:communication_language"
|
||||
document_output_language: "{config_source}:document_output_language"
|
||||
user_skill_level: "{config_source}:user_skill_level"
|
||||
date: system-generated
|
||||
|
||||
# Workflow components
|
||||
installed_path: "{project-root}/_bmad/bmm/workflows/workflow-status"
|
||||
instructions: "{installed_path}/instructions.md"
|
||||
|
||||
# Template for status file creation (used by workflow-init)
|
||||
template: "{installed_path}/workflow-status-template.yaml"
|
||||
|
||||
# Path definitions for project types
|
||||
path_files: "{installed_path}/paths/"
|
||||
|
||||
# Output configuration - reads existing status
|
||||
default_output_file: "{planning_artifacts}/bmm-workflow-status.yaml"
|
||||
|
||||
standalone: true
|
||||
|
||||
web_bundle: false
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs
|
||||
core,,Advanced Elicitation,AE,10,_bmad/core/workflows/advanced-elicitation/workflow.xml,bmad:advanced-elicitation,false,,,"Apply elicitation methods iteratively to enhance content being generated, presenting options and allowing reshuffle or full method listing for comprehensive content improvement",,
|
||||
core,,Brainstorming,BS,20,_bmad/core/workflows/brainstorming/workflow.md,bmad:brainstorming,false,analyst,,Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods,{output_folder}/brainstorming/brainstorming-session-{{date}}.md,
|
||||
core,,Party Mode,PM,30,_bmad/core/workflows/party-mode/workflow.md,bmad:party-mode,false,party-mode facilitator,,Orchestrates group discussions between all installed BMAD agents enabling natural multi-agent conversations,,
|
||||
core,,bmad-help,BH,40,_bmad/core/tasks/bmad-help.md,bmad:help,false,system,,Get unstuck by showing what workflow steps come next or answering questions about what to do in the BMad Method,,
|
||||
core,,Index Docs,ID,50,_bmad/core/tasks/index-docs.xml,bmad:index-docs,false,llm,,Generates or updates an index.md of all documents in the specified directory,,
|
||||
core,,Execute Workflow,WF,60,_bmad/core/tasks/workflow.xml,bmad:workflow,false,llm,,Execute given workflow by loading its configuration following instructions and producing output,,
|
||||
core,,Shard Document,SD,70,_bmad/core/tasks/shard-doc.xml,bmad:shard-doc,false,llm,,Splits large markdown documents into smaller organized files based on level 2 sections,,
|
||||
core,,Editorial Review - Prose,EP,80,_bmad/core/tasks/editorial-review-prose.xml,bmad:editorial-review-prose,false,llm,reader_type,Clinical copy-editor that reviews text for communication issues,,"three-column markdown table with suggested fixes"
|
||||
core,,Editorial Review - Structure,ES,90,_bmad/core/tasks/editorial-review-structure.xml,bmad:editorial-review-structure,false,llm,,Structural editor that proposes cuts reorganization and simplification while preserving comprehension,,
|
||||
core,,Adversarial Review (General),AR,100,_bmad/core/tasks/review-adversarial-general.xml,bmad:review-adversarial-general,false,llm,,Cynically review content and produce findings,,
|
||||
|
Can't render this file because it has a wrong number of fields in line 3.
|
|
|
@ -0,0 +1,62 @@
|
|||
---
|
||||
name: bmad-help
|
||||
description: Get unstuck by showing what workflow steps come next or answering questions about what to do
|
||||
standalone: true
|
||||
---
|
||||
|
||||
# Task: BMAD Help
|
||||
|
||||
## KEY RULES
|
||||
|
||||
- **Empty `phase` = anytime** — Universal tools work regardless of workflow state
|
||||
- **Numbered phases indicate sequence** — Phases like `1-discover` → `2-define` → `3-build` → `4-ship` flow in order (naming varies by module)
|
||||
- **Stay in module** — Guide through the active module's workflow based on phase+sequence ordering
|
||||
- **Descriptions contain routing** — Read for alternate paths (e.g., "back to previous if fixes needed")
|
||||
- **`required=true` blocks progress** — Required workflows must complete before proceeding to later phases
|
||||
- **Artifacts reveal completion** — Search resolved output paths for `outputs` patterns, fuzzy-match found files to workflow rows
|
||||
|
||||
## MODULE DETECTION
|
||||
|
||||
- **Empty `module` column** → universal tools (work across all modules)
|
||||
- **Named `module`** → module-specific workflows
|
||||
|
||||
Detect the active module from conversation context, recent workflows, or user query keywords. If ambiguous, ask the user.
|
||||
|
||||
## INPUT ANALYSIS
|
||||
|
||||
Determine what was just completed:
|
||||
- Did someone state they completed something? Proceed as if that was the input.
|
||||
- Was a workflow just completed in this conversation? Proceed as if that was the input.
|
||||
- Search resolved artifact locations for files; fuzzy-match to workflow `outputs` patterns.
|
||||
- If an `index.md` exists, read it for additional context.
|
||||
- If still unclear, ask: "What workflow did you most recently complete?"
|
||||
|
||||
## EXECUTION
|
||||
|
||||
1. **Load catalog** — Load `{project-root}/_bmad/_config/bmad-help.csv`
|
||||
|
||||
2. **Resolve output locations** — Scan each folder under `_bmad/` (except `_config`) for `config.yaml`. For each workflow row, resolve its `output-location` variables against that module's config so artifact paths can be searched.
|
||||
|
||||
3. **Analyze input** — Task may provide a workflow name/code, conversational phrase, or nothing. Infer what was just completed using INPUT ANALYSIS above.
|
||||
|
||||
4. **Detect active module** — Use MODULE DETECTION above to determine which module the user is working in.
|
||||
|
||||
5. **Present recommendations** — Show next steps based on completed workflows, phase/sequence ordering (KEY RULES), and artifact detection. Format per the RECOMMENDED OUTPUT FORMAT section below.
|
||||
|
||||
## RECOMMENDED OUTPUT FORMAT
|
||||
|
||||
**Optional items first** — List optional workflows until a required step is reached
|
||||
**Required items next** — List the next required workflow
|
||||
For each item show:
|
||||
- Workflow **name**
|
||||
- **Command** (prefixed with `/`, e.g., `/bmad:example:build-prototype`)
|
||||
- **Agent** title and display name from the CSV (e.g., "🎨 Alex (Designer)")
|
||||
- Brief **description**
|
||||
|
||||
### Additional response output guidance to convey:
|
||||
- Run each workflow in a **fresh context window**
|
||||
- Load the agent using (`/` + `agent-command`), or run the workflow command directly
|
||||
- For **validation workflows**: recommend using a different high-quality LLM if available
|
||||
- For conversational requests: match the user's tone while presenting clearly
|
||||
|
||||
6. Return to the calling process after presenting recommendations.
|
||||
|
|
@ -33,7 +33,7 @@ Initialize the brainstorming workflow by detecting continuation state and settin
|
|||
|
||||
First, check if the output document already exists:
|
||||
|
||||
- Look for file at `{output_folder}/analysis/brainstorming-session-{{date}}.md`
|
||||
- Look for file at `{output_folder}/brainstorming/brainstorming-session-{{date}}.md`
|
||||
- If exists, read the complete file including frontmatter
|
||||
- If not exists, this is a fresh workflow
|
||||
|
||||
|
|
@ -55,10 +55,10 @@ Create the brainstorming session document:
|
|||
|
||||
```bash
|
||||
# Create directory if needed
|
||||
mkdir -p "$(dirname "{output_folder}/analysis/brainstorming-session-{{date}}.md")"
|
||||
mkdir -p "$(dirname "{output_folder}/brainstorming/brainstorming-session-{{date}}.md")"
|
||||
|
||||
# Initialize from template
|
||||
cp "{template_path}" "{output_folder}/analysis/brainstorming-session-{{date}}.md"
|
||||
cp "{template_path}" "{output_folder}/brainstorming/brainstorming-session-{{date}}.md"
|
||||
```
|
||||
|
||||
#### B. Context File Check and Loading
|
||||
|
|
@ -134,7 +134,7 @@ _[Content based on conversation about session parameters and facilitator approac
|
|||
|
||||
## APPEND TO DOCUMENT:
|
||||
|
||||
When user selects approach, append the session overview content directly to `{output_folder}/analysis/brainstorming-session-{{date}}.md` using the structure from above.
|
||||
When user selects approach, append the session overview content directly to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md` using the structure from above.
|
||||
|
||||
### E. Continue to Technique Selection
|
||||
|
||||
|
|
@ -152,7 +152,7 @@ Which approach appeals to you most? (Enter 1-4)"
|
|||
|
||||
#### When user selects approach number:
|
||||
|
||||
- **Append initial session overview to `{output_folder}/analysis/brainstorming-session-{{date}}.md`**
|
||||
- **Append initial session overview to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md`**
|
||||
- **Update frontmatter:** `stepsCompleted: [1]`, `selected_approach: '[selected approach]'`
|
||||
- **Load the appropriate step-02 file** based on selection
|
||||
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ Load existing document and analyze current state:
|
|||
|
||||
**Document Analysis:**
|
||||
|
||||
- Read existing `{output_folder}/analysis/brainstorming-session-{{date}}.md`
|
||||
- Read existing `{output_folder}/brainstorming/brainstorming-session-{{date}}.md`
|
||||
- Examine frontmatter for `stepsCompleted`, `session_topic`, `session_goals`
|
||||
- Review content to understand session progress and outcomes
|
||||
- Identify current stage and next logical steps
|
||||
|
|
|
|||
|
|
@ -296,7 +296,7 @@ After final technique element:
|
|||
|
||||
#### If 'C' (Move to organization):
|
||||
|
||||
- **Append the technique execution content to `{output_folder}/analysis/brainstorming-session-{{date}}.md`**
|
||||
- **Append the technique execution content to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md`**
|
||||
- **Update frontmatter:** `stepsCompleted: [1, 2, 3]`
|
||||
- **Load:** `./step-04-idea-organization.md`
|
||||
|
||||
|
|
@ -356,7 +356,7 @@ _[Short narrative describing the user and AI collaboration journey - what made t
|
|||
|
||||
## APPEND TO DOCUMENT:
|
||||
|
||||
When user selects 'C', append the content directly to `{output_folder}/analysis/brainstorming-session-{{date}}.md` using the structure from above.
|
||||
When user selects 'C', append the content directly to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md` using the structure from above.
|
||||
|
||||
## SUCCESS METRICS:
|
||||
|
||||
|
|
|
|||
|
|
@ -253,14 +253,14 @@ Provide final session wrap-up and forward guidance:
|
|||
|
||||
#### If [C] Complete:
|
||||
|
||||
- **Append the final session content to `{output_folder}/analysis/brainstorming-session-{{date}}.md`**
|
||||
- **Append the final session content to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md`**
|
||||
- Update frontmatter: `stepsCompleted: [1, 2, 3, 4]`
|
||||
- Set `session_active: false` and `workflow_completed: true`
|
||||
- Complete workflow with positive closure message
|
||||
|
||||
## APPEND TO DOCUMENT:
|
||||
|
||||
When user selects 'C', append the content directly to `{output_folder}/analysis/brainstorming-session-{{date}}.md` using the structure from step 7.
|
||||
When user selects 'C', append the content directly to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md` using the structure from step 7.
|
||||
|
||||
## SUCCESS METRICS:
|
||||
|
||||
|
|
|
|||
|
|
@ -45,7 +45,7 @@ Load config from `{project-root}/_bmad/core/config.yaml` and resolve:
|
|||
- `installed_path` = `{project-root}/_bmad/core/workflows/brainstorming`
|
||||
- `template_path` = `{installed_path}/template.md`
|
||||
- `brain_techniques_path` = `{installed_path}/brain-methods.csv`
|
||||
- `default_output_file` = `{output_folder}/analysis/brainstorming-session-{{date}}.md`
|
||||
- `default_output_file` = `{output_folder}/brainstorming/brainstorming-session-{{date}}.md`
|
||||
- `context_file` = Optional context file path from workflow invocation for project-specific guidance
|
||||
- `advancedElicitationTask` = `{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml`
|
||||
|
||||
|
|
|
|||
|
|
@ -23,9 +23,9 @@ agent:
|
|||
- trigger: DS or fuzzy match on dev-story
|
||||
description: "[DS] Another two-word compound trigger"
|
||||
action: dev_story
|
||||
- trigger: WI or fuzzy match on workflow-init-process
|
||||
- trigger: WI or fuzzy match on three-name-thing
|
||||
description: "[WI] Three-word compound trigger (uses first 2 words for shortcut)"
|
||||
action: workflow_init
|
||||
action: three_name_thing
|
||||
- trigger: H or fuzzy match on help
|
||||
description: "[H] Single-word compound trigger (1-letter shortcut)"
|
||||
action: help
|
||||
|
|
|
|||
|
|
@ -0,0 +1,83 @@
|
|||
import fs from 'fs-extra';
|
||||
import path from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import yaml from 'yaml';
|
||||
import xml2js from 'xml2js';
|
||||
|
||||
// Resolve this module's directory. ES modules have no built-in __dirname,
// so it is derived from import.meta.url; used below to locate test/fixtures/.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
|
||||
|
||||
/**
 * Read a fixture file from test/fixtures/ as UTF-8 text.
 * @param {string} fixturePath - Path relative to test/fixtures/
 * @returns {Promise<string>} Raw file contents
 */
export async function loadFixture(fixturePath) {
  const resolved = path.join(__dirname, '..', 'fixtures', fixturePath);
  return fs.readFile(resolved, 'utf8');
}
|
||||
|
||||
/**
 * Read a YAML fixture and parse it into an object.
 * @param {string} fixturePath - Path relative to test/fixtures/
 * @returns {Promise<Object>} Parsed YAML data
 */
export async function loadYamlFixture(fixturePath) {
  const raw = await loadFixture(fixturePath);
  return yaml.parse(raw);
}
|
||||
|
||||
/**
 * Read an XML fixture and parse it with xml2js.
 * @param {string} fixturePath - Path relative to test/fixtures/
 * @returns {Promise<Object>} Parsed XML data
 */
export async function loadXmlFixture(fixturePath) {
  const raw = await loadFixture(fixturePath);
  return xml2js.parseStringPromise(raw);
}
|
||||
|
||||
/**
 * Read a JSON fixture and parse it.
 * @param {string} fixturePath - Path relative to test/fixtures/
 * @returns {Promise<Object>} Parsed JSON data
 */
export async function loadJsonFixture(fixturePath) {
  const raw = await loadFixture(fixturePath);
  return JSON.parse(raw);
}
|
||||
|
||||
/**
 * Check whether a fixture file is present under test/fixtures/.
 * @param {string} fixturePath - Path relative to test/fixtures/
 * @returns {Promise<boolean>} True when the fixture exists
 */
export async function fixtureExists(fixturePath) {
  const candidate = path.join(__dirname, '..', 'fixtures', fixturePath);
  return fs.pathExists(candidate);
}
|
||||
|
||||
/**
 * Build the absolute path of a fixture under test/fixtures/.
 * @param {string} fixturePath - Path relative to test/fixtures/
 * @returns {string} Absolute fixture path
 */
export function getFixturePath(fixturePath) {
  const segments = [__dirname, '..', 'fixtures', fixturePath];
  return path.join(...segments);
}
|
||||
|
||||
/**
 * Write a file (creating any missing parent directories) inside a temp dir.
 * NOTE: this is a local duplicate of temp-dir.js's createTestFile kept here
 * for convenience — it is not actually a re-export.
 * @param {string} tmpDir - Temporary directory path
 * @param {string} relativePath - Relative path for the file
 * @param {string} content - File content
 * @returns {Promise<string>} Full path to the created file
 */
export async function createTestFile(tmpDir, relativePath, content) {
  const target = path.join(tmpDir, relativePath);
  await fs.ensureDir(path.dirname(target));
  await fs.writeFile(target, content, 'utf8');
  return target;
}
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
import fs from 'fs-extra';
|
||||
import path from 'node:path';
|
||||
import os from 'node:os';
|
||||
import { randomUUID } from 'node:crypto';
|
||||
|
||||
/**
 * Make a uniquely named temporary directory for a test.
 * @param {string} prefix - Prefix for the directory name
 * @returns {Promise<string>} Path to the created temporary directory
 */
export async function createTempDir(prefix = 'bmad-test-') {
  const dir = path.join(os.tmpdir(), prefix + randomUUID());
  await fs.ensureDir(dir);
  return dir;
}
|
||||
|
||||
/**
 * Remove a temporary directory if it is still present.
 * @param {string} tmpDir - Path to the temporary directory
 * @returns {Promise<void>}
 */
export async function cleanupTempDir(tmpDir) {
  const present = await fs.pathExists(tmpDir);
  if (present) {
    await fs.remove(tmpDir);
  }
}
|
||||
|
||||
/**
 * Run a test callback with a freshly created temp directory,
 * guaranteeing cleanup even when the callback throws.
 * @param {Function} testFn - Receives the temp directory path
 * @returns {Promise<void>}
 */
export async function withTempDir(testFn) {
  const dir = await createTempDir();
  try {
    await testFn(dir);
  } finally {
    await cleanupTempDir(dir);
  }
}
|
||||
|
||||
/**
 * Write a file (creating any missing parent directories) inside a temp dir.
 * @param {string} tmpDir - Temporary directory path
 * @param {string} relativePath - Relative path for the file
 * @param {string} content - File content
 * @returns {Promise<string>} Full path to the created file
 */
export async function createTestFile(tmpDir, relativePath, content) {
  const target = path.join(tmpDir, relativePath);
  await fs.ensureDir(path.dirname(target));
  await fs.writeFile(target, content, 'utf8');
  return target;
}
|
||||
|
||||
/**
 * Write several files into a temp directory.
 * Files are created sequentially, in Object.entries order.
 * @param {string} tmpDir - Temporary directory path
 * @param {Object} files - Map of relative path -> file content
 * @returns {Promise<string[]>} Full paths of the created files
 */
export async function createTestFiles(tmpDir, files) {
  const created = [];
  for (const [relativePath, content] of Object.entries(files)) {
    created.push(await createTestFile(tmpDir, relativePath, content));
  }
  return created;
}
|
||||
|
||||
/**
 * Create a set of (possibly nested) directories under a temp directory.
 * @param {string} tmpDir - Temporary directory path
 * @param {string[]} dirs - Relative directory paths to create
 * @returns {Promise<void>}
 */
export async function createTestDirs(tmpDir, dirs) {
  for (const relativeDir of dirs) {
    await fs.ensureDir(path.join(tmpDir, relativeDir));
  }
}
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
import { beforeEach, afterEach } from 'vitest';

// Global test setup: snapshot process.env once, restore it after every
// test so env mutations cannot leak between tests.
beforeEach(() => {
  // Only capture the baseline the first time through.
  globalThis.__originalEnv ??= { ...process.env };
});

afterEach(async () => {
  // Reset process.env to the captured baseline (drops vars added in tests).
  if (globalThis.__originalEnv) {
    process.env = { ...globalThis.__originalEnv };
  }

  // Any global cleanup can go here
});

// Default timeout for file-system-heavy tests; individual tests may override.
const DEFAULT_TIMEOUT = 10_000; // 10 seconds

// Expose the timeout globally for tests that want it.
globalThis.DEFAULT_TEST_TIMEOUT = DEFAULT_TIMEOUT;
|
||||
|
|
@ -0,0 +1,428 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { Config } from '../../../tools/cli/lib/config.js';
|
||||
import { createTempDir, cleanupTempDir, createTestFile } from '../../helpers/temp-dir.js';
|
||||
import fs from 'fs-extra';
|
||||
import path from 'node:path';
|
||||
import yaml from 'yaml';
|
||||
|
||||
describe('Config', () => {
|
||||
let tmpDir;
|
||||
let config;
|
||||
|
||||
beforeEach(async () => {
|
||||
tmpDir = await createTempDir();
|
||||
config = new Config();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await cleanupTempDir(tmpDir);
|
||||
});
|
||||
|
||||
describe('loadYaml()', () => {
|
||||
it('should load and parse YAML file', async () => {
|
||||
const yamlContent = {
|
||||
key1: 'value1',
|
||||
key2: { nested: 'value2' },
|
||||
array: [1, 2, 3],
|
||||
};
|
||||
|
||||
const configPath = path.join(tmpDir, 'config.yaml');
|
||||
await fs.writeFile(configPath, yaml.stringify(yamlContent));
|
||||
|
||||
const result = await config.loadYaml(configPath);
|
||||
|
||||
expect(result).toEqual(yamlContent);
|
||||
});
|
||||
|
||||
it('should throw error for non-existent file', async () => {
|
||||
const nonExistent = path.join(tmpDir, 'missing.yaml');
|
||||
|
||||
await expect(config.loadYaml(nonExistent)).rejects.toThrow('Configuration file not found');
|
||||
});
|
||||
|
||||
it('should handle Unicode content', async () => {
|
||||
const yamlContent = {
|
||||
chinese: '测试',
|
||||
russian: 'Тест',
|
||||
japanese: 'テスト',
|
||||
};
|
||||
|
||||
const configPath = path.join(tmpDir, 'unicode.yaml');
|
||||
await fs.writeFile(configPath, yaml.stringify(yamlContent));
|
||||
|
||||
const result = await config.loadYaml(configPath);
|
||||
|
||||
expect(result.chinese).toBe('测试');
|
||||
expect(result.russian).toBe('Тест');
|
||||
expect(result.japanese).toBe('テスト');
|
||||
});
|
||||
});
|
||||
|
||||
// Note: saveYaml() is not tested because it uses yaml.dump() which doesn't exist
|
||||
// in yaml 2.7.0 (should use yaml.stringify). This method is never called in production
|
||||
// and represents dead code with a latent bug.
|
||||
|
||||
describe('processConfig()', () => {
|
||||
it('should replace {project-root} placeholder', async () => {
|
||||
const configPath = path.join(tmpDir, 'config.txt');
|
||||
await fs.writeFile(configPath, 'Root is {project-root}/bmad');
|
||||
|
||||
await config.processConfig(configPath, { root: '/home/user/project' });
|
||||
|
||||
const content = await fs.readFile(configPath, 'utf8');
|
||||
expect(content).toBe('Root is /home/user/project/bmad');
|
||||
});
|
||||
|
||||
it('should replace {module} placeholder', async () => {
|
||||
const configPath = path.join(tmpDir, 'config.txt');
|
||||
await fs.writeFile(configPath, 'Module: {module}');
|
||||
|
||||
await config.processConfig(configPath, { module: 'bmm' });
|
||||
|
||||
const content = await fs.readFile(configPath, 'utf8');
|
||||
expect(content).toBe('Module: bmm');
|
||||
});
|
||||
|
||||
it('should replace {version} placeholder with package version', async () => {
|
||||
const configPath = path.join(tmpDir, 'config.txt');
|
||||
await fs.writeFile(configPath, 'Version: {version}');
|
||||
|
||||
await config.processConfig(configPath);
|
||||
|
||||
const content = await fs.readFile(configPath, 'utf8');
|
||||
expect(content).toMatch(/Version: \d+\.\d+\.\d+/); // Semver format
|
||||
});
|
||||
|
||||
it('should replace {date} placeholder with current date', async () => {
|
||||
const configPath = path.join(tmpDir, 'config.txt');
|
||||
await fs.writeFile(configPath, 'Date: {date}');
|
||||
|
||||
await config.processConfig(configPath);
|
||||
|
||||
const content = await fs.readFile(configPath, 'utf8');
|
||||
expect(content).toMatch(/Date: \d{4}-\d{2}-\d{2}/); // YYYY-MM-DD
|
||||
});
|
||||
|
||||
it('should replace multiple placeholders', async () => {
|
||||
const configPath = path.join(tmpDir, 'config.txt');
|
||||
await fs.writeFile(configPath, 'Root: {project-root}, Module: {module}, Version: {version}');
|
||||
|
||||
await config.processConfig(configPath, {
|
||||
root: '/project',
|
||||
module: 'test',
|
||||
});
|
||||
|
||||
const content = await fs.readFile(configPath, 'utf8');
|
||||
expect(content).toContain('Root: /project');
|
||||
expect(content).toContain('Module: test');
|
||||
expect(content).toMatch(/Version: \d+\.\d+/);
|
||||
});
|
||||
|
||||
it('should replace custom placeholders', async () => {
|
||||
const configPath = path.join(tmpDir, 'config.txt');
|
||||
await fs.writeFile(configPath, 'Custom: {custom-placeholder}');
|
||||
|
||||
await config.processConfig(configPath, { '{custom-placeholder}': 'custom-value' });
|
||||
|
||||
const content = await fs.readFile(configPath, 'utf8');
|
||||
expect(content).toBe('Custom: custom-value');
|
||||
});
|
||||
|
||||
it('should escape regex special characters in placeholders', async () => {
|
||||
const configPath = path.join(tmpDir, 'config.txt');
|
||||
await fs.writeFile(configPath, 'Path: {project-root}/test');
|
||||
|
||||
// Test that {project-root} doesn't get interpreted as regex
|
||||
await config.processConfig(configPath, {
|
||||
root: '/path/with/special$chars^',
|
||||
});
|
||||
|
||||
const content = await fs.readFile(configPath, 'utf8');
|
||||
expect(content).toBe('Path: /path/with/special$chars^/test');
|
||||
});
|
||||
|
||||
it('should handle placeholders with regex metacharacters in values', async () => {
|
||||
const configPath = path.join(tmpDir, 'config.txt');
|
||||
await fs.writeFile(configPath, 'Value: {placeholder}');
|
||||
|
||||
await config.processConfig(configPath, {
|
||||
'{placeholder}': String.raw`value with $1 and \backslash`,
|
||||
});
|
||||
|
||||
const content = await fs.readFile(configPath, 'utf8');
|
||||
expect(content).toBe(String.raw`Value: value with $1 and \backslash`);
|
||||
});
|
||||
|
||||
it('should replace all occurrences of placeholder', async () => {
|
||||
const configPath = path.join(tmpDir, 'config.txt');
|
||||
await fs.writeFile(configPath, '{module} is here and {module} is there and {module} everywhere');
|
||||
|
||||
await config.processConfig(configPath, { module: 'BMM' });
|
||||
|
||||
const content = await fs.readFile(configPath, 'utf8');
|
||||
expect(content).toBe('BMM is here and BMM is there and BMM everywhere');
|
||||
});
|
||||
});
|
||||
|
||||
describe('deepMerge()', () => {
  it('should merge shallow objects', () => {
    const base = { a: 1, b: 2 };
    const patch = { b: 3, c: 4 };

    expect(config.deepMerge(base, patch)).toEqual({ a: 1, b: 3, c: 4 });
  });

  it('should merge nested objects', () => {
    const base = { level1: { a: 1, b: 2 } };
    const patch = { level1: { b: 3, c: 4 } };

    const merged = config.deepMerge(base, patch);

    expect(merged.level1).toEqual({ a: 1, b: 3, c: 4 });
  });

  it('should not merge arrays (just replace)', () => {
    // Arrays are swapped wholesale, never element-merged.
    const merged = config.deepMerge({ items: [1, 2, 3] }, { items: [4, 5] });

    expect(merged.items).toEqual([4, 5]);
  });

  it('should handle null values', () => {
    const merged = config.deepMerge({ a: 'value', b: null }, { a: null, c: 'new' });

    expect(merged).toEqual({ a: null, b: null, c: 'new' });
  });

  it('should not mutate original objects', () => {
    const base = { a: 1 };
    const patch = { b: 2 };

    config.deepMerge(base, patch);

    expect(base).toEqual({ a: 1 });
    expect(patch).toEqual({ b: 2 });
  });
});

describe('mergeConfigs()', () => {
  it('should delegate to deepMerge', () => {
    const merged = config.mergeConfigs({ setting1: 'base' }, { setting2: 'override' });

    expect(merged).toEqual({ setting1: 'base', setting2: 'override' });
  });
});

describe('isObject()', () => {
  it('should return true for plain objects', () => {
    expect(config.isObject({})).toBe(true);
    expect(config.isObject({ key: 'value' })).toBe(true);
  });

  it('should return false for arrays', () => {
    expect(config.isObject([])).toBe(false);
  });

  it('should return false for null', () => {
    // toBeFalsy (not toBe(false)) on purpose: a short-circuiting
    // implementation may return null itself rather than a strict boolean.
    expect(config.isObject(null)).toBeFalsy();
  });

  it('should return false for primitives', () => {
    expect(config.isObject('string')).toBe(false);
    expect(config.isObject(42)).toBe(false);
  });
});

describe('getValue() and setValue()', () => {
  it('should get value by dot notation path', () => {
    const obj = { level1: { level2: { value: 'test' } } };

    expect(config.getValue(obj, 'level1.level2.value')).toBe('test');
  });

  it('should set value by dot notation path', () => {
    const obj = { level1: { level2: {} } };

    config.setValue(obj, 'level1.level2.value', 'new value');

    expect(obj.level1.level2.value).toBe('new value');
  });

  it('should return default value for non-existent path', () => {
    const obj = { a: { b: 'value' } };

    expect(config.getValue(obj, 'a.c.d', 'default')).toBe('default');
  });

  it('should return null default when path not found', () => {
    const obj = { a: { b: 'value' } };

    expect(config.getValue(obj, 'a.c.d')).toBeNull();
  });

  it('should handle simple (non-nested) paths', () => {
    const obj = { key: 'value' };

    expect(config.getValue(obj, 'key')).toBe('value');

    config.setValue(obj, 'newKey', 'newValue');
    expect(obj.newKey).toBe('newValue');
  });

  it('should create intermediate objects when setting deep paths', () => {
    const obj = {};

    config.setValue(obj, 'a.b.c.d', 'deep value');

    expect(obj.a.b.c.d).toBe('deep value');
  });
});
|
||||
|
||||
describe('validateConfig()', () => {
  it('should validate required fields', () => {
    const result = config.validateConfig({ field1: 'value1' }, { required: ['field1', 'field2'] });

    expect(result.valid).toBe(false);
    expect(result.errors).toContain('Missing required field: field2');
  });

  it('should pass when all required fields present', () => {
    const cfg = { field1: 'value1', field2: 'value2' };

    const result = config.validateConfig(cfg, { required: ['field1', 'field2'] });

    expect(result.valid).toBe(true);
    expect(result.errors).toHaveLength(0);
  });

  it('should validate field types', () => {
    const cfg = {
      stringField: 'text',
      numberField: '42', // Wrong type
      arrayField: [1, 2, 3],
      objectField: 'not-object', // Wrong type
      boolField: true,
    };
    const schema = {
      properties: {
        stringField: { type: 'string' },
        numberField: { type: 'number' },
        arrayField: { type: 'array' },
        objectField: { type: 'object' },
        boolField: { type: 'boolean' },
      },
    };

    const result = config.validateConfig(cfg, schema);

    expect(result.valid).toBe(false);
    expect(result.errors.some((e) => e.includes('numberField'))).toBe(true);
    expect(result.errors.some((e) => e.includes('objectField'))).toBe(true);
  });

  it('should validate enum values', () => {
    const schema = {
      properties: {
        level: { type: 'string', enum: ['beginner', 'intermediate', 'advanced'] },
      },
    };

    const result = config.validateConfig({ level: 'expert' }, schema);

    expect(result.valid).toBe(false);
    expect(result.errors.some((e) => e.includes('must be one of'))).toBe(true);
  });

  it('should pass validation for valid enum value', () => {
    const schema = {
      properties: {
        level: { type: 'string', enum: ['beginner', 'intermediate', 'advanced'] },
      },
    };

    const result = config.validateConfig({ level: 'intermediate' }, schema);

    expect(result.valid).toBe(true);
  });

  it('should return warnings array', () => {
    const result = config.validateConfig({ field: 'value' }, { required: ['field'] });

    expect(result.warnings).toBeDefined();
    expect(Array.isArray(result.warnings)).toBe(true);
  });
});

describe('edge cases', () => {
  it('should handle empty YAML file', async () => {
    const configPath = path.join(tmpDir, 'empty.yaml');
    await fs.writeFile(configPath, '');

    // Empty YAML parses to null.
    expect(await config.loadYaml(configPath)).toBeNull();
  });

  it('should handle YAML with only comments', async () => {
    const configPath = path.join(tmpDir, 'comments.yaml');
    await fs.writeFile(configPath, '# Just a comment\n# Another comment\n');

    expect(await config.loadYaml(configPath)).toBeNull();
  });

  it('should handle very deep object nesting', () => {
    // Build the same 8-level structure for both sides of the merge.
    const build = (value) => ({
      l1: { l2: { l3: { l4: { l5: { l6: { l7: { l8: { value } } } } } } } },
    });

    const merged = config.deepMerge(build('deep'), build('updated'));

    expect(merged.l1.l2.l3.l4.l5.l6.l7.l8.value).toBe('updated');
  });
});
|
||||
});
|
||||
|
|
@ -0,0 +1,558 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { DependencyResolver } from '../../../tools/cli/installers/lib/core/dependency-resolver.js';
|
||||
import { createTempDir, cleanupTempDir, createTestFile } from '../../helpers/temp-dir.js';
|
||||
import fs from 'fs-extra';
|
||||
import path from 'node:path';
|
||||
|
||||
describe('DependencyResolver - Advanced Scenarios', () => {
|
||||
let tmpDir;
|
||||
let bmadDir;
|
||||
|
||||
beforeEach(async () => {
  tmpDir = await createTempDir();
  bmadDir = path.join(tmpDir, 'src');

  // Lay down the source-tree layout the resolver expects:
  // src/core/* plus one installable module (bmm).
  const layout = [
    ['core', 'agents'],
    ['core', 'tasks'],
    ['core', 'templates'],
    ['modules', 'bmm', 'agents'],
    ['modules', 'bmm', 'tasks'],
    ['modules', 'bmm', 'templates'],
  ];
  for (const segments of layout) {
    await fs.ensureDir(path.join(bmadDir, ...segments));
  }
});

afterEach(async () => {
  await cleanupTempDir(tmpDir);
});
|
||||
|
||||
describe('module path resolution', () => {
  it('should resolve bmad/bmm/tasks/task.md (module path)', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["{project-root}/bmad/bmm/tasks/analyze.md"]
---
<agent>Agent</agent>`,
    );
    await createTestFile(bmadDir, 'modules/bmm/tasks/analyze.md', 'BMM Task');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    const files = [...result.allFiles];
    expect(files.some((f) => f.includes('bmm'))).toBe(true);
    expect(files.some((f) => f.includes('analyze.md'))).toBe(true);
  });

  it('should handle glob in module path bmad/bmm/tasks/*.md', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["{project-root}/bmad/bmm/tasks/*.md"]
---
<agent>Agent</agent>`,
    );
    await createTestFile(bmadDir, 'modules/bmm/tasks/task1.md', 'Task 1');
    await createTestFile(bmadDir, 'modules/bmm/tasks/task2.md', 'Task 2');

    // Include the bmm module so the glob has files to match.
    const result = await new DependencyResolver().resolve(bmadDir, ['bmm']);

    expect(result.allFiles.length).toBeGreaterThanOrEqual(1);
  });

  it('should handle non-existent module path gracefully', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["{project-root}/bmad/nonexistent/tasks/task.md"]
---
<agent>Agent</agent>`,
    );

    const result = await new DependencyResolver().resolve(bmadDir, []);

    // Missing dependencies are skipped, never fatal.
    expect(result.primaryFiles).toHaveLength(1);
  });
});

describe('relative glob patterns', () => {
  it('should resolve relative glob patterns ../tasks/*.md', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["../tasks/*.md"]
---
<agent>Agent</agent>`,
    );
    await createTestFile(bmadDir, 'core/tasks/task1.md', 'Task 1');
    await createTestFile(bmadDir, 'core/tasks/task2.md', 'Task 2');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.allFiles.length).toBeGreaterThanOrEqual(3);
  });

  it('should handle glob pattern with no matches', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["../tasks/nonexistent-*.md"]
---
<agent>Agent</agent>`,
    );

    const result = await new DependencyResolver().resolve(bmadDir, []);

    // An empty glob leaves just the agent itself.
    expect(result.primaryFiles).toHaveLength(1);
  });

  it('should handle glob in non-existent directory', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["../nonexistent/*.md"]
---
<agent>Agent</agent>`,
    );

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.primaryFiles).toHaveLength(1);
  });
});
|
||||
|
||||
describe('template dependencies', () => {
  it('should resolve template with {project-root} prefix', async () => {
    await createTestFile(bmadDir, 'core/agents/agent.md', '<agent>Agent</agent>');
    await createTestFile(
      bmadDir,
      'core/tasks/task.md',
      `---
template: "{project-root}/bmad/core/templates/form.yaml"
---
Task content`,
    );
    await createTestFile(bmadDir, 'core/templates/form.yaml', 'template');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.allFiles.length).toBeGreaterThanOrEqual(1);
  });

  it('should resolve template from module path', async () => {
    await createTestFile(bmadDir, 'modules/bmm/agents/agent.md', '<agent>BMM Agent</agent>');
    await createTestFile(
      bmadDir,
      'modules/bmm/tasks/task.md',
      `---
template: "{project-root}/bmad/bmm/templates/prd-template.yaml"
---
Task`,
    );
    await createTestFile(bmadDir, 'modules/bmm/templates/prd-template.yaml', 'template');

    const result = await new DependencyResolver().resolve(bmadDir, ['bmm']);

    expect(result.allFiles.length).toBeGreaterThanOrEqual(1);
  });

  it('should handle missing template gracefully', async () => {
    await createTestFile(
      bmadDir,
      'core/tasks/task.md',
      `---
template: "../templates/missing.yaml"
---
Task`,
    );

    const result = await new DependencyResolver().resolve(bmadDir, []);

    // A dangling template reference must not crash resolution.
    expect(result).toBeDefined();
  });
});

describe('bmad-path type resolution', () => {
  it('should resolve bmad-path dependencies', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `<agent>
<command exec="bmad/core/tasks/analyze" />
</agent>`,
    );
    await createTestFile(bmadDir, 'core/tasks/analyze.md', 'Task');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect([...result.allFiles].some((f) => f.includes('analyze.md'))).toBe(true);
  });

  it('should resolve bmad-path for module files', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `<agent>
<command exec="bmad/bmm/tasks/create-prd" />
</agent>`,
    );
    await createTestFile(bmadDir, 'modules/bmm/tasks/create-prd.md', 'PRD Task');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect([...result.allFiles].some((f) => f.includes('create-prd.md'))).toBe(true);
  });

  it('should handle non-existent bmad-path gracefully', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `<agent>
<command exec="bmad/core/tasks/missing" />
</agent>`,
    );

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.primaryFiles).toHaveLength(1);
  });
});
|
||||
|
||||
describe('command resolution with modules', () => {
  it('should search multiple modules for @task-name', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `<agent>
Use @task-custom-task
</agent>`,
    );
    await createTestFile(bmadDir, 'modules/bmm/tasks/custom-task.md', 'Custom Task');

    const result = await new DependencyResolver().resolve(bmadDir, ['bmm']);

    expect([...result.allFiles].some((f) => f.includes('custom-task.md'))).toBe(true);
  });

  it('should search multiple modules for @agent-name', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/main.md',
      `<agent>
Use @agent-pm
</agent>`,
    );
    await createTestFile(bmadDir, 'modules/bmm/agents/pm.md', '<agent>PM</agent>');

    const result = await new DependencyResolver().resolve(bmadDir, ['bmm']);

    expect([...result.allFiles].some((f) => f.includes('pm.md'))).toBe(true);
  });

  it('should handle bmad/ path with 4+ segments', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `<agent>
Reference bmad/core/tasks/nested/deep/task
</agent>`,
    );
    await createTestFile(bmadDir, 'core/tasks/nested/deep/task.md', 'Deep task');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    // Deeply nested command paths may or may not be supported;
    // the contract exercised here is only "does not crash".
    expect(result.primaryFiles.length).toBeGreaterThanOrEqual(1);
  });

  it('should handle bmad path with .md extension already', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `<agent>
Use bmad/core/tasks/task.md explicitly
</agent>`,
    );
    await createTestFile(bmadDir, 'core/tasks/task.md', 'Task');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect([...result.allFiles].some((f) => f.includes('task.md'))).toBe(true);
  });
});
|
||||
|
||||
describe('verbose mode', () => {
  /**
   * Run `fn` with console.log captured into an array, restoring the
   * original logger in a finally block. The original tests reassigned
   * console.log with no try/finally, so a throwing resolve() would leave
   * the logger patched and silently swallow output in later tests.
   *
   * @param {() => Promise<unknown>} fn - async work to run under capture
   * @returns {Promise<string[]>} the captured log lines
   */
  async function captureLogs(fn) {
    const logs = [];
    const originalLog = console.log;
    console.log = (...args) => logs.push(args.join(' '));
    try {
      await fn();
    } finally {
      console.log = originalLog;
    }
    return logs;
  }

  it('should include console output when verbose is true', async () => {
    await createTestFile(bmadDir, 'core/agents/agent.md', '<agent>Test</agent>');
    const resolver = new DependencyResolver();

    const logs = await captureLogs(() => resolver.resolve(bmadDir, [], { verbose: true }));

    // Verbose resolution must produce at least some console output.
    expect(logs.length).toBeGreaterThan(0);
  });

  it('should not log when verbose is false', async () => {
    await createTestFile(bmadDir, 'core/agents/agent.md', '<agent>Test</agent>');
    const resolver = new DependencyResolver();

    const logs = await captureLogs(() => resolver.resolve(bmadDir, [], { verbose: false }));

    // There might be warns, but no regular logs in non-verbose mode.
    expect(logs.length).toBe(0);
  });
});
|
||||
|
||||
describe('createWebBundle()', () => {
  it('should create bundle with metadata', async () => {
    await createTestFile(bmadDir, 'core/agents/agent.md', '<agent>Agent</agent>');
    await createTestFile(bmadDir, 'core/tasks/task.md', 'Task');

    const resolver = new DependencyResolver();
    const resolution = await resolver.resolve(bmadDir, []);
    const bundle = await resolver.createWebBundle(resolution);

    expect(bundle.metadata).toBeDefined();
    expect(bundle.metadata.modules).toContain('core');
    expect(bundle.metadata.totalFiles).toBeGreaterThan(0);
  });

  it('should organize bundle by file type', async () => {
    await createTestFile(bmadDir, 'core/agents/agent.md', '<agent>Agent</agent>');
    await createTestFile(bmadDir, 'core/tasks/task.md', 'Task');
    await createTestFile(bmadDir, 'core/templates/template.yaml', 'template');

    const resolver = new DependencyResolver();
    const bundle = await resolver.createWebBundle(await resolver.resolve(bmadDir, []));

    // Bundle groups content by kind.
    expect(bundle.agents).toBeDefined();
    expect(bundle.tasks).toBeDefined();
    expect(bundle.templates).toBeDefined();
  });
});

describe('single string dependency (not array)', () => {
  it('should handle single string dependency (converted to array)', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: "{project-root}/bmad/core/tasks/task.md"
---
<agent>Agent</agent>`,
    );
    await createTestFile(bmadDir, 'core/tasks/task.md', 'Task');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    // A bare string is normalized to a one-element array internally.
    expect(result.allFiles.length).toBeGreaterThanOrEqual(2);
  });

  it('should handle single string template', async () => {
    await createTestFile(
      bmadDir,
      'core/tasks/task.md',
      `---
template: "../templates/form.yaml"
---
Task`,
    );
    await createTestFile(bmadDir, 'core/templates/form.yaml', 'template');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect([...result.allFiles].some((f) => f.includes('form.yaml'))).toBe(true);
  });
});
|
||||
|
||||
describe('missing dependency tracking', () => {
  it('should track missing relative file dependencies', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["../tasks/missing-file.md"]
---
<agent>Agent</agent>`,
    );

    const resolver = new DependencyResolver();
    const result = await resolver.resolve(bmadDir, []);

    // Resolution must survive a dangling dependency.
    expect(result).toBeDefined();
    // The previous assertion (`missing.length >= 0`) was a tautology that
    // could never fail; assert the tracked collection actually exists.
    expect(Array.isArray(result.missing)).toBe(true);
  });
});
|
||||
|
||||
describe('reportResults()', () => {
  /**
   * Capture console.log lines while `fn` runs, restoring the logger in a
   * finally block. The original tests patched console.log without
   * try/finally, so a throwing resolve() would leave the patch in place
   * and corrupt output capture in subsequent tests.
   *
   * @param {() => Promise<unknown>} fn - async work to run under capture
   * @returns {Promise<string[]>} captured log lines
   */
  async function withCapturedLogs(fn) {
    const logs = [];
    const originalLog = console.log;
    console.log = (...args) => logs.push(args.join(' '));
    try {
      await fn();
    } finally {
      console.log = originalLog;
    }
    return logs;
  }

  it('should report results with file counts', async () => {
    await createTestFile(bmadDir, 'core/agents/agent1.md', '<agent>1</agent>');
    await createTestFile(bmadDir, 'core/agents/agent2.md', '<agent>2</agent>');
    await createTestFile(bmadDir, 'core/tasks/task1.md', 'Task 1');
    await createTestFile(bmadDir, 'core/tasks/task2.md', 'Task 2');
    await createTestFile(bmadDir, 'core/templates/template.yaml', 'Template');

    const resolver = new DependencyResolver();
    const logs = await withCapturedLogs(() => resolver.resolve(bmadDir, [], { verbose: true }));

    // Verbose resolution reports per-module statistics.
    expect(logs.some((log) => log.includes('CORE'))).toBe(true);
    expect(logs.some((log) => log.includes('Agents:'))).toBe(true);
    expect(logs.some((log) => log.includes('Tasks:'))).toBe(true);
  });

  it('should report missing dependencies', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["../tasks/missing.md"]
---
<agent>Agent</agent>`,
    );

    const resolver = new DependencyResolver();
    const logs = await withCapturedLogs(() => resolver.resolve(bmadDir, [], { verbose: true }));

    // May log a warning about the missing dependency.
    expect(logs.length).toBeGreaterThan(0);
  });
});
|
||||
|
||||
describe('file without .md extension in command', () => {
  it('should add .md extension to bmad/ commands without extension', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `<agent>
Use bmad/core/tasks/analyze without extension
</agent>`,
    );
    await createTestFile(bmadDir, 'core/tasks/analyze.md', 'Analyze');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect([...result.allFiles].some((f) => f.includes('analyze.md'))).toBe(true);
  });
});

describe('module structure detection', () => {
  it('should detect source directory structure (src/)', async () => {
    // The fixture layout from beforeEach already uses src/.
    await createTestFile(bmadDir, 'core/agents/agent.md', '<agent>Core</agent>');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.primaryFiles.length).toBeGreaterThanOrEqual(1);
  });

  it('should detect installed directory structure (no src/)', async () => {
    // An installed tree has core/ and modules/ at its root.
    const installedDir = path.join(tmpDir, 'installed');
    await fs.ensureDir(path.join(installedDir, 'core', 'agents'));
    await fs.ensureDir(path.join(installedDir, 'modules', 'bmm', 'agents'));
    await createTestFile(installedDir, 'core/agents/agent.md', '<agent>Core</agent>');

    const result = await new DependencyResolver().resolve(installedDir, []);

    expect(result.primaryFiles.length).toBeGreaterThanOrEqual(1);
  });
});

describe('dependency deduplication', () => {
  it('should not include same file twice', async () => {
    // Two agents declare the identical dependency.
    const frontmatter = `---
dependencies: ["{project-root}/bmad/core/tasks/shared.md"]
---
`;
    await createTestFile(bmadDir, 'core/agents/agent1.md', `${frontmatter}<agent>1</agent>`);
    await createTestFile(bmadDir, 'core/agents/agent2.md', `${frontmatter}<agent>2</agent>`);
    await createTestFile(bmadDir, 'core/tasks/shared.md', 'Shared');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    // 2 agents + 1 shared task = 3 unique files.
    expect(result.allFiles).toHaveLength(3);
  });
});
|
||||
});
|
||||
|
|
@ -0,0 +1,796 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { DependencyResolver } from '../../../tools/cli/installers/lib/core/dependency-resolver.js';
|
||||
import { createTempDir, cleanupTempDir, createTestFile } from '../../helpers/temp-dir.js';
|
||||
import fs from 'fs-extra';
|
||||
import path from 'node:path';
|
||||
|
||||
describe('DependencyResolver', () => {
|
||||
let tmpDir;
|
||||
let bmadDir;
|
||||
|
||||
beforeEach(async () => {
  tmpDir = await createTempDir();
  // Fixture layout: tmpDir/src/core and tmpDir/src/modules/.
  bmadDir = path.join(tmpDir, 'src');
  const layout = [
    ['core', 'agents'],
    ['core', 'tasks'],
    ['core', 'templates'],
    ['modules', 'bmm', 'agents'],
    ['modules', 'bmm', 'tasks'],
  ];
  for (const segments of layout) {
    await fs.ensureDir(path.join(bmadDir, ...segments));
  }
});

afterEach(async () => {
  await cleanupTempDir(tmpDir);
});
|
||||
|
||||
describe('basic resolution', () => {
  it('should resolve core agents with no dependencies', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/simple.md',
      `---
name: simple
---
<agent>Simple agent</agent>`,
    );

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.primaryFiles).toHaveLength(1);
    expect(result.primaryFiles[0].type).toBe('agent');
    expect(result.primaryFiles[0].module).toBe('core');
    expect(result.allFiles).toHaveLength(1);
  });

  it('should resolve multiple agents from same module', async () => {
    for (const n of [1, 2, 3]) {
      await createTestFile(bmadDir, `core/agents/agent${n}.md`, `<agent>Agent ${n}</agent>`);
    }

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.primaryFiles).toHaveLength(3);
    expect(result.allFiles).toHaveLength(3);
  });

  it('should always include core module', async () => {
    await createTestFile(bmadDir, 'core/agents/core-agent.md', '<agent>Core</agent>');

    const result = await new DependencyResolver().resolve(bmadDir, ['bmm']);

    // Core is implicit even though only 'bmm' was requested.
    expect(result.byModule.core).toBeDefined();
  });

  it('should skip agents with localskip="true"', async () => {
    await createTestFile(bmadDir, 'core/agents/normal.md', '<agent>Normal agent</agent>');
    await createTestFile(bmadDir, 'core/agents/webonly.md', '<agent localskip="true">Web only agent</agent>');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.primaryFiles).toHaveLength(1);
    expect(result.primaryFiles[0].name).toBe('normal');
  });
});

describe('path resolution variations', () => {
  it('should resolve {project-root}/bmad/core/tasks/foo.md dependencies', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["{project-root}/bmad/core/tasks/task.md"]
---
<agent>Agent with task dependency</agent>`,
    );
    await createTestFile(bmadDir, 'core/tasks/task.md', 'Task content');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.allFiles).toHaveLength(2);
    expect(result.dependencies.size).toBeGreaterThan(0);
    expect([...result.dependencies].some((d) => d.includes('task.md'))).toBe(true);
  });

  it('should resolve relative path dependencies', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
template: "../templates/template.yaml"
---
<agent>Agent with template</agent>`,
    );
    await createTestFile(bmadDir, 'core/templates/template.yaml', 'template: data');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    expect(result.allFiles).toHaveLength(2);
    expect([...result.dependencies].some((d) => d.includes('template.yaml'))).toBe(true);
  });

  it('should resolve glob pattern dependencies', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies: ["{project-root}/bmad/core/tasks/*.md"]
---
<agent>Agent with multiple tasks</agent>`,
    );
    for (const n of [1, 2, 3]) {
      await createTestFile(bmadDir, `core/tasks/task${n}.md`, `Task ${n}`);
    }

    const result = await new DependencyResolver().resolve(bmadDir, []);

    // Agent + 3 tasks.
    expect(result.allFiles).toHaveLength(4);
  });

  it('should resolve array of dependencies', async () => {
    await createTestFile(
      bmadDir,
      'core/agents/agent.md',
      `---
dependencies:
- "{project-root}/bmad/core/tasks/task1.md"
- "{project-root}/bmad/core/tasks/task2.md"
- "../templates/template.yaml"
---
<agent>Agent</agent>`,
    );
    await createTestFile(bmadDir, 'core/tasks/task1.md', 'Task 1');
    await createTestFile(bmadDir, 'core/tasks/task2.md', 'Task 2');
    await createTestFile(bmadDir, 'core/templates/template.yaml', 'template');

    const result = await new DependencyResolver().resolve(bmadDir, []);

    // Agent + 2 tasks + template.
    expect(result.allFiles).toHaveLength(4);
  });
});
|
||||
|
||||
describe('command reference resolution', () => {
|
||||
it('should resolve @task-name references', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent.md',
|
||||
`<agent>
|
||||
Use @task-analyze for analysis
|
||||
</agent>`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/tasks/analyze.md', 'Analyze task');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect(result.allFiles.length).toBeGreaterThanOrEqual(2);
|
||||
expect([...result.allFiles].some((f) => f.includes('analyze.md'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should resolve @agent-name references', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/main.md',
|
||||
`<agent>
|
||||
Reference @agent-helper for help
|
||||
</agent>`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/agents/helper.md', '<agent>Helper</agent>');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect(result.allFiles).toHaveLength(2);
|
||||
expect([...result.allFiles].some((f) => f.includes('helper.md'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should resolve bmad/module/type/name references', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent.md',
|
||||
`<agent>
|
||||
See bmad/core/tasks/review
|
||||
</agent>`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/tasks/review.md', 'Review task');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect([...result.allFiles].some((f) => f.includes('review.md'))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('exec and tmpl attribute parsing', () => {
|
||||
it('should parse exec attributes from command tags', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent.md',
|
||||
`<agent>
|
||||
<command exec="{project-root}/bmad/core/tasks/task.md" />
|
||||
</agent>`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/tasks/task.md', 'Task');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect([...result.allFiles].some((f) => f.includes('task.md'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should parse tmpl attributes from command tags', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent.md',
|
||||
`<agent>
|
||||
<command tmpl="../templates/form.yaml" />
|
||||
</agent>`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/templates/form.yaml', 'template');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect([...result.allFiles].some((f) => f.includes('form.yaml'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should ignore exec="*" wildcard', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent.md',
|
||||
`<agent>
|
||||
<command exec="*" description="Dynamic" />
|
||||
</agent>`,
|
||||
);
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
// Should only have the agent itself
|
||||
expect(result.primaryFiles).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('multi-pass dependency resolution', () => {
|
||||
it('should resolve single-level dependencies (A→B)', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent-a.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/tasks/task-b.md"]
|
||||
---
|
||||
<agent>Agent A</agent>`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/tasks/task-b.md', 'Task B');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect(result.allFiles).toHaveLength(2);
|
||||
// Primary files includes both agents and tasks from selected modules
|
||||
expect(result.primaryFiles.length).toBeGreaterThanOrEqual(1);
|
||||
expect(result.dependencies.size).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
it('should resolve two-level dependencies (A→B→C)', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent-a.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/tasks/task-b.md"]
|
||||
---
|
||||
<agent>Agent A</agent>`,
|
||||
);
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/tasks/task-b.md',
|
||||
`---
|
||||
template: "../templates/template-c.yaml"
|
||||
---
|
||||
Task B content`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/templates/template-c.yaml', 'template: data');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect(result.allFiles).toHaveLength(3);
|
||||
// Primary files includes agents and tasks
|
||||
expect(result.primaryFiles.length).toBeGreaterThanOrEqual(1);
|
||||
// Total dependencies (direct + transitive) should be at least 2
|
||||
const totalDeps = result.dependencies.size + result.transitiveDependencies.size;
|
||||
expect(totalDeps).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
it('should resolve three-level dependencies (A→B→C→D)', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent-a.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/tasks/task-b.md"]
|
||||
---
|
||||
<agent>A</agent>`,
|
||||
);
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/tasks/task-b.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/tasks/task-c.md"]
|
||||
---
|
||||
Task B`,
|
||||
);
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/tasks/task-c.md',
|
||||
`---
|
||||
template: "../templates/template-d.yaml"
|
||||
---
|
||||
Task C`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/templates/template-d.yaml', 'Template D');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect(result.allFiles).toHaveLength(4);
|
||||
});
|
||||
|
||||
it('should resolve multiple branches (A→B, A→C)', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent-a.md',
|
||||
`---
|
||||
dependencies:
|
||||
- "{project-root}/bmad/core/tasks/task-b.md"
|
||||
- "{project-root}/bmad/core/tasks/task-c.md"
|
||||
---
|
||||
<agent>A</agent>`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/tasks/task-b.md', 'Task B');
|
||||
await createTestFile(bmadDir, 'core/tasks/task-c.md', 'Task C');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect(result.allFiles).toHaveLength(3);
|
||||
expect(result.dependencies.size).toBe(2);
|
||||
});
|
||||
|
||||
it('should deduplicate diamond pattern (A→B,C; B,C→D)', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent-a.md',
|
||||
`---
|
||||
dependencies:
|
||||
- "{project-root}/bmad/core/tasks/task-b.md"
|
||||
- "{project-root}/bmad/core/tasks/task-c.md"
|
||||
---
|
||||
<agent>A</agent>`,
|
||||
);
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/tasks/task-b.md',
|
||||
`---
|
||||
template: "../templates/shared.yaml"
|
||||
---
|
||||
Task B`,
|
||||
);
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/tasks/task-c.md',
|
||||
`---
|
||||
template: "../templates/shared.yaml"
|
||||
---
|
||||
Task C`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/templates/shared.yaml', 'Shared template');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
// A + B + C + shared = 4 unique files (D appears twice but should be deduped)
|
||||
expect(result.allFiles).toHaveLength(4);
|
||||
});
|
||||
});
|
||||
|
||||
describe('circular dependency detection', () => {
|
||||
it('should detect direct circular dependency (A→B→A)', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent-a.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/tasks/task-b.md"]
|
||||
---
|
||||
<agent>A</agent>`,
|
||||
);
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/tasks/task-b.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/agents/agent-a.md"]
|
||||
---
|
||||
Task B`,
|
||||
);
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
|
||||
// Should not hang or crash
|
||||
const resultPromise = resolver.resolve(bmadDir, []);
|
||||
await expect(resultPromise).resolves.toBeDefined();
|
||||
|
||||
const result = await resultPromise;
|
||||
// Should process both files without infinite loop
|
||||
expect(result.allFiles.length).toBeGreaterThanOrEqual(2);
|
||||
}, 5000); // 5 second timeout to ensure no infinite loop
|
||||
|
||||
it('should detect indirect circular dependency (A→B→C→A)', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent-a.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/tasks/task-b.md"]
|
||||
---
|
||||
<agent>A</agent>`,
|
||||
);
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/tasks/task-b.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/tasks/task-c.md"]
|
||||
---
|
||||
Task B`,
|
||||
);
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/tasks/task-c.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/agents/agent-a.md"]
|
||||
---
|
||||
Task C`,
|
||||
);
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const resultPromise = resolver.resolve(bmadDir, []);
|
||||
|
||||
await expect(resultPromise).resolves.toBeDefined();
|
||||
const result = await resultPromise;
|
||||
|
||||
// Should include all 3 files without duplicates
|
||||
expect(result.allFiles.length).toBeGreaterThanOrEqual(3);
|
||||
}, 5000);
|
||||
|
||||
it('should handle self-reference (A→A)', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent-a.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/agents/agent-a.md"]
|
||||
---
|
||||
<agent>A</agent>`,
|
||||
);
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
// Should include the file once, not infinite times
|
||||
expect(result.allFiles).toHaveLength(1);
|
||||
}, 5000);
|
||||
});
|
||||
|
||||
describe('command reference parsing', () => {
|
||||
describe('parseCommandReferences()', () => {
|
||||
it('should extract @task- references', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const content = 'Use @task-analyze for analysis\nThen @task-review';
|
||||
|
||||
const refs = resolver.parseCommandReferences(content);
|
||||
|
||||
expect(refs).toContain('@task-analyze');
|
||||
expect(refs).toContain('@task-review');
|
||||
});
|
||||
|
||||
it('should extract @agent- references', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const content = 'Call @agent-architect then @agent-developer';
|
||||
|
||||
const refs = resolver.parseCommandReferences(content);
|
||||
|
||||
expect(refs).toContain('@agent-architect');
|
||||
expect(refs).toContain('@agent-developer');
|
||||
});
|
||||
|
||||
it('should extract bmad/ path references', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const content = 'See bmad/core/agents/analyst and bmad/bmm/tasks/review';
|
||||
|
||||
const refs = resolver.parseCommandReferences(content);
|
||||
|
||||
expect(refs).toContain('bmad/core/agents/analyst');
|
||||
expect(refs).toContain('bmad/bmm/tasks/review');
|
||||
});
|
||||
|
||||
it('should extract @bmad- references', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const content = 'Use @bmad-master command';
|
||||
|
||||
const refs = resolver.parseCommandReferences(content);
|
||||
|
||||
expect(refs).toContain('@bmad-master');
|
||||
});
|
||||
|
||||
it('should handle multiple reference types in same content', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const content = `
|
||||
Use @task-analyze for analysis
|
||||
Then run @agent-architect
|
||||
Finally check bmad/core/tasks/review
|
||||
`;
|
||||
|
||||
const refs = resolver.parseCommandReferences(content);
|
||||
|
||||
expect(refs.length).toBeGreaterThanOrEqual(3);
|
||||
});
|
||||
});
|
||||
|
||||
describe('parseFileReferences()', () => {
|
||||
it('should extract exec attribute paths', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const content = '<command exec="{project-root}/bmad/core/tasks/foo.md" />';
|
||||
|
||||
const refs = resolver.parseFileReferences(content);
|
||||
|
||||
expect(refs).toContain('/bmad/core/tasks/foo.md');
|
||||
});
|
||||
|
||||
it('should extract tmpl attribute paths', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const content = '<command tmpl="../templates/bar.yaml" />';
|
||||
|
||||
const refs = resolver.parseFileReferences(content);
|
||||
|
||||
expect(refs).toContain('../templates/bar.yaml');
|
||||
});
|
||||
|
||||
it('should extract relative file paths', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const content = 'Load "./data/config.json" and "../templates/form.yaml"';
|
||||
|
||||
const refs = resolver.parseFileReferences(content);
|
||||
|
||||
expect(refs).toContain('./data/config.json');
|
||||
expect(refs).toContain('../templates/form.yaml');
|
||||
});
|
||||
|
||||
it('should skip exec="*" wildcards', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const content = '<command exec="*" description="Dynamic" />';
|
||||
|
||||
const refs = resolver.parseFileReferences(content);
|
||||
|
||||
// Should not include "*"
|
||||
expect(refs).not.toContain('*');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('module organization', () => {
|
||||
it('should organize files by module correctly', async () => {
|
||||
await createTestFile(bmadDir, 'core/agents/core-agent.md', '<agent>Core</agent>');
|
||||
await createTestFile(bmadDir, 'modules/bmm/agents/bmm-agent.md', '<agent>BMM</agent>');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, ['bmm']);
|
||||
|
||||
expect(result.byModule.core).toBeDefined();
|
||||
expect(result.byModule.bmm).toBeDefined();
|
||||
expect(result.byModule.core.agents).toHaveLength(1);
|
||||
expect(result.byModule.bmm.agents).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should categorize files by type', async () => {
|
||||
await createTestFile(bmadDir, 'core/agents/agent.md', '<agent>Agent</agent>');
|
||||
await createTestFile(bmadDir, 'core/tasks/task.md', 'Task');
|
||||
await createTestFile(bmadDir, 'core/templates/template.yaml', 'template');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const files = [
|
||||
path.join(bmadDir, 'core/agents/agent.md'),
|
||||
path.join(bmadDir, 'core/tasks/task.md'),
|
||||
path.join(bmadDir, 'core/templates/template.yaml'),
|
||||
];
|
||||
|
||||
const organized = resolver.organizeByModule(bmadDir, new Set(files));
|
||||
|
||||
expect(organized.core.agents).toHaveLength(1);
|
||||
expect(organized.core.tasks).toHaveLength(1);
|
||||
expect(organized.core.templates).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should treat brain-tech as data, not tasks', async () => {
|
||||
await createTestFile(bmadDir, 'core/tasks/brain-tech/data.csv', 'col1,col2\nval1,val2');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const files = [path.join(bmadDir, 'core/tasks/brain-tech/data.csv')];
|
||||
|
||||
const organized = resolver.organizeByModule(bmadDir, new Set(files));
|
||||
|
||||
expect(organized.core.data).toHaveLength(1);
|
||||
expect(organized.core.tasks).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getModuleFromPath()', () => {
|
||||
it('should extract module from src/core path', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const filePath = path.join(bmadDir, 'core/agents/agent.md');
|
||||
|
||||
const module = resolver.getModuleFromPath(bmadDir, filePath);
|
||||
|
||||
expect(module).toBe('core');
|
||||
});
|
||||
|
||||
it('should extract module from src/modules/bmm path', () => {
|
||||
const resolver = new DependencyResolver();
|
||||
const filePath = path.join(bmadDir, 'modules/bmm/agents/pm.md');
|
||||
|
||||
const module = resolver.getModuleFromPath(bmadDir, filePath);
|
||||
|
||||
expect(module).toBe('bmm');
|
||||
});
|
||||
|
||||
it('should handle installed directory structure', async () => {
|
||||
// Create installed structure (no src/ prefix)
|
||||
const installedDir = path.join(tmpDir, 'installed');
|
||||
await fs.ensureDir(path.join(installedDir, 'core/agents'));
|
||||
await fs.ensureDir(path.join(installedDir, 'modules/bmm/agents'));
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
|
||||
const coreFile = path.join(installedDir, 'core/agents/agent.md');
|
||||
const moduleFile = path.join(installedDir, 'modules/bmm/agents/pm.md');
|
||||
|
||||
expect(resolver.getModuleFromPath(installedDir, coreFile)).toBe('core');
|
||||
expect(resolver.getModuleFromPath(installedDir, moduleFile)).toBe('bmm');
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle malformed YAML frontmatter', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/bad-yaml.md',
|
||||
`---
|
||||
dependencies: [invalid: yaml: here
|
||||
---
|
||||
<agent>Agent</agent>`,
|
||||
);
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
|
||||
// Should not crash, just warn and continue
|
||||
await expect(resolver.resolve(bmadDir, [])).resolves.toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle backticks in YAML values', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/backticks.md',
|
||||
`---
|
||||
name: \`test\`
|
||||
dependencies: [\`{project-root}/bmad/core/tasks/task.md\`]
|
||||
---
|
||||
<agent>Agent</agent>`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/tasks/task.md', 'Task');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
// Backticks should be pre-processed
|
||||
expect(result.allFiles.length).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
it('should handle missing dependencies gracefully', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/tasks/missing.md"]
|
||||
---
|
||||
<agent>Agent</agent>`,
|
||||
);
|
||||
// Don't create missing.md
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect(result.primaryFiles.length).toBeGreaterThanOrEqual(1);
|
||||
// Implementation may or may not track missing dependencies
|
||||
// Just verify it doesn't crash
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle empty dependencies array', async () => {
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'core/agents/agent.md',
|
||||
`---
|
||||
dependencies: []
|
||||
---
|
||||
<agent>Agent</agent>`,
|
||||
);
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect(result.primaryFiles).toHaveLength(1);
|
||||
expect(result.allFiles).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should handle missing frontmatter', async () => {
|
||||
await createTestFile(bmadDir, 'core/agents/no-frontmatter.md', '<agent>Agent</agent>');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, []);
|
||||
|
||||
expect(result.primaryFiles).toHaveLength(1);
|
||||
expect(result.allFiles).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should handle non-existent module directory', async () => {
|
||||
// Create at least one core file so core module appears
|
||||
await createTestFile(bmadDir, 'core/agents/core-agent.md', '<agent>Core</agent>');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, ['nonexistent']);
|
||||
|
||||
// Should include core even though nonexistent module not found
|
||||
expect(result.byModule.core).toBeDefined();
|
||||
expect(result.byModule.nonexistent).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('cross-module dependencies', () => {
|
||||
it('should resolve dependencies across modules', async () => {
|
||||
await createTestFile(bmadDir, 'core/agents/core-agent.md', '<agent>Core</agent>');
|
||||
await createTestFile(
|
||||
bmadDir,
|
||||
'modules/bmm/agents/bmm-agent.md',
|
||||
`---
|
||||
dependencies: ["{project-root}/bmad/core/tasks/shared-task.md"]
|
||||
---
|
||||
<agent>BMM Agent</agent>`,
|
||||
);
|
||||
await createTestFile(bmadDir, 'core/tasks/shared-task.md', 'Shared task');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, ['bmm']);
|
||||
|
||||
// Should include: core agent + bmm agent + shared task
|
||||
expect(result.allFiles.length).toBeGreaterThanOrEqual(3);
|
||||
expect(result.byModule.core).toBeDefined();
|
||||
expect(result.byModule.bmm).toBeDefined();
|
||||
});
|
||||
|
||||
it('should resolve module tasks', async () => {
|
||||
await createTestFile(bmadDir, 'core/agents/core-agent.md', '<agent>Core</agent>');
|
||||
await createTestFile(bmadDir, 'modules/bmm/agents/pm.md', '<agent>PM</agent>');
|
||||
await createTestFile(bmadDir, 'modules/bmm/tasks/create-prd.md', 'Create PRD task');
|
||||
|
||||
const resolver = new DependencyResolver();
|
||||
const result = await resolver.resolve(bmadDir, ['bmm']);
|
||||
|
||||
expect(result.byModule.bmm.agents).toHaveLength(1);
|
||||
expect(result.byModule.bmm.tasks).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -0,0 +1,243 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { FileOps } from '../../../tools/cli/lib/file-ops.js';
|
||||
import { createTempDir, cleanupTempDir, createTestFile } from '../../helpers/temp-dir.js';
|
||||
import fs from 'fs-extra';
|
||||
import path from 'node:path';
|
||||
|
||||
describe('FileOps', () => {
|
||||
describe('copyDirectory()', () => {
|
||||
const fileOps = new FileOps();
|
||||
let tmpDir;
|
||||
let sourceDir;
|
||||
let destDir;
|
||||
|
||||
beforeEach(async () => {
|
||||
tmpDir = await createTempDir();
|
||||
sourceDir = path.join(tmpDir, 'source');
|
||||
destDir = path.join(tmpDir, 'dest');
|
||||
await fs.ensureDir(sourceDir);
|
||||
await fs.ensureDir(destDir);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await cleanupTempDir(tmpDir);
|
||||
});
|
||||
|
||||
describe('basic copying', () => {
|
||||
it('should copy a single file', async () => {
|
||||
await createTestFile(sourceDir, 'test.txt', 'content');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, destDir);
|
||||
|
||||
const destFile = path.join(destDir, 'test.txt');
|
||||
expect(await fs.pathExists(destFile)).toBe(true);
|
||||
expect(await fs.readFile(destFile, 'utf8')).toBe('content');
|
||||
});
|
||||
|
||||
it('should copy multiple files', async () => {
|
||||
await createTestFile(sourceDir, 'file1.txt', 'content1');
|
||||
await createTestFile(sourceDir, 'file2.md', 'content2');
|
||||
await createTestFile(sourceDir, 'file3.json', '{}');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, destDir);
|
||||
|
||||
expect(await fs.pathExists(path.join(destDir, 'file1.txt'))).toBe(true);
|
||||
expect(await fs.pathExists(path.join(destDir, 'file2.md'))).toBe(true);
|
||||
expect(await fs.pathExists(path.join(destDir, 'file3.json'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should copy nested directory structure', async () => {
|
||||
await createTestFile(sourceDir, 'root.txt', 'root');
|
||||
await createTestFile(sourceDir, 'level1/file.txt', 'level1');
|
||||
await createTestFile(sourceDir, 'level1/level2/deep.txt', 'deep');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, destDir);
|
||||
|
||||
expect(await fs.pathExists(path.join(destDir, 'root.txt'))).toBe(true);
|
||||
expect(await fs.pathExists(path.join(destDir, 'level1', 'file.txt'))).toBe(true);
|
||||
expect(await fs.pathExists(path.join(destDir, 'level1', 'level2', 'deep.txt'))).toBe(true);
|
||||
});
|
||||
|
||||
it('should create destination directory if it does not exist', async () => {
|
||||
const newDest = path.join(tmpDir, 'new-dest');
|
||||
await createTestFile(sourceDir, 'test.txt', 'content');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, newDest);
|
||||
|
||||
expect(await fs.pathExists(newDest)).toBe(true);
|
||||
expect(await fs.pathExists(path.join(newDest, 'test.txt'))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('overwrite behavior', () => {
|
||||
it('should overwrite existing files by default', async () => {
|
||||
await createTestFile(sourceDir, 'file.txt', 'new content');
|
||||
await createTestFile(destDir, 'file.txt', 'old content');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, destDir);
|
||||
|
||||
const content = await fs.readFile(path.join(destDir, 'file.txt'), 'utf8');
|
||||
expect(content).toBe('new content');
|
||||
});
|
||||
|
||||
it('should preserve file content when overwriting', async () => {
|
||||
await createTestFile(sourceDir, 'data.json', '{"new": true}');
|
||||
await createTestFile(destDir, 'data.json', '{"old": true}');
|
||||
await createTestFile(destDir, 'keep.txt', 'preserve this');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, destDir);
|
||||
|
||||
expect(await fs.readFile(path.join(destDir, 'data.json'), 'utf8')).toBe('{"new": true}');
|
||||
// Files not in source should be preserved
|
||||
expect(await fs.pathExists(path.join(destDir, 'keep.txt'))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('filtering with shouldIgnore', () => {
|
||||
it('should filter out .git directories', async () => {
|
||||
await createTestFile(sourceDir, 'file.txt', 'content');
|
||||
await createTestFile(sourceDir, '.git/config', 'git config');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, destDir);
|
||||
|
||||
expect(await fs.pathExists(path.join(destDir, 'file.txt'))).toBe(true);
|
||||
expect(await fs.pathExists(path.join(destDir, '.git'))).toBe(false);
|
||||
});
|
||||
|
||||
it('should filter out node_modules directories', async () => {
|
||||
await createTestFile(sourceDir, 'package.json', '{}');
|
||||
await createTestFile(sourceDir, 'node_modules/lib/code.js', 'code');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, destDir);
|
||||
|
||||
expect(await fs.pathExists(path.join(destDir, 'package.json'))).toBe(true);
|
||||
expect(await fs.pathExists(path.join(destDir, 'node_modules'))).toBe(false);
|
||||
});
|
||||
|
||||
it('should filter out *.swp and *.tmp files', async () => {
|
||||
await createTestFile(sourceDir, 'document.txt', 'content');
|
||||
await createTestFile(sourceDir, 'document.txt.swp', 'vim swap');
|
||||
await createTestFile(sourceDir, 'temp.tmp', 'temporary');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, destDir);
|
||||
|
||||
expect(await fs.pathExists(path.join(destDir, 'document.txt'))).toBe(true);
|
||||
expect(await fs.pathExists(path.join(destDir, 'document.txt.swp'))).toBe(false);
|
||||
expect(await fs.pathExists(path.join(destDir, 'temp.tmp'))).toBe(false);
|
||||
});
|
||||
|
||||
it('should filter out .DS_Store files', async () => {
|
||||
await createTestFile(sourceDir, 'file.txt', 'content');
|
||||
await createTestFile(sourceDir, '.DS_Store', 'mac metadata');
|
||||
|
||||
await fileOps.copyDirectory(sourceDir, destDir);
|
||||
|
||||
expect(await fs.pathExists(path.join(destDir, 'file.txt'))).toBe(true);
|
||||
expect(await fs.pathExists(path.join(destDir, '.DS_Store'))).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge cases', () => {
  it('should handle empty source directory', async () => {
    await fileOps.copyDirectory(sourceDir, destDir);

    const entries = await fs.readdir(destDir);
    expect(entries).toHaveLength(0);
  });

  it('should handle Unicode filenames', async () => {
    await createTestFile(sourceDir, '测试.txt', 'chinese');
    await createTestFile(sourceDir, 'файл.json', 'russian');

    await fileOps.copyDirectory(sourceDir, destDir);

    expect(await fs.pathExists(path.join(destDir, '测试.txt'))).toBe(true);
    expect(await fs.pathExists(path.join(destDir, 'файл.json'))).toBe(true);
  });

  it('should handle filenames with special characters', async () => {
    await createTestFile(sourceDir, 'file with spaces.txt', 'content');
    await createTestFile(sourceDir, 'special-chars!@#.md', 'content');

    await fileOps.copyDirectory(sourceDir, destDir);

    expect(await fs.pathExists(path.join(destDir, 'file with spaces.txt'))).toBe(true);
    expect(await fs.pathExists(path.join(destDir, 'special-chars!@#.md'))).toBe(true);
  });

  it('should handle very deep directory nesting', async () => {
    const nested = Array.from({ length: 10 }, (_, i) => `level${i}`).join('/');
    await createTestFile(sourceDir, `${nested}/deep.txt`, 'very deep');

    await fileOps.copyDirectory(sourceDir, destDir);

    expect(await fs.pathExists(path.join(destDir, ...nested.split('/'), 'deep.txt'))).toBe(true);
  });

  it('should preserve file permissions', async () => {
    const scriptPath = path.join(sourceDir, 'script.sh');
    await fs.writeFile(scriptPath, '#!/bin/bash\necho "test"');
    await fs.chmod(scriptPath, 0o755); // Make executable

    await fileOps.copyDirectory(sourceDir, destDir);

    const copiedPath = path.join(destDir, 'script.sh');
    const stats = await fs.stat(copiedPath);
    // Check if file is executable (user execute bit)
    expect((stats.mode & 0o100) !== 0).toBe(true);
  });

  it('should handle large number of files', async () => {
    // Create 50 files
    await Promise.all(
      Array.from({ length: 50 }, (_, i) => createTestFile(sourceDir, `file${i}.txt`, `content ${i}`)),
    );

    await fileOps.copyDirectory(sourceDir, destDir);

    const copied = await fs.readdir(destDir);
    expect(copied).toHaveLength(50);
  });
});
|
||||
|
||||
describe('content integrity', () => {
  it('should preserve file content exactly', async () => {
    const original = 'Line 1\nLine 2\nLine 3\n';
    await createTestFile(sourceDir, 'file.txt', original);

    await fileOps.copyDirectory(sourceDir, destDir);

    const copied = await fs.readFile(path.join(destDir, 'file.txt'), 'utf8');
    expect(copied).toBe(original);
  });

  it('should preserve binary file content', async () => {
    // PNG magic-byte sequence as representative binary data
    const original = Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]);
    await fs.writeFile(path.join(sourceDir, 'binary.dat'), original);

    await fileOps.copyDirectory(sourceDir, destDir);

    const copied = await fs.readFile(path.join(destDir, 'binary.dat'));
    expect(copied).toEqual(original);
  });

  it('should preserve UTF-8 content', async () => {
    const original = 'Hello 世界 🌍';
    await createTestFile(sourceDir, 'utf8.txt', original);

    await fileOps.copyDirectory(sourceDir, destDir);

    const copied = await fs.readFile(path.join(destDir, 'utf8.txt'), 'utf8');
    expect(copied).toBe(original);
  });

  it('should preserve empty files', async () => {
    await createTestFile(sourceDir, 'empty.txt', '');

    await fileOps.copyDirectory(sourceDir, destDir);

    const copied = await fs.readFile(path.join(destDir, 'empty.txt'), 'utf8');
    expect(copied).toBe('');
  });
});
|
||||
});
|
||||
});
|
||||
|
|
@ -0,0 +1,211 @@
|
|||
// Unit tests for FileOps.getFileHash() — SHA256 hashing across content
// types and file sizes, including streaming behavior for large files.
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { FileOps } from '../../../tools/cli/lib/file-ops.js';
import { createTempDir, cleanupTempDir, createTestFile } from '../../helpers/temp-dir.js';

describe('FileOps', () => {
  describe('getFileHash()', () => {
    const ops = new FileOps();
    let workDir;

    beforeEach(async () => {
      workDir = await createTempDir();
    });

    afterEach(async () => {
      await cleanupTempDir(workDir);
    });

    describe('basic hashing', () => {
      it('should return SHA256 hash for a simple file', async () => {
        const target = await createTestFile(workDir, 'test.txt', 'hello');
        const digest = await ops.getFileHash(target);

        // SHA256 of 'hello' is known
        expect(digest).toBe('2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824');
        expect(digest).toHaveLength(64); // SHA256 is 64 hex characters
      });

      it('should return consistent hash for same content', async () => {
        const content = 'test content for hashing';
        const first = await createTestFile(workDir, 'file1.txt', content);
        const second = await createTestFile(workDir, 'file2.txt', content);

        const digestA = await ops.getFileHash(first);
        const digestB = await ops.getFileHash(second);

        expect(digestA).toBe(digestB);
      });

      it('should return different hash for different content', async () => {
        const first = await createTestFile(workDir, 'file1.txt', 'content A');
        const second = await createTestFile(workDir, 'file2.txt', 'content B');

        const digestA = await ops.getFileHash(first);
        const digestB = await ops.getFileHash(second);

        expect(digestA).not.toBe(digestB);
      });
    });

    describe('file size handling', () => {
      it('should handle empty file', async () => {
        const target = await createTestFile(workDir, 'empty.txt', '');
        const digest = await ops.getFileHash(target);

        // SHA256 of empty string
        expect(digest).toBe('e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855');
      });

      it('should handle small file (<4KB)', async () => {
        const target = await createTestFile(workDir, 'small.txt', 'a'.repeat(1000)); // 1KB
        const digest = await ops.getFileHash(target);

        expect(digest).toHaveLength(64);
        expect(digest).toMatch(/^[a-f0-9]{64}$/);
      });

      it('should handle medium file (~1MB)', async () => {
        const target = await createTestFile(workDir, 'medium.txt', 'x'.repeat(1024 * 1024)); // 1MB
        const digest = await ops.getFileHash(target);

        expect(digest).toHaveLength(64);
        expect(digest).toMatch(/^[a-f0-9]{64}$/);
      });

      it('should handle large file (~10MB) via streaming', async () => {
        // Build a 10MB file out of 1MB chunks
        const chunkSize = 1024 * 1024;
        const payload = Array.from({ length: 10 }, () => 'y'.repeat(chunkSize)).join('');

        const target = await createTestFile(workDir, 'large.txt', payload);
        const digest = await ops.getFileHash(target);

        expect(digest).toHaveLength(64);
        expect(digest).toMatch(/^[a-f0-9]{64}$/);
      }, 15_000); // 15 second timeout for large file
    });

    describe('content type handling', () => {
      it('should handle binary content', async () => {
        // PNG magic bytes as representative binary data
        const buffer = Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]);
        const target = await createTestFile(workDir, 'binary.dat', buffer.toString('binary'));
        const digest = await ops.getFileHash(target);

        expect(digest).toHaveLength(64);
        expect(digest).toMatch(/^[a-f0-9]{64}$/);
      });

      it('should handle UTF-8 content correctly', async () => {
        const target = await createTestFile(workDir, 'utf8.txt', 'Hello 世界 🌍');
        const digest = await ops.getFileHash(target);

        // Hash should be consistent for UTF-8 content
        const repeat = await ops.getFileHash(target);
        expect(digest).toBe(repeat);
        expect(digest).toHaveLength(64);
      });

      it('should handle newline characters', async () => {
        const fileLF = await createTestFile(workDir, 'lf.txt', 'line1\nline2\nline3');
        const fileCRLF = await createTestFile(workDir, 'crlf.txt', 'line1\r\nline2\r\nline3');

        const digestLF = await ops.getFileHash(fileLF);
        const digestCRLF = await ops.getFileHash(fileCRLF);

        // Different line endings should produce different hashes
        expect(digestLF).not.toBe(digestCRLF);
      });

      it('should handle JSON content', async () => {
        const json = JSON.stringify({ key: 'value', nested: { array: [1, 2, 3] } }, null, 2);
        const target = await createTestFile(workDir, 'data.json', json);
        const digest = await ops.getFileHash(target);

        expect(digest).toHaveLength(64);
      });
    });

    describe('edge cases', () => {
      it('should handle file with special characters in name', async () => {
        const target = await createTestFile(workDir, 'file with spaces & special-chars.txt', 'content');
        const digest = await ops.getFileHash(target);

        expect(digest).toHaveLength(64);
      });

      it('should handle concurrent hash calculations', async () => {
        const files = await Promise.all([
          createTestFile(workDir, 'file1.txt', 'content 1'),
          createTestFile(workDir, 'file2.txt', 'content 2'),
          createTestFile(workDir, 'file3.txt', 'content 3'),
        ]);

        // Calculate hashes concurrently
        const digests = await Promise.all(files.map((file) => ops.getFileHash(file)));

        // All hashes should be valid
        expect(digests).toHaveLength(3);
        for (const digest of digests) {
          expect(digest).toMatch(/^[a-f0-9]{64}$/);
        }

        // Hashes should be different
        expect(digests[0]).not.toBe(digests[1]);
        expect(digests[1]).not.toBe(digests[2]);
        expect(digests[0]).not.toBe(digests[2]);
      });

      it('should handle file with only whitespace', async () => {
        const target = await createTestFile(workDir, 'whitespace.txt', ' ');
        const digest = await ops.getFileHash(target);

        expect(digest).toHaveLength(64);
        // Should be different from empty file
        expect(digest).not.toBe('e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855');
      });

      it('should handle very long single line', async () => {
        const target = await createTestFile(workDir, 'longline.txt', 'x'.repeat(100_000)); // 100KB single line
        const digest = await ops.getFileHash(target);

        expect(digest).toHaveLength(64);
      });
    });

    describe('error handling', () => {
      it('should reject for non-existent file', async () => {
        const missing = `${workDir}/does-not-exist.txt`;

        await expect(ops.getFileHash(missing)).rejects.toThrow();
      });

      it('should reject for directory instead of file', async () => {
        await expect(ops.getFileHash(workDir)).rejects.toThrow();
      });
    });

    describe('streaming behavior', () => {
      it('should use streaming for efficiency (test implementation detail)', async () => {
        // This test verifies that the implementation uses streams by checking
        // that large files can be processed without loading entirely into memory
        const target = await createTestFile(workDir, 'stream.txt', 'z'.repeat(5 * 1024 * 1024)); // 5MB

        // If this completes without memory issues, streaming is working
        const digest = await ops.getFileHash(target);

        expect(digest).toHaveLength(64);
        expect(digest).toMatch(/^[a-f0-9]{64}$/);
      }, 10_000);
    });
  });
});
|
||||
|
|
@ -0,0 +1,283 @@
|
|||
// Unit tests for FileOps.getFileList() — recursive directory walking with
// ignore-pattern filtering and base-relative path output.
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { FileOps } from '../../../tools/cli/lib/file-ops.js';
import { createTempDir, cleanupTempDir, createTestFile, createTestDirs } from '../../helpers/temp-dir.js';
import path from 'node:path';

describe('FileOps', () => {
  describe('getFileList()', () => {
    const ops = new FileOps();
    let workDir;

    beforeEach(async () => {
      workDir = await createTempDir();
    });

    afterEach(async () => {
      await cleanupTempDir(workDir);
    });

    describe('basic functionality', () => {
      it('should return empty array for empty directory', async () => {
        expect(await ops.getFileList(workDir)).toEqual([]);
      });

      it('should return single file in directory', async () => {
        await createTestFile(workDir, 'test.txt', 'content');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(1);
        expect(entries[0]).toBe('test.txt');
      });

      it('should return multiple files in directory', async () => {
        await createTestFile(workDir, 'file1.txt', 'content1');
        await createTestFile(workDir, 'file2.md', 'content2');
        await createTestFile(workDir, 'file3.json', 'content3');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(3);
        expect(entries).toContain('file1.txt');
        expect(entries).toContain('file2.md');
        expect(entries).toContain('file3.json');
      });
    });

    describe('recursive directory walking', () => {
      it('should recursively find files in nested directories', async () => {
        await createTestFile(workDir, 'root.txt', 'root');
        await createTestFile(workDir, 'level1/file1.txt', 'level1');
        await createTestFile(workDir, 'level1/level2/file2.txt', 'level2');
        await createTestFile(workDir, 'level1/level2/level3/file3.txt', 'level3');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(4);
        expect(entries).toContain('root.txt');
        expect(entries).toContain(path.join('level1', 'file1.txt'));
        expect(entries).toContain(path.join('level1', 'level2', 'file2.txt'));
        expect(entries).toContain(path.join('level1', 'level2', 'level3', 'file3.txt'));
      });

      it('should handle multiple subdirectories at same level', async () => {
        await createTestFile(workDir, 'dir1/file1.txt', 'content');
        await createTestFile(workDir, 'dir2/file2.txt', 'content');
        await createTestFile(workDir, 'dir3/file3.txt', 'content');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(3);
        expect(entries).toContain(path.join('dir1', 'file1.txt'));
        expect(entries).toContain(path.join('dir2', 'file2.txt'));
        expect(entries).toContain(path.join('dir3', 'file3.txt'));
      });

      it('should not include empty directories in results', async () => {
        await createTestDirs(workDir, ['empty1', 'empty2', 'has-file']);
        await createTestFile(workDir, 'has-file/file.txt', 'content');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(1);
        expect(entries[0]).toBe(path.join('has-file', 'file.txt'));
      });
    });

    describe('ignore filtering', () => {
      it('should ignore .git directories', async () => {
        await createTestFile(workDir, 'normal.txt', 'content');
        await createTestFile(workDir, '.git/config', 'git config');
        await createTestFile(workDir, '.git/hooks/pre-commit', 'hook');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(1);
        expect(entries[0]).toBe('normal.txt');
      });

      it('should ignore node_modules directories', async () => {
        await createTestFile(workDir, 'package.json', '{}');
        await createTestFile(workDir, 'node_modules/package/index.js', 'code');
        await createTestFile(workDir, 'node_modules/package/lib/util.js', 'util');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(1);
        expect(entries[0]).toBe('package.json');
      });

      it('should ignore .DS_Store files', async () => {
        await createTestFile(workDir, 'file.txt', 'content');
        await createTestFile(workDir, '.DS_Store', 'mac metadata');
        await createTestFile(workDir, 'subdir/.DS_Store', 'mac metadata');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(1);
        expect(entries[0]).toBe('file.txt');
      });

      it('should ignore *.swp and *.tmp files', async () => {
        await createTestFile(workDir, 'document.txt', 'content');
        await createTestFile(workDir, 'document.txt.swp', 'vim swap');
        await createTestFile(workDir, 'temp.tmp', 'temporary');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(1);
        expect(entries[0]).toBe('document.txt');
      });

      it('should ignore multiple ignored patterns together', async () => {
        await createTestFile(workDir, 'src/index.js', 'source code');
        await createTestFile(workDir, 'node_modules/lib/code.js', 'dependency');
        await createTestFile(workDir, '.git/config', 'git config');
        await createTestFile(workDir, '.DS_Store', 'mac file');
        await createTestFile(workDir, 'file.swp', 'swap file');
        await createTestFile(workDir, '.idea/workspace.xml', 'ide');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(1);
        expect(entries[0]).toBe(path.join('src', 'index.js'));
      });
    });

    describe('relative path handling', () => {
      it('should return paths relative to base directory', async () => {
        await createTestFile(workDir, 'a/b/c/deep.txt', 'deep');

        const entries = await ops.getFileList(workDir);

        expect(entries[0]).toBe(path.join('a', 'b', 'c', 'deep.txt'));
        expect(path.isAbsolute(entries[0])).toBe(false);
      });

      it('should handle subdirectory as base', async () => {
        await createTestFile(workDir, 'root.txt', 'root');
        await createTestFile(workDir, 'sub/file1.txt', 'sub1');
        await createTestFile(workDir, 'sub/file2.txt', 'sub2');

        const entries = await ops.getFileList(path.join(workDir, 'sub'));

        expect(entries).toHaveLength(2);
        expect(entries).toContain('file1.txt');
        expect(entries).toContain('file2.txt');
        // Should not include root.txt
        expect(entries).not.toContain('root.txt');
      });
    });

    describe('edge cases', () => {
      it('should handle directory with special characters', async () => {
        await createTestFile(workDir, 'folder with spaces/file.txt', 'content');
        await createTestFile(workDir, 'special-chars!@#/data.json', 'data');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(2);
        expect(entries).toContain(path.join('folder with spaces', 'file.txt'));
        expect(entries).toContain(path.join('special-chars!@#', 'data.json'));
      });

      it('should handle Unicode filenames', async () => {
        await createTestFile(workDir, '文档/测试.txt', 'chinese');
        await createTestFile(workDir, 'файл/данные.json', 'russian');
        await createTestFile(workDir, 'ファイル/データ.yaml', 'japanese');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(3);
        expect(entries.some((f) => f.includes('测试.txt'))).toBe(true);
        expect(entries.some((f) => f.includes('данные.json'))).toBe(true);
        expect(entries.some((f) => f.includes('データ.yaml'))).toBe(true);
      });

      it('should return empty array for non-existent directory', async () => {
        const entries = await ops.getFileList(path.join(workDir, 'does-not-exist'));

        expect(entries).toEqual([]);
      });

      it('should handle very deep directory nesting', async () => {
        // Create a deeply nested structure (10 levels)
        const nested = Array.from({ length: 10 }, (_, i) => `level${i}`).join('/');
        await createTestFile(workDir, `${nested}/deep.txt`, 'very deep');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(1);
        expect(entries[0]).toBe(path.join(...nested.split('/'), 'deep.txt'));
      });

      it('should handle directory with many files', async () => {
        // Create 100 files
        await Promise.all(
          Array.from({ length: 100 }, (_, i) => createTestFile(workDir, `file${i}.txt`, `content ${i}`)),
        );

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(100);
        expect(entries.every((f) => f.startsWith('file') && f.endsWith('.txt'))).toBe(true);
      });

      it('should handle mixed ignored and non-ignored files', async () => {
        await createTestFile(workDir, 'src/main.js', 'code');
        await createTestFile(workDir, 'src/main.js.swp', 'swap');
        await createTestFile(workDir, 'lib/utils.js', 'utils');
        await createTestFile(workDir, 'node_modules/dep/index.js', 'dep');
        await createTestFile(workDir, 'test/test.js', 'test');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(3);
        expect(entries).toContain(path.join('src', 'main.js'));
        expect(entries).toContain(path.join('lib', 'utils.js'));
        expect(entries).toContain(path.join('test', 'test.js'));
      });
    });

    describe('file types', () => {
      it('should include files with no extension', async () => {
        await createTestFile(workDir, 'README', 'readme content');
        await createTestFile(workDir, 'LICENSE', 'license text');
        await createTestFile(workDir, 'Makefile', 'make commands');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(3);
        expect(entries).toContain('README');
        expect(entries).toContain('LICENSE');
        expect(entries).toContain('Makefile');
      });

      it('should include dotfiles (except ignored ones)', async () => {
        await createTestFile(workDir, '.gitignore', 'ignore patterns');
        await createTestFile(workDir, '.env', 'environment');
        await createTestFile(workDir, '.eslintrc', 'eslint config');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(3);
        expect(entries).toContain('.gitignore');
        expect(entries).toContain('.env');
        expect(entries).toContain('.eslintrc');
      });

      it('should include files with multiple extensions', async () => {
        await createTestFile(workDir, 'archive.tar.gz', 'archive');
        await createTestFile(workDir, 'backup.sql.bak', 'backup');
        await createTestFile(workDir, 'config.yaml.sample', 'sample config');

        const entries = await ops.getFileList(workDir);

        expect(entries).toHaveLength(3);
      });
    });
  });
});
|
||||
|
|
@ -0,0 +1,177 @@
|
|||
// Unit tests for FileOps.shouldIgnore() — basename-based ignore matching for
// exact names (.git, node_modules, …) and glob patterns (*.swp, *.tmp, *.pyc).
import { describe, it, expect } from 'vitest';
import { FileOps } from '../../../tools/cli/lib/file-ops.js';

describe('FileOps', () => {
  describe('shouldIgnore()', () => {
    const ops = new FileOps();

    describe('exact matches', () => {
      it('should ignore .git directory', () => {
        expect(ops.shouldIgnore('.git')).toBe(true);
        expect(ops.shouldIgnore('/path/to/.git')).toBe(true);
        // Note: basename of '/project/.git/hooks' is 'hooks', not '.git'
        expect(ops.shouldIgnore('/project/.git/hooks')).toBe(false);
      });

      it('should ignore .DS_Store files', () => {
        expect(ops.shouldIgnore('.DS_Store')).toBe(true);
        expect(ops.shouldIgnore('/path/to/.DS_Store')).toBe(true);
      });

      it('should ignore node_modules directory', () => {
        expect(ops.shouldIgnore('node_modules')).toBe(true);
        expect(ops.shouldIgnore('/path/to/node_modules')).toBe(true);
        // Note: basename of '/project/node_modules/package' is 'package', not 'node_modules'
        expect(ops.shouldIgnore('/project/node_modules/package')).toBe(false);
      });

      it('should ignore .idea directory', () => {
        expect(ops.shouldIgnore('.idea')).toBe(true);
        expect(ops.shouldIgnore('/path/to/.idea')).toBe(true);
      });

      it('should ignore .vscode directory', () => {
        expect(ops.shouldIgnore('.vscode')).toBe(true);
        expect(ops.shouldIgnore('/path/to/.vscode')).toBe(true);
      });

      it('should ignore __pycache__ directory', () => {
        expect(ops.shouldIgnore('__pycache__')).toBe(true);
        expect(ops.shouldIgnore('/path/to/__pycache__')).toBe(true);
      });
    });

    describe('glob pattern matches', () => {
      it('should ignore *.swp files (Vim swap files)', () => {
        expect(ops.shouldIgnore('file.swp')).toBe(true);
        expect(ops.shouldIgnore('.config.yaml.swp')).toBe(true);
        expect(ops.shouldIgnore('/path/to/document.txt.swp')).toBe(true);
      });

      it('should ignore *.tmp files (temporary files)', () => {
        expect(ops.shouldIgnore('file.tmp')).toBe(true);
        expect(ops.shouldIgnore('temp_data.tmp')).toBe(true);
        expect(ops.shouldIgnore('/path/to/cache.tmp')).toBe(true);
      });

      it('should ignore *.pyc files (Python compiled)', () => {
        expect(ops.shouldIgnore('module.pyc')).toBe(true);
        expect(ops.shouldIgnore('__init__.pyc')).toBe(true);
        expect(ops.shouldIgnore('/path/to/script.pyc')).toBe(true);
      });
    });

    describe('files that should NOT be ignored', () => {
      it('should not ignore normal files', () => {
        expect(ops.shouldIgnore('README.md')).toBe(false);
        expect(ops.shouldIgnore('package.json')).toBe(false);
        expect(ops.shouldIgnore('index.js')).toBe(false);
      });

      it('should not ignore .gitignore itself', () => {
        expect(ops.shouldIgnore('.gitignore')).toBe(false);
        expect(ops.shouldIgnore('/path/to/.gitignore')).toBe(false);
      });

      it('should not ignore files with similar but different names', () => {
        expect(ops.shouldIgnore('git-file.txt')).toBe(false);
        expect(ops.shouldIgnore('node_modules.backup')).toBe(false);
        expect(ops.shouldIgnore('swap-file.txt')).toBe(false);
      });

      it('should not ignore files with ignored patterns in parent directory', () => {
        // The pattern matches basename, not full path
        expect(ops.shouldIgnore('/project/src/utils.js')).toBe(false);
        expect(ops.shouldIgnore('/code/main.py')).toBe(false);
      });

      it('should not ignore directories with dot prefix (except specific ones)', () => {
        expect(ops.shouldIgnore('.github')).toBe(false);
        expect(ops.shouldIgnore('.husky')).toBe(false);
        expect(ops.shouldIgnore('.npmrc')).toBe(false);
      });
    });

    describe('edge cases', () => {
      it('should handle empty string', () => {
        expect(ops.shouldIgnore('')).toBe(false);
      });

      it('should handle paths with multiple segments', () => {
        // basename of '/very/deep/path/to/node_modules/package' is 'package'
        expect(ops.shouldIgnore('/very/deep/path/to/node_modules/package')).toBe(false);
        expect(ops.shouldIgnore('/very/deep/path/to/file.swp')).toBe(true);
        expect(ops.shouldIgnore('/very/deep/path/to/normal.js')).toBe(false);
        // But the directory itself would be ignored
        expect(ops.shouldIgnore('/very/deep/path/to/node_modules')).toBe(true);
      });

      it('should handle Windows-style paths', () => {
        // Note: path.basename() on Unix doesn't recognize backslashes
        // On Unix: basename('C:\\project\\file.tmp') = 'C:\\project\\file.tmp'
        // So we test cross-platform path handling
        expect(ops.shouldIgnore(String.raw`C:\project\file.tmp`)).toBe(true); // .tmp matches
        expect(ops.shouldIgnore(String.raw`test\file.swp`)).toBe(true); // .swp matches
        // These won't be ignored because they don't match the patterns on Unix
        expect(ops.shouldIgnore(String.raw`C:\project\node_modules\pkg`)).toBe(false);
        expect(ops.shouldIgnore(String.raw`C:\project\src\main.js`)).toBe(false);
      });

      it('should handle relative paths', () => {
        // basename of './node_modules/package' is 'package'
        expect(ops.shouldIgnore('./node_modules/package')).toBe(false);
        // basename of '../.git/hooks' is 'hooks'
        expect(ops.shouldIgnore('../.git/hooks')).toBe(false);
        expect(ops.shouldIgnore('./src/index.js')).toBe(false);
        // But the directories themselves would be ignored
        expect(ops.shouldIgnore('./node_modules')).toBe(true);
        expect(ops.shouldIgnore('../.git')).toBe(true);
      });

      it('should handle files with multiple extensions', () => {
        expect(ops.shouldIgnore('file.tar.tmp')).toBe(true);
        expect(ops.shouldIgnore('backup.sql.swp')).toBe(true);
        expect(ops.shouldIgnore('data.json.gz')).toBe(false);
      });

      it('should be case-sensitive for exact matches', () => {
        expect(ops.shouldIgnore('Node_Modules')).toBe(false);
        expect(ops.shouldIgnore('NODE_MODULES')).toBe(false);
        expect(ops.shouldIgnore('node_modules')).toBe(true);
      });

      it('should handle files starting with ignored patterns', () => {
        expect(ops.shouldIgnore('.git-credentials')).toBe(false);
        expect(ops.shouldIgnore('.gitattributes')).toBe(false);
        expect(ops.shouldIgnore('.git')).toBe(true);
      });

      it('should handle Unicode filenames', () => {
        expect(ops.shouldIgnore('文档.swp')).toBe(true);
        expect(ops.shouldIgnore('файл.tmp')).toBe(true);
        expect(ops.shouldIgnore('ドキュメント.txt')).toBe(false);
      });
    });

    describe('pattern matching behavior', () => {
      it('should match patterns based on basename only', () => {
        // shouldIgnore uses path.basename(), so only the last segment matters
        expect(ops.shouldIgnore('/home/user/.git/config')).toBe(false); // basename is 'config'
        expect(ops.shouldIgnore('/home/user/project/node_modules')).toBe(true); // basename is 'node_modules'
      });

      it('should handle trailing slashes', () => {
        // path.basename() returns the directory name, not empty string for trailing slash
        expect(ops.shouldIgnore('node_modules/')).toBe(true);
        expect(ops.shouldIgnore('.git/')).toBe(true);
      });

      it('should treat patterns as partial regex matches', () => {
        // The *.swp pattern becomes /.*\.swp/ regex
        expect(ops.shouldIgnore('test.swp')).toBe(true);
        expect(ops.shouldIgnore('swp')).toBe(false); // doesn't match .*\.swp
        expect(ops.shouldIgnore('.swp')).toBe(true); // matches .*\.swp (. before swp)
      });
    });
  });
});
|
||||
|
|
@ -0,0 +1,316 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { FileOps } from '../../../tools/cli/lib/file-ops.js';
|
||||
import { createTempDir, cleanupTempDir, createTestFile } from '../../helpers/temp-dir.js';
|
||||
import fs from 'fs-extra';
|
||||
import path from 'node:path';
|
||||
|
||||
// Integration-style unit tests for FileOps.syncDirectory():
// hash-based selective updates, new-file copying, orphan removal,
// and timestamp-based conflict resolution between source and dest.
//
// NOTE on fs.utimes(): when passed plain numbers, Node interprets them as
// SECONDS since the epoch. The previous version of this file passed
// Date.now()-derived MILLISECOND numbers at three call sites, producing
// timestamps ~50,000 years in the future (the "newer dest" tests only
// passed by accident, and a numeric "pastTime" would actually have been in
// the far future). All utimes calls now use Date objects, matching the
// hash-based tests that already did so.
describe('FileOps', () => {
  describe('syncDirectory()', () => {
    const fileOps = new FileOps();
    let tmpDir;
    let sourceDir;
    let destDir;

    // Fresh source/dest pair per test so cases never interfere.
    beforeEach(async () => {
      tmpDir = await createTempDir();
      sourceDir = path.join(tmpDir, 'source');
      destDir = path.join(tmpDir, 'dest');
      await fs.ensureDir(sourceDir);
      await fs.ensureDir(destDir);
    });

    afterEach(async () => {
      await cleanupTempDir(tmpDir);
    });

    describe('hash-based selective update', () => {
      it('should update file when hashes are identical (safe update)', async () => {
        const content = 'identical content';
        await createTestFile(sourceDir, 'file.txt', content);
        await createTestFile(destDir, 'file.txt', content);

        await fileOps.syncDirectory(sourceDir, destDir);

        // File should be updated (copied over) since hashes match
        const destContent = await fs.readFile(path.join(destDir, 'file.txt'), 'utf8');
        expect(destContent).toBe(content);
      });

      it('should preserve modified file when dest is newer', async () => {
        await createTestFile(sourceDir, 'file.txt', 'source content');
        await createTestFile(destDir, 'file.txt', 'modified by user');

        // Make dest file newer
        const destFile = path.join(destDir, 'file.txt');
        const futureTime = new Date(Date.now() + 10_000);
        await fs.utimes(destFile, futureTime, futureTime);

        await fileOps.syncDirectory(sourceDir, destDir);

        // User modification should be preserved
        const destContent = await fs.readFile(destFile, 'utf8');
        expect(destContent).toBe('modified by user');
      });

      it('should update file when source is newer than modified dest', async () => {
        // Create both files first
        await createTestFile(sourceDir, 'file.txt', 'new source content');
        await createTestFile(destDir, 'file.txt', 'old modified content');

        // Make dest older and source newer with explicit times
        const destFile = path.join(destDir, 'file.txt');
        const sourceFile = path.join(sourceDir, 'file.txt');

        const pastTime = new Date(Date.now() - 10_000);
        const futureTime = new Date(Date.now() + 10_000);

        await fs.utimes(destFile, pastTime, pastTime);
        await fs.utimes(sourceFile, futureTime, futureTime);

        await fileOps.syncDirectory(sourceDir, destDir);

        // Should update to source content since source is newer
        const destContent = await fs.readFile(destFile, 'utf8');
        expect(destContent).toBe('new source content');
      });
    });

    describe('new file handling', () => {
      it('should copy new files from source', async () => {
        await createTestFile(sourceDir, 'new-file.txt', 'new content');

        await fileOps.syncDirectory(sourceDir, destDir);

        expect(await fs.pathExists(path.join(destDir, 'new-file.txt'))).toBe(true);
        expect(await fs.readFile(path.join(destDir, 'new-file.txt'), 'utf8')).toBe('new content');
      });

      it('should copy multiple new files', async () => {
        await createTestFile(sourceDir, 'file1.txt', 'content1');
        await createTestFile(sourceDir, 'file2.md', 'content2');
        await createTestFile(sourceDir, 'file3.json', 'content3');

        await fileOps.syncDirectory(sourceDir, destDir);

        expect(await fs.pathExists(path.join(destDir, 'file1.txt'))).toBe(true);
        expect(await fs.pathExists(path.join(destDir, 'file2.md'))).toBe(true);
        expect(await fs.pathExists(path.join(destDir, 'file3.json'))).toBe(true);
      });

      it('should create nested directories for new files', async () => {
        await createTestFile(sourceDir, 'level1/level2/deep.txt', 'deep content');

        await fileOps.syncDirectory(sourceDir, destDir);

        expect(await fs.pathExists(path.join(destDir, 'level1', 'level2', 'deep.txt'))).toBe(true);
      });
    });

    describe('orphaned file removal', () => {
      it('should remove files that no longer exist in source', async () => {
        await createTestFile(sourceDir, 'keep.txt', 'keep this');
        await createTestFile(destDir, 'keep.txt', 'keep this');
        await createTestFile(destDir, 'remove.txt', 'delete this');

        await fileOps.syncDirectory(sourceDir, destDir);

        expect(await fs.pathExists(path.join(destDir, 'keep.txt'))).toBe(true);
        expect(await fs.pathExists(path.join(destDir, 'remove.txt'))).toBe(false);
      });

      it('should remove multiple orphaned files', async () => {
        await createTestFile(sourceDir, 'current.txt', 'current');
        await createTestFile(destDir, 'current.txt', 'current');
        await createTestFile(destDir, 'old1.txt', 'orphan 1');
        await createTestFile(destDir, 'old2.txt', 'orphan 2');
        await createTestFile(destDir, 'old3.txt', 'orphan 3');

        await fileOps.syncDirectory(sourceDir, destDir);

        expect(await fs.pathExists(path.join(destDir, 'current.txt'))).toBe(true);
        expect(await fs.pathExists(path.join(destDir, 'old1.txt'))).toBe(false);
        expect(await fs.pathExists(path.join(destDir, 'old2.txt'))).toBe(false);
        expect(await fs.pathExists(path.join(destDir, 'old3.txt'))).toBe(false);
      });

      it('should remove orphaned directories', async () => {
        await createTestFile(sourceDir, 'keep/file.txt', 'keep');
        await createTestFile(destDir, 'keep/file.txt', 'keep');
        await createTestFile(destDir, 'remove/orphan.txt', 'orphan');

        await fileOps.syncDirectory(sourceDir, destDir);

        expect(await fs.pathExists(path.join(destDir, 'keep'))).toBe(true);
        expect(await fs.pathExists(path.join(destDir, 'remove', 'orphan.txt'))).toBe(false);
      });
    });

    describe('complex scenarios', () => {
      it('should handle mixed operations in single sync', async () => {
        // FIX: was `const futureTime = now + 100_000` (a raw millisecond
        // number, which fs.utimes reads as seconds); the unused `pastTime`
        // sibling was removed as dead code.
        const futureTime = new Date(Date.now() + 100_000); // 100 seconds from now

        // Identical file (update)
        await createTestFile(sourceDir, 'identical.txt', 'same');
        await createTestFile(destDir, 'identical.txt', 'same');

        // Modified file with newer dest (preserve)
        await createTestFile(sourceDir, 'modified.txt', 'original');
        await createTestFile(destDir, 'modified.txt', 'user modified');
        const modifiedFile = path.join(destDir, 'modified.txt');
        await fs.utimes(modifiedFile, futureTime, futureTime);

        // New file (copy)
        await createTestFile(sourceDir, 'new.txt', 'new content');

        // Orphaned file (remove)
        await createTestFile(destDir, 'orphan.txt', 'delete me');

        await fileOps.syncDirectory(sourceDir, destDir);

        // Verify operations
        expect(await fs.pathExists(path.join(destDir, 'identical.txt'))).toBe(true);

        expect(await fs.readFile(modifiedFile, 'utf8')).toBe('user modified');

        expect(await fs.pathExists(path.join(destDir, 'new.txt'))).toBe(true);

        expect(await fs.pathExists(path.join(destDir, 'orphan.txt'))).toBe(false);
      });

      it('should handle nested directory changes', async () => {
        // Create nested structure in source
        await createTestFile(sourceDir, 'level1/keep.txt', 'keep');
        await createTestFile(sourceDir, 'level1/level2/deep.txt', 'deep');

        // Create different nested structure in dest
        await createTestFile(destDir, 'level1/keep.txt', 'keep');
        await createTestFile(destDir, 'level1/remove.txt', 'orphan');
        await createTestFile(destDir, 'old-level/file.txt', 'old');

        await fileOps.syncDirectory(sourceDir, destDir);

        expect(await fs.pathExists(path.join(destDir, 'level1', 'keep.txt'))).toBe(true);
        expect(await fs.pathExists(path.join(destDir, 'level1', 'level2', 'deep.txt'))).toBe(true);
        expect(await fs.pathExists(path.join(destDir, 'level1', 'remove.txt'))).toBe(false);
        expect(await fs.pathExists(path.join(destDir, 'old-level', 'file.txt'))).toBe(false);
      });
    });

    describe('edge cases', () => {
      it('should handle empty source directory', async () => {
        await createTestFile(destDir, 'file.txt', 'content');

        await fileOps.syncDirectory(sourceDir, destDir);

        // All files should be removed
        expect(await fs.pathExists(path.join(destDir, 'file.txt'))).toBe(false);
      });

      it('should handle empty destination directory', async () => {
        await createTestFile(sourceDir, 'file.txt', 'content');

        await fileOps.syncDirectory(sourceDir, destDir);

        expect(await fs.pathExists(path.join(destDir, 'file.txt'))).toBe(true);
      });

      it('should handle Unicode filenames', async () => {
        await createTestFile(sourceDir, '测试.txt', 'chinese');
        await createTestFile(destDir, '测试.txt', 'modified chinese');

        // Make dest newer (FIX: Date object, not raw millisecond number)
        const futureTime = new Date(Date.now() + 10_000);
        await fs.utimes(path.join(destDir, '测试.txt'), futureTime, futureTime);

        await fileOps.syncDirectory(sourceDir, destDir);

        // Should preserve user modification
        expect(await fs.readFile(path.join(destDir, '测试.txt'), 'utf8')).toBe('modified chinese');
      });

      it('should handle large number of files', async () => {
        // Create 50 files in source
        for (let i = 0; i < 50; i++) {
          await createTestFile(sourceDir, `file${i}.txt`, `content ${i}`);
        }

        // Create 25 matching files and 25 orphaned files in dest
        for (let i = 0; i < 25; i++) {
          await createTestFile(destDir, `file${i}.txt`, `content ${i}`);
          await createTestFile(destDir, `orphan${i}.txt`, `orphan ${i}`);
        }

        await fileOps.syncDirectory(sourceDir, destDir);

        // All 50 source files should exist
        for (let i = 0; i < 50; i++) {
          expect(await fs.pathExists(path.join(destDir, `file${i}.txt`))).toBe(true);
        }

        // All 25 orphaned files should be removed
        for (let i = 0; i < 25; i++) {
          expect(await fs.pathExists(path.join(destDir, `orphan${i}.txt`))).toBe(false);
        }
      });

      it('should handle binary files correctly', async () => {
        const buffer = Buffer.from([0x89, 0x50, 0x4e, 0x47]);
        await fs.writeFile(path.join(sourceDir, 'binary.dat'), buffer);
        await fs.writeFile(path.join(destDir, 'binary.dat'), buffer);

        await fileOps.syncDirectory(sourceDir, destDir);

        const destBuffer = await fs.readFile(path.join(destDir, 'binary.dat'));
        expect(destBuffer).toEqual(buffer);
      });
    });

    describe('timestamp precision', () => {
      it('should handle files with very close modification times', async () => {
        await createTestFile(sourceDir, 'file.txt', 'source');
        await createTestFile(destDir, 'file.txt', 'dest modified');

        // Make dest just slightly newer (100ms)
        // (FIX: Date object, not raw millisecond number)
        const destFile = path.join(destDir, 'file.txt');
        const slightlyNewer = new Date(Date.now() + 100);
        await fs.utimes(destFile, slightlyNewer, slightlyNewer);

        await fileOps.syncDirectory(sourceDir, destDir);

        // Should preserve user modification even with small time difference
        expect(await fs.readFile(destFile, 'utf8')).toBe('dest modified');
      });
    });

    describe('data integrity', () => {
      it('should not corrupt files during sync', async () => {
        const content = 'Important data\nLine 2\nLine 3\n';
        await createTestFile(sourceDir, 'data.txt', content);

        await fileOps.syncDirectory(sourceDir, destDir);

        expect(await fs.readFile(path.join(destDir, 'data.txt'), 'utf8')).toBe(content);
      });

      it('should handle sync interruption gracefully', async () => {
        // This test verifies that partial syncs don't leave inconsistent state
        await createTestFile(sourceDir, 'file1.txt', 'content1');
        await createTestFile(sourceDir, 'file2.txt', 'content2');

        // First sync
        await fileOps.syncDirectory(sourceDir, destDir);

        // Modify source
        await createTestFile(sourceDir, 'file3.txt', 'content3');

        // Second sync
        await fileOps.syncDirectory(sourceDir, destDir);

        // All files should be present and correct
        expect(await fs.pathExists(path.join(destDir, 'file1.txt'))).toBe(true);
        expect(await fs.pathExists(path.join(destDir, 'file2.txt'))).toBe(true);
        expect(await fs.pathExists(path.join(destDir, 'file3.txt'))).toBe(true);
      });
    });
  });
});
|
||||
|
|
@ -0,0 +1,214 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { FileOps } from '../../../tools/cli/lib/file-ops.js';
|
||||
import { createTempDir, cleanupTempDir, createTestFile } from '../../helpers/temp-dir.js';
|
||||
import fs from 'fs-extra';
|
||||
import path from 'node:path';
|
||||
|
||||
// Unit tests for the thin FileOps wrappers around fs-extra:
// ensureDir / remove / readFile / writeFile / exists / stat.
describe('FileOps', () => {
  const fileOps = new FileOps();
  let workDir;

  // Each test gets a throwaway directory so cases never interfere.
  beforeEach(async () => {
    workDir = await createTempDir();
  });

  afterEach(async () => {
    await cleanupTempDir(workDir);
  });

  // Resolve a path inside the per-test sandbox.
  const inSandbox = (...segments) => path.join(workDir, ...segments);

  describe('ensureDir()', () => {
    it('should create directory if it does not exist', async () => {
      const target = inSandbox('new-directory');

      await fileOps.ensureDir(target);

      expect(await fs.pathExists(target)).toBe(true);
    });

    it('should not fail if directory already exists', async () => {
      const target = inSandbox('existing');
      await fs.ensureDir(target);

      // A second creation must be a no-op rather than an error.
      await expect(fileOps.ensureDir(target)).resolves.not.toThrow();
    });

    it('should create nested directories', async () => {
      const target = inSandbox('level1', 'level2', 'level3');

      await fileOps.ensureDir(target);

      expect(await fs.pathExists(target)).toBe(true);
    });
  });

  describe('remove()', () => {
    it('should remove a file', async () => {
      const target = await createTestFile(workDir, 'test.txt', 'content');

      await fileOps.remove(target);

      expect(await fs.pathExists(target)).toBe(false);
    });

    it('should remove a directory', async () => {
      const target = inSandbox('test-dir');
      await fs.ensureDir(target);
      await createTestFile(target, 'file.txt', 'content');

      await fileOps.remove(target);

      expect(await fs.pathExists(target)).toBe(false);
    });

    it('should not fail if path does not exist', async () => {
      // Removing a missing path is treated as success.
      await expect(fileOps.remove(inSandbox('does-not-exist'))).resolves.not.toThrow();
    });

    it('should remove nested directories', async () => {
      const leaf = inSandbox('a', 'b', 'c');
      await fs.ensureDir(leaf);
      await createTestFile(leaf, 'file.txt', 'content');

      // Removing the top of the tree must take the whole subtree with it.
      await fileOps.remove(inSandbox('a'));

      expect(await fs.pathExists(inSandbox('a'))).toBe(false);
    });
  });

  describe('readFile()', () => {
    it('should read file content', async () => {
      const expected = 'test content';
      const target = await createTestFile(workDir, 'test.txt', expected);

      expect(await fileOps.readFile(target)).toBe(expected);
    });

    it('should read UTF-8 content', async () => {
      const expected = 'Hello 世界 🌍';
      const target = await createTestFile(workDir, 'utf8.txt', expected);

      expect(await fileOps.readFile(target)).toBe(expected);
    });

    it('should read empty file', async () => {
      const target = await createTestFile(workDir, 'empty.txt', '');

      expect(await fileOps.readFile(target)).toBe('');
    });

    it('should reject for non-existent file', async () => {
      await expect(fileOps.readFile(inSandbox('does-not-exist.txt'))).rejects.toThrow();
    });
  });

  describe('writeFile()', () => {
    it('should write file content', async () => {
      const target = inSandbox('new-file.txt');

      await fileOps.writeFile(target, 'test content');

      expect(await fs.readFile(target, 'utf8')).toBe('test content');
    });

    it('should create parent directories if they do not exist', async () => {
      const target = inSandbox('level1', 'level2', 'file.txt');

      await fileOps.writeFile(target, 'content');

      expect(await fs.pathExists(target)).toBe(true);
      expect(await fs.readFile(target, 'utf8')).toBe('content');
    });

    it('should overwrite existing file', async () => {
      const target = await createTestFile(workDir, 'test.txt', 'old content');

      await fileOps.writeFile(target, 'new content');

      expect(await fs.readFile(target, 'utf8')).toBe('new content');
    });

    it('should handle UTF-8 content', async () => {
      const payload = '测试 Тест 🎉';
      const target = inSandbox('unicode.txt');

      await fileOps.writeFile(target, payload);

      expect(await fs.readFile(target, 'utf8')).toBe(payload);
    });
  });

  describe('exists()', () => {
    it('should return true for existing file', async () => {
      const target = await createTestFile(workDir, 'test.txt', 'content');

      expect(await fileOps.exists(target)).toBe(true);
    });

    it('should return true for existing directory', async () => {
      const target = inSandbox('test-dir');
      await fs.ensureDir(target);

      expect(await fileOps.exists(target)).toBe(true);
    });

    it('should return false for non-existent path', async () => {
      expect(await fileOps.exists(inSandbox('does-not-exist'))).toBe(false);
    });
  });

  describe('stat()', () => {
    it('should return stats for file', async () => {
      const target = await createTestFile(workDir, 'test.txt', 'content');

      const stats = await fileOps.stat(target);

      expect(stats.isFile()).toBe(true);
      expect(stats.isDirectory()).toBe(false);
      expect(stats.size).toBeGreaterThan(0);
    });

    it('should return stats for directory', async () => {
      const target = inSandbox('test-dir');
      await fs.ensureDir(target);

      const stats = await fileOps.stat(target);

      expect(stats.isDirectory()).toBe(true);
      expect(stats.isFile()).toBe(false);
    });

    it('should reject for non-existent path', async () => {
      await expect(fileOps.stat(inSandbox('does-not-exist'))).rejects.toThrow();
    });

    it('should return modification time', async () => {
      const target = await createTestFile(workDir, 'test.txt', 'content');

      const stats = await fileOps.stat(target);

      expect(stats.mtime).toBeInstanceOf(Date);
      expect(stats.mtime.getTime()).toBeLessThanOrEqual(Date.now());
    });
  });
});
|
||||
|
|
@ -0,0 +1,335 @@
|
|||
import { describe, it, expect, beforeEach } from 'vitest';
|
||||
import { YamlXmlBuilder } from '../../../tools/cli/lib/yaml-xml-builder.js';
|
||||
|
||||
// Tests for YamlXmlBuilder.buildCommandsXml(menuItems, isWebBundle):
// verifies menu bracketing (*menu / *dismiss injection), trigger prefixing,
// handler attribute emission, ide/web filtering, multi-format items, and
// edge-case tolerance. Expected XML fragments are asserted as substrings.
describe('YamlXmlBuilder - buildCommandsXml()', () => {
  let builder;

  // Fresh builder per test; buildCommandsXml is assumed stateless per call.
  beforeEach(() => {
    builder = new YamlXmlBuilder();
  });

  // The builder always wraps user items with a leading *menu item and a
  // trailing *dismiss item, regardless of input.
  describe('menu injection', () => {
    it('should always inject *menu item first', () => {
      const xml = builder.buildCommandsXml([]);

      expect(xml).toContain('<item cmd="*menu">[M] Redisplay Menu Options</item>');
    });

    it('should always inject *dismiss item last', () => {
      const xml = builder.buildCommandsXml([]);

      expect(xml).toContain('<item cmd="*dismiss">[D] Dismiss Agent</item>');
      // Should be at the end before </menu>
      expect(xml).toMatch(/\*dismiss.*<\/menu>/s);
    });

    it('should place user items between *menu and *dismiss', () => {
      const menuItems = [{ trigger: 'help', description: 'Show help', action: 'show_help' }];

      const xml = builder.buildCommandsXml(menuItems);

      // Ordering is checked via substring indexes: *menu < user item < *dismiss.
      const menuIndex = xml.indexOf('*menu');
      const helpIndex = xml.indexOf('*help');
      const dismissIndex = xml.indexOf('*dismiss');

      expect(menuIndex).toBeLessThan(helpIndex);
      expect(helpIndex).toBeLessThan(dismissIndex);
    });
  });

  // "Legacy" items: flat { trigger, description, <handler> } objects.
  describe('legacy format items', () => {
    it('should add * prefix to triggers', () => {
      const menuItems = [{ trigger: 'help', description: 'Help', action: 'show_help' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('cmd="*help"');
      expect(xml).not.toContain('cmd="help"'); // Should not have unprefixed version
    });

    it('should preserve * prefix if already present', () => {
      const menuItems = [{ trigger: '*custom', description: 'Custom', action: 'custom_action' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('cmd="*custom"');
      expect(xml).not.toContain('cmd="**custom"'); // Should not double-prefix
    });

    it('should include description as item content', () => {
      const menuItems = [{ trigger: 'analyze', description: '[A] Analyze code', action: 'analyze' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('>[A] Analyze code</item>');
    });

    it('should escape XML special characters in description', () => {
      const menuItems = [
        {
          trigger: 'test',
          description: 'Test <brackets> & "quotes"',
          action: 'test',
        },
      ];

      const xml = builder.buildCommandsXml(menuItems);

      // Asserts the entity-escaped form appears in the output.
      expect(xml).toContain('&lt;brackets&gt; &amp; &quot;quotes&quot;');
    });
  });

  // Each handler key on a menu item becomes an attribute on its <item>.
  describe('handler attributes', () => {
    it('should include workflow attribute', () => {
      const menuItems = [{ trigger: 'start', description: 'Start workflow', workflow: 'main-workflow' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('workflow="main-workflow"');
    });

    it('should include exec attribute', () => {
      const menuItems = [{ trigger: 'run', description: 'Run task', exec: 'path/to/task.md' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('exec="path/to/task.md"');
    });

    it('should include action attribute', () => {
      const menuItems = [{ trigger: 'help', description: 'Help', action: 'show_help' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('action="show_help"');
    });

    it('should include tmpl attribute', () => {
      const menuItems = [{ trigger: 'form', description: 'Form', tmpl: 'templates/form.yaml' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('tmpl="templates/form.yaml"');
    });

    it('should include data attribute', () => {
      const menuItems = [{ trigger: 'load', description: 'Load', data: 'data/config.json' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('data="data/config.json"');
    });

    it('should include validate-workflow attribute', () => {
      const menuItems = [
        {
          trigger: 'validate',
          description: 'Validate',
          'validate-workflow': 'validation-flow',
        },
      ];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('validate-workflow="validation-flow"');
    });

    it('should prioritize workflow-install over workflow', () => {
      // When both keys are present, the installed location wins.
      const menuItems = [
        {
          trigger: 'start',
          description: 'Start',
          workflow: 'original',
          'workflow-install': 'installed-location',
        },
      ];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('workflow="installed-location"');
      expect(xml).not.toContain('workflow="original"');
    });

    it('should handle multiple attributes on same item', () => {
      const menuItems = [
        {
          trigger: 'complex',
          description: 'Complex command',
          workflow: 'flow',
          data: 'data.json',
          action: 'custom',
        },
      ];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('workflow="flow"');
      expect(xml).toContain('data="data.json"');
      expect(xml).toContain('action="custom"');
    });
  });

  // Second argument to buildCommandsXml selects the target: false = IDE
  // install, true = web bundle. 'ide-only' / 'web-only' flags filter items.
  describe('IDE and web filtering', () => {
    it('should include ide-only items for IDE installation', () => {
      const menuItems = [
        { trigger: 'local', description: 'Local only', action: 'local', 'ide-only': true },
        { trigger: 'normal', description: 'Normal', action: 'normal' },
      ];

      const xml = builder.buildCommandsXml(menuItems, false);

      expect(xml).toContain('*local');
      expect(xml).toContain('*normal');
    });

    it('should skip ide-only items for web bundle', () => {
      const menuItems = [
        { trigger: 'local', description: 'Local only', action: 'local', 'ide-only': true },
        { trigger: 'normal', description: 'Normal', action: 'normal' },
      ];

      const xml = builder.buildCommandsXml(menuItems, true);

      expect(xml).not.toContain('*local');
      expect(xml).toContain('*normal');
    });

    it('should include web-only items for web bundle', () => {
      const menuItems = [
        { trigger: 'web', description: 'Web only', action: 'web', 'web-only': true },
        { trigger: 'normal', description: 'Normal', action: 'normal' },
      ];

      const xml = builder.buildCommandsXml(menuItems, true);

      expect(xml).toContain('*web');
      expect(xml).toContain('*normal');
    });

    it('should skip web-only items for IDE installation', () => {
      const menuItems = [
        { trigger: 'web', description: 'Web only', action: 'web', 'web-only': true },
        { trigger: 'normal', description: 'Normal', action: 'normal' },
      ];

      const xml = builder.buildCommandsXml(menuItems, false);

      expect(xml).not.toContain('*web');
      expect(xml).toContain('*normal');
    });
  });

  // "Multi" items: one display entry with several trigger aliases, each
  // carrying nested handler descriptors.
  describe('multi format with nested handlers', () => {
    it('should build multi format items with nested handlers', () => {
      const menuItems = [
        {
          multi: '[TS] Technical Specification',
          triggers: [
            {
              'tech-spec': [{ input: 'Create technical specification' }, { route: 'workflows/tech-spec.yaml' }],
            },
            {
              TS: [{ input: 'Create technical specification' }, { route: 'workflows/tech-spec.yaml' }],
            },
          ],
        },
      ];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('<item type="multi">');
      expect(xml).toContain('[TS] Technical Specification');
      expect(xml).toContain('<handler');
      expect(xml).toContain('match="Create technical specification"');
      expect(xml).toContain('</item>');
    });

    it('should escape XML in multi description', () => {
      const menuItems = [
        {
          multi: '[A] Analyze <code>',
          triggers: [
            {
              analyze: [{ input: 'Analyze', route: 'task.md' }],
            },
          ],
        },
      ];

      const xml = builder.buildCommandsXml(menuItems);

      // The raw '<code>' must appear entity-escaped.
      expect(xml).toContain('&lt;code&gt;');
    });
  });

  describe('edge cases', () => {
    it('should handle empty menu items array', () => {
      const xml = builder.buildCommandsXml([]);

      // Even with no user items, the wrapper and injected items exist.
      expect(xml).toContain('<menu>');
      expect(xml).toContain('</menu>');
      expect(xml).toContain('*menu');
      expect(xml).toContain('*dismiss');
    });

    it('should handle null menu items', () => {
      const xml = builder.buildCommandsXml(null);

      expect(xml).toContain('<menu>');
      expect(xml).toContain('*menu');
      expect(xml).toContain('*dismiss');
    });

    it('should handle undefined menu items', () => {
      const xml = builder.buildCommandsXml();

      expect(xml).toContain('<menu>');
    });

    it('should handle empty description', () => {
      const menuItems = [{ trigger: 'test', description: '', action: 'test' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('cmd="*test"');
      expect(xml).toContain('></item>'); // Empty content between tags
    });

    it('should handle missing trigger (edge case)', () => {
      const menuItems = [{ description: 'No trigger', action: 'test' }];

      const xml = builder.buildCommandsXml(menuItems);

      // Should handle gracefully - might skip or add * prefix to empty
      expect(xml).toContain('<menu>');
    });

    it('should handle Unicode in descriptions', () => {
      const menuItems = [{ trigger: 'test', description: '[测试] Test 日本語', action: 'test' }];

      const xml = builder.buildCommandsXml(menuItems);

      expect(xml).toContain('测试');
      expect(xml).toContain('日本語');
    });
  });

  describe('multiple menu items', () => {
    it('should process all menu items in order', () => {
      const menuItems = [
        { trigger: 'first', description: 'First', action: 'first' },
        { trigger: 'second', description: 'Second', action: 'second' },
        { trigger: 'third', description: 'Third', action: 'third' },
      ];

      const xml = builder.buildCommandsXml(menuItems);

      // Input order must be preserved in the emitted XML.
      const firstIndex = xml.indexOf('*first');
      const secondIndex = xml.indexOf('*second');
      const thirdIndex = xml.indexOf('*third');

      expect(firstIndex).toBeLessThan(secondIndex);
      expect(secondIndex).toBeLessThan(thirdIndex);
    });
  });
});
|
||||
|
|
@ -0,0 +1,605 @@
|
|||
import { describe, it, expect, beforeEach } from 'vitest';
|
||||
import { YamlXmlBuilder } from '../../../tools/cli/lib/yaml-xml-builder.js';
|
||||
|
||||
describe('YamlXmlBuilder - convertToXml()', () => {
|
||||
let builder;
|
||||
|
||||
beforeEach(() => {
|
||||
builder = new YamlXmlBuilder();
|
||||
});
|
||||
|
||||
  // Core happy-path checks: each top-level YAML section (metadata, persona,
  // memories, prompts, menu) must be rendered into the XML output.
  describe('basic XML generation', () => {
    it('should generate XML with agent tag and attributes', async () => {
      const agentYaml = {
        agent: {
          metadata: {
            id: 'test-agent',
            name: 'Test Agent',
            title: 'Test Agent Title',
            icon: '🔧',
          },
          persona: {
            role: 'Test Role',
            identity: 'Test Identity',
            communication_style: 'Professional',
            principles: ['Principle 1'],
          },
          menu: [{ trigger: 'help', description: 'Help', action: 'show_help' }],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      // Metadata fields become attributes on the root <agent> tag.
      expect(xml).toContain('<agent id="test-agent"');
      expect(xml).toContain('name="Test Agent"');
      expect(xml).toContain('title="Test Agent Title"');
      expect(xml).toContain('icon="🔧"');
      expect(xml).toContain('</agent>');
    });

    it('should include persona section', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Developer',
            identity: 'Helpful assistant',
            communication_style: 'Professional',
            principles: ['Clear', 'Concise'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      expect(xml).toContain('<persona>');
      expect(xml).toContain('<role>Developer</role>');
      expect(xml).toContain('<identity>Helpful assistant</identity>');
      expect(xml).toContain('<communication_style>Professional</communication_style>');
      // Principles array is joined with spaces into a single element.
      expect(xml).toContain('<principles>Clear Concise</principles>');
    });

    it('should include memories section if present', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          memories: ['Memory 1', 'Memory 2'],
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      // Each memory string becomes its own <memory> child element.
      expect(xml).toContain('<memories>');
      expect(xml).toContain('<memory>Memory 1</memory>');
      expect(xml).toContain('<memory>Memory 2</memory>');
    });

    it('should include prompts section if present', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          prompts: [{ id: 'p1', content: 'Prompt content' }],
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      // Prompt id is rendered as an attribute; content as element text.
      expect(xml).toContain('<prompts>');
      expect(xml).toContain('<prompt id="p1">');
      expect(xml).toContain('Prompt content');
    });

    it('should include menu section', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [
            { trigger: 'help', description: 'Show help', action: 'show_help' },
            { trigger: 'start', description: 'Start workflow', workflow: 'main' },
          ],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      expect(xml).toContain('<menu>');
      expect(xml).toContain('</menu>');
      // Menu always includes injected *menu item
      expect(xml).toContain('*menu');
    });
  });
|
||||
|
||||
describe('XML escaping', () => {
|
||||
it('should escape special characters in all fields', async () => {
|
||||
const agentYaml = {
|
||||
agent: {
|
||||
metadata: {
|
||||
id: 'test',
|
||||
name: 'Test',
|
||||
title: 'Test Agent',
|
||||
icon: '🔧',
|
||||
},
|
||||
persona: {
|
||||
role: 'Role with <brackets>',
|
||||
identity: 'Identity with & ampersand',
|
||||
communication_style: 'Style with "quotes"',
|
||||
principles: ["Principle with ' apostrophe"],
|
||||
},
|
||||
menu: [],
|
||||
},
|
||||
};
|
||||
|
||||
const xml = await builder.convertToXml(agentYaml, { skipActivation: true });
|
||||
|
||||
// Metadata in attributes might not be escaped - focus on content
|
||||
expect(xml).toContain('<brackets>');
|
||||
expect(xml).toContain('& ampersand');
|
||||
expect(xml).toContain('"quotes"');
|
||||
expect(xml).toContain('' apostrophe');
|
||||
});
|
||||
|
||||
it('should preserve Unicode characters', async () => {
|
||||
const agentYaml = {
|
||||
agent: {
|
||||
metadata: {
|
||||
id: 'unicode',
|
||||
name: '测试代理',
|
||||
title: 'Тестовый агент',
|
||||
icon: '🔧',
|
||||
},
|
||||
persona: {
|
||||
role: '開発者',
|
||||
identity: 'مساعد مفيد',
|
||||
communication_style: 'Profesional',
|
||||
principles: ['原则'],
|
||||
},
|
||||
menu: [],
|
||||
},
|
||||
};
|
||||
|
||||
const xml = await builder.convertToXml(agentYaml, { skipActivation: true });
|
||||
|
||||
expect(xml).toContain('测试代理');
|
||||
expect(xml).toContain('Тестовый агент');
|
||||
expect(xml).toContain('開発者');
|
||||
expect(xml).toContain('مساعد مفيد');
|
||||
expect(xml).toContain('原则');
|
||||
});
|
||||
});
|
||||
|
||||
  // The optional `module` build option: accepted for module agents, and no
  // module attribute is emitted for core agents.
  describe('module detection', () => {
    it('should handle module in buildMetadata', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, {
        module: 'bmm',
        skipActivation: true,
      });

      // Module is stored in metadata but may not be rendered as attribute
      expect(xml).toContain('<agent');
      expect(xml).toBeDefined();
    });

    it('should not include module attribute for core agents', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      // No module attribute for core
      expect(xml).not.toContain('module=');
    });
  });
|
||||
|
||||
  // Output differs by target: installation files get YAML frontmatter whose
  // name is derived from the source filename; web bundles get a comment
  // header and no frontmatter.
  describe('output format variations', () => {
    it('should generate installation format with YAML frontmatter', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test Agent', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, {
        sourceFile: 'test-agent.yaml',
        skipActivation: true,
      });

      // Installation format has YAML frontmatter
      expect(xml).toMatch(/^---\n/);
      expect(xml).toContain('name: "test agent"'); // Derived from filename
      expect(xml).toContain('description: "Test Agent"');
      expect(xml).toContain('---');
    });

    it('should generate web bundle format without frontmatter', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test Agent', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, {
        forWebBundle: true,
        skipActivation: true,
      });

      // Web bundle format has comment header
      expect(xml).toContain('<!-- Powered by BMAD-CORE™ -->');
      expect(xml).toContain('# Test Agent');
      expect(xml).not.toMatch(/^---\n/);
    });

    it('should derive name from filename (remove .agent suffix)', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'pm', name: 'PM', title: 'Product Manager', icon: '📋' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, {
        sourceFile: 'pm.agent.yaml',
        skipActivation: true,
      });

      // Should convert pm.agent.yaml → "pm"
      expect(xml).toContain('name: "pm"');
    });

    it('should convert hyphens to spaces in filename', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'cli', name: 'CLI', title: 'CLI Chief', icon: '⚙️' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, {
        sourceFile: 'cli-chief.yaml',
        skipActivation: true,
      });

      // Should convert cli-chief.yaml → "cli chief"
      expect(xml).toContain('name: "cli chief"');
    });
  });
|
||||
|
||||
  // The metadata.localskip flag marks web-only agents; it should surface as a
  // localskip="true" attribute and be omitted entirely otherwise.
  describe('localskip attribute', () => {
    it('should add localskip="true" when metadata has localskip', async () => {
      const agentYaml = {
        agent: {
          metadata: {
            id: 'web-only',
            name: 'Web Only',
            title: 'Web Only Agent',
            icon: '🌐',
            localskip: true,
          },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      expect(xml).toContain('localskip="true"');
    });

    it('should not add localskip when false or missing', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      // Absent flag must not produce any localskip attribute at all.
      expect(xml).not.toContain('localskip=');
    });
  });
|
||||
|
||||
  // Boundary behavior: empty/missing optional sections, code-fence wrapping,
  // and presence/absence of the activation instruction per output format.
  describe('edge cases', () => {
    it('should handle empty menu array', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      expect(xml).toContain('<menu>');
      expect(xml).toContain('</menu>');
      // Should still have injected *menu item
      expect(xml).toContain('*menu');
    });

    it('should handle missing memories', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      // Absent memories section produces no <memories> element.
      expect(xml).not.toContain('<memories>');
    });

    it('should handle missing prompts', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      // Absent prompts section produces no <prompts> element.
      expect(xml).not.toContain('<prompts>');
    });

    it('should wrap XML in markdown code fence', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      // Output is a markdown document containing a fenced ```xml block.
      expect(xml).toContain('```xml');
      expect(xml).toContain('```\n');
    });

    it('should include activation instruction for installation format', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, {
        sourceFile: 'test.yaml',
        skipActivation: true,
      });

      expect(xml).toContain('You must fully embody this agent');
      expect(xml).toContain('NEVER break character');
    });

    it('should not include activation instruction for web bundle', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [],
        },
      };

      const xml = await builder.convertToXml(agentYaml, {
        forWebBundle: true,
        skipActivation: true,
      });

      // Web bundles get the branded header instead of the embodiment text.
      expect(xml).not.toContain('You must fully embody');
      expect(xml).toContain('<!-- Powered by BMAD-CORE™ -->');
    });
  });
|
||||
|
||||
  // Backward compatibility: the old `commands` key is treated as `menu`, and
  // `menu` wins when both are present.
  describe('legacy commands field support', () => {
    it('should handle legacy "commands" field (renamed to menu)', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          commands: [{ trigger: 'help', description: 'Help', action: 'show_help' }],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      expect(xml).toContain('<menu>');
      // Should process commands as menu items
    });

    it('should prioritize menu over commands when both exist', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: {
            role: 'Role',
            identity: 'ID',
            communication_style: 'Style',
            principles: ['P'],
          },
          menu: [{ trigger: 'new', description: 'New', action: 'new_action' }],
          commands: [{ trigger: 'old', description: 'Old', action: 'old_action' }],
        },
      };

      const xml = await builder.convertToXml(agentYaml, { skipActivation: true });

      // Should use menu, not commands
      expect(xml).toContain('<menu>');
    });
  });
|
||||
|
||||
  // End-to-end smoke test: an agent using every optional section renders all
  // of them plus the fence, in a single conversion.
  describe('complete agent transformation', () => {
    it('should transform a complete agent with all fields', async () => {
      const agentYaml = {
        agent: {
          metadata: {
            id: 'full-agent',
            name: 'Full Agent',
            title: 'Complete Test Agent',
            icon: '🤖',
          },
          persona: {
            role: 'Full Stack Developer',
            identity: 'Experienced software engineer',
            communication_style: 'Clear and professional',
            principles: ['Quality', 'Performance', 'Maintainability'],
          },
          memories: ['Remember project context', 'Track user preferences'],
          prompts: [
            { id: 'init', content: 'Initialize the agent' },
            { id: 'task', content: 'Process the task' },
          ],
          critical_actions: ['Never delete data', 'Always backup'],
          menu: [
            { trigger: 'help', description: '[H] Show help', action: 'show_help' },
            { trigger: 'start', description: '[S] Start workflow', workflow: 'main' },
          ],
        },
      };

      const xml = await builder.convertToXml(agentYaml, {
        sourceFile: 'full-agent.yaml',
        module: 'bmm',
        skipActivation: true,
      });

      // Verify all sections are present
      expect(xml).toContain('```xml');
      expect(xml).toContain('<agent id="full-agent"');
      expect(xml).toContain('<persona>');
      expect(xml).toContain('<memories>');
      expect(xml).toContain('<prompts>');
      expect(xml).toContain('<menu>');
      expect(xml).toContain('</agent>');
      expect(xml).toContain('```');
      // Verify persona content
      expect(xml).toContain('Full Stack Developer');
      // Verify memories
      expect(xml).toContain('Remember project context');
      // Verify prompts
      expect(xml).toContain('Initialize the agent');
    });
  });
|
||||
});
|
||||
|
|
@ -0,0 +1,636 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { YamlXmlBuilder } from '../../../tools/cli/lib/yaml-xml-builder.js';
|
||||
import { createTempDir, cleanupTempDir, createTestFile } from '../../helpers/temp-dir.js';
|
||||
import fs from 'fs-extra';
|
||||
import path from 'node:path';
|
||||
import yaml from 'yaml';
|
||||
|
||||
describe('YamlXmlBuilder', () => {
|
||||
  // Per-test scratch directory (for YAML fixture files) and a fresh builder.
  let tmpDir;
  let builder;

  beforeEach(async () => {
    tmpDir = await createTempDir();
    builder = new YamlXmlBuilder();
  });

  // Clean up the scratch directory so tests leave no residue on disk.
  afterEach(async () => {
    await cleanupTempDir(tmpDir);
  });
|
||||
|
||||
  // deepMerge contract: recursive merge of plain objects, array values are
  // concatenated (not replaced), and neither input is mutated.
  describe('deepMerge()', () => {
    it('should merge shallow objects', () => {
      const target = { a: 1, b: 2 };
      const source = { b: 3, c: 4 };

      const result = builder.deepMerge(target, source);

      expect(result).toEqual({ a: 1, b: 3, c: 4 });
    });

    it('should merge nested objects', () => {
      const target = { level1: { a: 1, b: 2 } };
      const source = { level1: { b: 3, c: 4 } };

      const result = builder.deepMerge(target, source);

      expect(result).toEqual({ level1: { a: 1, b: 3, c: 4 } });
    });

    it('should merge deeply nested objects', () => {
      const target = { l1: { l2: { l3: { value: 'old' } } } };
      const source = { l1: { l2: { l3: { value: 'new', extra: 'data' } } } };

      const result = builder.deepMerge(target, source);

      expect(result).toEqual({ l1: { l2: { l3: { value: 'new', extra: 'data' } } } });
    });

    it('should append arrays instead of replacing', () => {
      const target = { items: [1, 2, 3] };
      const source = { items: [4, 5, 6] };

      const result = builder.deepMerge(target, source);

      expect(result.items).toEqual([1, 2, 3, 4, 5, 6]);
    });

    it('should handle arrays in nested objects', () => {
      const target = { config: { values: ['a', 'b'] } };
      const source = { config: { values: ['c', 'd'] } };

      const result = builder.deepMerge(target, source);

      expect(result.config.values).toEqual(['a', 'b', 'c', 'd']);
    });

    it('should replace arrays if target is not an array', () => {
      // Concatenation only applies when both sides are arrays.
      const target = { items: 'string' };
      const source = { items: ['a', 'b'] };

      const result = builder.deepMerge(target, source);

      expect(result.items).toEqual(['a', 'b']);
    });

    it('should handle null values', () => {
      // null in source overwrites; null in target is overwritten.
      const target = { a: null, b: 2 };
      const source = { a: 1, c: null };

      const result = builder.deepMerge(target, source);

      expect(result).toEqual({ a: 1, b: 2, c: null });
    });

    it('should preserve target values when source has no override', () => {
      const target = { a: 1, b: 2, c: 3 };
      const source = { d: 4 };

      const result = builder.deepMerge(target, source);

      expect(result).toEqual({ a: 1, b: 2, c: 3, d: 4 });
    });

    it('should not mutate original objects', () => {
      const target = { a: 1 };
      const source = { b: 2 };

      builder.deepMerge(target, source);

      expect(target).toEqual({ a: 1 }); // Unchanged
      expect(source).toEqual({ b: 2 }); // Unchanged
    });
  });
||||
|
||||
describe('isObject()', () => {
|
||||
it('should return true for plain objects', () => {
|
||||
expect(builder.isObject({})).toBe(true);
|
||||
expect(builder.isObject({ key: 'value' })).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for arrays', () => {
|
||||
expect(builder.isObject([])).toBe(false);
|
||||
expect(builder.isObject([1, 2, 3])).toBe(false);
|
||||
});
|
||||
|
||||
it('should return falsy for null', () => {
|
||||
expect(builder.isObject(null)).toBeFalsy();
|
||||
});
|
||||
|
||||
it('should return falsy for primitives', () => {
|
||||
expect(builder.isObject('string')).toBeFalsy();
|
||||
expect(builder.isObject(42)).toBeFalsy();
|
||||
expect(builder.isObject(true)).toBeFalsy();
|
||||
expect(builder.isObject()).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
  // loadAndMergeAgent: reads a base agent YAML (plus optional customize YAML)
  // from disk and merges them. Empty strings / nulls / empty arrays in the
  // customize file must NOT clobber base values; list sections append.
  describe('loadAndMergeAgent()', () => {
    it('should load agent YAML without customization', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test Agent', icon: '🔧' },
          persona: {
            role: 'Test Role',
            identity: 'Test Identity',
            communication_style: 'Professional',
            principles: ['Principle 1'],
          },
          menu: [],
        },
      };

      const agentPath = path.join(tmpDir, 'agent.yaml');
      await fs.writeFile(agentPath, yaml.stringify(agentYaml));

      const result = await builder.loadAndMergeAgent(agentPath);

      expect(result.agent.metadata.id).toBe('test');
      expect(result.agent.persona.role).toBe('Test Role');
    });

    it('should preserve base persona when customize has empty strings', async () => {
      const baseYaml = {
        agent: {
          metadata: { id: 'base', name: 'Base', title: 'Base', icon: '🔧' },
          persona: {
            role: 'Base Role',
            identity: 'Base Identity',
            communication_style: 'Base Style',
            principles: ['Base Principle'],
          },
          menu: [],
        },
      };

      const customizeYaml = {
        persona: {
          role: 'Custom Role',
          identity: '', // Empty - should NOT override
          communication_style: 'Custom Style',
          // principles omitted
        },
      };

      const basePath = path.join(tmpDir, 'base.yaml');
      const customizePath = path.join(tmpDir, 'customize.yaml');
      await fs.writeFile(basePath, yaml.stringify(baseYaml));
      await fs.writeFile(customizePath, yaml.stringify(customizeYaml));

      const result = await builder.loadAndMergeAgent(basePath, customizePath);

      expect(result.agent.persona.role).toBe('Custom Role'); // Overridden
      expect(result.agent.persona.identity).toBe('Base Identity'); // Preserved
      expect(result.agent.persona.communication_style).toBe('Custom Style'); // Overridden
      expect(result.agent.persona.principles).toEqual(['Base Principle']); // Preserved
    });

    it('should preserve base persona when customize has null values', async () => {
      const baseYaml = {
        agent: {
          metadata: { id: 'base', name: 'Base', title: 'Base', icon: '🔧' },
          persona: {
            role: 'Base Role',
            identity: 'Base Identity',
            communication_style: 'Base Style',
            principles: ['Base'],
          },
          menu: [],
        },
      };

      const customizeYaml = {
        persona: {
          role: null,
          identity: 'Custom Identity',
        },
      };

      const basePath = path.join(tmpDir, 'base.yaml');
      const customizePath = path.join(tmpDir, 'customize.yaml');
      await fs.writeFile(basePath, yaml.stringify(baseYaml));
      await fs.writeFile(customizePath, yaml.stringify(customizeYaml));

      const result = await builder.loadAndMergeAgent(basePath, customizePath);

      expect(result.agent.persona.role).toBe('Base Role'); // Preserved (null skipped)
      expect(result.agent.persona.identity).toBe('Custom Identity'); // Overridden
    });

    it('should preserve base persona when customize has empty arrays', async () => {
      const baseYaml = {
        agent: {
          metadata: { id: 'base', name: 'Base', title: 'Base', icon: '🔧' },
          persona: {
            role: 'Base Role',
            identity: 'Base Identity',
            communication_style: 'Base Style',
            principles: ['Principle 1', 'Principle 2'],
          },
          menu: [],
        },
      };

      const customizeYaml = {
        persona: {
          principles: [], // Empty array - should NOT override
        },
      };

      const basePath = path.join(tmpDir, 'base.yaml');
      const customizePath = path.join(tmpDir, 'customize.yaml');
      await fs.writeFile(basePath, yaml.stringify(baseYaml));
      await fs.writeFile(customizePath, yaml.stringify(customizeYaml));

      const result = await builder.loadAndMergeAgent(basePath, customizePath);

      expect(result.agent.persona.principles).toEqual(['Principle 1', 'Principle 2']);
    });

    it('should append menu items from customize', async () => {
      const baseYaml = {
        agent: {
          metadata: { id: 'base', name: 'Base', title: 'Base', icon: '🔧' },
          persona: { role: 'Role', identity: 'ID', communication_style: 'Style', principles: ['P'] },
          menu: [{ trigger: 'help', description: 'Help', action: 'show_help' }],
        },
      };

      const customizeYaml = {
        menu: [{ trigger: 'custom', description: 'Custom', action: 'custom_action' }],
      };

      const basePath = path.join(tmpDir, 'base.yaml');
      const customizePath = path.join(tmpDir, 'customize.yaml');
      await fs.writeFile(basePath, yaml.stringify(baseYaml));
      await fs.writeFile(customizePath, yaml.stringify(customizeYaml));

      const result = await builder.loadAndMergeAgent(basePath, customizePath);

      // Base entries come first, customize entries are appended after.
      expect(result.agent.menu).toHaveLength(2);
      expect(result.agent.menu[0].trigger).toBe('help');
      expect(result.agent.menu[1].trigger).toBe('custom');
    });

    it('should append critical_actions from customize', async () => {
      const baseYaml = {
        agent: {
          metadata: { id: 'base', name: 'Base', title: 'Base', icon: '🔧' },
          persona: { role: 'Role', identity: 'ID', communication_style: 'Style', principles: ['P'] },
          critical_actions: ['Action 1'],
          menu: [],
        },
      };

      const customizeYaml = {
        critical_actions: ['Action 2', 'Action 3'],
      };

      const basePath = path.join(tmpDir, 'base.yaml');
      const customizePath = path.join(tmpDir, 'customize.yaml');
      await fs.writeFile(basePath, yaml.stringify(baseYaml));
      await fs.writeFile(customizePath, yaml.stringify(customizeYaml));

      const result = await builder.loadAndMergeAgent(basePath, customizePath);

      expect(result.agent.critical_actions).toHaveLength(3);
      expect(result.agent.critical_actions).toEqual(['Action 1', 'Action 2', 'Action 3']);
    });

    it('should append prompts from customize', async () => {
      const baseYaml = {
        agent: {
          metadata: { id: 'base', name: 'Base', title: 'Base', icon: '🔧' },
          persona: { role: 'Role', identity: 'ID', communication_style: 'Style', principles: ['P'] },
          prompts: [{ id: 'p1', content: 'Prompt 1' }],
          menu: [],
        },
      };

      const customizeYaml = {
        prompts: [{ id: 'p2', content: 'Prompt 2' }],
      };

      const basePath = path.join(tmpDir, 'base.yaml');
      const customizePath = path.join(tmpDir, 'customize.yaml');
      await fs.writeFile(basePath, yaml.stringify(baseYaml));
      await fs.writeFile(customizePath, yaml.stringify(customizeYaml));

      const result = await builder.loadAndMergeAgent(basePath, customizePath);

      expect(result.agent.prompts).toHaveLength(2);
    });

    it('should handle missing customization file', async () => {
      const agentYaml = {
        agent: {
          metadata: { id: 'test', name: 'Test', title: 'Test', icon: '🔧' },
          persona: { role: 'Role', identity: 'ID', communication_style: 'Style', principles: ['P'] },
          menu: [],
        },
      };

      const agentPath = path.join(tmpDir, 'agent.yaml');
      await fs.writeFile(agentPath, yaml.stringify(agentYaml));

      // A nonexistent customize path must be silently ignored, not throw.
      const nonExistent = path.join(tmpDir, 'nonexistent.yaml');
      const result = await builder.loadAndMergeAgent(agentPath, nonExistent);

      expect(result.agent.metadata.id).toBe('test');
    });

    it('should handle legacy commands field (renamed to menu)', async () => {
      const baseYaml = {
        agent: {
          metadata: { id: 'base', name: 'Base', title: 'Base', icon: '🔧' },
          persona: { role: 'Role', identity: 'ID', communication_style: 'Style', principles: ['P'] },
          commands: [{ trigger: 'old', description: 'Old', action: 'old_action' }],
        },
      };

      const customizeYaml = {
        commands: [{ trigger: 'new', description: 'New', action: 'new_action' }],
      };

      const basePath = path.join(tmpDir, 'base.yaml');
      const customizePath = path.join(tmpDir, 'customize.yaml');
      await fs.writeFile(basePath, yaml.stringify(baseYaml));
      await fs.writeFile(customizePath, yaml.stringify(customizeYaml));

      const result = await builder.loadAndMergeAgent(basePath, customizePath);

      expect(result.agent.commands).toHaveLength(2);
    });

    it('should override metadata with non-empty values', async () => {
      const baseYaml = {
        agent: {
          metadata: { id: 'base', name: 'Base Name', title: 'Base Title', icon: '🔧' },
          persona: { role: 'Role', identity: 'ID', communication_style: 'Style', principles: ['P'] },
          menu: [],
        },
      };

      const customizeYaml = {
        agent: {
          metadata: {
            name: 'Custom Name',
            title: '', // Empty - should be skipped
            icon: '🎯',
          },
        },
      };

      const basePath = path.join(tmpDir, 'base.yaml');
      const customizePath = path.join(tmpDir, 'customize.yaml');
      await fs.writeFile(basePath, yaml.stringify(baseYaml));
      await fs.writeFile(customizePath, yaml.stringify(customizeYaml));

      const result = await builder.loadAndMergeAgent(basePath, customizePath);

      expect(result.agent.metadata.name).toBe('Custom Name');
      expect(result.agent.metadata.title).toBe('Base Title'); // Preserved
      expect(result.agent.metadata.icon).toBe('🎯');
    });
  });
|
||||
|
||||
describe('buildPersonaXml()', () => {
|
||||
it('should build complete persona XML', () => {
|
||||
const persona = {
|
||||
role: 'Test Role',
|
||||
identity: 'Test Identity',
|
||||
communication_style: 'Professional',
|
||||
principles: ['Principle 1', 'Principle 2', 'Principle 3'],
|
||||
};
|
||||
|
||||
const xml = builder.buildPersonaXml(persona);
|
||||
|
||||
expect(xml).toContain('<persona>');
|
||||
expect(xml).toContain('</persona>');
|
||||
expect(xml).toContain('<role>Test Role</role>');
|
||||
expect(xml).toContain('<identity>Test Identity</identity>');
|
||||
expect(xml).toContain('<communication_style>Professional</communication_style>');
|
||||
expect(xml).toContain('<principles>Principle 1 Principle 2 Principle 3</principles>');
|
||||
});
|
||||
|
||||
it('should escape XML special characters in persona', () => {
|
||||
const persona = {
|
||||
role: 'Role with <tags> & "quotes"',
|
||||
identity: "O'Reilly's Identity",
|
||||
communication_style: 'Use <code> tags',
|
||||
principles: ['Principle with & ampersand'],
|
||||
};
|
||||
|
||||
const xml = builder.buildPersonaXml(persona);
|
||||
|
||||
expect(xml).toContain('<tags> & "quotes"');
|
||||
expect(xml).toContain('O'Reilly's Identity');
|
||||
expect(xml).toContain('<code> tags');
|
||||
expect(xml).toContain('& ampersand');
|
||||
});
|
||||
|
||||
it('should handle principles as array', () => {
|
||||
const persona = {
|
||||
role: 'Role',
|
||||
identity: 'ID',
|
||||
communication_style: 'Style',
|
||||
principles: ['P1', 'P2', 'P3'],
|
||||
};
|
||||
|
||||
const xml = builder.buildPersonaXml(persona);
|
||||
|
||||
expect(xml).toContain('<principles>P1 P2 P3</principles>');
|
||||
});
|
||||
|
||||
it('should handle principles as string', () => {
|
||||
const persona = {
|
||||
role: 'Role',
|
||||
identity: 'ID',
|
||||
communication_style: 'Style',
|
||||
principles: 'Single principle string',
|
||||
};
|
||||
|
||||
const xml = builder.buildPersonaXml(persona);
|
||||
|
||||
expect(xml).toContain('<principles>Single principle string</principles>');
|
||||
});
|
||||
|
||||
it('should preserve Unicode in persona fields', () => {
|
||||
const persona = {
|
||||
role: 'Тестовая роль',
|
||||
identity: '日本語のアイデンティティ',
|
||||
communication_style: 'Estilo profesional',
|
||||
principles: ['原则一', 'Принцип два'],
|
||||
};
|
||||
|
||||
const xml = builder.buildPersonaXml(persona);
|
||||
|
||||
expect(xml).toContain('Тестовая роль');
|
||||
expect(xml).toContain('日本語のアイデンティティ');
|
||||
expect(xml).toContain('Estilo profesional');
|
||||
expect(xml).toContain('原则一 Принцип два');
|
||||
});
|
||||
|
||||
it('should handle missing persona gracefully', () => {
|
||||
const xml = builder.buildPersonaXml(null);
|
||||
|
||||
expect(xml).toBe('');
|
||||
});
|
||||
|
||||
it('should handle partial persona (missing optional fields)', () => {
|
||||
const persona = {
|
||||
role: 'Role',
|
||||
identity: 'ID',
|
||||
communication_style: 'Style',
|
||||
// principles missing
|
||||
};
|
||||
|
||||
const xml = builder.buildPersonaXml(persona);
|
||||
|
||||
expect(xml).toContain('<role>Role</role>');
|
||||
expect(xml).toContain('<identity>ID</identity>');
|
||||
expect(xml).toContain('<communication_style>Style</communication_style>');
|
||||
expect(xml).not.toContain('<principles>');
|
||||
});
|
||||
});
|
||||
|
||||
describe('buildMemoriesXml()', () => {
|
||||
it('should build memories XML from array', () => {
|
||||
const memories = ['Memory 1', 'Memory 2', 'Memory 3'];
|
||||
|
||||
const xml = builder.buildMemoriesXml(memories);
|
||||
|
||||
expect(xml).toContain('<memories>');
|
||||
expect(xml).toContain('</memories>');
|
||||
expect(xml).toContain('<memory>Memory 1</memory>');
|
||||
expect(xml).toContain('<memory>Memory 2</memory>');
|
||||
expect(xml).toContain('<memory>Memory 3</memory>');
|
||||
});
|
||||
|
||||
it('should escape XML special characters in memories', () => {
|
||||
const memories = ['Memory with <tags>', 'Memory with & ampersand', 'Memory with "quotes"'];
|
||||
|
||||
const xml = builder.buildMemoriesXml(memories);
|
||||
|
||||
expect(xml).toContain('<tags>');
|
||||
expect(xml).toContain('& ampersand');
|
||||
expect(xml).toContain('"quotes"');
|
||||
});
|
||||
|
||||
it('should return empty string for null memories', () => {
|
||||
expect(builder.buildMemoriesXml(null)).toBe('');
|
||||
});
|
||||
|
||||
it('should return empty string for empty array', () => {
|
||||
expect(builder.buildMemoriesXml([])).toBe('');
|
||||
});
|
||||
|
||||
it('should handle Unicode in memories', () => {
|
||||
const memories = ['记忆 1', 'Память 2', '記憶 3'];
|
||||
|
||||
const xml = builder.buildMemoriesXml(memories);
|
||||
|
||||
expect(xml).toContain('记忆 1');
|
||||
expect(xml).toContain('Память 2');
|
||||
expect(xml).toContain('記憶 3');
|
||||
});
|
||||
});
|
||||
|
||||
describe('buildPromptsXml()', () => {
|
||||
it('should build prompts XML from array format', () => {
|
||||
const prompts = [
|
||||
{ id: 'p1', content: 'Prompt 1 content' },
|
||||
{ id: 'p2', content: 'Prompt 2 content' },
|
||||
];
|
||||
|
||||
const xml = builder.buildPromptsXml(prompts);
|
||||
|
||||
expect(xml).toContain('<prompts>');
|
||||
expect(xml).toContain('</prompts>');
|
||||
expect(xml).toContain('<prompt id="p1">');
|
||||
expect(xml).toContain('<content>');
|
||||
expect(xml).toContain('Prompt 1 content');
|
||||
expect(xml).toContain('<prompt id="p2">');
|
||||
expect(xml).toContain('Prompt 2 content');
|
||||
});
|
||||
|
||||
it('should escape XML special characters in prompts', () => {
|
||||
const prompts = [{ id: 'test', content: 'Content with <tags> & "quotes"' }];
|
||||
|
||||
const xml = builder.buildPromptsXml(prompts);
|
||||
|
||||
expect(xml).toContain('<content>');
|
||||
expect(xml).toContain('<tags> & "quotes"');
|
||||
});
|
||||
|
||||
it('should return empty string for null prompts', () => {
|
||||
expect(builder.buildPromptsXml(null)).toBe('');
|
||||
});
|
||||
|
||||
it('should handle Unicode in prompts', () => {
|
||||
const prompts = [{ id: 'unicode', content: 'Test 测试 тест テスト' }];
|
||||
|
||||
const xml = builder.buildPromptsXml(prompts);
|
||||
|
||||
expect(xml).toContain('<content>');
|
||||
expect(xml).toContain('测试 тест テスト');
|
||||
});
|
||||
|
||||
it('should handle object/dictionary format prompts', () => {
|
||||
const prompts = {
|
||||
p1: 'Prompt 1 content',
|
||||
p2: 'Prompt 2 content',
|
||||
};
|
||||
|
||||
const xml = builder.buildPromptsXml(prompts);
|
||||
|
||||
expect(xml).toContain('<prompts>');
|
||||
expect(xml).toContain('<prompt id="p1">');
|
||||
expect(xml).toContain('Prompt 1 content');
|
||||
expect(xml).toContain('<prompt id="p2">');
|
||||
expect(xml).toContain('Prompt 2 content');
|
||||
});
|
||||
|
||||
it('should return empty string for empty array', () => {
|
||||
expect(builder.buildPromptsXml([])).toBe('');
|
||||
});
|
||||
});
|
||||
|
||||
describe('calculateFileHash()', () => {
|
||||
it('should calculate MD5 hash of file content', async () => {
|
||||
const content = 'test content for hashing';
|
||||
const filePath = await createTestFile(tmpDir, 'test.txt', content);
|
||||
|
||||
const hash = await builder.calculateFileHash(filePath);
|
||||
|
||||
expect(hash).toHaveLength(8); // MD5 truncated to 8 chars
|
||||
expect(hash).toMatch(/^[a-f0-9]{8}$/);
|
||||
});
|
||||
|
||||
it('should return consistent hash for same content', async () => {
|
||||
const file1 = await createTestFile(tmpDir, 'file1.txt', 'content');
|
||||
const file2 = await createTestFile(tmpDir, 'file2.txt', 'content');
|
||||
|
||||
const hash1 = await builder.calculateFileHash(file1);
|
||||
const hash2 = await builder.calculateFileHash(file2);
|
||||
|
||||
expect(hash1).toBe(hash2);
|
||||
});
|
||||
|
||||
it('should return null for non-existent file', async () => {
|
||||
const nonExistent = path.join(tmpDir, 'missing.txt');
|
||||
|
||||
const hash = await builder.calculateFileHash(nonExistent);
|
||||
|
||||
expect(hash).toBeNull();
|
||||
});
|
||||
|
||||
it('should handle empty file', async () => {
|
||||
const file = await createTestFile(tmpDir, 'empty.txt', '');
|
||||
|
||||
const hash = await builder.calculateFileHash(file);
|
||||
|
||||
expect(hash).toHaveLength(8);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -0,0 +1,84 @@
|
|||
import { describe, it, expect } from 'vitest';
|
||||
import { escapeXml } from '../../../tools/lib/xml-utils.js';
|
||||
|
||||
describe('xml-utils', () => {
|
||||
describe('escapeXml()', () => {
|
||||
it('should escape ampersand (&) to &', () => {
|
||||
expect(escapeXml('Tom & Jerry')).toBe('Tom & Jerry');
|
||||
});
|
||||
|
||||
it('should escape less than (<) to <', () => {
|
||||
expect(escapeXml('5 < 10')).toBe('5 < 10');
|
||||
});
|
||||
|
||||
it('should escape greater than (>) to >', () => {
|
||||
expect(escapeXml('10 > 5')).toBe('10 > 5');
|
||||
});
|
||||
|
||||
it('should escape double quote (") to "', () => {
|
||||
expect(escapeXml('He said "hello"')).toBe('He said "hello"');
|
||||
});
|
||||
|
||||
it("should escape single quote (') to '", () => {
|
||||
expect(escapeXml("It's working")).toBe('It's working');
|
||||
});
|
||||
|
||||
it('should preserve Unicode characters', () => {
|
||||
expect(escapeXml('Hello 世界 🌍')).toBe('Hello 世界 🌍');
|
||||
});
|
||||
|
||||
it('should escape multiple special characters in sequence', () => {
|
||||
expect(escapeXml('<tag attr="value">')).toBe('<tag attr="value">');
|
||||
});
|
||||
|
||||
it('should escape all five special characters together', () => {
|
||||
expect(escapeXml(`&<>"'`)).toBe('&<>"'');
|
||||
});
|
||||
|
||||
it('should handle empty string', () => {
|
||||
expect(escapeXml('')).toBe('');
|
||||
});
|
||||
|
||||
it('should handle null', () => {
|
||||
expect(escapeXml(null)).toBe('');
|
||||
});
|
||||
|
||||
it('should handle undefined', () => {
|
||||
expect(escapeXml()).toBe('');
|
||||
});
|
||||
|
||||
it('should handle text with no special characters', () => {
|
||||
expect(escapeXml('Hello World')).toBe('Hello World');
|
||||
});
|
||||
|
||||
it('should handle text that is only special characters', () => {
|
||||
expect(escapeXml('&&&')).toBe('&&&');
|
||||
});
|
||||
|
||||
it('should not double-escape already escaped entities', () => {
|
||||
// Note: This is expected behavior - the function WILL double-escape
|
||||
// This test documents the actual behavior
|
||||
expect(escapeXml('&')).toBe('&amp;');
|
||||
});
|
||||
|
||||
it('should escape special characters in XML content', () => {
|
||||
const xmlContent = '<persona role="Developer & Architect">Use <code> tags</persona>';
|
||||
const expected = '<persona role="Developer & Architect">Use <code> tags</persona>';
|
||||
expect(escapeXml(xmlContent)).toBe(expected);
|
||||
});
|
||||
|
||||
it('should handle mixed Unicode and special characters', () => {
|
||||
expect(escapeXml('测试 <tag> & "quotes"')).toBe('测试 <tag> & "quotes"');
|
||||
});
|
||||
|
||||
it('should handle newlines and special characters', () => {
|
||||
const multiline = 'Line 1 & text\n<Line 2>\n"Line 3"';
|
||||
const expected = 'Line 1 & text\n<Line 2>\n"Line 3"';
|
||||
expect(escapeXml(multiline)).toBe(expected);
|
||||
});
|
||||
|
||||
it('should handle string with only whitespace', () => {
|
||||
expect(escapeXml(' ')).toBe(' ');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -975,6 +975,7 @@ class Installer {
|
|||
this.installedFiles.add(path.join(cfgDir, 'task-manifest.csv'));
|
||||
|
||||
// Generate CSV manifests for workflows, agents, tasks AND ALL FILES with hashes BEFORE IDE setup
|
||||
// This must happen BEFORE mergeModuleHelpCatalogs because it depends on agent-manifest.csv
|
||||
spinner.start('Generating workflow and agent manifests...');
|
||||
const manifestGen = new ManifestGenerator();
|
||||
|
||||
|
|
@ -1007,6 +1008,12 @@ class Installer {
|
|||
`Manifests generated: ${manifestStats.workflows} workflows, ${manifestStats.agents} agents, ${manifestStats.tasks} tasks, ${manifestStats.tools} tools, ${manifestStats.files} files`,
|
||||
);
|
||||
|
||||
// Merge all module-help.csv files into bmad-help.csv
|
||||
// This must happen AFTER generateManifests because it depends on agent-manifest.csv
|
||||
spinner.start('Generating workflow help catalog...');
|
||||
await this.mergeModuleHelpCatalogs(bmadDir);
|
||||
spinner.succeed('Workflow help catalog generated');
|
||||
|
||||
// Configure IDEs and copy documentation
|
||||
if (!config.skipIde && config.ides && config.ides.length > 0) {
|
||||
// Filter out any undefined/null values from the IDE list
|
||||
|
|
@ -1367,6 +1374,240 @@ class Installer {
|
|||
/**
|
||||
* Private: Create directory structure
|
||||
*/
|
||||
/**
|
||||
* Merge all module-help.csv files into a single bmad-help.csv
|
||||
* Scans all installed modules for module-help.csv and merges them
|
||||
* Enriches agent info from agent-manifest.csv
|
||||
* Output is written to _bmad/_config/bmad-help.csv
|
||||
* @param {string} bmadDir - BMAD installation directory
|
||||
*/
|
||||
async mergeModuleHelpCatalogs(bmadDir) {
|
||||
const allRows = [];
|
||||
const headerRow =
|
||||
'module,phase,name,code,sequence,workflow-file,command,required,agent-name,agent-command,agent-display-name,agent-title,options,description,output-location,outputs';
|
||||
|
||||
// Load agent manifest for agent info lookup
|
||||
const agentManifestPath = path.join(bmadDir, '_config', 'agent-manifest.csv');
|
||||
const agentInfo = new Map(); // agent-name -> {command, displayName, title+icon}
|
||||
|
||||
if (await fs.pathExists(agentManifestPath)) {
|
||||
const manifestContent = await fs.readFile(agentManifestPath, 'utf8');
|
||||
const lines = manifestContent.split('\n').filter((line) => line.trim());
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('name,')) continue; // Skip header
|
||||
|
||||
const cols = line.split(',');
|
||||
if (cols.length >= 4) {
|
||||
const agentName = cols[0].replaceAll('"', '').trim();
|
||||
const displayName = cols[1].replaceAll('"', '').trim();
|
||||
const title = cols[2].replaceAll('"', '').trim();
|
||||
const icon = cols[3].replaceAll('"', '').trim();
|
||||
const module = cols[10] ? cols[10].replaceAll('"', '').trim() : '';
|
||||
|
||||
// Build agent command: bmad:module:agent:name
|
||||
const agentCommand = module ? `bmad:${module}:agent:${agentName}` : `bmad:agent:${agentName}`;
|
||||
|
||||
agentInfo.set(agentName, {
|
||||
command: agentCommand,
|
||||
displayName: displayName || agentName,
|
||||
title: icon && title ? `${icon} ${title}` : title || agentName,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get all installed module directories
|
||||
const entries = await fs.readdir(bmadDir, { withFileTypes: true });
|
||||
const installedModules = entries
|
||||
.filter((entry) => entry.isDirectory() && entry.name !== '_config' && entry.name !== 'docs' && entry.name !== '_memory')
|
||||
.map((entry) => entry.name);
|
||||
|
||||
// Add core module to scan (it's installed at root level as _config, but we check src/core)
|
||||
const coreModulePath = getSourcePath('core');
|
||||
const modulePaths = new Map();
|
||||
|
||||
// Map all module source paths
|
||||
if (await fs.pathExists(coreModulePath)) {
|
||||
modulePaths.set('core', coreModulePath);
|
||||
}
|
||||
|
||||
// Map installed module paths
|
||||
for (const moduleName of installedModules) {
|
||||
const modulePath = path.join(bmadDir, moduleName);
|
||||
modulePaths.set(moduleName, modulePath);
|
||||
}
|
||||
|
||||
// Scan each module for module-help.csv
|
||||
for (const [moduleName, modulePath] of modulePaths) {
|
||||
const helpFilePath = path.join(modulePath, 'module-help.csv');
|
||||
|
||||
if (await fs.pathExists(helpFilePath)) {
|
||||
try {
|
||||
const content = await fs.readFile(helpFilePath, 'utf8');
|
||||
const lines = content.split('\n').filter((line) => line.trim() && !line.startsWith('#'));
|
||||
|
||||
for (const line of lines) {
|
||||
// Skip header row
|
||||
if (line.startsWith('module,')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Parse the line - handle quoted fields with commas
|
||||
const columns = this.parseCSVLine(line);
|
||||
if (columns.length >= 12) {
|
||||
// Map old schema to new schema
|
||||
// Old: module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs
|
||||
// New: module,phase,name,code,sequence,workflow-file,command,required,agent-name,agent-command,agent-display-name,agent-title,options,description,output-location,outputs
|
||||
|
||||
const [
|
||||
module,
|
||||
phase,
|
||||
name,
|
||||
code,
|
||||
sequence,
|
||||
workflowFile,
|
||||
command,
|
||||
required,
|
||||
agentName,
|
||||
options,
|
||||
description,
|
||||
outputLocation,
|
||||
outputs,
|
||||
] = columns;
|
||||
|
||||
// If module column is empty, set it to this module's name (except for core which stays empty for universal tools)
|
||||
const finalModule = (!module || module.trim() === '') && moduleName !== 'core' ? moduleName : module || '';
|
||||
|
||||
// Lookup agent info
|
||||
const cleanAgentName = agentName ? agentName.trim() : '';
|
||||
const agentData = agentInfo.get(cleanAgentName) || { command: '', displayName: '', title: '' };
|
||||
|
||||
// Build new row with agent info
|
||||
const newRow = [
|
||||
finalModule,
|
||||
phase || '',
|
||||
name || '',
|
||||
code || '',
|
||||
sequence || '',
|
||||
workflowFile || '',
|
||||
command || '',
|
||||
required || 'false',
|
||||
cleanAgentName,
|
||||
agentData.command,
|
||||
agentData.displayName,
|
||||
agentData.title,
|
||||
options || '',
|
||||
description || '',
|
||||
outputLocation || '',
|
||||
outputs || '',
|
||||
];
|
||||
|
||||
allRows.push(newRow.map((c) => this.escapeCSVField(c)).join(','));
|
||||
}
|
||||
}
|
||||
|
||||
if (process.env.BMAD_VERBOSE_INSTALL === 'true') {
|
||||
console.log(chalk.dim(` Merged module-help from: ${moduleName}`));
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn(chalk.yellow(` Warning: Failed to read module-help.csv from ${moduleName}:`, error.message));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by module, then phase, then sequence
|
||||
allRows.sort((a, b) => {
|
||||
const colsA = this.parseCSVLine(a);
|
||||
const colsB = this.parseCSVLine(b);
|
||||
|
||||
// Module comparison (empty module/universal tools come first)
|
||||
const moduleA = (colsA[0] || '').toLowerCase();
|
||||
const moduleB = (colsB[0] || '').toLowerCase();
|
||||
if (moduleA !== moduleB) {
|
||||
return moduleA.localeCompare(moduleB);
|
||||
}
|
||||
|
||||
// Phase comparison
|
||||
const phaseA = colsA[1] || '';
|
||||
const phaseB = colsB[1] || '';
|
||||
if (phaseA !== phaseB) {
|
||||
return phaseA.localeCompare(phaseB);
|
||||
}
|
||||
|
||||
// Sequence comparison
|
||||
const seqA = parseInt(colsA[4] || '0', 10);
|
||||
const seqB = parseInt(colsB[4] || '0', 10);
|
||||
return seqA - seqB;
|
||||
});
|
||||
|
||||
// Write merged catalog
|
||||
const outputDir = path.join(bmadDir, '_config');
|
||||
await fs.ensureDir(outputDir);
|
||||
const outputPath = path.join(outputDir, 'bmad-help.csv');
|
||||
|
||||
const mergedContent = [headerRow, ...allRows].join('\n');
|
||||
await fs.writeFile(outputPath, mergedContent, 'utf8');
|
||||
|
||||
// Track the installed file
|
||||
this.installedFiles.add(outputPath);
|
||||
|
||||
if (process.env.BMAD_VERBOSE_INSTALL === 'true') {
|
||||
console.log(chalk.dim(` Generated bmad-help.csv: ${allRows.length} workflows`));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a CSV line, handling quoted fields
|
||||
* @param {string} line - CSV line to parse
|
||||
* @returns {Array} Array of field values
|
||||
*/
|
||||
parseCSVLine(line) {
|
||||
const result = [];
|
||||
let current = '';
|
||||
let inQuotes = false;
|
||||
|
||||
for (let i = 0; i < line.length; i++) {
|
||||
const char = line[i];
|
||||
const nextChar = line[i + 1];
|
||||
|
||||
if (char === '"') {
|
||||
if (inQuotes && nextChar === '"') {
|
||||
// Escaped quote
|
||||
current += '"';
|
||||
i++; // Skip next quote
|
||||
} else {
|
||||
// Toggle quote mode
|
||||
inQuotes = !inQuotes;
|
||||
}
|
||||
} else if (char === ',' && !inQuotes) {
|
||||
result.push(current);
|
||||
current = '';
|
||||
} else {
|
||||
current += char;
|
||||
}
|
||||
}
|
||||
result.push(current);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Escape a CSV field if it contains special characters
|
||||
* @param {string} field - Field value to escape
|
||||
* @returns {string} Escaped field
|
||||
*/
|
||||
escapeCSVField(field) {
|
||||
if (field === null || field === undefined) {
|
||||
return '';
|
||||
}
|
||||
const str = String(field);
|
||||
// If field contains comma, quote, or newline, wrap in quotes and escape inner quotes
|
||||
if (str.includes(',') || str.includes('"') || str.includes('\n')) {
|
||||
return `"${str.replaceAll('"', '""')}"`;
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
async createDirectoryStructure(bmadDir) {
|
||||
await fs.ensureDir(bmadDir);
|
||||
await fs.ensureDir(path.join(bmadDir, '_config'));
|
||||
|
|
|
|||
|
|
@ -385,26 +385,45 @@ class ManifestGenerator {
|
|||
const filePath = path.join(dirPath, file);
|
||||
const content = await fs.readFile(filePath, 'utf8');
|
||||
|
||||
// Extract task metadata from content if possible
|
||||
const nameMatch = content.match(/name="([^"]+)"/);
|
||||
let name = file.replace(/\.(xml|md)$/, '');
|
||||
let displayName = name;
|
||||
let description = '';
|
||||
let standalone = false;
|
||||
|
||||
if (file.endsWith('.md')) {
|
||||
// Parse YAML frontmatter for .md tasks
|
||||
const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);
|
||||
if (frontmatterMatch) {
|
||||
try {
|
||||
const frontmatter = yaml.parse(frontmatterMatch[1]);
|
||||
name = frontmatter.name || name;
|
||||
displayName = frontmatter.displayName || frontmatter.name || name;
|
||||
description = frontmatter.description || '';
|
||||
standalone = frontmatter.standalone === true || frontmatter.standalone === 'true';
|
||||
} catch {
|
||||
// If YAML parsing fails, use defaults
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// For .xml tasks, extract from tag attributes
|
||||
const nameMatch = content.match(/name="([^"]+)"/);
|
||||
displayName = nameMatch ? nameMatch[1] : name;
|
||||
|
||||
// Try description attribute first, fall back to <objective> element
|
||||
const descMatch = content.match(/description="([^"]+)"/);
|
||||
const objMatch = content.match(/<objective>([^<]+)<\/objective>/);
|
||||
const description = descMatch ? descMatch[1] : objMatch ? objMatch[1].trim() : '';
|
||||
description = descMatch ? descMatch[1] : objMatch ? objMatch[1].trim() : '';
|
||||
|
||||
// Check for standalone attribute in <task> tag (default: false)
|
||||
const standaloneMatch = content.match(/<task[^>]+standalone="true"/);
|
||||
const standalone = !!standaloneMatch;
|
||||
standalone = !!standaloneMatch;
|
||||
}
|
||||
|
||||
// Build relative path for installation
|
||||
const installPath =
|
||||
moduleName === 'core' ? `${this.bmadFolderName}/core/tasks/${file}` : `${this.bmadFolderName}/${moduleName}/tasks/${file}`;
|
||||
|
||||
const taskName = file.replace(/\.(xml|md)$/, '');
|
||||
tasks.push({
|
||||
name: taskName,
|
||||
displayName: nameMatch ? nameMatch[1] : taskName,
|
||||
name: name,
|
||||
displayName: displayName,
|
||||
description: description.replaceAll('"', '""'),
|
||||
module: moduleName,
|
||||
path: installPath,
|
||||
|
|
@ -414,7 +433,7 @@ class ManifestGenerator {
|
|||
// Add to files list
|
||||
this.files.push({
|
||||
type: 'task',
|
||||
name: taskName,
|
||||
name: name,
|
||||
module: moduleName,
|
||||
path: installPath,
|
||||
});
|
||||
|
|
@ -455,26 +474,45 @@ class ManifestGenerator {
|
|||
const filePath = path.join(dirPath, file);
|
||||
const content = await fs.readFile(filePath, 'utf8');
|
||||
|
||||
// Extract tool metadata from content if possible
|
||||
const nameMatch = content.match(/name="([^"]+)"/);
|
||||
let name = file.replace(/\.(xml|md)$/, '');
|
||||
let displayName = name;
|
||||
let description = '';
|
||||
let standalone = false;
|
||||
|
||||
if (file.endsWith('.md')) {
|
||||
// Parse YAML frontmatter for .md tools
|
||||
const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);
|
||||
if (frontmatterMatch) {
|
||||
try {
|
||||
const frontmatter = yaml.parse(frontmatterMatch[1]);
|
||||
name = frontmatter.name || name;
|
||||
displayName = frontmatter.displayName || frontmatter.name || name;
|
||||
description = frontmatter.description || '';
|
||||
standalone = frontmatter.standalone === true || frontmatter.standalone === 'true';
|
||||
} catch {
|
||||
// If YAML parsing fails, use defaults
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// For .xml tools, extract from tag attributes
|
||||
const nameMatch = content.match(/name="([^"]+)"/);
|
||||
displayName = nameMatch ? nameMatch[1] : name;
|
||||
|
||||
// Try description attribute first, fall back to <objective> element
|
||||
const descMatch = content.match(/description="([^"]+)"/);
|
||||
const objMatch = content.match(/<objective>([^<]+)<\/objective>/);
|
||||
const description = descMatch ? descMatch[1] : objMatch ? objMatch[1].trim() : '';
|
||||
description = descMatch ? descMatch[1] : objMatch ? objMatch[1].trim() : '';
|
||||
|
||||
// Check for standalone attribute in <tool> tag (default: false)
|
||||
const standaloneMatch = content.match(/<tool[^>]+standalone="true"/);
|
||||
const standalone = !!standaloneMatch;
|
||||
standalone = !!standaloneMatch;
|
||||
}
|
||||
|
||||
// Build relative path for installation
|
||||
const installPath =
|
||||
moduleName === 'core' ? `${this.bmadFolderName}/core/tools/${file}` : `${this.bmadFolderName}/${moduleName}/tools/${file}`;
|
||||
|
||||
const toolName = file.replace(/\.(xml|md)$/, '');
|
||||
tools.push({
|
||||
name: toolName,
|
||||
displayName: nameMatch ? nameMatch[1] : toolName,
|
||||
name: name,
|
||||
displayName: displayName,
|
||||
description: description.replaceAll('"', '""'),
|
||||
module: moduleName,
|
||||
path: installPath,
|
||||
|
|
@ -484,7 +522,7 @@ class ManifestGenerator {
|
|||
// Add to files list
|
||||
this.files.push({
|
||||
type: 'tool',
|
||||
name: toolName,
|
||||
name: name,
|
||||
module: moduleName,
|
||||
path: installPath,
|
||||
});
|
||||
|
|
|
|||
|
|
@ -0,0 +1,234 @@
|
|||
# IDE Installer Standardization Plan
|
||||
|
||||
## Overview
|
||||
|
||||
Standardize IDE installers to use **flat file naming** and centralize duplicated code in shared utilities.
|
||||
|
||||
**Key Rule: Only folder-based IDEs convert to colon format. IDEs already using dashes keep using dashes.**
|
||||
|
||||
## Current State Analysis
|
||||
|
||||
### File Structure Patterns
|
||||
|
||||
| IDE | Current Pattern | Path Format |
|
||||
|-----|-----------------|-------------|
|
||||
| **claude-code** | Hierarchical | `.claude/commands/bmad/{module}/agents/{name}.md` |
|
||||
| **cursor** | Hierarchical | `.cursor/commands/bmad/{module}/agents/{name}.md` |
|
||||
| **crush** | Hierarchical | `.crush/commands/bmad/{module}/agents/{name}.md` |
|
||||
| **antigravity** | Flattened (dashes) | `.agent/workflows/bmad-module-agents-name.md` |
|
||||
| **codex** | Flattened (dashes) | `~/.codex/prompts/bmad-module-agents-name.md` |
|
||||
| **cline** | Flattened (dashes) | `.clinerules/workflows/bmad-module-type-name.md` |
|
||||
| **roo** | Flattened (dashes) | `.roo/commands/bmad-{module}-agent-{name}.md` |
|
||||
| **auggie** | Hybrid | `.augment/commands/bmad/agents/{module}-{name}.md` |
|
||||
| **iflow** | Hybrid | `.iflow/commands/bmad/agents/{module}-{name}.md` |
|
||||
| **trae** | Different (rules) | `.trae/rules/bmad-agent-{module}-{name}.md` |
|
||||
| **github-copilot** | Different (agents) | `.github/agents/bmd-custom-{module}-{name}.agent.md` |
|
||||
|
||||
### Shared Generators (in `/shared`)
|
||||
|
||||
1. `agent-command-generator.js` - generates agent launchers
|
||||
2. `task-tool-command-generator.js` - generates task/tool commands
|
||||
3. `workflow-command-generator.js` - generates workflow commands
|
||||
|
||||
All currently create artifacts with **nested relative paths** like `{module}/agents/{name}.md`
|
||||
|
||||
### Code Duplication Issues
|
||||
|
||||
1. **Flattening logic** duplicated in multiple IDEs
|
||||
2. **Agent launcher content creation** duplicated
|
||||
3. **Path transformation** duplicated
|
||||
|
||||
## Target Standardization
|
||||
|
||||
### For Folder-Based IDEs (convert to colon format)
|
||||
|
||||
**IDEs affected:** claude-code, cursor, crush
|
||||
|
||||
```
|
||||
Format: bmad:{module}:{type}:{name}.md
|
||||
|
||||
Examples:
|
||||
- Agent: bmad:bmm:agents:pm.md
|
||||
- Agent: bmad:core:agents:dev.md
|
||||
- Workflow: bmad:bmm:workflows:correct-course.md
|
||||
- Task: bmad:bmm:tasks:bmad-help.md
|
||||
- Tool: bmad:core:tools:code-review.md
|
||||
- Custom: bmad:custom:agents:fred-commit-poet.md
|
||||
```
|
||||
|
||||
### For Already-Flat IDEs (keep using dashes)
|
||||
|
||||
**IDEs affected:** antigravity, codex, cline, roo
|
||||
|
||||
```
|
||||
Format: bmad-{module}-{type}-{name}.md
|
||||
|
||||
Examples:
|
||||
- Agent: bmad-bmm-agents-pm.md
|
||||
- Workflow: bmad-bmm-workflows-correct-course.md
|
||||
- Task: bmad-bmm-tasks-bmad-help.md
|
||||
- Custom: bmad-custom-agents-fred-commit-poet.md
|
||||
```
|
||||
|
||||
### For Hybrid IDEs (keep as-is)
|
||||
|
||||
**IDEs affected:** auggie, iflow
|
||||
|
||||
These use `{module}-{name}.md` format within subdirectories - keep as-is.
|
||||
|
||||
### Skip (drastically different)
|
||||
|
||||
**IDEs affected:** trae, github-copilot
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Create Shared Utility
|
||||
|
||||
**File:** `shared/path-utils.js`
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* Convert hierarchical path to flat colon-separated name (for folder-based IDEs)
|
||||
* @param {string} module - Module name (e.g., 'bmm', 'core')
|
||||
* @param {string} type - Artifact type ('agents', 'workflows', 'tasks', 'tools')
|
||||
* @param {string} name - Artifact name (e.g., 'pm', 'correct-course')
|
||||
* @returns {string} Flat filename like 'bmad:bmm:agents:pm.md'
|
||||
*/
|
||||
function toColonName(module, type, name) {
|
||||
return `bmad:${module}:${type}:${name}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert relative path to flat colon-separated name (for folder-based IDEs)
|
||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
||||
* @returns {string} Flat filename like 'bmad:bmm:agents:pm.md'
|
||||
*/
|
||||
function toColonPath(relativePath) {
|
||||
const withoutExt = relativePath.replace('.md', '');
|
||||
const parts = withoutExt.split(/[\/\\]/);
|
||||
return `bmad:${parts.join(':')}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert hierarchical path to flat dash-separated name (for flat IDEs)
|
||||
* @param {string} relativePath - Path like 'bmm/agents/pm.md'
|
||||
* @returns {string} Flat filename like 'bmad-bmm-agents-pm.md'
|
||||
*/
|
||||
function toDashPath(relativePath) {
|
||||
const withoutExt = relativePath.replace('.md', '');
|
||||
const parts = withoutExt.split(/[\/\\]/);
|
||||
return `bmad-${parts.join('-')}.md`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create custom agent colon name
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Flat filename like 'bmad:custom:agents:fred-commit-poet.md'
|
||||
*/
|
||||
function customAgentColonName(agentName) {
|
||||
return `bmad:custom:agents:${agentName}.md`;
|
||||
}
|
||||
|
||||
/**
 * Build the dash-format launcher filename for a custom agent.
 * @param {string} agentName - Custom agent name
 * @returns {string} Flat filename like 'bmad-custom-agents-fred-commit-poet.md'
 */
function customAgentDashName(agentName) {
  // Custom agents always live under the fixed 'bmad-custom-agents' prefix.
  const segments = ['bmad', 'custom', 'agents', agentName];
  return `${segments.join('-')}.md`;
}
|
||||
|
||||
module.exports = {
|
||||
toColonName,
|
||||
toColonPath,
|
||||
toDashPath,
|
||||
customAgentColonName,
|
||||
customAgentDashName,
|
||||
};
|
||||
```
|
||||
|
||||
### Phase 2: Update Shared Generators
|
||||
|
||||
**Files to modify:**
|
||||
- `shared/agent-command-generator.js`
|
||||
- `shared/task-tool-command-generator.js`
|
||||
- `shared/workflow-command-generator.js`
|
||||
|
||||
**Changes:**
|
||||
1. Import path utilities
|
||||
2. Change `relativePath` to use flat format
|
||||
3. Add method `writeColonArtifacts()` for folder-based IDEs
|
||||
4. Add method `writeDashArtifacts()` for flat IDEs
|
||||
|
||||
### Phase 3: Update Folder-Based IDEs
|
||||
|
||||
**Files to modify:**
|
||||
- `claude-code.js`
|
||||
- `cursor.js`
|
||||
- `crush.js`
|
||||
|
||||
**Changes:**
|
||||
1. Import `toColonPath`, `customAgentColonName` from path-utils
|
||||
2. Change from hierarchical to flat colon naming
|
||||
3. Update cleanup to handle flat structure
|
||||
|
||||
### Phase 4: Update Flat IDEs
|
||||
|
||||
**Files to modify:**
|
||||
- `antigravity.js`
|
||||
- `codex.js`
|
||||
- `cline.js`
|
||||
- `roo.js`
|
||||
|
||||
**Changes:**
|
||||
1. Import `toDashPath`, `customAgentDashName` from path-utils
|
||||
2. Replace local `flattenFilename()` with shared `toDashPath()`
|
||||
|
||||
### Phase 5: Update Base Class
|
||||
|
||||
**File:** `_base-ide.js`
|
||||
|
||||
**Changes:**
|
||||
1. Mark `flattenFilename()` as `@deprecated`
|
||||
2. Add comment pointing to new path-utils
|
||||
|
||||
## Migration Checklist
|
||||
|
||||
### New Files
|
||||
- [ ] Create `shared/path-utils.js`
|
||||
|
||||
### Folder-Based IDEs (convert to colon format)
|
||||
- [ ] Update `shared/agent-command-generator.js` - add `writeColonArtifacts()`
|
||||
- [ ] Update `shared/task-tool-command-generator.js` - add `writeColonArtifacts()`
|
||||
- [ ] Update `shared/workflow-command-generator.js` - add `writeColonArtifacts()`
|
||||
- [ ] Update `claude-code.js` - convert to colon format
|
||||
- [ ] Update `cursor.js` - convert to colon format
|
||||
- [ ] Update `crush.js` - convert to colon format
|
||||
|
||||
### Flat IDEs (standardize dash format)
|
||||
- [ ] Update `shared/agent-command-generator.js` - add `writeDashArtifacts()`
|
||||
- [ ] Update `shared/task-tool-command-generator.js` - add `writeDashArtifacts()`
|
||||
- [ ] Update `shared/workflow-command-generator.js` - add `writeDashArtifacts()`
|
||||
- [ ] Update `antigravity.js` - use shared `toDashPath()`
|
||||
- [ ] Update `codex.js` - use shared `toDashPath()`
|
||||
- [ ] Update `cline.js` - use shared `toDashPath()`
|
||||
- [ ] Update `roo.js` - use shared `toDashPath()`
|
||||
|
||||
### Base Class
|
||||
- [ ] Update `_base-ide.js` - add deprecation notice
|
||||
|
||||
### Testing
|
||||
- [ ] Test claude-code installation
|
||||
- [ ] Test cursor installation
|
||||
- [ ] Test crush installation
|
||||
- [ ] Test antigravity installation
|
||||
- [ ] Test codex installation
|
||||
- [ ] Test cline installation
|
||||
- [ ] Test roo installation
|
||||
|
||||
## Notes
|
||||
|
||||
1. **Keep segments**: agents, workflows, tasks, tools all become part of the flat name
|
||||
2. **Colon vs Dash**: Colons for folder-based IDEs converting to flat, dashes for already-flat IDEs
|
||||
3. **Custom agents**: Follow the same pattern as regular agents
|
||||
4. **Backward compatibility**: Cleanup will remove old folder structure
|
||||
|
|
@ -619,6 +619,7 @@ class BaseIdeSetup {
|
|||
|
||||
/**
|
||||
* Flatten a relative path to a single filename for flat slash command naming
|
||||
* @deprecated Use toColonPath() or toDashPath() from shared/path-utils.js instead
|
||||
* Example: 'module/agents/name.md' -> 'bmad-module-agents-name.md'
|
||||
* Used by IDEs that ignore directory structure for slash commands (e.g., Antigravity, Codex)
|
||||
* @param {string} relativePath - Relative path to flatten
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ const {
|
|||
resolveSubagentFiles,
|
||||
} = require('./shared/module-injections');
|
||||
const { getAgentsFromBmad, getAgentsFromDir } = require('./shared/bmad-artifacts');
|
||||
const { toDashPath, customAgentDashName } = require('./shared/path-utils');
|
||||
const prompts = require('../../../lib/prompts');
|
||||
|
||||
/**
|
||||
|
|
@ -125,16 +126,10 @@ class AntigravitySetup extends BaseIdeSetup {
|
|||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
|
||||
// Write agent launcher files with FLATTENED naming
|
||||
// Antigravity ignores directory structure, so we flatten to: bmad-module-agents-name.md
|
||||
// This creates slash commands like /bmad-bmm-agents-dev instead of /dev
|
||||
let agentCount = 0;
|
||||
for (const artifact of agentArtifacts) {
|
||||
const flattenedName = this.flattenFilename(artifact.relativePath);
|
||||
const targetPath = path.join(bmadWorkflowsDir, flattenedName);
|
||||
await this.writeFile(targetPath, artifact.content);
|
||||
agentCount++;
|
||||
}
|
||||
// Write agent launcher files with FLATTENED naming using shared utility
|
||||
// Antigravity ignores directory structure, so we flatten to: bmad-module-name.md
|
||||
// This creates slash commands like /bmad-bmm-dev instead of /dev
|
||||
const agentCount = await agentGen.writeDashArtifacts(bmadWorkflowsDir, agentArtifacts);
|
||||
|
||||
// Process Antigravity specific injections for installed modules
|
||||
// Use pre-collected configuration if available, or skip if already configured
|
||||
|
|
@ -152,16 +147,8 @@ class AntigravitySetup extends BaseIdeSetup {
|
|||
const workflowGen = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
|
||||
|
||||
// Write workflow-command artifacts with FLATTENED naming
|
||||
let workflowCommandCount = 0;
|
||||
for (const artifact of workflowArtifacts) {
|
||||
if (artifact.type === 'workflow-command') {
|
||||
const flattenedName = this.flattenFilename(artifact.relativePath);
|
||||
const targetPath = path.join(bmadWorkflowsDir, flattenedName);
|
||||
await this.writeFile(targetPath, artifact.content);
|
||||
workflowCommandCount++;
|
||||
}
|
||||
}
|
||||
// Write workflow-command artifacts with FLATTENED naming using shared utility
|
||||
const workflowCommandCount = await workflowGen.writeDashArtifacts(bmadWorkflowsDir, workflowArtifacts);
|
||||
|
||||
// Generate task and tool commands from manifests (if they exist)
|
||||
const taskToolGen = new TaskToolCommandGenerator();
|
||||
|
|
@ -468,7 +455,8 @@ usage: |
|
|||
|
||||
⚠️ **IMPORTANT**: Run @${agentPath} to load the complete agent before using this launcher!`;
|
||||
|
||||
const fileName = `bmad-custom-agents-${agentName}.md`;
|
||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
||||
const fileName = customAgentDashName(agentName);
|
||||
const launcherPath = path.join(bmadWorkflowsDir, fileName);
|
||||
|
||||
// Write the launcher file
|
||||
|
|
@ -477,7 +465,7 @@ usage: |
|
|||
return {
|
||||
ide: 'antigravity',
|
||||
path: path.relative(projectDir, launcherPath),
|
||||
command: `/${agentName}`,
|
||||
command: `/${fileName.replace('.md', '')}`,
|
||||
type: 'custom-agent-launcher',
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ const {
|
|||
resolveSubagentFiles,
|
||||
} = require('./shared/module-injections');
|
||||
const { getAgentsFromBmad, getAgentsFromDir } = require('./shared/bmad-artifacts');
|
||||
const { customAgentColonName } = require('./shared/path-utils');
|
||||
const prompts = require('../../../lib/prompts');
|
||||
|
||||
/**
|
||||
|
|
@ -89,13 +90,46 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
|||
* @param {string} projectDir - Project directory
|
||||
*/
|
||||
async cleanup(projectDir) {
|
||||
const bmadCommandsDir = path.join(projectDir, this.configDir, this.commandsDir, 'bmad');
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
if (await fs.pathExists(bmadCommandsDir)) {
|
||||
await fs.remove(bmadCommandsDir);
|
||||
// Remove any bmad:* files from the commands directory
|
||||
if (await fs.pathExists(commandsDir)) {
|
||||
const entries = await fs.readdir(commandsDir);
|
||||
let removedCount = 0;
|
||||
for (const entry of entries) {
|
||||
if (entry.startsWith('bmad:')) {
|
||||
await fs.remove(path.join(commandsDir, entry));
|
||||
removedCount++;
|
||||
}
|
||||
}
|
||||
// Also remove legacy bmad folder if it exists
|
||||
const bmadFolder = path.join(commandsDir, 'bmad');
|
||||
if (await fs.pathExists(bmadFolder)) {
|
||||
await fs.remove(bmadFolder);
|
||||
console.log(chalk.dim(` Removed old BMAD commands from ${this.name}`));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up legacy folder structure (module/type/name.md) if it exists
|
||||
* This can be called after migration to remove old nested directories
|
||||
* @param {string} projectDir - Project directory
|
||||
*/
|
||||
async cleanupLegacyFolders(projectDir) {
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
if (!(await fs.pathExists(commandsDir))) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Remove legacy bmad folder if it exists
|
||||
const bmadFolder = path.join(commandsDir, 'bmad');
|
||||
if (await fs.pathExists(bmadFolder)) {
|
||||
await fs.remove(bmadFolder);
|
||||
console.log(chalk.dim(` Removed legacy bmad folder from ${this.name}`));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup Claude Code IDE configuration
|
||||
|
|
@ -115,28 +149,19 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
|||
// Create .claude/commands directory structure
|
||||
const claudeDir = path.join(projectDir, this.configDir);
|
||||
const commandsDir = path.join(claudeDir, this.commandsDir);
|
||||
const bmadCommandsDir = path.join(commandsDir, 'bmad');
|
||||
await this.ensureDir(commandsDir);
|
||||
|
||||
await this.ensureDir(bmadCommandsDir);
|
||||
// Use colon format: files written directly to commands dir (no bmad subfolder)
|
||||
// Creates: .claude/commands/bmad:bmm:pm.md
|
||||
|
||||
// Generate agent launchers using AgentCommandGenerator
|
||||
// This creates small launcher files that reference the actual agents in _bmad/
|
||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
|
||||
// Create directories for each module
|
||||
const modules = new Set();
|
||||
for (const artifact of agentArtifacts) {
|
||||
modules.add(artifact.module);
|
||||
}
|
||||
|
||||
for (const module of modules) {
|
||||
await this.ensureDir(path.join(bmadCommandsDir, module));
|
||||
await this.ensureDir(path.join(bmadCommandsDir, module, 'agents'));
|
||||
}
|
||||
|
||||
// Write agent launcher files
|
||||
const agentCount = await agentGen.writeAgentLaunchers(bmadCommandsDir, agentArtifacts);
|
||||
// Write agent launcher files using flat colon naming
|
||||
// Creates files like: bmad:bmm:pm.md
|
||||
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
||||
|
||||
// Process Claude Code specific injections for installed modules
|
||||
// Use pre-collected configuration if available, or skip if already configured
|
||||
|
|
@ -157,22 +182,13 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
|||
const workflowGen = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
|
||||
|
||||
// Write only workflow-command artifacts, skip workflow-launcher READMEs
|
||||
let workflowCommandCount = 0;
|
||||
for (const artifact of workflowArtifacts) {
|
||||
if (artifact.type === 'workflow-command') {
|
||||
const moduleWorkflowsDir = path.join(bmadCommandsDir, artifact.module, 'workflows');
|
||||
await this.ensureDir(moduleWorkflowsDir);
|
||||
const commandPath = path.join(moduleWorkflowsDir, path.basename(artifact.relativePath));
|
||||
await this.writeFile(commandPath, artifact.content);
|
||||
workflowCommandCount++;
|
||||
}
|
||||
// Skip workflow-launcher READMEs as they would be treated as slash commands
|
||||
}
|
||||
// Write workflow-command artifacts using flat colon naming
|
||||
// Creates files like: bmad:bmm:correct-course.md
|
||||
const workflowCommandCount = await workflowGen.writeColonArtifacts(commandsDir, workflowArtifacts);
|
||||
|
||||
// Generate task and tool commands from manifests (if they exist)
|
||||
const taskToolGen = new TaskToolCommandGenerator();
|
||||
const taskToolResult = await taskToolGen.generateTaskToolCommands(projectDir, bmadDir);
|
||||
const taskToolResult = await taskToolGen.generateColonTaskToolCommands(projectDir, bmadDir, commandsDir);
|
||||
|
||||
console.log(chalk.green(`✓ ${this.name} configured:`));
|
||||
console.log(chalk.dim(` - ${agentCount} agents installed`));
|
||||
|
|
@ -186,7 +202,7 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
|||
),
|
||||
);
|
||||
}
|
||||
console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, bmadCommandsDir)}`));
|
||||
console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
|
|
@ -449,13 +465,13 @@ class ClaudeCodeSetup extends BaseIdeSetup {
|
|||
* @returns {Object|null} Info about created command
|
||||
*/
|
||||
async installCustomAgentLauncher(projectDir, agentName, agentPath, metadata) {
|
||||
const customAgentsDir = path.join(projectDir, this.configDir, this.commandsDir, 'bmad', 'custom', 'agents');
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
if (!(await this.exists(path.join(projectDir, this.configDir)))) {
|
||||
return null; // IDE not configured for this project
|
||||
}
|
||||
|
||||
await this.ensureDir(customAgentsDir);
|
||||
await this.ensureDir(commandsDir);
|
||||
|
||||
const launcherContent = `---
|
||||
name: '${agentName}'
|
||||
|
|
@ -474,12 +490,15 @@ You must fully embody this agent's persona and follow all activation instruction
|
|||
</agent-activation>
|
||||
`;
|
||||
|
||||
const launcherPath = path.join(customAgentsDir, `${agentName}.md`);
|
||||
// Use colon format: bmad:custom:agents:fred-commit-poet.md
|
||||
// Written directly to commands dir (no bmad subfolder)
|
||||
const launcherName = customAgentColonName(agentName);
|
||||
const launcherPath = path.join(commandsDir, launcherName);
|
||||
await this.writeFile(launcherPath, launcherContent);
|
||||
|
||||
return {
|
||||
path: launcherPath,
|
||||
command: `/bmad:custom:agents:${agentName}`,
|
||||
command: `/${launcherName.replace('.md', '')}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,9 @@ const chalk = require('chalk');
|
|||
const { BaseIdeSetup } = require('./_base-ide');
|
||||
const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');
|
||||
const { AgentCommandGenerator } = require('./shared/agent-command-generator');
|
||||
const { TaskToolCommandGenerator } = require('./shared/task-tool-command-generator');
|
||||
const { getAgentsFromBmad, getTasksFromBmad } = require('./shared/bmad-artifacts');
|
||||
const { toDashPath, customAgentDashName } = require('./shared/path-utils');
|
||||
|
||||
/**
|
||||
* Cline IDE setup handler
|
||||
|
|
@ -56,7 +58,7 @@ class ClineSetup extends BaseIdeSetup {
|
|||
console.log(chalk.dim(' Usage:'));
|
||||
console.log(chalk.dim(' - Type / to see available commands'));
|
||||
console.log(chalk.dim(' - All BMAD items start with "bmad-"'));
|
||||
console.log(chalk.dim(' - Example: /bmad-bmm-agents-pm'));
|
||||
console.log(chalk.dim(' - Example: /bmad-bmm-pm'));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
|
|
@ -145,10 +147,10 @@ class ClineSetup extends BaseIdeSetup {
|
|||
|
||||
/**
|
||||
* Flatten file path to bmad-module-type-name.md format
|
||||
* Uses shared toDashPath utility
|
||||
*/
|
||||
flattenFilename(relativePath) {
|
||||
const sanitized = relativePath.replaceAll(/[\\/]/g, '-');
|
||||
return `bmad-${sanitized}`;
|
||||
return toDashPath(relativePath);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -244,7 +246,8 @@ The agent will follow the persona and instructions from the main agent file.
|
|||
|
||||
*Generated by BMAD Method*`;
|
||||
|
||||
const fileName = `bmad-custom-${agentName.toLowerCase()}.md`;
|
||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
||||
const fileName = customAgentDashName(agentName);
|
||||
const launcherPath = path.join(workflowsDir, fileName);
|
||||
|
||||
// Write the launcher file
|
||||
|
|
@ -253,7 +256,7 @@ The agent will follow the persona and instructions from the main agent file.
|
|||
return {
|
||||
ide: 'cline',
|
||||
path: path.relative(projectDir, launcherPath),
|
||||
command: agentName,
|
||||
command: fileName.replace('.md', ''),
|
||||
type: 'custom-agent-launcher',
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,7 +5,9 @@ const chalk = require('chalk');
|
|||
const { BaseIdeSetup } = require('./_base-ide');
|
||||
const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');
|
||||
const { AgentCommandGenerator } = require('./shared/agent-command-generator');
|
||||
const { TaskToolCommandGenerator } = require('./shared/task-tool-command-generator');
|
||||
const { getTasksFromBmad } = require('./shared/bmad-artifacts');
|
||||
const { toDashPath, customAgentDashName } = require('./shared/path-utils');
|
||||
const prompts = require('../../../lib/prompts');
|
||||
|
||||
/**
|
||||
|
|
@ -83,7 +85,41 @@ class CodexSetup extends BaseIdeSetup {
|
|||
const destDir = this.getCodexPromptDir(projectDir, installLocation);
|
||||
await fs.ensureDir(destDir);
|
||||
await this.clearOldBmadFiles(destDir);
|
||||
const written = await this.flattenAndWriteArtifacts(artifacts, destDir);
|
||||
|
||||
// Collect artifacts and write using DASH format
|
||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
const agentCount = await agentGen.writeDashArtifacts(destDir, agentArtifacts);
|
||||
|
||||
const tasks = await getTasksFromBmad(bmadDir, options.selectedModules || []);
|
||||
const taskArtifacts = [];
|
||||
for (const task of tasks) {
|
||||
const content = await this.readAndProcessWithProject(
|
||||
task.path,
|
||||
{
|
||||
module: task.module,
|
||||
name: task.name,
|
||||
},
|
||||
projectDir,
|
||||
);
|
||||
taskArtifacts.push({
|
||||
type: 'task',
|
||||
module: task.module,
|
||||
sourcePath: task.path,
|
||||
relativePath: path.join(task.module, 'tasks', `${task.name}.md`),
|
||||
content,
|
||||
});
|
||||
}
|
||||
|
||||
const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: workflowArtifacts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
|
||||
const workflowCount = await workflowGenerator.writeDashArtifacts(destDir, workflowArtifacts);
|
||||
|
||||
// Also write tasks using dash format
|
||||
const ttGen = new TaskToolCommandGenerator();
|
||||
const tasksWritten = await ttGen.writeDashArtifacts(destDir, taskArtifacts);
|
||||
|
||||
const written = agentCount + workflowCount + tasksWritten;
|
||||
|
||||
console.log(chalk.green(`✓ ${this.name} configured:`));
|
||||
console.log(chalk.dim(` - Mode: CLI`));
|
||||
|
|
@ -256,7 +292,7 @@ class CodexSetup extends BaseIdeSetup {
|
|||
chalk.dim(" To use with other projects, you'd need to copy the _bmad dir"),
|
||||
'',
|
||||
chalk.green(' ✓ You can now use /commands in Codex CLI'),
|
||||
chalk.dim(' Example: /bmad-bmm-agents-pm'),
|
||||
chalk.dim(' Example: /bmad-bmm-pm'),
|
||||
chalk.dim(' Type / to see all available commands'),
|
||||
'',
|
||||
chalk.bold.cyan('═'.repeat(70)),
|
||||
|
|
@ -361,7 +397,8 @@ You must fully embody this agent's persona and follow all activation instruction
|
|||
</agent-activation>
|
||||
`;
|
||||
|
||||
const fileName = `bmad-custom-agents-${agentName}.md`;
|
||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
||||
const fileName = customAgentDashName(agentName);
|
||||
const launcherPath = path.join(destDir, fileName);
|
||||
await fs.writeFile(launcherPath, launcherContent, 'utf8');
|
||||
|
||||
|
|
|
|||
|
|
@ -4,10 +4,12 @@ const { BaseIdeSetup } = require('./_base-ide');
|
|||
const chalk = require('chalk');
|
||||
const { AgentCommandGenerator } = require('./shared/agent-command-generator');
|
||||
const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');
|
||||
const { TaskToolCommandGenerator } = require('./shared/task-tool-command-generator');
|
||||
const { customAgentColonName } = require('./shared/path-utils');
|
||||
|
||||
/**
|
||||
* Crush IDE setup handler
|
||||
* Creates commands in .crush/commands/ directory structure
|
||||
* Creates commands in .crush/commands/ directory structure using flat colon naming
|
||||
*/
|
||||
class CrushSetup extends BaseIdeSetup {
|
||||
constructor() {
|
||||
|
|
@ -25,227 +27,73 @@ class CrushSetup extends BaseIdeSetup {
|
|||
async setup(projectDir, bmadDir, options = {}) {
|
||||
console.log(chalk.cyan(`Setting up ${this.name}...`));
|
||||
|
||||
// Create .crush/commands/bmad directory structure
|
||||
const crushDir = path.join(projectDir, this.configDir);
|
||||
const commandsDir = path.join(crushDir, this.commandsDir, 'bmad');
|
||||
// Clean up old BMAD installation first
|
||||
await this.cleanup(projectDir);
|
||||
|
||||
// Create .crush/commands directory
|
||||
const crushDir = path.join(projectDir, this.configDir);
|
||||
const commandsDir = path.join(crushDir, this.commandsDir);
|
||||
await this.ensureDir(commandsDir);
|
||||
|
||||
// Use colon format: files written directly to commands dir (no bmad subfolder)
|
||||
// Creates: .crush/commands/bmad:bmm:pm.md
|
||||
|
||||
// Generate agent launchers
|
||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
|
||||
// Get tasks, tools, and workflows (ALL workflows now generate commands)
|
||||
const tasks = await this.getTasks(bmadDir, true);
|
||||
const tools = await this.getTools(bmadDir, true);
|
||||
// Write agent launcher files using flat colon naming
|
||||
// Creates files like: bmad:bmm:pm.md
|
||||
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
||||
|
||||
// Get ALL workflows using the new workflow command generator
|
||||
const workflowGenerator = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: workflowArtifacts, counts: workflowCounts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
|
||||
const { artifacts: workflowArtifacts } = await workflowGenerator.collectWorkflowArtifacts(bmadDir);
|
||||
|
||||
// Convert workflow artifacts to expected format for organizeByModule
|
||||
const workflows = workflowArtifacts
|
||||
.filter((artifact) => artifact.type === 'workflow-command')
|
||||
.map((artifact) => ({
|
||||
module: artifact.module,
|
||||
name: path.basename(artifact.relativePath, '.md'),
|
||||
path: artifact.sourcePath,
|
||||
content: artifact.content,
|
||||
}));
|
||||
// Write workflow-command artifacts using flat colon naming
|
||||
// Creates files like: bmad:bmm:correct-course.md
|
||||
const workflowCount = await workflowGenerator.writeColonArtifacts(commandsDir, workflowArtifacts);
|
||||
|
||||
// Organize by module
|
||||
const agentCount = await this.organizeByModule(commandsDir, agentArtifacts, tasks, tools, workflows, projectDir);
|
||||
// Generate task and tool commands using flat colon naming
|
||||
const taskToolGen = new TaskToolCommandGenerator();
|
||||
const taskToolResult = await taskToolGen.generateColonTaskToolCommands(projectDir, bmadDir, commandsDir);
|
||||
|
||||
console.log(chalk.green(`✓ ${this.name} configured:`));
|
||||
console.log(chalk.dim(` - ${agentCount.agents} agent commands created`));
|
||||
console.log(chalk.dim(` - ${agentCount.tasks} task commands created`));
|
||||
console.log(chalk.dim(` - ${agentCount.tools} tool commands created`));
|
||||
console.log(chalk.dim(` - ${agentCount.workflows} workflow commands created`));
|
||||
console.log(chalk.dim(` - ${agentCount} agent commands created`));
|
||||
console.log(chalk.dim(` - ${taskToolResult.tasks} task commands created`));
|
||||
console.log(chalk.dim(` - ${taskToolResult.tools} tool commands created`));
|
||||
console.log(chalk.dim(` - ${workflowCount} workflow commands created`));
|
||||
console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));
|
||||
console.log(chalk.dim('\n Commands can be accessed via Crush command palette'));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
...agentCount,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Organize commands by module
|
||||
*/
|
||||
async organizeByModule(commandsDir, agentArtifacts, tasks, tools, workflows, projectDir) {
|
||||
// Get unique modules
|
||||
const modules = new Set();
|
||||
for (const artifact of agentArtifacts) modules.add(artifact.module);
|
||||
for (const task of tasks) modules.add(task.module);
|
||||
for (const tool of tools) modules.add(tool.module);
|
||||
for (const workflow of workflows) modules.add(workflow.module);
|
||||
|
||||
let agentCount = 0;
|
||||
let taskCount = 0;
|
||||
let toolCount = 0;
|
||||
let workflowCount = 0;
|
||||
|
||||
// Create module directories
|
||||
for (const module of modules) {
|
||||
const moduleDir = path.join(commandsDir, module);
|
||||
const moduleAgentsDir = path.join(moduleDir, 'agents');
|
||||
const moduleTasksDir = path.join(moduleDir, 'tasks');
|
||||
const moduleToolsDir = path.join(moduleDir, 'tools');
|
||||
const moduleWorkflowsDir = path.join(moduleDir, 'workflows');
|
||||
|
||||
await this.ensureDir(moduleAgentsDir);
|
||||
await this.ensureDir(moduleTasksDir);
|
||||
await this.ensureDir(moduleToolsDir);
|
||||
await this.ensureDir(moduleWorkflowsDir);
|
||||
|
||||
// Write module-specific agent launchers
|
||||
const moduleAgents = agentArtifacts.filter((a) => a.module === module);
|
||||
for (const artifact of moduleAgents) {
|
||||
const targetPath = path.join(moduleAgentsDir, `${artifact.name}.md`);
|
||||
await this.writeFile(targetPath, artifact.content);
|
||||
agentCount++;
|
||||
}
|
||||
|
||||
// Copy module-specific tasks
|
||||
const moduleTasks = tasks.filter((t) => t.module === module);
|
||||
for (const task of moduleTasks) {
|
||||
const content = await this.readFile(task.path);
|
||||
const commandContent = this.createTaskCommand(task, content);
|
||||
const targetPath = path.join(moduleTasksDir, `${task.name}.md`);
|
||||
await this.writeFile(targetPath, commandContent);
|
||||
taskCount++;
|
||||
}
|
||||
|
||||
// Copy module-specific tools
|
||||
const moduleTools = tools.filter((t) => t.module === module);
|
||||
for (const tool of moduleTools) {
|
||||
const content = await this.readFile(tool.path);
|
||||
const commandContent = this.createToolCommand(tool, content);
|
||||
const targetPath = path.join(moduleToolsDir, `${tool.name}.md`);
|
||||
await this.writeFile(targetPath, commandContent);
|
||||
toolCount++;
|
||||
}
|
||||
|
||||
// Copy module-specific workflow commands (already generated)
|
||||
const moduleWorkflows = workflows.filter((w) => w.module === module);
|
||||
for (const workflow of moduleWorkflows) {
|
||||
// Use the pre-generated workflow command content
|
||||
const targetPath = path.join(moduleWorkflowsDir, `${workflow.name}.md`);
|
||||
await this.writeFile(targetPath, workflow.content);
|
||||
workflowCount++;
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
agents: agentCount,
|
||||
tasks: taskCount,
|
||||
tools: toolCount,
|
||||
tasks: taskToolResult.tasks || 0,
|
||||
tools: taskToolResult.tools || 0,
|
||||
workflows: workflowCount,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create task command content
|
||||
*/
|
||||
createTaskCommand(task, content) {
|
||||
// Extract task name
|
||||
const nameMatch = content.match(/name="([^"]+)"/);
|
||||
const taskName = nameMatch ? nameMatch[1] : this.formatTitle(task.name);
|
||||
|
||||
let commandContent = `# /task-${task.name} Command
|
||||
|
||||
When this command is used, execute the following task:
|
||||
|
||||
## ${taskName} Task
|
||||
|
||||
${content}
|
||||
|
||||
## Command Usage
|
||||
|
||||
This command executes the ${taskName} task from the BMAD ${task.module.toUpperCase()} module.
|
||||
|
||||
## Module
|
||||
|
||||
Part of the BMAD ${task.module.toUpperCase()} module.
|
||||
`;
|
||||
|
||||
return commandContent;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create tool command content
|
||||
*/
|
||||
createToolCommand(tool, content) {
|
||||
// Extract tool name
|
||||
const nameMatch = content.match(/name="([^"]+)"/);
|
||||
const toolName = nameMatch ? nameMatch[1] : this.formatTitle(tool.name);
|
||||
|
||||
let commandContent = `# /tool-${tool.name} Command
|
||||
|
||||
When this command is used, execute the following tool:
|
||||
|
||||
## ${toolName} Tool
|
||||
|
||||
${content}
|
||||
|
||||
## Command Usage
|
||||
|
||||
This command executes the ${toolName} tool from the BMAD ${tool.module.toUpperCase()} module.
|
||||
|
||||
## Module
|
||||
|
||||
Part of the BMAD ${tool.module.toUpperCase()} module.
|
||||
`;
|
||||
|
||||
return commandContent;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create workflow command content
|
||||
*/
|
||||
createWorkflowCommand(workflow, content) {
|
||||
const workflowName = workflow.name ? this.formatTitle(workflow.name) : 'Workflow';
|
||||
|
||||
let commandContent = `# /${workflow.name} Command
|
||||
|
||||
When this command is used, execute the following workflow:
|
||||
|
||||
## ${workflowName} Workflow
|
||||
|
||||
${content}
|
||||
|
||||
## Command Usage
|
||||
|
||||
This command executes the ${workflowName} workflow from the BMAD ${workflow.module.toUpperCase()} module.
|
||||
|
||||
## Module
|
||||
|
||||
Part of the BMAD ${workflow.module.toUpperCase()} module.
|
||||
`;
|
||||
|
||||
return commandContent;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format name as title
|
||||
*/
|
||||
formatTitle(name) {
|
||||
return name
|
||||
.split('-')
|
||||
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
|
||||
.join(' ');
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup Crush configuration
|
||||
*/
|
||||
async cleanup(projectDir) {
|
||||
const fs = require('fs-extra');
|
||||
const bmadCommandsDir = path.join(projectDir, this.configDir, this.commandsDir, 'bmad');
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
if (await fs.pathExists(bmadCommandsDir)) {
|
||||
await fs.remove(bmadCommandsDir);
|
||||
// Remove any bmad:* files from the commands directory
|
||||
if (await fs.pathExists(commandsDir)) {
|
||||
const entries = await fs.readdir(commandsDir);
|
||||
for (const entry of entries) {
|
||||
if (entry.startsWith('bmad:')) {
|
||||
await fs.remove(path.join(commandsDir, entry));
|
||||
}
|
||||
}
|
||||
}
|
||||
// Also remove legacy bmad folder if it exists
|
||||
const bmadFolder = path.join(commandsDir, 'bmad');
|
||||
if (await fs.pathExists(bmadFolder)) {
|
||||
await fs.remove(bmadFolder);
|
||||
console.log(chalk.dim(`Removed BMAD commands from Crush`));
|
||||
}
|
||||
}
|
||||
|
|
@ -259,11 +107,10 @@ Part of the BMAD ${workflow.module.toUpperCase()} module.
|
|||
* @returns {Object} Installation result
|
||||
*/
|
||||
async installCustomAgentLauncher(projectDir, agentName, agentPath, metadata) {
|
||||
const crushDir = path.join(projectDir, this.configDir);
|
||||
const bmadCommandsDir = path.join(crushDir, this.commandsDir, 'bmad');
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
// Create .crush/commands/bmad directory if it doesn't exist
|
||||
await fs.ensureDir(bmadCommandsDir);
|
||||
// Create .crush/commands directory if it doesn't exist
|
||||
await fs.ensureDir(commandsDir);
|
||||
|
||||
// Create custom agent launcher
|
||||
const launcherContent = `# ${agentName} Custom Agent
|
||||
|
|
@ -282,8 +129,10 @@ The agent will follow the persona and instructions from the main agent file.
|
|||
|
||||
*Generated by BMAD Method*`;
|
||||
|
||||
const fileName = `custom-${agentName.toLowerCase()}.md`;
|
||||
const launcherPath = path.join(bmadCommandsDir, fileName);
|
||||
// Use colon format: bmad:custom:agents:fred-commit-poet.md
|
||||
// Written directly to commands dir (no bmad subfolder)
|
||||
const launcherName = customAgentColonName(agentName);
|
||||
const launcherPath = path.join(commandsDir, launcherName);
|
||||
|
||||
// Write the launcher file
|
||||
await fs.writeFile(launcherPath, launcherContent, 'utf8');
|
||||
|
|
@ -291,7 +140,7 @@ The agent will follow the persona and instructions from the main agent file.
|
|||
return {
|
||||
ide: 'crush',
|
||||
path: path.relative(projectDir, launcherPath),
|
||||
command: agentName,
|
||||
command: launcherName.replace('.md', ''),
|
||||
type: 'custom-agent-launcher',
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ const chalk = require('chalk');
|
|||
const { AgentCommandGenerator } = require('./shared/agent-command-generator');
|
||||
const { WorkflowCommandGenerator } = require('./shared/workflow-command-generator');
|
||||
const { TaskToolCommandGenerator } = require('./shared/task-tool-command-generator');
|
||||
const { customAgentColonName } = require('./shared/path-utils');
|
||||
|
||||
/**
|
||||
* Cursor IDE setup handler
|
||||
|
|
@ -22,16 +23,21 @@ class CursorSetup extends BaseIdeSetup {
|
|||
*/
|
||||
async cleanup(projectDir) {
|
||||
const fs = require('fs-extra');
|
||||
const bmadRulesDir = path.join(projectDir, this.configDir, this.rulesDir, 'bmad');
|
||||
const bmadCommandsDir = path.join(projectDir, this.configDir, this.commandsDir, 'bmad');
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
if (await fs.pathExists(bmadRulesDir)) {
|
||||
await fs.remove(bmadRulesDir);
|
||||
console.log(chalk.dim(` Removed old BMAD rules from ${this.name}`));
|
||||
// Remove any bmad:* files from the commands directory
|
||||
if (await fs.pathExists(commandsDir)) {
|
||||
const entries = await fs.readdir(commandsDir);
|
||||
for (const entry of entries) {
|
||||
if (entry.startsWith('bmad:')) {
|
||||
await fs.remove(path.join(commandsDir, entry));
|
||||
}
|
||||
|
||||
if (await fs.pathExists(bmadCommandsDir)) {
|
||||
await fs.remove(bmadCommandsDir);
|
||||
}
|
||||
}
|
||||
// Also remove legacy bmad folder if it exists
|
||||
const bmadFolder = path.join(commandsDir, 'bmad');
|
||||
if (await fs.pathExists(bmadFolder)) {
|
||||
await fs.remove(bmadFolder);
|
||||
console.log(chalk.dim(` Removed old BMAD commands from ${this.name}`));
|
||||
}
|
||||
}
|
||||
|
|
@ -51,49 +57,31 @@ class CursorSetup extends BaseIdeSetup {
|
|||
// Create .cursor/commands directory structure
|
||||
const cursorDir = path.join(projectDir, this.configDir);
|
||||
const commandsDir = path.join(cursorDir, this.commandsDir);
|
||||
const bmadCommandsDir = path.join(commandsDir, 'bmad');
|
||||
await this.ensureDir(commandsDir);
|
||||
|
||||
await this.ensureDir(bmadCommandsDir);
|
||||
// Use colon format: files written directly to commands dir (no bmad subfolder)
|
||||
// Creates: .cursor/commands/bmad:bmm:pm.md
|
||||
|
||||
// Generate agent launchers using AgentCommandGenerator
|
||||
// This creates small launcher files that reference the actual agents in _bmad/
|
||||
const agentGen = new AgentCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: agentArtifacts, counts: agentCounts } = await agentGen.collectAgentArtifacts(bmadDir, options.selectedModules || []);
|
||||
|
||||
// Create directories for each module
|
||||
const modules = new Set();
|
||||
for (const artifact of agentArtifacts) {
|
||||
modules.add(artifact.module);
|
||||
}
|
||||
|
||||
for (const module of modules) {
|
||||
await this.ensureDir(path.join(bmadCommandsDir, module));
|
||||
await this.ensureDir(path.join(bmadCommandsDir, module, 'agents'));
|
||||
}
|
||||
|
||||
// Write agent launcher files
|
||||
const agentCount = await agentGen.writeAgentLaunchers(bmadCommandsDir, agentArtifacts);
|
||||
// Write agent launcher files using flat colon naming
|
||||
// Creates files like: bmad:bmm:pm.md
|
||||
const agentCount = await agentGen.writeColonArtifacts(commandsDir, agentArtifacts);
|
||||
|
||||
// Generate workflow commands from manifest (if it exists)
|
||||
const workflowGen = new WorkflowCommandGenerator(this.bmadFolderName);
|
||||
const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
|
||||
|
||||
// Write only workflow-command artifacts, skip workflow-launcher READMEs
|
||||
let workflowCommandCount = 0;
|
||||
for (const artifact of workflowArtifacts) {
|
||||
if (artifact.type === 'workflow-command') {
|
||||
const moduleWorkflowsDir = path.join(bmadCommandsDir, artifact.module, 'workflows');
|
||||
await this.ensureDir(moduleWorkflowsDir);
|
||||
const commandPath = path.join(moduleWorkflowsDir, path.basename(artifact.relativePath));
|
||||
await this.writeFile(commandPath, artifact.content);
|
||||
workflowCommandCount++;
|
||||
}
|
||||
// Skip workflow-launcher READMEs as they would be treated as slash commands
|
||||
}
|
||||
// Write workflow-command artifacts using flat colon naming
|
||||
// Creates files like: bmad:bmm:correct-course.md
|
||||
const workflowCommandCount = await workflowGen.writeColonArtifacts(commandsDir, workflowArtifacts);
|
||||
|
||||
// Generate task and tool commands from manifests (if they exist)
|
||||
const taskToolGen = new TaskToolCommandGenerator();
|
||||
const taskToolResult = await taskToolGen.generateTaskToolCommands(projectDir, bmadDir, bmadCommandsDir);
|
||||
const taskToolResult = await taskToolGen.generateColonTaskToolCommands(projectDir, bmadDir, commandsDir);
|
||||
|
||||
console.log(chalk.green(`✓ ${this.name} configured:`));
|
||||
console.log(chalk.dim(` - ${agentCount} agents installed`));
|
||||
|
|
@ -107,7 +95,7 @@ class CursorSetup extends BaseIdeSetup {
|
|||
),
|
||||
);
|
||||
}
|
||||
console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, bmadCommandsDir)}`));
|
||||
console.log(chalk.dim(` - Commands directory: ${path.relative(projectDir, commandsDir)}`));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
|
|
@ -127,13 +115,13 @@ class CursorSetup extends BaseIdeSetup {
|
|||
* @returns {Object|null} Info about created command
|
||||
*/
|
||||
async installCustomAgentLauncher(projectDir, agentName, agentPath, metadata) {
|
||||
const customAgentsDir = path.join(projectDir, this.configDir, this.commandsDir, 'bmad', 'custom', 'agents');
|
||||
const commandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
|
||||
if (!(await this.exists(path.join(projectDir, this.configDir)))) {
|
||||
return null; // IDE not configured for this project
|
||||
}
|
||||
|
||||
await this.ensureDir(customAgentsDir);
|
||||
await this.ensureDir(commandsDir);
|
||||
|
||||
const launcherContent = `You must fully embody this agent's persona and follow all activation instructions exactly as specified. NEVER break character until given an exit command.
|
||||
|
||||
|
|
@ -156,12 +144,15 @@ description: '${agentName} agent'
|
|||
${launcherContent}
|
||||
`;
|
||||
|
||||
const launcherPath = path.join(customAgentsDir, `${agentName}.md`);
|
||||
// Use colon format: bmad:custom:agents:fred-commit-poet.md
|
||||
// Written directly to commands dir (no bmad subfolder)
|
||||
const launcherName = customAgentColonName(agentName);
|
||||
const launcherPath = path.join(commandsDir, launcherName);
|
||||
await this.writeFile(launcherPath, commandContent);
|
||||
|
||||
return {
|
||||
path: launcherPath,
|
||||
command: `/bmad/custom/agents/${agentName}`,
|
||||
command: `/${launcherName.replace('.md', '')}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ const path = require('node:path');
|
|||
const { BaseIdeSetup } = require('./_base-ide');
|
||||
const chalk = require('chalk');
|
||||
const { AgentCommandGenerator } = require('./shared/agent-command-generator');
|
||||
const { toDashPath, customAgentDashName } = require('./shared/path-utils');
|
||||
|
||||
/**
|
||||
* Roo IDE setup handler
|
||||
|
|
@ -35,7 +36,8 @@ class RooSetup extends BaseIdeSetup {
|
|||
let skippedCount = 0;
|
||||
|
||||
for (const artifact of agentArtifacts) {
|
||||
const commandName = `bmad-${artifact.module}-agent-${artifact.name}`;
|
||||
// Use shared toDashPath to get consistent naming: bmad-bmm-name.md
|
||||
const commandName = toDashPath(artifact.relativePath).replace('.md', '');
|
||||
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
||||
|
||||
// Skip if already exists
|
||||
|
|
@ -71,7 +73,7 @@ class RooSetup extends BaseIdeSetup {
|
|||
if (skippedCount > 0) {
|
||||
console.log(chalk.dim(` - ${skippedCount} commands skipped (already exist)`));
|
||||
}
|
||||
console.log(chalk.dim(` - Commands directory: ${this.configDir}/${this.commandsDir}/bmad/`));
|
||||
console.log(chalk.dim(` - Commands directory: ${this.configDir}/${this.commandsDir}/`));
|
||||
console.log(chalk.dim(` Commands will be available when you open this project in Roo Code`));
|
||||
|
||||
return {
|
||||
|
|
@ -222,7 +224,8 @@ class RooSetup extends BaseIdeSetup {
|
|||
const rooCommandsDir = path.join(projectDir, this.configDir, this.commandsDir);
|
||||
await this.ensureDir(rooCommandsDir);
|
||||
|
||||
const commandName = `bmad-custom-agent-${agentName.toLowerCase()}`;
|
||||
// Use dash format: bmad-custom-agents-fred-commit-poet.md
|
||||
const commandName = customAgentDashName(agentName).replace('.md', '');
|
||||
const commandPath = path.join(rooCommandsDir, `${commandName}.md`);
|
||||
|
||||
// Check if command already exists
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
const path = require('node:path');
|
||||
const fs = require('fs-extra');
|
||||
const chalk = require('chalk');
|
||||
const { toColonPath, toDashPath, customAgentColonName, customAgentDashName } = require('./path-utils');
|
||||
|
||||
/**
|
||||
* Generates launcher command files for each agent
|
||||
|
|
@ -91,6 +92,74 @@ class AgentCommandGenerator {
|
|||
|
||||
return writtenCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write agent launcher artifacts using COLON format (for folder-based IDEs)
|
||||
* Creates flat files like: bmad:bmm:pm.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Agent launcher artifacts
|
||||
* @returns {number} Count of launchers written
|
||||
*/
|
||||
async writeColonArtifacts(baseCommandsDir, artifacts) {
|
||||
let writtenCount = 0;
|
||||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'agent-launcher') {
|
||||
// Convert relativePath to colon format: bmm/agents/pm.md → bmad:bmm:pm.md
|
||||
const flatName = toColonPath(artifact.relativePath);
|
||||
const launcherPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(launcherPath));
|
||||
await fs.writeFile(launcherPath, artifact.content);
|
||||
writtenCount++;
|
||||
}
|
||||
}
|
||||
|
||||
return writtenCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write agent launcher artifacts using DASH format (for flat IDEs)
|
||||
* Creates flat files like: bmad-bmm-pm.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Agent launcher artifacts
|
||||
* @returns {number} Count of launchers written
|
||||
*/
|
||||
async writeDashArtifacts(baseCommandsDir, artifacts) {
|
||||
let writtenCount = 0;
|
||||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'agent-launcher') {
|
||||
// Convert relativePath to dash format: bmm/agents/pm.md → bmad-bmm-pm.md
|
||||
const flatName = toDashPath(artifact.relativePath);
|
||||
const launcherPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(launcherPath));
|
||||
await fs.writeFile(launcherPath, artifact.content);
|
||||
writtenCount++;
|
||||
}
|
||||
}
|
||||
|
||||
return writtenCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the custom agent name in colon format
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Colon-formatted filename
|
||||
*/
|
||||
getCustomAgentColonName(agentName) {
|
||||
return customAgentColonName(agentName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the custom agent name in dash format
|
||||
* @param {string} agentName - Custom agent name
|
||||
* @returns {string} Dash-formatted filename
|
||||
*/
|
||||
getCustomAgentDashName(agentName) {
|
||||
return customAgentDashName(agentName);
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { AgentCommandGenerator };
|
||||
|
|
|
|||
|
|
@ -0,0 +1,153 @@
|
|||
/**
|
||||
* Path transformation utilities for IDE installer standardization
|
||||
*
|
||||
* Provides utilities to convert hierarchical paths to flat naming conventions.
|
||||
* - Colon format (bmad:module:name.md) for folder-based IDEs converting to flat
|
||||
* - Dash format (bmad-module-name.md) for already-flat IDEs
|
||||
*/
|
||||
|
||||
// Path segments that denote artifact *type* rather than identity; they are
// dropped when flattening a hierarchical path into a command filename.
const TYPE_SEGMENTS = ['agents', 'workflows', 'tasks', 'tools'];

/**
 * Build a flat colon-separated command filename (for folder-based IDEs).
 *
 * Example: toColonName('bmm', 'tasks', 'pm') → 'bmad:bmm:pm.md'
 *
 * @param {string} module - Module name (e.g., 'bmm', 'core')
 * @param {string} type - Artifact type ('agents', 'workflows', 'tasks', 'tools');
 *   accepted for call-site symmetry but intentionally omitted from the name
 * @param {string} name - Artifact name (e.g., 'pm', 'correct-course')
 * @returns {string} Flat filename like 'bmad:bmm:pm.md'
 */
function toColonName(module, type, name) {
  return `bmad:${module}:${name}.md`;
}

/**
 * Convert a relative path to a flat colon-separated name (for folder-based IDEs).
 * Converts: 'bmm/agents/pm.md' → 'bmad:bmm:pm.md'
 * Converts: 'bmm/workflows/correct-course.md' → 'bmad:bmm:correct-course.md'
 *
 * @param {string} relativePath - Path like 'bmm/agents/pm.md'
 * @returns {string} Flat filename like 'bmad:bmm:pm.md'
 */
function toColonPath(relativePath) {
  // Strip the extension only when it is the trailing suffix. A plain
  // `.replace('.md', '')` would remove the FIRST '.md' occurring anywhere in
  // the path (e.g. 'bmm/agents/x.md-helper.md'), corrupting the name.
  const withoutExt = relativePath.replace(/\.md$/, '');
  const parts = withoutExt.split(/[/\\]/); // handle both posix and win32 separators
  // Filter out type segments (agents, workflows, tasks, tools)
  const filtered = parts.filter((p) => !TYPE_SEGMENTS.includes(p));
  return `bmad:${filtered.join(':')}.md`;
}

/**
 * Convert a relative path to a flat dash-separated name (for flat IDEs).
 * Converts: 'bmm/agents/pm.md' → 'bmad-bmm-pm.md'
 * Converts: 'bmm/workflows/correct-course.md' → 'bmad-bmm-correct-course.md'
 *
 * @param {string} relativePath - Path like 'bmm/agents/pm.md'
 * @returns {string} Flat filename like 'bmad-bmm-pm.md'
 */
function toDashPath(relativePath) {
  // Suffix-anchored extension strip — see toColonPath for rationale.
  const withoutExt = relativePath.replace(/\.md$/, '');
  const parts = withoutExt.split(/[/\\]/); // handle both posix and win32 separators
  // Filter out type segments (agents, workflows, tasks, tools)
  const filtered = parts.filter((p) => !TYPE_SEGMENTS.includes(p));
  return `bmad-${filtered.join('-')}.md`;
}
|
||||
|
||||
/**
 * Filename for a custom agent launcher in colon format (folder-based IDEs).
 * Produces: 'bmad:custom:fred-commit-poet.md'
 *
 * @param {string} agentName - Custom agent name
 * @returns {string} Flat filename like 'bmad:custom:fred-commit-poet.md'
 */
function customAgentColonName(agentName) {
  return ['bmad', 'custom', agentName].join(':') + '.md';
}
|
||||
|
||||
/**
 * Filename for a custom agent launcher in dash format (flat IDEs).
 * Produces: 'bmad-custom-fred-commit-poet.md'
 *
 * @param {string} agentName - Custom agent name
 * @returns {string} Flat filename like 'bmad-custom-fred-commit-poet.md'
 */
function customAgentDashName(agentName) {
  return ['bmad', 'custom', agentName].join('-') + '.md';
}
|
||||
|
||||
/**
 * Check whether a filename uses the colon naming format.
 *
 * Note: the previous implementation also tested `filename.includes(':')`,
 * which is implied by `includes('bmad:')` and therefore redundant; removing
 * it does not change behavior.
 *
 * @param {string} filename - Filename to check
 * @returns {boolean} True if filename uses colon format
 */
function isColonFormat(filename) {
  return filename.includes('bmad:');
}
|
||||
|
||||
/**
 * Check whether a filename uses the dash naming format.
 * A dash-format name starts with 'bmad-' and contains no colon.
 *
 * @param {string} filename - Filename to check
 * @returns {boolean} True if filename uses dash format
 */
function isDashFormat(filename) {
  if (filename.includes(':')) {
    return false;
  }
  return filename.startsWith('bmad-');
}
|
||||
|
||||
/**
 * Extract parts from a colon-formatted filename.
 * Parses: 'bmad:bmm:pm.md' → { prefix: 'bmad', module: 'bmm', name: 'pm' }
 *
 * @param {string} filename - Colon-formatted filename
 * @returns {Object|null} Parsed parts, or null when the name is not
 *   colon-formatted (fewer than three segments, or prefix !== 'bmad')
 */
function parseColonName(filename) {
  // Strip the extension only at the end of the string; a plain
  // `.replace('.md', '')` would remove the first '.md' anywhere in the name.
  const withoutExt = filename.replace(/\.md$/, '');
  const parts = withoutExt.split(':');

  if (parts.length < 3 || parts[0] !== 'bmad') {
    return null;
  }

  return {
    prefix: parts[0],
    module: parts[1],
    name: parts.slice(2).join(':'), // Handle names that might contain colons
  };
}
|
||||
|
||||
/**
 * Extract parts from a dash-formatted filename.
 * Parses: 'bmad-bmm-pm.md' → { prefix: 'bmad', module: 'bmm', name: 'pm' }
 *
 * @param {string} filename - Dash-formatted filename
 * @returns {Object|null} Parsed parts, or null when the name is not
 *   dash-formatted (fewer than three segments, or prefix !== 'bmad')
 */
function parseDashName(filename) {
  // Strip the extension only at the end of the string; a plain
  // `.replace('.md', '')` would remove the first '.md' anywhere in the name.
  const withoutExt = filename.replace(/\.md$/, '');
  const parts = withoutExt.split('-');

  if (parts.length < 3 || parts[0] !== 'bmad') {
    return null;
  }

  return {
    prefix: parts[0],
    module: parts[1],
    name: parts.slice(2).join('-'), // Handle names that might contain dashes
  };
}
|
||||
|
||||
// Public surface of path-utils: flat-name builders, format predicates,
// parsers, and the type-segment filter list shared by the IDE installers.
module.exports = {
  toColonName,
  toColonPath,
  toDashPath,
  customAgentColonName,
  customAgentDashName,
  isColonFormat,
  isDashFormat,
  parseColonName,
  parseDashName,
  TYPE_SEGMENTS,
};
|
||||
|
|
@ -2,6 +2,7 @@ const path = require('node:path');
|
|||
const fs = require('fs-extra');
|
||||
const csv = require('csv-parse/sync');
|
||||
const chalk = require('chalk');
|
||||
const { toColonName, toColonPath, toDashPath } = require('./path-utils');
|
||||
|
||||
/**
|
||||
* Generates command files for standalone tasks and tools
|
||||
|
|
@ -114,6 +115,154 @@ Follow all instructions in the ${type} file exactly as written.
|
|||
skip_empty_lines: true,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate task and tool commands using COLON format (for folder-based IDEs)
|
||||
* Creates flat files like: bmad:bmm:bmad-help.md
|
||||
*
|
||||
* @param {string} projectDir - Project directory
|
||||
* @param {string} bmadDir - BMAD installation directory
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @returns {Object} Generation results
|
||||
*/
|
||||
async generateColonTaskToolCommands(projectDir, bmadDir, baseCommandsDir) {
|
||||
const tasks = await this.loadTaskManifest(bmadDir);
|
||||
const tools = await this.loadToolManifest(bmadDir);
|
||||
|
||||
// Filter to only standalone items
|
||||
const standaloneTasks = tasks ? tasks.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
|
||||
const standaloneTools = tools ? tools.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
|
||||
|
||||
let generatedCount = 0;
|
||||
|
||||
// Generate command files for tasks
|
||||
for (const task of standaloneTasks) {
|
||||
const commandContent = this.generateCommandContent(task, 'task');
|
||||
// Use colon format: bmad:bmm:name.md
|
||||
const flatName = toColonName(task.module, 'tasks', task.name);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
await fs.writeFile(commandPath, commandContent);
|
||||
generatedCount++;
|
||||
}
|
||||
|
||||
// Generate command files for tools
|
||||
for (const tool of standaloneTools) {
|
||||
const commandContent = this.generateCommandContent(tool, 'tool');
|
||||
// Use colon format: bmad:bmm:name.md
|
||||
const flatName = toColonName(tool.module, 'tools', tool.name);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
await fs.writeFile(commandPath, commandContent);
|
||||
generatedCount++;
|
||||
}
|
||||
|
||||
return {
|
||||
generated: generatedCount,
|
||||
tasks: standaloneTasks.length,
|
||||
tools: standaloneTools.length,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate task and tool commands using DASH format (for flat IDEs)
|
||||
* Creates flat files like: bmad-bmm-bmad-help.md
|
||||
*
|
||||
* @param {string} projectDir - Project directory
|
||||
* @param {string} bmadDir - BMAD installation directory
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @returns {Object} Generation results
|
||||
*/
|
||||
async generateDashTaskToolCommands(projectDir, bmadDir, baseCommandsDir) {
|
||||
const tasks = await this.loadTaskManifest(bmadDir);
|
||||
const tools = await this.loadToolManifest(bmadDir);
|
||||
|
||||
// Filter to only standalone items
|
||||
const standaloneTasks = tasks ? tasks.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
|
||||
const standaloneTools = tools ? tools.filter((t) => t.standalone === 'true' || t.standalone === true) : [];
|
||||
|
||||
let generatedCount = 0;
|
||||
|
||||
// Generate command files for tasks
|
||||
for (const task of standaloneTasks) {
|
||||
const commandContent = this.generateCommandContent(task, 'task');
|
||||
// Use dash format: bmad-bmm-name.md
|
||||
const flatName = toDashPath(`${task.module}/tasks/${task.name}.md`);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
await fs.writeFile(commandPath, commandContent);
|
||||
generatedCount++;
|
||||
}
|
||||
|
||||
// Generate command files for tools
|
||||
for (const tool of standaloneTools) {
|
||||
const commandContent = this.generateCommandContent(tool, 'tool');
|
||||
// Use dash format: bmad-bmm-name.md
|
||||
const flatName = toDashPath(`${tool.module}/tools/${tool.name}.md`);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
await fs.writeFile(commandPath, commandContent);
|
||||
generatedCount++;
|
||||
}
|
||||
|
||||
return {
|
||||
generated: generatedCount,
|
||||
tasks: standaloneTasks.length,
|
||||
tools: standaloneTools.length,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Write task/tool artifacts using COLON format (for folder-based IDEs)
|
||||
* Creates flat files like: bmad:bmm:bmad-help.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Task/tool artifacts with relativePath
|
||||
* @returns {number} Count of commands written
|
||||
*/
|
||||
async writeColonArtifacts(baseCommandsDir, artifacts) {
|
||||
let writtenCount = 0;
|
||||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'task' || artifact.type === 'tool') {
|
||||
const commandContent = this.generateCommandContent(artifact, artifact.type);
|
||||
// Use colon format: bmad:module:name.md
|
||||
const flatName = toColonPath(artifact.relativePath);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
await fs.writeFile(commandPath, commandContent);
|
||||
writtenCount++;
|
||||
}
|
||||
}
|
||||
|
||||
return writtenCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write task/tool artifacts using DASH format (for flat IDEs)
|
||||
* Creates flat files like: bmad-bmm-bmad-help.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Task/tool artifacts with relativePath
|
||||
* @returns {number} Count of commands written
|
||||
*/
|
||||
async writeDashArtifacts(baseCommandsDir, artifacts) {
|
||||
let writtenCount = 0;
|
||||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'task' || artifact.type === 'tool') {
|
||||
const commandContent = this.generateCommandContent(artifact, artifact.type);
|
||||
// Use dash format: bmad-module-name.md
|
||||
const flatName = toDashPath(artifact.relativePath);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
await fs.writeFile(commandPath, commandContent);
|
||||
writtenCount++;
|
||||
}
|
||||
}
|
||||
|
||||
return writtenCount;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { TaskToolCommandGenerator };
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ const path = require('node:path');
|
|||
const fs = require('fs-extra');
|
||||
const csv = require('csv-parse/sync');
|
||||
const chalk = require('chalk');
|
||||
const { toColonPath, toDashPath, customAgentColonName, customAgentDashName } = require('./path-utils');
|
||||
|
||||
/**
|
||||
* Generates command files for each workflow in the manifest
|
||||
|
|
@ -237,6 +238,56 @@ When running any workflow:
|
|||
skip_empty_lines: true,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Write workflow command artifacts using COLON format (for folder-based IDEs)
|
||||
* Creates flat files like: bmad:bmm:correct-course.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Workflow artifacts
|
||||
* @returns {number} Count of commands written
|
||||
*/
|
||||
async writeColonArtifacts(baseCommandsDir, artifacts) {
|
||||
let writtenCount = 0;
|
||||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'workflow-command') {
|
||||
// Convert relativePath to colon format: bmm/workflows/correct-course.md → bmad:bmm:correct-course.md
|
||||
const flatName = toColonPath(artifact.relativePath);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
await fs.writeFile(commandPath, artifact.content);
|
||||
writtenCount++;
|
||||
}
|
||||
}
|
||||
|
||||
return writtenCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write workflow command artifacts using DASH format (for flat IDEs)
|
||||
* Creates flat files like: bmad-bmm-correct-course.md
|
||||
*
|
||||
* @param {string} baseCommandsDir - Base commands directory for the IDE
|
||||
* @param {Array} artifacts - Workflow artifacts
|
||||
* @returns {number} Count of commands written
|
||||
*/
|
||||
async writeDashArtifacts(baseCommandsDir, artifacts) {
|
||||
let writtenCount = 0;
|
||||
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type === 'workflow-command') {
|
||||
// Convert relativePath to dash format: bmm/workflows/correct-course.md → bmad-bmm-correct-course.md
|
||||
const flatName = toDashPath(artifact.relativePath);
|
||||
const commandPath = path.join(baseCommandsDir, flatName);
|
||||
await fs.ensureDir(path.dirname(commandPath));
|
||||
await fs.writeFile(commandPath, artifact.content);
|
||||
writtenCount++;
|
||||
}
|
||||
}
|
||||
|
||||
return writtenCount;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { WorkflowCommandGenerator };
|
||||
|
|
|
|||
|
|
@ -0,0 +1,51 @@
|
|||
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    // Test file patterns
    include: ['test/unit/**/*.test.js', 'test/integration/**/*.test.js'],
    exclude: ['test/test-*.js', 'node_modules/**'],

    // Timeouts
    testTimeout: 10_000, // 10s for unit tests
    hookTimeout: 30_000, // 30s for setup/teardown

    // Parallel execution for speed.
    // NOTE: the legacy `threads: true` / `maxThreads` top-level options were
    // removed in Vitest 1.x; pool/poolOptions is the supported form for the
    // vitest ^4 pinned in package.json.
    pool: 'threads',
    poolOptions: {
      threads: {
        maxThreads: 4,
      },
    },

    // Coverage configuration (using V8)
    coverage: {
      provider: 'v8',
      reporter: ['text', 'html', 'lcov', 'json-summary'],

      // Files to include in coverage; `include` also drives which uncovered
      // files are reported (the old `all: true` option no longer exists).
      include: ['tools/**/*.js', 'src/**/*.js'],

      // Files to exclude from coverage
      exclude: [
        'test/**',
        'tools/flattener/**', // Separate concern
        'tools/bmad-npx-wrapper.js', // Entry point
        'tools/build-docs.js', // Documentation tools
        'tools/check-doc-links.js', // Documentation tools
        '**/*.config.js', // Configuration files
      ],

      // Coverage thresholds (fail if below these).
      // Since Vitest 1.x these must be nested under `thresholds`; flat
      // `coverage.statements` etc. are not recognized and were silently ignored.
      thresholds: {
        statements: 85,
        branches: 80,
        functions: 85,
        lines: 85,
      },
    },

    // Global setup file
    setupFiles: ['./test/setup.js'],

    // Environment
    environment: 'node',
  },
});
|
||||
Loading…
Reference in New Issue