# BMAD-METHOD/.github/workflows/llm-hello-world.yaml
---
name: LLM Hello World

# Manually triggered experiment workflow: point it at a PR and pick a phase.
on:
  workflow_dispatch:
    inputs:
      pr_number:
        description: "PR number to review"
        required: true
        type: number
      phase:
        description: "Which phase to run"
        required: true
        type: choice
        options:
          - hello-world
          - token-probe
          - triage-poc
        default: hello-world

env:
  # GitHub Models OpenAI-compatible chat-completions endpoint and model id.
  LLM_ENDPOINT: "https://models.github.ai/inference/chat/completions"
  LLM_MODEL: "openai/gpt-4.1"

# Least-privilege token: read repo, comment on PRs, call GitHub Models.
permissions:
  contents: read
  pull-requests: write
  models: read
jobs:
  # Phase 0a: fetch a PR, ask the LLM for a one-sentence summary, post it
  # back as a PR comment.
  hello-world:
    if: ${{ inputs.phase == 'hello-world' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Fetch PR details
        id: pr
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # Pass the workflow input through env instead of interpolating
          # ${{ }} into the script body (script-injection hardening).
          PR_NUM: ${{ inputs.pr_number }}
        run: |
          echo "Fetching PR #${PR_NUM}..."
          # Fetch PR title and body. The Actions default shell is `bash -e`,
          # so a separate `$?` check after a failed assignment is dead code
          # (the script aborts first) -- test the command directly instead.
          if ! PR_JSON=$(gh pr view "$PR_NUM" --json title,body,author); then
            echo "::error::Failed to fetch PR #${PR_NUM}. Does it exist?"
            exit 1
          fi
          PR_TITLE=$(echo "$PR_JSON" | jq -r '.title')
          PR_BODY=$(echo "$PR_JSON" | jq -r '.body // "No description provided"')
          PR_AUTHOR=$(echo "$PR_JSON" | jq -r '.author.login')
          echo "PR Title: $PR_TITLE"
          echo "PR Author: $PR_AUTHOR"
          # Store for later steps; heredoc output syntax handles multiline.
          {
            echo "title<<EOF"
            echo "$PR_TITLE"
            echo "EOF"
          } >> "$GITHUB_OUTPUT"
          {
            echo "body<<EOF"
            echo "$PR_BODY"
            echo "EOF"
          } >> "$GITHUB_OUTPUT"
          echo "author=$PR_AUTHOR" >> "$GITHUB_OUTPUT"

      - name: Call LLM for summary
        id: llm
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # PR title/body are untrusted user content. Expanding them with
          # ${{ }} inside `run:` would let a crafted PR body inject shell
          # code; env intermediates keep them as inert data.
          PR_TITLE: ${{ steps.pr.outputs.title }}
          PR_BODY: ${{ steps.pr.outputs.body }}
        run: |
          echo "Calling LLM to summarize PR..."
          # Build the prompt from env vars (printf keeps literal newlines).
          PROMPT=$(printf 'Summarize this PR in one sentence.\nTitle: %s\nDescription:\n%s' \
            "$PR_TITLE" "$PR_BODY")
          # JSON-escape the whole prompt as a single string value.
          PROMPT_ESCAPED=$(printf '%s' "$PROMPT" | jq -Rs .)
          # Call the LLM; -w appends the HTTP status on its own final line.
          RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$LLM_ENDPOINT" \
            -H "Authorization: Bearer $GITHUB_TOKEN" \
            -H "Content-Type: application/json" \
            -d "{
              \"model\": \"$LLM_MODEL\",
              \"messages\": [
                {\"role\": \"system\", \"content\": \"You are a helpful assistant that summarizes pull requests concisely.\"},
                {\"role\": \"user\", \"content\": $PROMPT_ESCAPED}
              ],
              \"max_tokens\": 150
            }")
          # Split status code (last line) from the response body.
          HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
          BODY=$(echo "$RESPONSE" | sed '$d')
          echo "HTTP Status: $HTTP_CODE"
          if [ "$HTTP_CODE" != "200" ]; then
            echo "::error::LLM API call failed with status $HTTP_CODE"
            echo "Response: $BODY"
            exit 1
          fi
          # Extract the summary text from the chat-completions response.
          SUMMARY=$(echo "$BODY" | jq -r '.choices[0].message.content')
          echo "LLM Summary: $SUMMARY"
          {
            echo "summary<<EOF"
            echo "$SUMMARY"
            echo "EOF"
          } >> "$GITHUB_OUTPUT"

      - name: Post comment to PR
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SUMMARY: ${{ steps.llm.outputs.summary }}
          AUTHOR: ${{ steps.pr.outputs.author }}
          PR_NUM: ${{ inputs.pr_number }}
        run: |
          echo "Posting comment to PR #$PR_NUM..."
          gh pr comment "$PR_NUM" --body "$(cat <<EOF
          ## LLM Hello World Test
          **Model:** \`$LLM_MODEL\`
          **PR Author:** @$AUTHOR
          ### Summary
          $SUMMARY
          ---
          *This comment was generated by the LLM Hello World workflow (Phase 0a)*
          EOF
          )"
          echo "Comment posted successfully!"
token-probe:
if: ${{ inputs.phase == 'token-probe' }}
runs-on: ubuntu-latest
steps:
- name: Probe token limits
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "=== Token Limit Probe ==="
echo "Testing progressively larger payloads to find the ceiling"
echo ""
# Test sizes in characters (roughly 4 chars per token)
SIZES=(2000 4000 8000 16000 32000 64000)
# Generate filler text (lorem ipsum style)
generate_filler() {
local size=$1
python3 -c "print('The quick brown fox jumps over the lazy dog. ' * ($size // 45 + 1))[:$size]"
}
echo "| Size (chars) | ~Tokens | HTTP Status | Result |"
echo "|--------------|---------|-------------|--------|"
for SIZE in "${SIZES[@]}"; do
APPROX_TOKENS=$((SIZE / 4))
# Generate payload of specified size
FILLER=$(generate_filler $SIZE)
PROMPT="Respond with exactly one word: 'received'. Here is padding text to test token limits: $FILLER"
PROMPT_ESCAPED=$(echo "$PROMPT" | jq -Rs .)
# Call the LLM
RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$LLM_ENDPOINT" \
-H "Authorization: Bearer $GITHUB_TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"model\": \"$LLM_MODEL\",
\"messages\": [
{\"role\": \"user\", \"content\": $PROMPT_ESCAPED}
],
\"max_tokens\": 10
}" 2>/dev/null)
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
BODY=$(echo "$RESPONSE" | sed '$d')
if [ "$HTTP_CODE" = "200" ]; then
RESULT="OK"
else
# Extract error message if present
ERROR_MSG=$(echo "$BODY" | jq -r '.error.message // .message // "Unknown error"' 2>/dev/null | head -c 50)
RESULT="FAIL: $ERROR_MSG"
# Log full error for debugging
echo ""
echo "::warning::Failed at ${SIZE} chars (~${APPROX_TOKENS} tokens)"
echo "Full error response:"
echo "$BODY" | jq . 2>/dev/null || echo "$BODY"
echo ""
fi
echo "| $SIZE | ~$APPROX_TOKENS | $HTTP_CODE | $RESULT |"
# Stop probing after first failure to avoid hammering the API
if [ "$HTTP_CODE" != "200" ]; then
echo ""
echo "=== Ceiling Found ==="
echo "Last successful size: $((SIZE / 2)) chars (~$((SIZE / 8)) tokens)"
echo "First failure at: $SIZE chars (~$APPROX_TOKENS tokens)"
break
fi
# Small delay to avoid rate limiting
sleep 1
done
echo ""
echo "=== Probe Complete ==="