name: LLM Hello World

on:
  workflow_dispatch:
    inputs:
      pr_number:
        description: "PR number to review"
        required: true
        type: number
      phase:
        description: "Which phase to run"
        required: true
        type: choice
        options:
          - hello-world
          - token-probe
          - triage-poc
        default: hello-world

env:
  LLM_ENDPOINT: "https://models.github.ai/inference/chat/completions"
  LLM_MODEL: "openai/gpt-4.1"

permissions:
  contents: read
  pull-requests: write
  models: read

jobs:
  hello-world:
    if: ${{ inputs.phase == 'hello-world' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Fetch PR details
        id: pr
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PR_NUMBER: ${{ inputs.pr_number }}
        run: |
          echo "Fetching PR #${PR_NUMBER}..."

          # Fetch PR title and body; fail the step if the PR cannot be found.
          # (Testing $? after a command substitution never fires under the
          # default `bash -e` shell, so test the command directly instead.)
          if ! PR_JSON=$(gh pr view "$PR_NUMBER" --json title,body,author); then
            echo "::error::Failed to fetch PR #${PR_NUMBER}. Does it exist?"
            exit 1
          fi

          PR_TITLE=$(echo "$PR_JSON" | jq -r '.title')
          PR_BODY=$(echo "$PR_JSON" | jq -r '.body // "No description provided"')
          PR_AUTHOR=$(echo "$PR_JSON" | jq -r '.author.login')

          echo "PR Title: $PR_TITLE"
          echo "PR Author: $PR_AUTHOR"

          # Store for the next step (heredoc delimiters handle multiline values)
          {
            echo "title<<EOF"
            echo "$PR_TITLE"
            echo "EOF"
          } >> $GITHUB_OUTPUT

          {
            echo "body<<EOF"
            echo "$PR_BODY"
            echo "EOF"
          } >> $GITHUB_OUTPUT

          echo "author=$PR_AUTHOR" >> $GITHUB_OUTPUT
      - name: Call LLM for summary
        id: llm
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PR_TITLE: ${{ steps.pr.outputs.title }}
          PR_BODY: ${{ steps.pr.outputs.body }}
        run: |
          echo "Calling LLM to summarize PR..."

          # Build the prompt from environment variables rather than inline
          # ${{ }} expressions, so quotes or backticks in the PR text cannot
          # break out of (or inject into) this shell script.
          PROMPT="Summarize this PR in one sentence.

          Title: $PR_TITLE

          Description:
          $PR_BODY"

          # Escape for JSON
          PROMPT_ESCAPED=$(echo "$PROMPT" | jq -Rs .)

          # Call the LLM
          RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$LLM_ENDPOINT" \
            -H "Authorization: Bearer $GITHUB_TOKEN" \
            -H "Content-Type: application/json" \
            -d "{
              \"model\": \"$LLM_MODEL\",
              \"messages\": [
                {\"role\": \"system\", \"content\": \"You are a helpful assistant that summarizes pull requests concisely.\"},
                {\"role\": \"user\", \"content\": $PROMPT_ESCAPED}
              ],
              \"max_tokens\": 150
            }")

          # Extract HTTP status code (last line) and response body (the rest)
          HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
          BODY=$(echo "$RESPONSE" | sed '$d')

          echo "HTTP Status: $HTTP_CODE"

          if [ "$HTTP_CODE" != "200" ]; then
            echo "::error::LLM API call failed with status $HTTP_CODE"
            echo "Response: $BODY"
            exit 1
          fi

          # Extract the summary from the response
          SUMMARY=$(echo "$BODY" | jq -r '.choices[0].message.content')

          echo "LLM Summary: $SUMMARY"

          {
            echo "summary<<EOF"
            echo "$SUMMARY"
            echo "EOF"
          } >> $GITHUB_OUTPUT
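      # Note: the step above assumes an OpenAI-style chat completions response,
      # i.e. the generated text lives at .choices[0].message.content; if the
      # GitHub Models endpoint returns a different shape, the jq filter (and
      # the HTTP 200 check) would need adjusting.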
      - name: Post comment to PR
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SUMMARY: ${{ steps.llm.outputs.summary }}
          AUTHOR: ${{ steps.pr.outputs.author }}
          PR_NUM: ${{ inputs.pr_number }}
        run: |
          echo "Posting comment to PR #$PR_NUM..."

          gh pr comment "$PR_NUM" --body "$(cat <<EOF
          ## LLM Hello World Test

          **Model:** \`$LLM_MODEL\`
          **PR Author:** @$AUTHOR

          ### Summary
          $SUMMARY

          ---
          *This comment was generated by the LLM Hello World workflow (Phase 0a)*
          EOF
          )"

          echo "Comment posted successfully!"
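# A minimal sketch of how this workflow might be triggered manually with the
# GitHub CLI, assuming the file lives at .github/workflows/llm-hello-world.yml
# (the filename and the example input values are assumptions, not shown here):
#
#   gh workflow run llm-hello-world.yml -f pr_number=123 -f phase=hello-world
#
# `gh workflow run` passes workflow_dispatch inputs via repeated -f/--field flags.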