name: LLM Hello World

on:
  workflow_dispatch:
    inputs:
      pr_number:
        description: "PR number to review"
        required: true
        type: number
      phase:
        description: "Which phase to run"
        required: true
        type: choice
        options:
          - hello-world
          - token-probe
        default: hello-world

env:
  LLM_ENDPOINT: "https://models.github.ai/inference/chat/completions"
  LLM_MODEL: "openai/gpt-4.1"

permissions:
  contents: read
  pull-requests: write
  models: read

jobs:
  hello-world:
    if: ${{ inputs.phase == 'hello-world' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Fetch PR details
        id: pr
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "Fetching PR #${{ inputs.pr_number }}..."

          # Fetch PR title and body; guard the assignment itself, since the
          # default shell runs with -e and would exit before a $? check fires
          if ! PR_JSON=$(gh pr view ${{ inputs.pr_number }} --json title,body,author); then
            echo "::error::Failed to fetch PR #${{ inputs.pr_number }}. Does it exist?"
            exit 1
          fi

          PR_TITLE=$(echo "$PR_JSON" | jq -r '.title')
          PR_BODY=$(echo "$PR_JSON" | jq -r '.body // "No description provided"')
          PR_AUTHOR=$(echo "$PR_JSON" | jq -r '.author.login')

          echo "PR Title: $PR_TITLE"
          echo "PR Author: $PR_AUTHOR"

          # Store for next step (heredoc-style delimiters handle multiline values)
          {
            echo "title<<EOF"
            echo "$PR_TITLE"
            echo "EOF"
          } >> "$GITHUB_OUTPUT"
          {
            echo "body<<EOF"
            echo "$PR_BODY"
            echo "EOF"
          } >> "$GITHUB_OUTPUT"
          echo "author=$PR_AUTHOR" >> "$GITHUB_OUTPUT"

      - name: Call LLM for summary
        id: llm
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "Calling LLM to summarize PR..."

          # Build the prompt
          PROMPT="Summarize this PR in one sentence.
          Title: ${{ steps.pr.outputs.title }}
          Description: ${{ steps.pr.outputs.body }}"

          # Escape for JSON
          PROMPT_ESCAPED=$(echo "$PROMPT" | jq -Rs .)

          # Call the LLM
          RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$LLM_ENDPOINT" \
            -H "Authorization: Bearer $GITHUB_TOKEN" \
            -H "Content-Type: application/json" \
            -d "{
              \"model\": \"$LLM_MODEL\",
              \"messages\": [
                {\"role\": \"system\", \"content\": \"You are a helpful assistant that summarizes pull requests concisely.\"},
                {\"role\": \"user\", \"content\": $PROMPT_ESCAPED}
              ],
              \"max_tokens\": 150
            }")

          # Extract HTTP status code (last line)
          HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
          BODY=$(echo "$RESPONSE" | sed '$d')

          echo "HTTP Status: $HTTP_CODE"

          if [ "$HTTP_CODE" != "200" ]; then
            echo "::error::LLM API call failed with status $HTTP_CODE"
            echo "Response: $BODY"
            exit 1
          fi

          # Extract the summary from the response
          SUMMARY=$(echo "$BODY" | jq -r '.choices[0].message.content')
          echo "LLM Summary: $SUMMARY"

          {
            echo "summary<<EOF"
            echo "$SUMMARY"
            echo "EOF"
          } >> "$GITHUB_OUTPUT"

      - name: Post comment to PR
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SUMMARY: ${{ steps.llm.outputs.summary }}
          AUTHOR: ${{ steps.pr.outputs.author }}
          PR_NUM: ${{ inputs.pr_number }}
        run: |
          echo "Posting comment to PR #$PR_NUM..."

          # Post the summary back to the PR (the exact comment wording below
          # is an assumption)
          gh pr comment "$PR_NUM" --body "$(cat <<EOF
          ## LLM Summary

          $SUMMARY

          _Automated one-sentence summary of @$AUTHOR's pull request._
          EOF
          )"
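
  # token-probe sends progressively larger prompts to locate the model's
  # effective input ceiling: the payload doubles each round, tokens are
  # approximated as chars / 4, and probing stops at the first non-200 response.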
  token-probe:
    if: ${{ inputs.phase == 'token-probe' }}
    runs-on: ubuntu-latest
    steps:
      - name: Probe input-size ceiling
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "=== Token Probe ==="
          echo ""
          echo "| Size (chars) | ~Tokens | HTTP | Result |"
          echo "|--------------|---------|------|--------|"

          # NOTE: the probe sizes and filler-prompt generation are assumptions;
          # the doubling sequence follows from the SIZE/2 ceiling report below
          for SIZE in 1000 2000 4000 8000 16000 32000 64000 128000 256000; do
            # ~4 characters per token heuristic
            APPROX_TOKENS=$((SIZE / 4))

            # Build a filler prompt of exactly $SIZE characters
            PROMPT=$(head -c "$SIZE" /dev/zero | tr '\0' 'x')

            # Build the JSON payload on stdin so large prompts don't hit the
            # kernel's per-argument length limit
            PAYLOAD=$(printf '%s' "$PROMPT" | jq -Rsc \
              --arg model "$LLM_MODEL" \
              '{model: $model, messages: [{role: "user", content: .}], max_tokens: 10}')

            RESPONSE=$(printf '%s' "$PAYLOAD" | \
              curl -s -w "\n%{http_code}" -X POST "$LLM_ENDPOINT" \
                -H "Authorization: Bearer $GITHUB_TOKEN" \
                -H "Content-Type: application/json" \
                --data-binary @- 2>/dev/null)

            HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
            BODY=$(echo "$RESPONSE" | sed '$d')

            if [ "$HTTP_CODE" = "200" ]; then
              RESULT="OK"
            else
              # Extract error message if present
              ERROR_MSG=$(echo "$BODY" | jq -r '.error.message // .message // "Unknown error"' 2>/dev/null | head -c 50)
              RESULT="FAIL: $ERROR_MSG"

              # Log full error for debugging
              echo ""
              echo "::warning::Failed at ${SIZE} chars (~${APPROX_TOKENS} tokens)"
              echo "Full error response:"
              echo "$BODY" | jq . 2>/dev/null || echo "$BODY"
              echo ""
            fi

            echo "| $SIZE | ~$APPROX_TOKENS | $HTTP_CODE | $RESULT |"

            # Stop probing after first failure to avoid hammering the API
            if [ "$HTTP_CODE" != "200" ]; then
              echo ""
              echo "=== Ceiling Found ==="
              echo "Last successful size: $((SIZE / 2)) chars (~$((SIZE / 8)) tokens)"
              echo "First failure at: $SIZE chars (~$APPROX_TOKENS tokens)"
              break
            fi

            # Small delay to avoid rate limiting
            sleep 1
          done

          echo ""
          echo "=== Probe Complete ==="
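
# Usage sketch: workflow_dispatch inputs can be supplied from the GitHub CLI.
# The workflow is addressed by its name above; <PR_NUMBER> is a placeholder.
#   gh workflow run "LLM Hello World" -f pr_number=<PR_NUMBER> -f phase=hello-world
#   gh workflow run "LLM Hello World" -f pr_number=<PR_NUMBER> -f phase=token-probe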