Add GitHub Models as default inference provider by sgoedecke · Pull Request #481 · ultralytics/actions · GitHub
[go: up one dir, main page]
Skip to content

Add GitHub Models as default inference provider #481

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/format.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ permissions:
contents: write # Modify code in PRs
pull-requests: write # Add comments and labels to PRs
issues: write # Add comments and labels to issues
models: read # For AI-driven PR summaries, labels and comments

jobs:
actions:
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/publish.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ jobs:
runs-on: ubuntu-latest
permissions:
contents: write
models: read
outputs:
increment: ${{ steps.check_pypi.outputs.increment }}
current_tag: ${{ steps.check_pypi.outputs.current_tag }}
Expand Down
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,7 @@ To integrate this action into your Ultralytics repository:
contents: write # Modify code in PRs
pull-requests: write # Add comments and labels to PRs
issues: write # Add comments and labels to issues
models: read # Use GitHub Models for inference (optional)

jobs:
actions:
Expand All @@ -90,7 +91,7 @@ To integrate this action into your Ultralytics repository:
brave_api_key: ${{ secrets.BRAVE_API_KEY }} # Used for broken link resolution
```

3. **Customize:** Adjust the `runs-on` runner and the boolean flags (`labels`, `python`, `prettier`, `swift`, `spelling`, `links`, `summary`) based on your project's needs. Remember to add your `OPENAI_API_KEY` as a secret in your repository settings if you enable `labels` or `summary`.
3. **Customize:** Adjust the `runs-on` runner and the boolean flags (`labels`, `python`, `prettier`, `swift`, `spelling`, `links`, `summary`) based on your project's needs. If you enable `labels` or `summary`, you'll use LLM inference: by default via free [GitHub Models](https://github.com/features/models), or alternatively you can add your `OPENAI_API_KEY` as a secret in your repository settings.

## 💡 Contribute

Expand Down
22 changes: 17 additions & 5 deletions actions/utils/openai_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@
from actions.utils.common_utils import check_links_in_string

# Credential and model configuration, resolved once at import time.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # if set, OpenAI API is preferred
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")  # fallback credential for GitHub Models inference
# NOTE(review): this reads the OPENAI_MODEL env var, not a GITHUB_MODEL one — if a user
# overrides OPENAI_MODEL (e.g. "gpt-4.1-2025-04-14"), the GitHub Models path would use it
# verbatim without the required "openai/" provider prefix. Confirm this is intentional.
GITHUB_MODEL = os.getenv("OPENAI_MODEL", "openai/gpt-4.1")
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4.1-2025-04-14")  # default OpenAI model
SYSTEM_PROMPT_ADDITION = """
Guidance:
Expand All @@ -33,18 +35,28 @@ def get_completion(
remove: List[str] = (" @giscus[bot]",), # strings to remove from response
temperature: float = 0.7, # default temperature value
) -> str:
"""Generates a completion using OpenAI's API based on input messages."""
assert OPENAI_API_KEY, "OpenAI API key is required."
url = "https://api.openai.com/v1/chat/completions"
headers = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"}
"""
Generates a completion using OpenAI's API or GitHub Models API based on available credentials.

Prioritizes OpenAI API if OPENAI_API_KEY is available, otherwise falls back to GitHub Models using GITHUB_TOKEN.
"""
if OPENAI_API_KEY:
url = "https://api.openai.com/v1/chat/completions"
headers = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"}
model = OPENAI_MODEL
else:
url = "https://models.github.ai/inference/chat/completions"
headers = {"Authorization": f"Bearer {GITHUB_TOKEN}", "Content-Type": "application/json"}
model = GITHUB_MODEL

if messages and messages[0].get("role") == "system":
messages[0]["content"] += "\n\n" + SYSTEM_PROMPT_ADDITION

content = ""
max_retries = 2
for attempt in range(max_retries + 2): # attempt = [0, 1, 2, 3], 2 random retries before asking for no links
data = {
"model": OPENAI_MODEL,
"model": model,
"messages": messages,
"seed": int(time.time() * 1000),
"temperature": temperature,
Expand Down
56 changes: 56 additions & 0 deletions tests/test_openai_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,3 +63,59 @@ def test_get_completion_with_link_check(mock_check_links, mock_post):

assert result == "Response with https://example.com link"
mock_check_links.assert_called_once()


@patch("requests.post")
def test_get_completion_with_github_token(mock_post):
    """Verify get_completion falls back to the GitHub Models API when only GITHUB_TOKEN is set."""
    # Fake a successful HTTP response from the GitHub Models endpoint.
    fake_response = MagicMock()
    fake_response.status_code = 200
    fake_response.json.return_value = {"choices": [{"message": {"content": "Test response from GitHub Models"}}]}
    mock_post.return_value = fake_response

    chat_messages = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "Hello"},
    ]

    # Simulate an environment with a GitHub token but no OpenAI API key.
    with (
        patch.dict("os.environ", {"GITHUB_TOKEN": "test-github-token"}, clear=True),
        patch("actions.utils.openai_utils.OPENAI_API_KEY", None),
        patch("actions.utils.openai_utils.GITHUB_TOKEN", "test-github-token"),
    ):
        result = get_completion(chat_messages, check_links=False)

    assert result == "Test response from GitHub Models"
    mock_post.assert_called_once()

    # The request must target the GitHub Models endpoint with the GitHub token.
    request_args = mock_post.call_args
    assert request_args[0][0] == "https://models.github.ai/inference/chat/completions"
    assert "Bearer test-github-token" in request_args[1]["headers"]["Authorization"]

    # GitHub Models requires the "openai/" provider prefix on the model name.
    payload = request_args[1]["json"]
    assert payload["model"].startswith("openai/")


@patch("requests.post")
def test_get_completion_openai_preferred_over_github(mock_post):
    """Verify the OpenAI API takes priority when both OpenAI and GitHub credentials exist."""
    # Fake a successful HTTP response so get_completion returns without retries.
    fake_response = MagicMock()
    fake_response.status_code = 200
    fake_response.json.return_value = {"choices": [{"message": {"content": "Test response"}}]}
    mock_post.return_value = fake_response

    chat_messages = [{"role": "user", "content": "Hello"}]

    # Provide both credentials; OpenAI should win.
    with (
        patch.dict(
            "os.environ", {"OPENAI_API_KEY": "test-openai-key", "GITHUB_TOKEN": "test-github-token"}, clear=True
        ),
        patch("actions.utils.openai_utils.OPENAI_API_KEY", "test-openai-key"),
        patch("actions.utils.openai_utils.GITHUB_TOKEN", "test-github-token"),
    ):
        get_completion(chat_messages, check_links=False)

    # The request must have gone to the OpenAI endpoint with the OpenAI key.
    request_args = mock_post.call_args
    assert request_args[0][0] == "https://api.openai.com/v1/chat/completions"
    assert "Bearer test-openai-key" in request_args[1]["headers"]["Authorization"]
Loading
0