#!/usr/bin/env python3
"""
Query GPT-5 Tool for Claude
This tool allows Claude to query OpenAI's GPT-5 models with arbitrary context,
including file contents. Supports the GPT-5, GPT-5-mini, GPT-5-nano, and GPT-5-pro models.
Usage:
query_gpt5.py --query "Your question" [--files file1.py file2.py] [--model gpt-5] [--reasoning-effort medium]
Environment Variables:
OPENAI_API_KEY - Required: Your OpenAI API key
"""
import argparse
import os
import sys
from pathlib import Path
from typing import List, Optional
import json
try:
import requests
except ImportError:
print("Error: 'requests' library is required. Install it with: pip install requests", file=sys.stderr)
sys.exit(1)
class GPT5Query:
"""Handler for querying OpenAI's GPT-5 models."""
VALID_MODELS = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'gpt-5-pro']
VALID_REASONING_EFFORTS = ['minimal', 'low', 'medium', 'high']
CHAT_API_URL = 'https://api.openai.com/v1/chat/completions'
RESPONSES_API_URL = 'https://api.openai.com/v1/responses'
def __init__(
self,
api_key: str,
model: str = 'gpt-5',
reasoning_effort: str = 'medium',
max_tokens: int = 4096
):
"""Initialize the GPT-5 query handler.
Args:
api_key: OpenAI API key
            model: Model to use (gpt-5, gpt-5-mini, gpt-5-nano, or gpt-5-pro)
reasoning_effort: Reasoning effort level (minimal, low, medium, high)
max_tokens: Maximum completion tokens
"""
self.api_key = api_key
self.model = model
self.reasoning_effort = reasoning_effort
self.max_tokens = max_tokens
def read_files(self, file_paths: List[str]) -> str:
"""Read file contents and format them for inclusion in the query.
Args:
file_paths: List of file paths to read
Returns:
Formatted string containing all file contents
"""
if not file_paths:
return ""
content = "\n\n## Referenced Files:\n\n"
for file_path in file_paths:
try:
abs_path = Path(file_path).resolve()
with open(abs_path, 'r', encoding='utf-8') as f:
file_content = f.read()
content += f"\n### File: {file_path}\n```\n{file_content}\n```\n"
except Exception as e:
print(f"Warning: Could not read file {file_path}: {e}", file=sys.stderr)
return content
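    # Example (sketch) of the context block read_files() appends to a query;
    # the file name here is illustrative:
    #
    #   ## Referenced Files:
    #
    #   ### File: server.py
    #   ```
    #   <contents of server.py>
    #   ```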
def query(self, query: str, files: Optional[List[str]] = None) -> dict:
"""Send a query to the GPT-5 API.
Args:
query: The question or prompt to send
files: Optional list of file paths to include as context
Returns:
API response as a dictionary
Raises:
Exception: If the API request fails
"""
# Build the message content
message_content = query
if files:
message_content += self.read_files(files)
# Prepare the request
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.api_key}'
}
# GPT-5-pro uses the Responses API, others use Chat Completions API
if self.model == 'gpt-5-pro':
api_url = self.RESPONSES_API_URL
payload = {
'model': self.model,
'input': message_content,
                # The Responses API nests the effort level under a 'reasoning' object
                'reasoning': {'effort': self.reasoning_effort},
'max_output_tokens': self.max_tokens
}
else:
api_url = self.CHAT_API_URL
payload = {
'model': self.model,
'messages': [
{
'role': 'user',
'content': message_content
}
],
'reasoning_effort': self.reasoning_effort,
'max_completion_tokens': self.max_tokens
}
# Make the request
response = requests.post(
api_url,
headers=headers,
json=payload,
timeout=120
)
if not response.ok:
error_message = f"OpenAI API Error ({response.status_code}): {response.text}"
raise Exception(error_message)
return response.json()
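    # Rough shape of the JSON each endpoint returns (an assumption based on the
    # current APIs; fields may vary), which format_response() below handles:
    #
    #   Chat Completions: {"choices": [{"message": {"content": "..."}}], "usage": {...}}
    #   Responses API:    {"output": [{"type": "message",
    #                                  "content": [{"type": "output_text", "text": "..."}]}],
    #                      "usage": {...}}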
def format_response(self, response_data: dict) -> str:
"""Format the API response for display.
Args:
response_data: The API response dictionary
Returns:
Formatted string for display
"""
output = []
output.append("=" * 80)
output.append(f"GPT-5 Response (Model: {self.model}, Reasoning Effort: {self.reasoning_effort})")
output.append("=" * 80)
output.append("")
# Handle both Chat Completions and Responses API formats
content = None
if 'choices' in response_data and response_data['choices']:
# Chat Completions API format
content = response_data['choices'][0].get('message', {}).get('content', '')
        elif 'output' in response_data:
            # Responses API format: 'output' is a list of output items; text
            # lives in the content parts of 'message' items.
            parts = []
            for item in response_data['output'] or []:
                if isinstance(item, dict) and item.get('type') == 'message':
                    for part in item.get('content', []):
                        if isinstance(part, dict) and part.get('text'):
                            parts.append(part['text'])
            content = '\n'.join(parts) if parts else None
if content:
output.append(content)
else:
output.append("Unexpected response format:")
output.append(json.dumps(response_data, indent=2))
output.append("")
output.append("=" * 80)
# Usage information
usage = response_data.get('usage', {})
total_tokens = usage.get('total_tokens', 'N/A')
prompt_tokens = usage.get('prompt_tokens', usage.get('input_tokens', 'N/A'))
completion_tokens = usage.get('completion_tokens', usage.get('output_tokens', 'N/A'))
output.append(f"Usage: {total_tokens} tokens total ({prompt_tokens} prompt, {completion_tokens} completion)")
output.append("=" * 80)
return '\n'.join(output)
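# Example (sketch): using GPT5Query programmatically instead of via the CLI,
# assuming OPENAI_API_KEY is set in the environment.
#
#     gpt5 = GPT5Query(api_key=os.environ['OPENAI_API_KEY'], model='gpt-5-mini')
#     result = gpt5.query("Summarize this module", files=["query_gpt5.py"])
#     print(gpt5.format_response(result))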
def main():
"""Main entry point for the CLI tool."""
parser = argparse.ArgumentParser(
description='Query GPT-5 models with optional file context',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
# Simple query
query_gpt5.py --query "What is the capital of France?"
# Query with file context
query_gpt5.py --query "Explain this code" --files server.py config.py
# Use a different model with high reasoning
query_gpt5.py --query "Solve this complex problem" --model gpt-5 --reasoning-effort high
# Query with multiple files and custom settings
query_gpt5.py -q "Review this code for bugs" -f app.py utils.py -m gpt-5-mini -r low -t 8000
Environment Variables:
OPENAI_API_KEY Your OpenAI API key (required)
"""
)
parser.add_argument(
'-q', '--query',
required=True,
help='Question or prompt to send to GPT-5'
)
parser.add_argument(
'-f', '--files',
nargs='*',
help='File paths to include as context'
)
parser.add_argument(
'-m', '--model',
choices=GPT5Query.VALID_MODELS,
default='gpt-5',
help='Model to use (default: gpt-5)'
)
parser.add_argument(
'-r', '--reasoning-effort',
choices=GPT5Query.VALID_REASONING_EFFORTS,
default='medium',
help='Reasoning effort level (default: medium)'
)
parser.add_argument(
'-t', '--max-tokens',
type=int,
default=4096,
help='Maximum completion tokens (default: 4096)'
)
args = parser.parse_args()
# Check for API key
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
print("Error: OPENAI_API_KEY environment variable is required", file=sys.stderr)
sys.exit(1)
# Create the query handler
gpt5 = GPT5Query(
api_key=api_key,
model=args.model,
reasoning_effort=args.reasoning_effort,
max_tokens=args.max_tokens
)
try:
# Execute the query
response = gpt5.query(args.query, args.files)
# Format and print the response
output = gpt5.format_response(response)
print(output)
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()