Gemini MCP Server

by lucky-dersan
api_requests.py (5.14 kB)
import os

from dotenv import load_dotenv
from openai import OpenAI
import httpx

load_dotenv()


def setup_proxy():
    """Read proxy settings from the environment."""
    http_proxy = os.getenv("http_proxy") or os.getenv("HTTP_PROXY")
    https_proxy = os.getenv("https_proxy") or os.getenv("HTTPS_PROXY")

    proxies = {}
    if http_proxy:
        proxies['http'] = http_proxy
    if https_proxy:
        proxies['https'] = https_proxy

    # If no environment variables are set and a proxy is still needed,
    # a default can be hard-coded here. It is commented out to avoid
    # unnecessary proxy connections.
    # if not proxies:
    #     proxies = {
    #         'http': 'http://127.0.0.1:17890',
    #         'https': 'http://127.0.0.1:17890'
    #     }

    return proxies


def get_config():
    """Read the API configuration from the environment."""
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise ValueError("The GEMINI_API_KEY environment variable is not set")

    # Read the model and base URL from the environment, with defaults.
    model = os.getenv("GEMINI_MODEL", "gemini-2.5-flash")
    base_url = os.getenv(
        "GEMINI_BASE_URL",
        "https://generativelanguage.googleapis.com/v1beta/openai/"
    )

    print(f"📝 Model: {model}")
    print(f"🔗 API base URL: {base_url}")

    return api_key, model, base_url


def call_gemini_api(prompt, stream=False, thinking=False):
    """
    Call the Gemini API through its OpenAI-compatible endpoint.

    :param prompt: The text prompt to send to the model.
    :param stream: True streams the response chunk by chunk; False returns it in one piece.
    :param thinking: True requests high reasoning effort ("thinking mode"); False omits it.
    """
    # Load the configuration.
    api_key, model, base_url = get_config()

    # Set up a proxy (if one is configured).
    proxies = setup_proxy()
    http_client = None

    try:
        # Create the OpenAI client, routing through the proxy when one is set.
        if proxies:
            # Prefer the HTTPS proxy, since API calls are usually HTTPS.
            proxy_url = proxies.get('https') or proxies.get('http')
            http_client = httpx.Client(proxy=proxy_url)
            client = OpenAI(
                api_key=api_key,
                base_url=base_url,
                http_client=http_client
            )
            print(f"🌐 Using proxy: {proxy_url}")
        else:
            client = OpenAI(
                api_key=api_key,
                base_url=base_url,
            )

        messages = [
            {"role": "system", "content": "You are a helpful AI assistant. Please answer questions in Chinese."},
            {"role": "user", "content": prompt}
        ]

        # Only pass reasoning_effort in thinking mode. The original code sent
        # "high" on both branches, which made the thinking flag a no-op.
        request_kwargs = {"model": model, "messages": messages}
        if thinking:
            request_kwargs["reasoning_effort"] = "high"  # options: "low", "medium", "high"

        if stream:
            # Streaming response.
            response = client.chat.completions.create(stream=True, **request_kwargs)
            print("\n--- AI answer ---")
            for chunk in response:
                if chunk.choices[0].delta.content is not None:
                    print(chunk.choices[0].delta.content, end="", flush=True)
            print("\n✅ Streaming response complete.")
        else:
            # Non-streaming response.
            response = client.chat.completions.create(**request_kwargs)
            print("✅ Request succeeded!")
            print("\n--- AI answer ---")
            print(response.choices[0].message.content)

    except Exception as e:
        print(f"❌ Request failed: {e}")
        if proxies:
            proxy_url = proxies.get('https') or proxies.get('http')
            print(f"Current proxy: {proxy_url}")
        # Print the full traceback for easier debugging.
        import traceback
        print(f"Traceback: {traceback.format_exc()}")
    finally:
        # Make sure the httpx client is closed.
        if http_client:
            http_client.close()


if __name__ == "__main__":
    my_prompt = "Please explain in detail what a large language model (LLM) is, and give examples of where they are used."

    # Example 1: non-thinking mode
    call_gemini_api(my_prompt, stream=False, thinking=False)

    print("\n" + "=" * 50 + "\n")

    # Example 2: thinking mode
    call_gemini_api(my_prompt, stream=True, thinking=True)
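
The script takes all of its settings from environment variables, optionally loaded from a .env file by python-dotenv. A minimal sketch of such a file, using the variable names the code actually reads; the key and proxy values are placeholders, and only GEMINI_API_KEY is required:

# .env — illustrative values; GEMINI_API_KEY is required, the rest are optional
GEMINI_API_KEY=your-api-key-here
GEMINI_MODEL=gemini-2.5-flash
GEMINI_BASE_URL=https://generativelanguage.googleapis.com/v1beta/openai/
# Optional proxy, picked up by setup_proxy(); port matches the commented-out default
https_proxy=http://127.0.0.1:17890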

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/lucky-dersan/gemini-mcp-server'
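
If you prefer to query the directory from Python, here is a minimal sketch using httpx; the endpoint is the one shown above, and the assumption that it returns JSON (whose exact schema is not documented here) is ours:

import httpx

# Fetch this server's directory entry; the JSON response shape is an assumption.
url = "https://glama.ai/api/mcp/v1/servers/lucky-dersan/gemini-mcp-server"
response = httpx.get(url)
response.raise_for_status()
print(response.json())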

If you have feedback or need assistance with the MCP directory API, please join our Discord server.