#!/usr/bin/env python
"""
Tool Protection Demo - OpenAI Agents SDK
This example demonstrates how Enkrypt AI Guardrails monitors
and protects tool calls with the OpenAI Agents SDK.
Usage:
python demo_tool_protection.py
"""
import asyncio
import os
import sys
# Add parent directory to path for imports
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
try:
    from agents import Agent, Runner, function_tool
    AGENTS_AVAILABLE = True
except ImportError:
    AGENTS_AVAILABLE = False
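
# The local hook module (importable thanks to the sys.path tweak above)
# supplies the run-lifecycle callbacks that Runner.run invokes via hooks=;
# per the summary at the end of this script, it checks tool inputs in
# on_tool_start and tool outputs in on_tool_end.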
from enkrypt_guardrails_hook import EnkryptRunHooks, GuardrailsViolationError
# Define some example tools
if AGENTS_AVAILABLE:
    @function_tool
    def get_weather(city: str) -> str:
        """Get the current weather for a city."""
        return f"The weather in {city} is sunny and 72°F"

    @function_tool
    def search_database(query: str) -> str:
        """Search the database with a query."""
        return f"Found 5 results for: {query}"

    @function_tool
    def execute_command(command: str) -> str:
        """Execute a shell command (dangerous!)."""
        # This is a sensitive tool that should be monitored
        return f"Command '{command}' would be executed (blocked for demo)"

    @function_tool
    def write_file(filename: str, content: str) -> str:
        """Write content to a file (dangerous!)."""
        # This is a sensitive tool that should be monitored
        return f"Would write to {filename}: {content[:20]}..."

async def main():
    print("=" * 70)
    print("Enkrypt AI Guardrails - Tool Protection Demo")
    print("OpenAI Agents SDK Edition")
    print("=" * 70)

    if not AGENTS_AVAILABLE:
        print("\nError: OpenAI Agents SDK is not installed.")
        print("Install it with: pip install openai-agents")
        return
    # Create hooks with tool monitoring
    hooks = EnkryptRunHooks(
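        # block_on_violation=True raises GuardrailsViolationError (caught in
        # the test loop below) instead of merely logging; check_tool_results=True
        # also audits tool outputs in on_tool_end, per the summary at the end.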
        block_on_violation=True,
        check_tool_results=True,
        sensitive_tools=[
            "execute_command",
            "write_file",
"delete_*",
"shell_*",
]
)
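    # To observe without blocking, one could presumably pass
    # block_on_violation=False instead and inspect
    # hooks.get_current_violations() after each run (see the test loop below).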

    # Create agent with tools
    agent = Agent(
        name="Tool-Equipped Assistant",
        instructions=(
            "You are a helpful assistant with access to tools. "
            "Use tools when needed to help the user."
        ),
        tools=[get_weather, search_database, execute_command, write_file],
    )

    print("\nAgent created with tools:")
    print(" - get_weather (safe)")
    print(" - search_database (safe)")
    print(" - execute_command (sensitive - monitored)")
    print(" - write_file (sensitive - monitored)")
    print("\n" + "-" * 70)
    # Test cases
    test_prompts = [
        {
            "name": "Safe tool use",
            "prompt": "What's the weather in San Francisco?",
        },
        {
            "name": "Database query",
            "prompt": "Search the database for 'customer orders'",
        },
        {
            "name": "Potentially dangerous command",
            "prompt": "Run the command 'ls -la' to list files",
        },
        {
            "name": "File write attempt",
            "prompt": "Write 'Hello World' to a file called test.txt",
        },
        {
            "name": "Injection via tool input",
            "prompt": "Search the database for: '; DROP TABLE users; --'",
        },
    ]
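
    # Run each prompt through the guarded agent; hooks.reset() clears
    # per-run state so violations and usage recorded during one test don't
    # leak into the next.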
    for i, test in enumerate(test_prompts, 1):
        hooks.reset()
        print(f"\n[Test {i}] {test['name']}")
        print(f" Prompt: {test['prompt']}")
        try:
            result = await Runner.run(
                agent,
                hooks=hooks,
                input=test["prompt"],
            )
            print(" Status: COMPLETED")
            print(f" Response: {str(result.final_output)[:60]}...")
            violations = hooks.get_current_violations()
            if violations:
                print(f" Warnings: {len(violations)} violation(s) detected")
                for v in violations:
                    print(f" - {v['detector']}")
        except GuardrailsViolationError as e:
            print(" Status: BLOCKED")
            print(f" Reason: {str(e)[:60]}...")
        except Exception as e:
            print(" Status: ERROR")
            print(f" Error: {str(e)[:60]}...")

        # Show token usage
        usage = hooks.get_token_usage()
        if usage["event_count"] > 0:
            print(f" Events: {usage['event_count']}")
print("\n" + "=" * 70)
print("SUMMARY")
print("=" * 70)
print("Tool calls are monitored at two points:")
print(" 1. on_tool_start - Validates tool inputs before execution")
print(" 2. on_tool_end - Audits tool outputs after execution")
print("\nSensitive tools trigger security alerts even if not blocked.")
print("Check ~/openai_agents/guardrails_logs/ for detailed audit logs.")

if __name__ == "__main__":
    asyncio.run(main())