trust.rs (20.4 kB), by 8b-is
use anyhow::Result;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Trust verification for AI models and routes
pub struct TrustVerifier {
    /// Known good model signatures
    known_models: HashMap<String, ModelSignature>,
    /// Route fingerprints
    route_analyzer: RouteAnalyzer,
    /// Behavioral baseline for each model
    model_baselines: HashMap<String, ModelBaseline>,
    /// Verification cache
    cache: VerificationCache,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ModelSignature {
    /// Model identifier
    pub model_id: String,
    /// Version/build info
    pub version: String,
    /// Expected provider
    pub provider: String,
    /// Behavioral fingerprint
    pub behavioral_fingerprint: BehavioralFingerprint,
    /// Known good hashes
    pub signature_hashes: Vec<String>,
    /// Timing profile
    pub timing_profile: TimingProfile,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BehavioralFingerprint {
    /// Response style markers
    pub style_markers: HashMap<String, f32>,
    /// Linguistic patterns
    pub linguistic_patterns: Vec<String>,
    /// Reasoning depth
    pub reasoning_depth: f32,
    /// Safety adherence
    pub safety_score: f32,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TimingProfile {
    /// Expected latency range
    pub latency_range_ms: (u32, u32),
    /// Token generation rate
    pub tokens_per_second: (f32, f32),
    /// Pause patterns
    pub pause_pattern: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ModelBaseline {
    /// Model ID
    pub model_id: String,
    /// Baseline behavioral vectors
    pub baseline_vectors: ModelVectors,
    /// Acceptable variance
    pub variance_threshold: f32,
    /// Last updated
    pub last_updated: DateTime<Utc>,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ModelVectors {
    /// Helpfulness level
    pub helpfulness: f32,
    /// Technical accuracy
    pub accuracy: f32,
    /// Response length tendency
    pub verbosity: f32,
    /// Creativity level
    pub creativity: f32,
    /// Safety consciousness
    pub safety_bias: f32,
}

/// Analyzes routing patterns to detect manipulation
pub struct RouteAnalyzer {
    /// Known provider endpoints
    known_endpoints: HashMap<String, EndpointProfile>,
    /// Latency fingerprints
    latency_db: HashMap<String, LatencyFingerprint>,
    /// Header patterns
    header_patterns: HashMap<String, Vec<String>>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct EndpointProfile {
    /// Provider name
    pub provider: String,
    /// Expected domains
    pub domains: Vec<String>,
    /// Expected headers
    pub expected_headers: Vec<String>,
    /// SSL fingerprint
    pub ssl_fingerprint: Option<String>,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LatencyFingerprint {
    /// Provider
    pub provider: String,
    /// Geographic region
    pub region: String,
    /// Expected latency profile
    pub profile: LatencyProfile,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LatencyProfile {
    /// First byte time
    pub ttfb_ms: (u32, u32), // min, max
    /// Streaming chunk delays
    pub chunk_delay_ms: (u32, u32),
    /// Completion time ranges
    pub completion_ranges: Vec<CompletionRange>,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CompletionRange {
    /// Token count range
    pub tokens: (u32, u32),
    /// Expected time range
    pub time_ms: (u32, u32),
}

/// Caches recent verifications
struct VerificationCache {
    entries: HashMap<String, CachedVerification>,
    max_age: chrono::Duration,
}

#[derive(Debug, Clone)]
struct CachedVerification {
    result: TrustResult,
    timestamp: DateTime<Utc>,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TrustResult {
    /// Overall trust score
    pub trust_score: f32,
    /// Verification details
    pub verification: VerificationDetails,
    /// Detected issues
    pub issues: Vec<TrustIssue>,
    /// Recommendations
    pub recommendations: Vec<String>,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct VerificationDetails {
    /// Model verification
    pub model_verified: bool,
    /// Route verification
    pub route_verified: bool,
    /// Behavioral match
    pub behavioral_match: f32,
    /// Timing match
    pub timing_match: f32,
    /// Signature chain status
    pub chain_status: ChainStatus,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum ChainStatus {
    Valid,
    Broken { at_block: u64 },
    Missing,
    Tampered,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TrustIssue {
    /// Issue type
    pub issue_type: IssueType,
    /// Severity
    pub severity: Severity,
    /// Description
    pub description: String,
    /// Evidence
    pub evidence: HashMap<String, String>,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum IssueType {
    UnknownModel,
    VersionMismatch,
    RouteManipulation,
    BehavioralAnomaly,
    TimingAnomaly,
    ChainBreak,
    ProviderMismatch,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum Severity {
    Low,
    Medium,
    High,
    Critical,
}

impl TrustVerifier {
    pub fn new() -> Self {
        let mut known_models = HashMap::new();

        // Add known Claude models
        known_models.insert("claude-3.5-opus".to_string(), ModelSignature {
            model_id: "claude-3.5-opus".to_string(),
            version: "2025-07-10".to_string(),
            provider: "Anthropic".to_string(),
            behavioral_fingerprint: BehavioralFingerprint {
                style_markers: HashMap::from([
                    ("helpful".to_string(), 0.9),
                    ("harmless".to_string(), 0.95),
                    ("honest".to_string(), 0.9),
                    ("nuanced".to_string(), 0.8),
                ]),
                linguistic_patterns: vec![
                    "I understand".to_string(),
                    "Let me".to_string(),
                    "Here's".to_string(),
                ],
                reasoning_depth: 0.85,
                safety_score: 0.95,
            },
            signature_hashes: vec![],
            timing_profile: TimingProfile {
                latency_range_ms: (100, 500),
                tokens_per_second: (50.0, 150.0),
                pause_pattern: "natural".to_string(),
            },
        });

        // Add GPT-4 models
        known_models.insert("gpt-4o".to_string(), ModelSignature {
            model_id: "gpt-4o".to_string(),
            version: "2024-05-13".to_string(),
            provider: "OpenAI".to_string(),
            behavioral_fingerprint: BehavioralFingerprint {
                style_markers: HashMap::from([
                    ("helpful".to_string(), 0.85),
                    ("creative".to_string(), 0.8),
                    ("technical".to_string(), 0.9),
                ]),
                linguistic_patterns: vec![
                    "Certainly".to_string(),
                    "I'll".to_string(),
                    "Here's how".to_string(),
                ],
                reasoning_depth: 0.8,
                safety_score: 0.85,
            },
            signature_hashes: vec![],
            timing_profile: TimingProfile {
                latency_range_ms: (150, 600),
                tokens_per_second: (40.0, 120.0),
                pause_pattern: "streaming".to_string(),
            },
        });

        Self {
            known_models,
            route_analyzer: RouteAnalyzer::new(),
            model_baselines: HashMap::new(),
            cache: VerificationCache {
                entries: HashMap::new(),
                max_age: chrono::Duration::minutes(5),
            },
        }
    }

    /// Verify an AI response
    pub async fn verify_response(&mut self, response: &AIResponse) -> Result<TrustResult> {
        // Check cache first
        let cache_key = format!("{}-{}", response.model_id, response.request_id);
        if let Some(cached) = self.cache.get(&cache_key) {
            return Ok(cached);
        }

        let mut issues = Vec::new();
        let mut trust_score = 1.0;

        // Verify model
        let model_verified = if let Some(known_model) = self.known_models.get(&response.model_id) {
            self.verify_model_signature(response, known_model, &mut issues)
        } else {
            issues.push(TrustIssue {
                issue_type: IssueType::UnknownModel,
                severity: Severity::High,
                description: format!("Unknown model: {}", response.model_id),
                evidence: HashMap::new(),
            });
            trust_score *= 0.5;
            false
        };

        // Verify route
        let route_verified = self.route_analyzer.verify_route(&response.route_info, &mut issues).await;
        if !route_verified {
            trust_score *= 0.7;
        }

        // Verify behavior
        let behavioral_match = self.verify_behavior(response, &mut issues).await;
        trust_score *= behavioral_match;

        // Verify timing
        let timing_match = self.verify_timing(response, &mut issues);
        trust_score *= timing_match;

        // Check signature chain
        let chain_status = self.verify_chain(response);
        if !matches!(chain_status, ChainStatus::Valid) {
            trust_score *= 0.6;
        }

        let recommendations = self.generate_recommendations(&issues);

        let result = TrustResult {
            trust_score,
            verification: VerificationDetails {
                model_verified,
                route_verified,
                behavioral_match,
                timing_match,
                chain_status,
            },
            issues,
            recommendations,
        };

        // Cache result
        self.cache.put(cache_key, result.clone());

        Ok(result)
    }

    /// Verify model signature
    fn verify_model_signature(
        &self,
        response: &AIResponse,
        known_model: &ModelSignature,
        issues: &mut Vec<TrustIssue>,
    ) -> bool {
        // Check version
        if response.model_version != known_model.version {
            issues.push(TrustIssue {
                issue_type: IssueType::VersionMismatch,
                severity: Severity::Medium,
                description: format!(
                    "Model version mismatch: expected {}, got {}",
                    known_model.version, response.model_version
                ),
                evidence: HashMap::from([
                    ("expected".to_string(), known_model.version.clone()),
                    ("actual".to_string(), response.model_version.clone()),
                ]),
            });
            return false;
        }

        // Check provider
        if response.provider != known_model.provider {
            issues.push(TrustIssue {
                issue_type: IssueType::ProviderMismatch,
                severity: Severity::High,
                description: format!(
                    "Provider mismatch: expected {}, got {}",
                    known_model.provider, response.provider
                ),
                evidence: HashMap::new(),
            });
            return false;
        }

        true
    }

    /// Verify behavioral patterns
    async fn verify_behavior(&self, response: &AIResponse, issues: &mut Vec<TrustIssue>) -> f32 {
        if let Some(baseline) = self.model_baselines.get(&response.model_id) {
            let vectors = self.extract_vectors(response);
            let distance = self.calculate_vector_distance(&baseline.baseline_vectors, &vectors);

            if distance > baseline.variance_threshold {
                issues.push(TrustIssue {
                    issue_type: IssueType::BehavioralAnomaly,
                    severity: Severity::Medium,
                    description: format!(
                        "Behavioral divergence: {:.2} (threshold: {:.2})",
                        distance, baseline.variance_threshold
                    ),
                    evidence: HashMap::new(),
                });
                return 1.0 - distance;
            }

            1.0 - distance
        } else {
            // No baseline, can't verify
            0.8
        }
    }

    /// Verify timing patterns
    fn verify_timing(&self, response: &AIResponse, issues: &mut Vec<TrustIssue>) -> f32 {
        if let Some(known_model) = self.known_models.get(&response.model_id) {
            let profile = &known_model.timing_profile;

            // Check latency
            if response.latency_ms < profile.latency_range_ms.0
                || response.latency_ms > profile.latency_range_ms.1
            {
                issues.push(TrustIssue {
                    issue_type: IssueType::TimingAnomaly,
                    severity: Severity::Low,
                    description: format!(
                        "Latency outside expected range: {}ms",
                        response.latency_ms
                    ),
                    evidence: HashMap::from([
                        ("expected_min".to_string(), profile.latency_range_ms.0.to_string()),
                        ("expected_max".to_string(), profile.latency_range_ms.1.to_string()),
                        ("actual".to_string(), response.latency_ms.to_string()),
                    ]),
                });
                return 0.8;
            }

            1.0
        } else {
            0.9
        }
    }

    /// Verify signature chain
    fn verify_chain(&self, response: &AIResponse) -> ChainStatus {
        if response.signature_chain.is_empty() {
            return ChainStatus::Missing;
        }

        // Verify each block links to previous
        for i in 1..response.signature_chain.len() {
            let prev_block = &response.signature_chain[i - 1];
            let curr_block = &response.signature_chain[i];

            if curr_block.previous_hash != prev_block.hash {
                return ChainStatus::Broken { at_block: i as u64 };
            }

            // Verify hash integrity
            if !self.verify_block_hash(curr_block) {
                return ChainStatus::Tampered;
            }
        }

        ChainStatus::Valid
    }

    /// Verify individual block hash
    fn verify_block_hash(&self, block: &SignatureBlock) -> bool {
        use sha2::{Sha256, Digest};

        let mut hasher = Sha256::new();
        hasher.update(block.previous_hash.as_bytes());
        hasher.update(block.content.as_bytes());
        hasher.update(block.timestamp.to_rfc3339().as_bytes());

        let calculated_hash = format!("{:x}", hasher.finalize());
        calculated_hash == block.hash
    }

    /// Extract behavioral vectors from response
    fn extract_vectors(&self, response: &AIResponse) -> ModelVectors {
        // Analyze response content
        let content_len = response.content.len() as f32;
        let verbosity = (content_len / 1000.0).min(1.0);

        // Simple heuristics for now
        ModelVectors {
            helpfulness: 0.8, // Default
            accuracy: 0.85,   // Default
            verbosity,
            creativity: 0.7,  // Default
            safety_bias: 0.9, // Default
        }
    }

    /// Calculate vector distance
    fn calculate_vector_distance(&self, v1: &ModelVectors, v2: &ModelVectors) -> f32 {
        let diffs = [
            (v1.helpfulness - v2.helpfulness).abs(),
            (v1.accuracy - v2.accuracy).abs(),
            (v1.verbosity - v2.verbosity).abs(),
            (v1.creativity - v2.creativity).abs(),
            (v1.safety_bias - v2.safety_bias).abs(),
        ];

        diffs.iter().sum::<f32>() / diffs.len() as f32
    }

    /// Generate recommendations based on issues
    fn generate_recommendations(&self, issues: &[TrustIssue]) -> Vec<String> {
        let mut recommendations = Vec::new();

        for issue in issues {
            match issue.issue_type {
                IssueType::UnknownModel => {
                    recommendations.push("Request direct connection to known model".to_string());
                },
                IssueType::RouteManipulation => {
                    recommendations.push("Switch to direct API access".to_string());
                },
                IssueType::BehavioralAnomaly => {
                    recommendations.push("Verify model identity with challenge questions".to_string());
                },
                _ => {}
            }
        }

        recommendations
    }
}

impl RouteAnalyzer {
    fn new() -> Self {
        let mut known_endpoints = HashMap::new();

        // Anthropic endpoints
        known_endpoints.insert("anthropic".to_string(), EndpointProfile {
            provider: "Anthropic".to_string(),
            domains: vec!["api.anthropic.com".to_string()],
            expected_headers: vec![
                "anthropic-version".to_string(),
                "x-api-key".to_string(),
            ],
            ssl_fingerprint: None,
        });

        // OpenAI endpoints
        known_endpoints.insert("openai".to_string(), EndpointProfile {
            provider: "OpenAI".to_string(),
            domains: vec!["api.openai.com".to_string()],
            expected_headers: vec![
                "openai-version".to_string(),
                "authorization".to_string(),
            ],
            ssl_fingerprint: None,
        });

        Self {
            known_endpoints,
            latency_db: HashMap::new(),
            header_patterns: HashMap::new(),
        }
    }

    async fn verify_route(&self, route_info: &RouteInfo, issues: &mut Vec<TrustIssue>) -> bool {
        // Check for known proxy services
        for provider in &route_info.provider_chain {
            if provider.contains("openrouter") || provider.contains("proxy") {
                issues.push(TrustIssue {
                    issue_type: IssueType::RouteManipulation,
                    severity: Severity::Medium,
                    description: format!("Proxy service detected: {}", provider),
                    evidence: HashMap::from([
                        ("provider".to_string(), provider.clone()),
                    ]),
                });
                return false;
            }
        }

        true
    }
}

impl VerificationCache {
    fn get(&self, key: &str) -> Option<TrustResult> {
        if let Some(cached) = self.entries.get(key) {
            if Utc::now() - cached.timestamp < self.max_age {
                return Some(cached.result.clone());
            }
        }
        None
    }

    fn put(&mut self, key: String, result: TrustResult) {
        self.entries.insert(key, CachedVerification {
            result,
            timestamp: Utc::now(),
        });

        // Clean old entries
        self.entries.retain(|_, v| Utc::now() - v.timestamp < self.max_age);
    }
}

/// AI response structure for verification
#[derive(Debug, Serialize, Deserialize)]
pub struct AIResponse {
    pub model_id: String,
    pub model_version: String,
    pub provider: String,
    pub request_id: String,
    pub content: String,
    pub route_info: RouteInfo,
    pub latency_ms: u32,
    pub signature_chain: Vec<SignatureBlock>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct RouteInfo {
    pub provider_chain: Vec<String>,
    pub headers: HashMap<String, String>,
    pub ssl_info: Option<SSLInfo>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SSLInfo {
    pub certificate_chain: Vec<String>,
    pub cipher_suite: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SignatureBlock {
    pub hash: String,
    pub previous_hash: String,
    pub content: String,
    pub timestamp: DateTime<Utc>,
}
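
A minimal usage sketch follows, assuming a Tokio test runtime and the same sha2 crate already used by verify_block_hash. It builds a two-block signature chain with that function's hashing scheme (SHA-256 over previous_hash, content, and the RFC 3339 timestamp), wraps it in an AIResponse for the known "claude-3.5-opus" signature, and calls verify_response. The make_block helper and all literal field values are illustrative assumptions, not fixtures from this repository.

#[cfg(test)]
mod trust_sketch_tests {
    use super::*;
    use sha2::{Digest, Sha256};

    // Hypothetical helper: builds a block whose hash matches the scheme in
    // verify_block_hash (previous_hash + content + RFC 3339 timestamp).
    fn make_block(previous_hash: &str, content: &str) -> SignatureBlock {
        let timestamp = Utc::now();
        let mut hasher = Sha256::new();
        hasher.update(previous_hash.as_bytes());
        hasher.update(content.as_bytes());
        hasher.update(timestamp.to_rfc3339().as_bytes());
        SignatureBlock {
            hash: format!("{:x}", hasher.finalize()),
            previous_hash: previous_hash.to_string(),
            content: content.to_string(),
            timestamp,
        }
    }

    #[tokio::test]
    async fn verify_response_on_known_model_direct_route() {
        let mut verifier = TrustVerifier::new();

        // Chain each block to the hash of the previous one.
        let genesis = make_block("", "request accepted");
        let reply = make_block(&genesis.hash, "response body");

        // Illustrative response that matches the built-in claude-3.5-opus signature.
        let response = AIResponse {
            model_id: "claude-3.5-opus".to_string(),
            model_version: "2025-07-10".to_string(), // matches the known signature
            provider: "Anthropic".to_string(),
            request_id: "req-001".to_string(),
            content: "Here's a concise answer.".to_string(),
            route_info: RouteInfo {
                provider_chain: vec!["anthropic".to_string()], // no proxy hops
                headers: HashMap::new(),
                ssl_info: None,
            },
            latency_ms: 250, // inside the (100, 500) window
            signature_chain: vec![genesis, reply],
        };

        let result = verifier.verify_response(&response).await.unwrap();
        assert!(result.verification.model_verified);
        assert!(result.verification.route_verified);
        assert!(matches!(result.verification.chain_status, ChainStatus::Valid));
        assert!(result.issues.is_empty());
    }
}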
