
Ultralytics MCP Server

inference.py (11.5 kB)
import streamlit as st
import subprocess
import os
from pathlib import Path
import torch
from PIL import Image
import tempfile
import shutil

# Page configuration
st.set_page_config(
    page_title="DENTEX AI Inference",
    page_icon="🔍",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Disable warnings
import warnings
warnings.filterwarnings("ignore")


def get_gpu_info():
    """Get GPU information"""
    try:
        if torch.cuda.is_available():
            gpu_name = torch.cuda.get_device_name(0)
            gpu_memory = torch.cuda.get_device_properties(0).total_memory
            gpu_memory_gb = gpu_memory / (1024**3)
            torch.cuda.empty_cache()
            gpu_memory_allocated = torch.cuda.memory_allocated(0)
            gpu_memory_available = gpu_memory - gpu_memory_allocated
            gpu_memory_available_gb = gpu_memory_available / (1024**3)
            return {
                'name': gpu_name,
                'total_memory': gpu_memory_gb,
                'available_memory': gpu_memory_available_gb,
                'available': True
            }
        else:
            return {'name': 'No GPU Available', 'available': False}
    except Exception as e:
        return {'name': f'GPU Error: {str(e)}', 'available': False}


def get_available_models():
    """Get available trained models"""
    models = []

    # Get workspace models
    workspace_models = Path("/workspace/trained_models")
    if workspace_models.exists():
        for model_file in workspace_models.glob("*.pt"):
            models.append({
                'name': model_file.name,
                'path': str(model_file),
                'type': 'Workspace Model',
                'size': f"{model_file.stat().st_size / (1024*1024):.1f} MB"
            })

    # Get YOLO pre-trained models
    yolo_models = [
        {'name': 'yolo11n.pt', 'path': 'yolo11n.pt', 'type': 'YOLO11 Nano', 'size': '5.8 MB'},
        {'name': 'yolo11s.pt', 'path': 'yolo11s.pt', 'type': 'YOLO11 Small', 'size': '19.8 MB'},
        {'name': 'yolo11m.pt', 'path': 'yolo11m.pt', 'type': 'YOLO11 Medium', 'size': '40.8 MB'},
        {'name': 'yolo11l.pt', 'path': 'yolo11l.pt', 'type': 'YOLO11 Large', 'size': '52.8 MB'},
        {'name': 'yolo11x.pt', 'path': 'yolo11x.pt', 'type': 'YOLO11 Extra Large', 'size': '68.2 MB'}
    ]
    models.extend(yolo_models)

    return models


def run_inference(model_path, image_path, confidence, iou_threshold, device):
    """Run YOLO inference on uploaded image"""
    try:
        # Create inference script
        script_content = f"""
import sys
from ultralytics import YOLO
import os

print("Loading model: {model_path}")
model = YOLO('{model_path}')

print("Running inference...")
results = model.predict(
    source='{image_path}',
    conf={confidence},
    iou={iou_threshold},
    device='{device}',
    save=True,
    project='/tmp/inference_results',
    name='latest',
    exist_ok=True,
    show_labels=True,
    show_conf=True,
    line_width=2
)

print("Inference completed!")
print(f"Results saved to: /tmp/inference_results/latest/")

# Print detection results
if results:
    result = results[0]
    if result.boxes is not None:
        print(f"Detected {{len(result.boxes)}} objects:")
        for i, box in enumerate(result.boxes):
            conf = float(box.conf)
            cls_id = int(box.cls)
            class_name = model.names[cls_id] if hasattr(model, 'names') and cls_id in model.names else f"Class {{cls_id}}"
            print(f"  - {{class_name}}: {{conf:.3f}}")
    else:
        print("No objects detected")
else:
    print("No results returned")
"""

        # Write and execute inference script
        with open('/tmp/inference_script.py', 'w') as f:
            f.write(script_content)

        result = subprocess.run(
            ["python", "/tmp/inference_script.py"],
            cwd="/ultralytics",
            capture_output=True,
            text=True
        )

        if result.returncode == 0:
            # Check for result image
            result_image_path = Path("/tmp/inference_results/latest") / Path(image_path).name
            if result_image_path.exists():
                return True, str(result_image_path), result.stdout
            else:
                return False, "Result image not found", result.stdout
        else:
            return False, "Inference failed", result.stderr

    except Exception as e:
        return False, f"Error: {str(e)}", ""


# Header
st.markdown("""
<div style="background: linear-gradient(90deg, #28a745 0%, #1e7e34 100%); color: white; padding: 1rem; border-radius: 10px; margin-bottom: 2rem; text-align: center;">
    <h1>🔍 DENTEX AI Inference</h1>
    <p>Professional Dental X-Ray Detection & Analysis</p>
</div>
""", unsafe_allow_html=True)

# Navigation
col1, col2 = st.columns([1, 4])
with col1:
    if st.button("🏠 Back to Dashboard"):
        st.switch_page("main_dashboard.py")

# Sidebar
with st.sidebar:
    st.markdown("""
    <div style="background: #28a745; color: white; padding: 0.5rem; border-radius: 5px; text-align: center; margin-bottom: 1rem;">
        <h3>Inference Settings</h3>
    </div>
    """, unsafe_allow_html=True)

    # GPU Information
    gpu_info = get_gpu_info()
    if gpu_info['available']:
        st.success(f"🖥️ **GPU:** {gpu_info['name']}")
        st.info(f"💾 **Available VRAM:** {gpu_info['available_memory']:.1f} GB / {gpu_info['total_memory']:.1f} GB")
        device_option = "0"
        device_display = f"GPU: {gpu_info['name']}"
    else:
        st.warning(f"⚠️ **Device:** {gpu_info['name']}")
        device_option = "cpu"
        device_display = "CPU"

    # Model selection
    models = get_available_models()
    if not models:
        st.error("No models available!")
        st.stop()

    model_options = {f"{model['type']} - {model['name']} ({model['size']})": model['path'] for model in models}
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        help="Choose a model for inference"
    )
    selected_model_path = model_options[selected_model_name]

    st.markdown("---")

    # Inference parameters
    confidence = st.slider("Confidence Threshold", min_value=0.0, max_value=1.0, value=0.25, step=0.05,
                           help="Minimum confidence for detections")
    iou_threshold = st.slider("IoU Threshold", min_value=0.0, max_value=1.0, value=0.45, step=0.05,
                              help="IoU threshold for Non-Maximum Suppression")

    st.markdown("---")
    st.info(f"**Device:** {device_display}")

# Main content
col1, col2 = st.columns([1, 1])

with col1:
    st.subheader("📁 Upload Image")

    uploaded_file = st.file_uploader(
        "Choose an image file",
        type=['png', 'jpg', 'jpeg', 'bmp', 'tiff'],
        help="Upload a dental X-ray image for detection"
    )

    if uploaded_file is not None:
        # Display uploaded image
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_column_width=True)

        # Save uploaded file to a temporary path for the inference subprocess
        with tempfile.NamedTemporaryFile(delete=False, suffix=f".{uploaded_file.name.split('.')[-1]}") as tmp_file:
            tmp_file.write(uploaded_file.getvalue())
            temp_image_path = tmp_file.name

        # Run inference button
        if st.button("🔍 Run Detection", type="primary", use_container_width=True):
            with st.spinner("Running inference..."):
                success, result_path, output = run_inference(
                    selected_model_path,
                    temp_image_path,
                    confidence,
                    iou_threshold,
                    device_option
                )

                if success:
                    st.success("✅ Detection completed!")
                    # Store results in session state
                    st.session_state['inference_result'] = result_path
                    st.session_state['inference_output'] = output
                    st.rerun()
                else:
                    st.error(f"❌ Detection failed: {result_path}")
                    if output:
                        with st.expander("Error Details"):
                            st.code(output)

with col2:
    st.subheader("📊 Detection Results")

    if 'inference_result' in st.session_state and st.session_state['inference_result']:
        result_path = st.session_state['inference_result']

        if Path(result_path).exists():
            # Display result image
            result_image = Image.open(result_path)
            st.image(result_image, caption="Detection Results", use_column_width=True)

            # Parse detection output
            if 'inference_output' in st.session_state:
                output = st.session_state['inference_output']

                # Extract detection information
                if "Detected" in output:
                    lines = output.split('\n')
                    detection_lines = [line for line in lines if line.strip().startswith('- ')]

                    if detection_lines:
                        st.subheader("🎯 Detected Objects")
                        for line in detection_lines:
                            clean_line = line.strip().replace('- ', '')
                            if ':' in clean_line:
                                obj_name, confidence_str = clean_line.split(':', 1)
                                st.metric(obj_name.strip(), confidence_str.strip())
                else:
                    st.info("No objects detected in the image")

                # Show raw output
                with st.expander("📋 Raw Output"):
                    st.code(output)

            # Download button for result
            with open(result_path, "rb") as file:
                st.download_button(
                    label="💾 Download Result",
                    data=file.read(),
                    file_name=f"detection_result_{Path(result_path).name}",
                    mime="image/png",
                    use_container_width=True
                )
        else:
            st.warning("Result image not found")
    else:
        st.info("👆 Upload an image and run detection to see results here")

        # Show example
        st.markdown("""
        ### 💡 How to use:
        1. **Select a Model**: Choose from workspace models or pre-trained YOLO models
        2. **Upload Image**: Select a dental X-ray image
        3. **Adjust Settings**: Fine-tune confidence and IoU thresholds
        4. **Run Detection**: Click the detection button
        5. **View Results**: See detected objects with confidence scores
        """)

# Clear results
if st.button("🗑️ Clear Results"):
    if 'inference_result' in st.session_state:
        del st.session_state['inference_result']
    if 'inference_output' in st.session_state:
        del st.session_state['inference_output']
    st.rerun()

# Footer
st.markdown("---")
st.markdown("""
<div style="text-align: center; color: #666; padding: 1rem;">
    <p>🔍 DENTEX AI Inference - Professional Detection with Custom Models</p>
</div>
""", unsafe_allow_html=True)
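
The app is started with the standard Streamlit CLI (streamlit run inference.py). Note that run_inference() does not call Ultralytics in-process: it writes a throwaway script to /tmp and executes it in a subprocess, which keeps CUDA state and any crashes isolated from the Streamlit server. Stripped of that indirection, the generated script reduces to a single predict() call. The sketch below is a minimal standalone version, assuming only that the ultralytics package is installed; the weights file and image path are placeholders:

from ultralytics import YOLO

# Any model listed by get_available_models() works here (placeholder weights)
model = YOLO("yolo11n.pt")

# Same arguments the generated script passes to predict()
results = model.predict(
    source="xray.png",                 # placeholder: path to a dental X-ray image
    conf=0.25,                         # sidebar default confidence threshold
    iou=0.45,                          # sidebar default IoU threshold for NMS
    save=True,                         # write the annotated image to disk
    project="/tmp/inference_results",
    name="latest",
    exist_ok=True,
)

# Walk the detections the same way the generated script does
boxes = results[0].boxes
if boxes is not None:
    for box in boxes:
        print(model.names[int(box.cls)], float(box.conf))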

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/MetehanYasar11/ultralytics_mcp_server'
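
The same endpoint can be queried from code. A minimal sketch using Python's requests library (an assumption; any HTTP client works):

import requests

# Fetch this server's metadata from the Glama MCP directory API
resp = requests.get("https://glama.ai/api/mcp/v1/servers/MetehanYasar11/ultralytics_mcp_server")
resp.raise_for_status()
print(resp.json())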

If you have feedback or need assistance with the MCP directory API, please join our Discord server.