//! Blender MCP Server - PyO3 Extension
//!
//! Architecture: Rust Thread + Channel + Python Polling
//! - MCP server runs in separate Rust thread with tokio runtime
//! - Commands sent to Python via channel
//! - Python polls commands, executes bpy, sends results back
//!
//! Supports multiple instances with unique tags and ports.
mod server;
mod protocol;
mod error;
use crossbeam_channel::{bounded, Receiver, Sender};
use pyo3::prelude::*;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle;
pub use protocol::{Command, Response};
/// Global registry of running instances (tag -> port).
///
/// Entries are inserted by `BlenderMcp::start()` (and updated there with the
/// actual bound port when auto-assign is used) and removed by `stop()` and
/// `Drop` — so the map reflects only instances whose server thread has been
/// launched, not merely constructed.
static INSTANCES: std::sync::LazyLock<Mutex<HashMap<String, u16>>> =
std::sync::LazyLock::new(|| Mutex::new(HashMap::new()));
/// Main MCP server class exposed to Python.
///
/// Owns both ends of two bounded channels: commands flow from the MCP server
/// thread to Python (which polls them and executes bpy calls), and results
/// flow back. The server itself runs on a dedicated thread with its own
/// tokio runtime (see `start()`).
#[pyclass]
pub struct BlenderMcp {
/// Unique instance tag (key into the global `INSTANCES` registry)
tag: String,
/// Commands from MCP to Python (bpy calls); Python side reads via `poll()`
cmd_rx: Receiver<Command>,
// Kept so `start()` can hand a clone of the sender to the server thread.
cmd_tx: Sender<Command>,
/// Results from Python back to MCP; Python side writes via `respond()`
result_tx: Sender<Response>,
// Kept so `start()` can hand a clone of the receiver to the server thread.
result_rx: Receiver<Response>,
/// Server thread handle (Some only after a successful `start()`)
server_handle: Option<JoinHandle<()>>,
/// Running flag, shared with the server thread for graceful shutdown
running: Arc<AtomicBool>,
/// Server port (updated to the actual bound port after `start()`)
port: u16,
}
#[pymethods]
impl BlenderMcp {
    /// Create new BlenderMcp instance.
    ///
    /// * `tag`  - unique identifier (default: "default")
    /// * `port` - HTTP port (default: 8765, use 0 for auto-assign)
    ///
    /// # Errors
    /// Fails if an instance with the same tag is already registered.
    #[new]
    #[pyo3(signature = (tag="default", port=8765))]
    fn new(tag: &str, port: u16) -> PyResult<Self> {
        // Reject tags that are already registered. The registry only tracks
        // *started* instances, so this check is repeated in `start()` (under
        // the same lock as the insert) to close the window where two
        // not-yet-started instances share a tag.
        {
            let instances = INSTANCES.lock().unwrap();
            if instances.contains_key(tag) {
                return Err(pyo3::exceptions::PyRuntimeError::new_err(
                    format!("Instance '{}' already exists. Use a different tag or stop the existing one.", tag)
                ));
            }
        }
        // Bounded channels give backpressure between the MCP server thread
        // and the Python polling loop.
        let (cmd_tx, cmd_rx) = bounded(64);
        let (result_tx, result_rx) = bounded(64);
        Ok(Self {
            tag: tag.to_string(),
            cmd_rx,
            cmd_tx,
            result_tx,
            result_rx,
            server_handle: None,
            running: Arc::new(AtomicBool::new(false)),
            port,
        })
    }

    /// Get instance tag.
    #[getter]
    fn tag(&self) -> &str {
        &self.tag
    }

    /// Get port (may differ from requested if auto-assigned).
    #[getter]
    fn port(&self) -> u16 {
        self.port
    }

    /// Start MCP server in background thread.
    /// Returns actual port (useful when port=0 for auto-assign).
    ///
    /// # Errors
    /// Fails if this instance is already running, if the tag or port is
    /// claimed by another instance, or if the server thread fails to bind.
    fn start(&mut self) -> PyResult<u16> {
        if self.running.load(Ordering::SeqCst) {
            return Err(pyo3::exceptions::PyRuntimeError::new_err(
                format!("Server '{}' already running on port {}", self.tag, self.port)
            ));
        }
        // Register the instance. Tag and port collision checks happen under
        // the same lock as the insert so check+insert is atomic.
        {
            let mut instances = INSTANCES.lock().unwrap();
            if instances.contains_key(&self.tag) {
                return Err(pyo3::exceptions::PyRuntimeError::new_err(
                    format!("Instance '{}' already exists. Use a different tag or stop the existing one.", self.tag)
                ));
            }
            if instances.values().any(|&p| p == self.port && self.port != 0) {
                return Err(pyo3::exceptions::PyRuntimeError::new_err(
                    format!("Port {} already in use by another instance", self.port)
                ));
            }
            instances.insert(self.tag.clone(), self.port);
        }
        self.running.store(true, Ordering::SeqCst);
        let cmd_tx = self.cmd_tx.clone();
        let result_rx = self.result_rx.clone();
        let running = self.running.clone();
        let port = self.port;
        let tag = self.tag.clone();
        // Channel to get actual port back from server thread.
        let (port_tx, port_rx) = std::sync::mpsc::channel();
        let handle = std::thread::spawn(move || {
            eprintln!("[BlenderMcp] Thread started, creating tokio runtime...");
            let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime");
            eprintln!("[BlenderMcp] Tokio runtime created, starting server...");
            rt.block_on(async {
                match server::run_mcp_server(port, cmd_tx, result_rx, running, port_tx, &tag).await {
                    Ok(()) => eprintln!("[BlenderMcp] MCP server '{}' stopped gracefully", tag),
                    Err(e) => eprintln!("[BlenderMcp] MCP server '{}' error: {}", tag, e),
                }
            });
            eprintln!("[BlenderMcp] Thread ending");
        });
        // Wait for the actual bound port. recv() fails only if the server
        // thread dropped `port_tx` without sending (e.g. it failed to bind).
        eprintln!("[BlenderMcp] Waiting for port from server thread...");
        let actual_port = match port_rx.recv() {
            Ok(p) => p,
            Err(e) => {
                eprintln!("[BlenderMcp] Failed to receive port: {}", e);
                // Roll back the running flag and registry entry so a later
                // start() attempt is not wedged by stale state.
                self.running.store(false, Ordering::SeqCst);
                if let Ok(mut instances) = INSTANCES.lock() {
                    instances.remove(&self.tag);
                }
                return Err(pyo3::exceptions::PyRuntimeError::new_err("Failed to start server"));
            }
        };
        eprintln!("[BlenderMcp] Got port: {}", actual_port);
        self.port = actual_port;
        self.server_handle = Some(handle);
        // Update registry with actual port (matters when port=0 was requested).
        {
            let mut instances = INSTANCES.lock().unwrap();
            instances.insert(self.tag.clone(), actual_port);
        }
        Ok(actual_port)
    }

    /// Stop MCP server.
    fn stop(&mut self) {
        eprintln!("[BlenderMcp] stop() called for '{}'", self.tag);
        self.running.store(false, Ordering::SeqCst);
        // Remove from registry so the tag/port become reusable immediately.
        {
            let mut instances = INSTANCES.lock().unwrap();
            instances.remove(&self.tag);
        }
        // Don't wait for thread to finish - it will stop when it notices running=false
        // The graceful shutdown has 100ms poll interval so it should stop soon
        if let Some(_handle) = self.server_handle.take() {
            eprintln!("[BlenderMcp] Server thread will stop shortly (not waiting)");
            // Note: We don't join() because axum graceful shutdown can take time
            // The thread will exit on its own
        }
    }

    /// Poll for pending command (non-blocking).
    /// Returns: Optional dict with {id, method, params} or None.
    ///
    /// None is returned both when the queue is empty and when the server side
    /// has disconnected; use `is_running()` to distinguish the two.
    fn poll(&self, py: Python<'_>) -> PyResult<Option<PyObject>> {
        match self.cmd_rx.try_recv() {
            Ok(cmd) => {
                let dict = pyo3::types::PyDict::new(py);
                dict.set_item("id", cmd.id)?;
                dict.set_item("method", &cmd.method)?;
                dict.set_item("params", &cmd.params)?;
                Ok(Some(dict.into()))
            }
            // Empty or Disconnected both map to "nothing to do right now".
            Err(_) => Ok(None),
        }
    }

    /// Send response back to MCP server.
    /// result: JSON string of the result
    /// error: Optional error message
    ///
    /// # Errors
    /// Fails if the server-side receiver has been dropped.
    #[pyo3(signature = (id, result=None, error=None))]
    fn respond(&self, id: u64, result: Option<String>, error: Option<String>) -> PyResult<()> {
        let response = Response { id, result, error };
        self.result_tx
            .send(response)
            .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(e.to_string()))?;
        Ok(())
    }

    /// Check if server is running.
    fn is_running(&self) -> bool {
        self.running.load(Ordering::SeqCst)
    }

    /// Get server URL.
    fn url(&self) -> String {
        format!("http://127.0.0.1:{}/mcp", self.port)
    }
}
impl Drop for BlenderMcp {
    /// Best-effort cleanup when the Python object is garbage-collected:
    /// deregister the tag and signal the server thread to shut down.
    /// Deliberately does not call `stop()`, which may already have run.
    fn drop(&mut self) {
        // Drop the registry entry if it is still present. A poisoned lock
        // is ignored — we are tearing down and cannot do anything about it.
        if let Ok(mut registry) = INSTANCES.lock() {
            registry.remove(&self.tag);
        }
        // Clearing the flag twice (here and in stop()) is harmless.
        self.running.store(false, Ordering::SeqCst);
    }
}
/// Get list of running instances as dict {tag: port}
#[pyfunction]
fn list_instances(py: Python<'_>) -> PyResult<PyObject> {
    // Copy the registry into a fresh Python dict while holding the lock.
    let registry = INSTANCES.lock().unwrap();
    let result = pyo3::types::PyDict::new(py);
    for (name, bound_port) in registry.iter() {
        result.set_item(name, bound_port)?;
    }
    Ok(result.into())
}
/// Check if instance with tag exists
#[pyfunction]
fn instance_exists(tag: &str) -> bool {
    // A tag is registered only while its server has been started.
    INSTANCES.lock().unwrap().contains_key(tag)
}
/// Get port for instance by tag
#[pyfunction]
fn get_instance_port(tag: &str) -> Option<u16> {
    // `copied()` converts the borrowed `&u16` from the map into an owned value.
    INSTANCES.lock().unwrap().get(tag).copied()
}
/// Python module definition.
///
/// Exposes the `BlenderMcp` class plus the module-level helpers for
/// inspecting the global instance registry.
#[pymodule]
fn blender_mcp(m: &Bound<'_, PyModule>) -> PyResult<()> {
m.add_class::<BlenderMcp>()?;
m.add_function(wrap_pyfunction!(list_instances, m)?)?;
m.add_function(wrap_pyfunction!(instance_exists, m)?)?;
m.add_function(wrap_pyfunction!(get_instance_port, m)?)?;
Ok(())
}