# Code created by Siddharth Ahuja: www.github.com/ahujasid © 2025
import re
import bpy
import mathutils
import math
import bmesh
import json
import threading
import socket
import time
try:
import requests
except Exception:
requests = None
import tempfile
import traceback
import os
import shutil
import zipfile
from bpy.props import IntProperty
import io
from datetime import datetime
import hashlib, hmac, base64
import os.path as osp
import secrets
from contextlib import redirect_stdout, suppress
from typing import Any, Dict, List, Optional, Tuple
# Add-on registration metadata read by Blender's add-on system.
bl_info = {
    "name": "Blender MCP",
    "author": "BlenderMCP",
    "version": (1, 2),
    "blender": (3, 0, 0),  # minimum supported Blender version
    "location": "View3D > Sidebar > BlenderMCP",
    "description": "Connect Blender to Claude via MCP",
    "category": "Interface",
}
# Optional built-in trial key for the Hyper3D/Rodin integration (empty = disabled).
RODIN_FREE_TRIAL_KEY = ""
# Key under which this add-on's preferences are registered.
ADDON_IDNAME = __name__
MAX_MESSAGE_BYTES = 10 * 1024 * 1024  # 10MB cap on a single incoming socket message
SOCKET_RECV_TIMEOUT_S = 1.0  # recv() timeout so client threads can notice shutdown
DSL_VERSION = "1"  # version string expected in execute_ops requests
def _is_num(x: Any) -> bool:
return isinstance(x, (int, float)) and not isinstance(x, bool)
def _validate_vec3(value: Any, *, path: str) -> Optional[Tuple[float, float, float]]:
    """Validate an optional [x, y, z] list and return it as a float 3-tuple.

    Returns None when value is None; raises ValueError for any other shape.
    """
    if value is None:
        return None
    well_formed = (
        isinstance(value, list)
        and len(value) == 3
        and all(_is_num(component) for component in value)
    )
    if not well_formed:
        raise ValueError(f"{path} must be [x,y,z] numbers")
    x, y, z = value
    return (float(x), float(y), float(z))
def _validate_rgba(value: Any, *, path: str) -> Optional[Tuple[float, float, float, float]]:
    """Validate an optional [r, g, b, a] list; every channel must lie in [0, 1].

    Returns None when value is None; raises ValueError otherwise on bad input.
    """
    if value is None:
        return None
    well_formed = (
        isinstance(value, list)
        and len(value) == 4
        and all(_is_num(component) for component in value)
    )
    if not well_formed:
        raise ValueError(f"{path} must be [r,g,b,a] numbers")
    rgba = tuple(float(component) for component in value)
    for i, channel in enumerate(rgba):
        if not (0.0 <= channel <= 1.0):
            raise ValueError(f"{path}[{i}] must be between 0 and 1")
    return rgba
def _validate_ops_request(request: Any) -> Dict[str, Any]:
    """Validate and normalize an execute_ops request envelope.

    Checks dsl_version, transaction mode, the dry_run flag, and that every op
    is an object whose "type" is on the allowlist. Per-op parameter strictness
    is deferred to execution time.

    Returns the normalized request dict.
    Raises ValueError on any structural or allowlist violation.
    """
    if not isinstance(request, dict):
        raise ValueError("request must be an object")
    dsl_version = request.get("dsl_version", DSL_VERSION)
    if dsl_version != DSL_VERSION:
        raise ValueError(f"dsl_version must be '{DSL_VERSION}'")
    transaction = request.get("transaction", "atomic")
    if transaction not in ("none", "atomic"):
        raise ValueError("transaction must be 'none' or 'atomic'")
    dry_run = bool(request.get("dry_run", False))
    ops = request.get("ops")
    if not isinstance(ops, list) or not ops:
        raise ValueError("ops must be a non-empty list")
    if len(ops) > 200:
        raise ValueError("ops exceeds max length (200)")
    # Keep this in sync with server-side allowlist. Add-on is the source of
    # truth at runtime. Built once here (loop-invariant) instead of per op.
    allowed_op_types = frozenset({
        "deselect_all",
        "select",
        "create_primitive",
        "delete",
        "duplicate",
        "rename",
        "set_transform",
        "apply_transform",
        "snap_to_ground",
        "set_origin",
        # Mesh utilities
        "set_shading",
        "recalculate_normals",
        "merge_by_distance",
        "triangulate",
        "join_objects",
        "separate_mesh",
        "convert_to_mesh",
        # Visibility / organization
        "set_visibility",
        "set_collection_visibility",
        "isolate_objects",
        # UV / baking
        "uv_smart_project",
        "uv_unwrap",
        "uv_pack_islands",
        "bake_maps",
        "ensure_collection",
        "move_to_collection",
        "set_parent",
        "clear_parent",
        "ensure_material",
        "set_material_params",
        "assign_material",
        "set_texture_maps",
        "create_light",
        "set_light_params",
        "create_camera",
        "set_camera_params",
        "set_active_camera",
        "frame_camera",
        "camera_look_at",
        "create_turntable_animation",
        "set_world_background",
        "set_world_hdri",
        "import_model",
        "export_scene",
        "set_render_settings",
        "render_still",
        "render_animation",
        "add_modifier",
        "remove_modifier",
        "apply_modifier",
        "boolean_operation",
        "purge_orphans",
        "pack_external_data",
        "save_blend",
    })
    normalized_ops: List[Dict[str, Any]] = []
    for idx, op in enumerate(ops):
        if not isinstance(op, dict):
            raise ValueError(f"ops[{idx}] must be an object")
        op_type = op.get("type")
        if not isinstance(op_type, str):
            raise ValueError(f"ops[{idx}].type must be a string")
        if op_type not in allowed_op_types:
            raise ValueError(f"Unsupported op type: {op_type}")
        # Minimal normalization; per-op strictness happens during execution.
        normalized_ops.append(op)
    return {
        "dsl_version": DSL_VERSION,
        "transaction": transaction,
        "dry_run": dry_run,
        "ops": normalized_ops,
    }
def _require_requests(feature_name: str):
    """Raise RuntimeError if the optional 'requests' package is unavailable."""
    if requests is not None:
        return
    raise RuntimeError(
        f"{feature_name} requires the 'requests' Python package, but it is not available in this Blender Python.\n"
        "Install requests into Blender's Python, or vendor it with the addon."
    )
def _get_prefs():
    """Return this add-on's preferences object, or None if not registered."""
    addon = bpy.context.preferences.addons.get(ADDON_IDNAME)
    return addon.preferences if addon is not None else None
def _get_req_headers():
    """Build default HTTP headers with this add-on's User-Agent for outbound calls."""
    _require_requests("Network features (PolyHaven/Hyper3D/Sketchfab/Hunyuan)")
    headers = requests.utils.default_headers()
    headers["User-Agent"] = "blender-mcp"
    return headers
def _safe_extract_zip(zip_ref: zipfile.ZipFile, target_dir: str):
abs_target_dir = os.path.abspath(target_dir)
for file_info in zip_ref.infolist():
file_path = file_info.filename
if not file_path:
continue
norm = os.path.normpath(file_path)
if os.path.isabs(norm):
raise RuntimeError("Security issue: Zip contains absolute paths")
if ".." in norm.split(os.sep):
raise RuntimeError("Security issue: Zip contains directory traversal sequence")
target_path = os.path.join(target_dir, norm)
abs_target_path = os.path.abspath(target_path)
if not abs_target_path.startswith(abs_target_dir + os.sep) and abs_target_path != abs_target_dir:
raise RuntimeError("Security issue: Zip contains path traversal attempt")
zip_ref.extractall(target_dir)
class BlenderMCPServer:
def __init__(self, host='localhost', port=9876):
    # Connection settings; no socket is opened until start() is called.
    self.host = host
    self.port = port
    # True while the accept loop should keep running.
    self.running = False
    # Listening socket; created in start(), cleared in stop().
    self.socket = None
    # Background thread running _server_loop.
    self.server_thread = None
    # Live client sockets, tracked so stop() can force-close them.
    self._clients = set()
    self._clients_lock = threading.Lock()
def start(self):
    """Open the listening socket and launch the accept loop in a daemon thread."""
    if self.running:
        print("Server is already running")
        return
    self.running = True
    try:
        # Open and bind the listening socket.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind((self.host, self.port))
        self.socket.listen(1)
        # Daemon thread so Blender can exit without waiting on us.
        self.server_thread = threading.Thread(target=self._server_loop, daemon=True)
        self.server_thread.start()
        print(f"BlenderMCP server started on {self.host}:{self.port}")
    except Exception as exc:
        print(f"Failed to start server: {str(exc)}")
        # stop() also closes the partially-initialized socket.
        self.stop()
def stop(self):
self.running = False
# Close socket
if self.socket:
try:
self.socket.close()
except:
pass
self.socket = None
# Close all clients to unblock their recv loops
with self._clients_lock:
clients = list(self._clients)
self._clients.clear()
for c in clients:
with suppress(Exception):
c.shutdown(socket.SHUT_RDWR)
with suppress(Exception):
c.close()
# Wait for thread to finish
if self.server_thread:
try:
if self.server_thread.is_alive():
self.server_thread.join(timeout=1.0)
except:
pass
self.server_thread = None
print("BlenderMCP server stopped")
def _server_loop(self):
    """Main server loop in a separate thread"""
    print("Server thread started")
    # Short accept timeout so the loop can re-check self.running for shutdown.
    self.socket.settimeout(1.0)  # Timeout to allow for stopping
    while self.running:
        try:
            # Accept new connection
            try:
                client, address = self.socket.accept()
                print(f"Connected to client: {address}")
                # Handle client in a separate thread
                client_thread = threading.Thread(
                    target=self._handle_client,
                    args=(client,)
                )
                # Daemon thread: does not block Blender from exiting.
                client_thread.daemon = True
                client_thread.start()
            except socket.timeout:
                # Just check running condition
                continue
            except Exception as e:
                print(f"Error accepting connection: {str(e)}")
                # Brief pause so a persistent accept() error cannot spin the CPU.
                time.sleep(0.5)
        except Exception as e:
            print(f"Error in server loop: {str(e)}")
            if not self.running:
                break
            time.sleep(0.5)
    print("Server thread stopped")
def _handle_client(self, client):
    """Handle connected client

    Reads newline-delimited JSON commands from the socket and schedules each
    one onto Blender's main thread via bpy.app.timers (Blender's Python API
    is not safe to call from worker threads). Each response is written back
    as a single JSON object terminated by a newline.
    """
    print("Client handler started")
    client.settimeout(SOCKET_RECV_TIMEOUT_S)
    buffer = bytearray()
    # Register so stop() can force-close this socket and unblock recv().
    with self._clients_lock:
        self._clients.add(client)
    try:
        while self.running:
            # Receive data
            try:
                data = client.recv(8192)
                if not data:
                    print("Client disconnected")
                    break
                buffer.extend(data)
                # Hard cap: protects against an unbounded (never-terminated) message.
                if len(buffer) > MAX_MESSAGE_BYTES:
                    raise RuntimeError(f"Incoming message exceeded max size ({MAX_MESSAGE_BYTES} bytes)")
                # Drain every complete (newline-terminated) message in the buffer.
                while True:
                    nl = buffer.find(b"\n")
                    if nl == -1:
                        break
                    raw_line = bytes(buffer[:nl])
                    del buffer[: nl + 1]
                    if not raw_line.strip():
                        continue
                    try:
                        command = json.loads(raw_line.decode("utf-8"))
                    except Exception as e:
                        err = {"status": "error", "message": f"Invalid JSON command: {str(e)}"}
                        with suppress(Exception):
                            client.sendall((json.dumps(err) + "\n").encode("utf-8"))
                        continue
                    # cmd=command binds the current value as a default argument,
                    # avoiding the late-binding-closure pitfall across iterations.
                    def execute_wrapper(cmd=command):
                        try:
                            response = self.execute_command(cmd)
                            response_json = json.dumps(response)
                            try:
                                client.sendall((response_json + "\n").encode("utf-8"))
                            except:
                                print("Failed to send response - client disconnected")
                        except Exception as e:
                            print(f"Error executing command: {str(e)}")
                            traceback.print_exc()
                            try:
                                error_response = {
                                    "status": "error",
                                    "message": str(e)
                                }
                                client.sendall((json.dumps(error_response) + "\n").encode("utf-8"))
                            except:
                                pass
                        # Returning None unregisters the timer (run exactly once).
                        return None
                    # Execute on the main thread as soon as possible.
                    bpy.app.timers.register(execute_wrapper, first_interval=0.0)
            except socket.timeout:
                # Periodic wake-up so self.running is re-checked.
                continue
            except Exception as e:
                print(f"Error receiving data: {str(e)}")
                break
    except Exception as e:
        print(f"Error in client handler: {str(e)}")
    finally:
        try:
            client.close()
        except:
            pass
        # Deregister; stop() may have already removed us (KeyError suppressed).
        with self._clients_lock:
            with suppress(KeyError):
                self._clients.remove(client)
        print("Client handler stopped")
def execute_command(self, command):
"""Execute a command in the main Blender thread"""
try:
return self._execute_command_internal(command)
except Exception as e:
print(f"Error executing command: {str(e)}")
traceback.print_exc()
return {"status": "error", "message": str(e)}
def _execute_command_internal(self, command):
    """Internal command execution with proper context.

    Enforces the optional auth token from the add-on preferences, then
    dispatches to a handler. Integration-specific handlers are only exposed
    when the matching scene toggle is enabled.
    """
    prefs = _get_prefs()
    expected_token = getattr(prefs, "auth_token", None) if prefs else None
    provided_token = command.get("auth_token")
    if expected_token and provided_token != expected_token:
        return {"status": "error", "message": "Unauthorized"}
    cmd_type = command.get("type")
    params = command.get("params", {})
    # Lightweight liveness probe (no side effects, always available)
    if cmd_type == "ping":
        return {"status": "success", "result": self.ping()}
    # Add a handler for checking PolyHaven status
    if cmd_type == "get_polyhaven_status":
        return {"status": "success", "result": self.get_polyhaven_status()}
    # Base handlers that are always available
    handlers = {
        "ping": self.ping,
        "get_scene_info": self.get_scene_info,
        "get_object_info": self.get_object_info,
        "get_viewport_screenshot": self.get_viewport_screenshot,
        "execute_code": self.execute_code,
        "execute_ops": self.execute_ops,
        "get_capabilities": self.get_capabilities,
        "list_objects": self.list_objects,
        "get_selection": self.get_selection,
        "get_world_info": self.get_world_info,
        "get_collections": self.get_collections,
        "list_materials": self.list_materials,
        "list_modifiers": self.list_modifiers,
        "get_polyhaven_status": self.get_polyhaven_status,
        "get_hyper3d_status": self.get_hyper3d_status,
        "get_sketchfab_status": self.get_sketchfab_status,
        "get_hunyuan3d_status": self.get_hunyuan3d_status,
    }
    # Add Polyhaven handlers only if enabled
    if bpy.context.scene.blendermcp_use_polyhaven:
        polyhaven_handlers = {
            "get_polyhaven_categories": self.get_polyhaven_categories,
            "search_polyhaven_assets": self.search_polyhaven_assets,
            "download_polyhaven_asset": self.download_polyhaven_asset,
            "set_texture": self.set_texture,
        }
        handlers.update(polyhaven_handlers)
    # Add Hyper3D handlers only if enabled
    # (fixed: this dict was previously misnamed 'polyhaven_handlers')
    if bpy.context.scene.blendermcp_use_hyper3d:
        hyper3d_handlers = {
            "create_rodin_job": self.create_rodin_job,
            "poll_rodin_job_status": self.poll_rodin_job_status,
            "import_generated_asset": self.import_generated_asset,
        }
        handlers.update(hyper3d_handlers)
    # Add Sketchfab handlers only if enabled
    if bpy.context.scene.blendermcp_use_sketchfab:
        sketchfab_handlers = {
            "search_sketchfab_models": self.search_sketchfab_models,
            "download_sketchfab_model": self.download_sketchfab_model,
        }
        handlers.update(sketchfab_handlers)
    # Add Hunyuan3d handlers only if enabled
    if bpy.context.scene.blendermcp_use_hunyuan3d:
        hunyuan_handlers = {
            "create_hunyuan_job": self.create_hunyuan_job,
            "poll_hunyuan_job_status": self.poll_hunyuan_job_status,
            "import_generated_asset_hunyuan": self.import_generated_asset_hunyuan
        }
        handlers.update(hunyuan_handlers)
    handler = handlers.get(cmd_type)
    if handler:
        try:
            print(f"Executing handler for {cmd_type}")
            result = handler(**params)
            print("Handler execution complete")
            return {"status": "success", "result": result}
        except Exception as e:
            print(f"Error in handler: {str(e)}")
            traceback.print_exc()
            return {"status": "error", "message": str(e)}
    else:
        return {"status": "error", "message": f"Unknown command type: {cmd_type}"}
def ping(self):
    """Liveness probe: report reachability and the running Blender version."""
    return dict(ok=True, blender_version=bpy.app.version_string)
def get_scene_info(self):
    """Get information about the current Blender scene"""
    try:
        print("Getting scene info...")
        scene = bpy.context.scene
        # Keep the payload small: counts plus a capped (first 10) object summary.
        summaries = []
        for obj in list(scene.objects)[:10]:
            loc = obj.location
            summaries.append({
                "name": obj.name,
                "type": obj.type,
                # Only include basic location data
                "location": [round(float(loc.x), 2),
                             round(float(loc.y), 2),
                             round(float(loc.z), 2)],
            })
        scene_info = {
            "name": scene.name,
            "object_count": len(scene.objects),
            "objects": summaries,
            "materials_count": len(bpy.data.materials),
        }
        print(f"Scene info collected: {len(scene_info['objects'])} objects")
        return scene_info
    except Exception as e:
        print(f"Error in get_scene_info: {str(e)}")
        traceback.print_exc()
        return {"error": str(e)}
@staticmethod
def _get_aabb(obj):
    """ Returns the world-space axis-aligned bounding box (AABB) of an object. """
    if obj.type != 'MESH':
        raise TypeError("Object must be a mesh")
    # Transform each local bounding-box corner into world space.
    corners = [obj.matrix_world @ mathutils.Vector(c) for c in obj.bound_box]
    # Component-wise min/max over the eight corners.
    min_corner = mathutils.Vector(tuple(min(c[i] for c in corners) for i in range(3)))
    max_corner = mathutils.Vector(tuple(max(c[i] for c in corners) for i in range(3)))
    return [[*min_corner], [*max_corner]]
def get_object_info(self, name):
    """Get detailed information about a specific object"""
    obj = bpy.data.objects.get(name)
    if obj is None:
        raise ValueError(f"Object not found: {name}")
    loc, rot, scl = obj.location, obj.rotation_euler, obj.scale
    obj_info = {
        "name": obj.name,
        "type": obj.type,
        "location": [loc.x, loc.y, loc.z],
        "rotation": [rot.x, rot.y, rot.z],
        "scale": [scl.x, scl.y, scl.z],
        "visible": obj.visible_get(),
        "materials": [],
    }
    if obj.type == "MESH":
        obj_info["world_bounding_box"] = self._get_aabb(obj)
    # Record names of all populated material slots.
    for slot in obj.material_slots:
        if slot.material:
            obj_info["materials"].append(slot.material.name)
    # Mesh statistics, when mesh data is present.
    if obj.type == 'MESH' and obj.data:
        mesh = obj.data
        obj_info["mesh"] = {
            "vertices": len(mesh.vertices),
            "edges": len(mesh.edges),
            "polygons": len(mesh.polygons),
        }
    return obj_info
def get_viewport_screenshot(self, max_size=800, filepath=None, format="png"):
    """
    Capture a screenshot of the current 3D viewport and save it to the specified path.
    Parameters:
    - max_size: Maximum size in pixels for the largest dimension of the image
    - filepath: Path where to save the screenshot file
    - format: Image format (png, jpg, etc.)
    Returns success/error status
    """
    try:
        if not filepath:
            return {"error": "No filepath provided"}
        # Find the active 3D viewport
        area = None
        for a in bpy.context.screen.areas:
            if a.type == 'VIEW_3D':
                area = a
                break
        if not area:
            return {"error": "No 3D viewport found"}
        # Take screenshot with proper context override
        region = next((r for r in area.regions if r.type == 'WINDOW'), None)
        window = bpy.context.window
        if window and region:
            with bpy.context.temp_override(window=window, area=area, region=region):
                bpy.ops.screen.screenshot_area(filepath=filepath)
        else:
            # Fallback: override with only the area when window/region are unavailable.
            with bpy.context.temp_override(area=area):
                bpy.ops.screen.screenshot_area(filepath=filepath)
        # Load and resize if needed (downscale only, preserving aspect ratio).
        img = bpy.data.images.load(filepath)
        width, height = img.size
        if max(width, height) > max_size:
            scale = max_size / max(width, height)
            new_width = int(width * scale)
            new_height = int(height * scale)
            img.scale(new_width, new_height)
            # Set format and save
            img.file_format = format.upper()
            img.save()
            width, height = new_width, new_height
        # Cleanup Blender image data
        bpy.data.images.remove(img)
        return {
            "success": True,
            "width": width,
            "height": height,
            "filepath": filepath
        }
    except Exception as e:
        return {"error": str(e)}
def execute_code(self, code):
    """Execute arbitrary Blender Python code and return captured stdout.

    SECURITY: this exec()s caller-supplied code — powerful but dangerous;
    it is only reachable through this add-on's own socket protocol.

    Returns {"executed": True, "result": <captured stdout>} on success.
    Raises Exception (chained to the original error) on failure.
    """
    try:
        # Create a local namespace for execution
        namespace = {"bpy": bpy}
        # Capture stdout during execution, and return it as result
        capture_buffer = io.StringIO()
        with redirect_stdout(capture_buffer):
            exec(code, namespace)
        captured_output = capture_buffer.getvalue()
        return {"executed": True, "result": captured_output}
    except Exception as e:
        # Chain the original exception so its traceback is preserved.
        raise Exception(f"Code execution error: {str(e)}") from e
def get_capabilities(self):
    """Describe server capabilities: DSL version, enabled integrations, the
    op-type allowlist, allowed path roots, and supported formats/engines."""
    roots = self._get_allowed_path_roots()
    return {
        "dsl_version": DSL_VERSION,
        "blender_version": bpy.app.version_string,
        # Which optional third-party integrations are currently toggled on.
        "integrations": {
            "polyhaven": bool(getattr(bpy.context.scene, "blendermcp_use_polyhaven", False)),
            "hyper3d": bool(getattr(bpy.context.scene, "blendermcp_use_hyper3d", False)),
            "sketchfab": bool(getattr(bpy.context.scene, "blendermcp_use_sketchfab", False)),
            "hunyuan3d": bool(getattr(bpy.context.scene, "blendermcp_use_hunyuan3d", False)),
        },
        # NOTE(review): keep in sync with the allowlist in _validate_ops_request.
        "ops_allowlist": [
            "deselect_all",
            "select",
            "create_primitive",
            "delete",
            "duplicate",
            "rename",
            "set_transform",
            "apply_transform",
            "snap_to_ground",
            "set_origin",
            "set_shading",
            "recalculate_normals",
            "merge_by_distance",
            "triangulate",
            "join_objects",
            "separate_mesh",
            "convert_to_mesh",
            "set_visibility",
            "set_collection_visibility",
            "isolate_objects",
            "uv_smart_project",
            "uv_unwrap",
            "uv_pack_islands",
            "bake_maps",
            "ensure_collection",
            "move_to_collection",
            "set_parent",
            "clear_parent",
            "ensure_material",
            "set_material_params",
            "assign_material",
            "set_texture_maps",
            "create_light",
            "set_light_params",
            "create_camera",
            "set_camera_params",
            "set_active_camera",
            "frame_camera",
            "camera_look_at",
            "create_turntable_animation",
            "set_world_background",
            "set_world_hdri",
            "import_model",
            "export_scene",
            "set_render_settings",
            "render_still",
            "render_animation",
            "add_modifier",
            "remove_modifier",
            "apply_modifier",
            "boolean_operation",
            "purge_orphans",
            "pack_external_data",
            "save_blend",
        ],
        "allowed_path_roots": roots,
        "supported_import_formats": ["gltf", "glb", "obj", "fbx"],
        "supported_export_formats": ["gltf", "glb", "obj", "fbx"],
        "supported_render_engines": ["CYCLES", "BLENDER_EEVEE", "BLENDER_WORKBENCH"],
    }
def list_objects(self, filter=None, limit=500):
    """List scene objects matching an optional filter.

    filter keys: "types" (list[str]), "collection" (str), "name_contains" (str).
    limit is clamped to [1, 2000]. Returns {count, objects, truncated}.
    Raises ValueError on malformed filter input.
    """
    if filter is None:
        filter = {}
    if not isinstance(filter, dict):
        raise ValueError("filter must be an object")
    limit_i = int(limit) if _is_num(limit) else 500
    limit_i = max(1, min(limit_i, 2000))
    types = filter.get("types")
    if types is not None and not isinstance(types, list):
        raise ValueError("filter.types must be a list of strings")
    type_set = set(t for t in (types or []) if isinstance(t, str))
    collection = filter.get("collection")
    name_contains = filter.get("name_contains")
    if name_contains is not None and not isinstance(name_contains, str):
        raise ValueError("filter.name_contains must be a string")
    # Resolve the collection once instead of once per object (loop-invariant).
    col = bpy.data.collections.get(collection) if collection else None
    out = []
    for obj in bpy.context.scene.objects:
        if type_set and obj.type not in type_set:
            continue
        if name_contains and name_contains not in obj.name:
            continue
        if collection and (col is None or obj.name not in col.objects):
            continue
        out.append(
            {
                "name": obj.name,
                "type": obj.type,
                "location": [float(obj.location.x), float(obj.location.y), float(obj.location.z)],
                "visible": bool(obj.visible_get()),
            }
        )
        if len(out) >= limit_i:
            break
    return {"count": len(out), "objects": out, "truncated": len(out) >= limit_i}
def get_selection(self):
    """Return the names of selected objects and the active object (or None)."""
    sel = bpy.context.selected_objects
    active_obj = bpy.context.view_layer.objects.active
    return {
        "selected": [o.name for o in sel] if sel else [],
        "active": active_obj.name if active_obj else None,
    }
def get_world_info(self):
    """Summarize the scene world: name, node usage, and up to 50 node descriptors."""
    world = bpy.context.scene.world
    if world is None:
        return {"world": None}
    info = {"name": world.name, "use_nodes": bool(world.use_nodes)}
    if world.use_nodes and world.node_tree:
        # Cap the node list to keep the payload bounded.
        info["nodes"] = [{"name": n.name, "type": n.type} for n in world.node_tree.nodes][:50]
    return {"world": info}
def get_collections(self):
    """List all collections (name + object count), sorted by name."""
    entries = sorted(
        ({"name": c.name, "object_count": len(c.objects)} for c in bpy.data.collections),
        key=lambda entry: entry["name"],
    )
    return {"collections": entries}
def list_materials(self, limit=500):
    """List materials up to a clamped limit; flags whether the list was truncated."""
    cap = max(1, min(int(limit) if _is_num(limit) else 500, 2000))
    mats = [
        {"name": m.name, "use_nodes": bool(getattr(m, "use_nodes", False))}
        for m in list(bpy.data.materials)[:cap]
    ]
    return {"count": len(mats), "materials": mats, "truncated": len(mats) >= cap}
def list_modifiers(self, object_name: str):
    """List (name, type) pairs for every modifier on the named object."""
    obj = bpy.data.objects.get(object_name)
    if obj is None:
        raise ValueError(f"Object not found: {object_name}")
    return {
        "object": obj.name,
        "modifiers": [{"name": m.name, "type": m.type} for m in obj.modifiers],
    }
def execute_ops(self, request):
    """Run a validated batch of DSL ops, optionally as an atomic transaction.

    With transaction == "atomic", an undo step is pushed before execution and
    bpy.ops.ed.undo() is attempted if any op fails (best-effort rollback).
    With dry_run, the normalized request is returned without executing.
    Returns per-op results plus a summary of created/changed object names.
    """
    normalized = _validate_ops_request(request)
    if normalized.get("dry_run"):
        return {"dry_run": True, "normalized": normalized}
    transaction = normalized.get("transaction", "atomic")
    ops = normalized["ops"]
    undo_pushed = False
    rollback_attempted = False
    if transaction == "atomic":
        try:
            bpy.ops.ed.undo_push(message="BlenderMCP execute_ops (pre)")
            undo_pushed = True
        except Exception:
            # Undo push can fail outside a proper context; continue without
            # rollback support rather than failing the whole batch.
            undo_pushed = False
    results = []
    created = []
    changed = []
    try:
        for i, op in enumerate(ops):
            try:
                out = self._execute_single_op(op)
                results.append({"op_index": i, "status": "success", "data": out})
                if isinstance(out, dict):
                    if out.get("created_object"):
                        created.append(out["created_object"])
                    if out.get("changed_object"):
                        changed.append(out["changed_object"])
            except Exception as e:
                # Record the failing op, then abort the batch (re-raise so the
                # outer handler can attempt the rollback).
                results.append({"op_index": i, "status": "error", "error": str(e)})
                raise
        return {
            "ok": True,
            "results": results,
            "summary": {
                "created_objects": created,
                "changed_objects": changed,
            },
            "transaction": {"mode": transaction, "undo_pushed": undo_pushed, "rolled_back": False},
        }
    except Exception as e:
        if transaction == "atomic" and undo_pushed:
            try:
                bpy.ops.ed.undo()
                rollback_attempted = True
            except Exception:
                rollback_attempted = False
        return {
            "ok": False,
            "error": str(e),
            "results": results,
            "transaction": {"mode": transaction, "undo_pushed": undo_pushed, "rolled_back": rollback_attempted},
        }
def _execute_single_op(self, op):
op_type = op.get("type")
def _ensure_object_mode():
with suppress(Exception):
bpy.ops.object.mode_set(mode="OBJECT")
def _deselect_all_selected():
for o in bpy.context.selected_objects:
with suppress(Exception):
o.select_set(False)
with suppress(Exception):
bpy.context.view_layer.objects.active = None
def _set_active(obj):
bpy.context.view_layer.objects.active = obj
def _select(obj, selected: bool = True):
with suppress(Exception):
obj.select_set(bool(selected))
def _override_any_area():
# Some bpy.ops need an area/region context. Best-effort override.
wm = bpy.context.window_manager
for win in getattr(wm, "windows", []):
scr = getattr(win, "screen", None)
if scr is None:
continue
for area in getattr(scr, "areas", []):
if area.type in {"VIEW_3D", "IMAGE_EDITOR", "OUTLINER"}:
region = next((r for r in area.regions if r.type == "WINDOW"), None)
if region is None:
continue
return {"window": win, "screen": scr, "area": area, "region": region}
return None
if op_type == "deselect_all":
for obj in bpy.context.scene.objects:
with suppress(Exception):
obj.select_set(False)
bpy.context.view_layer.objects.active = None
return {"changed_object": None}
if op_type == "select":
names = op.get("names") or []
mode = op.get("mode")
active = op.get("active")
if mode == "replace":
for obj in bpy.context.scene.objects:
with suppress(Exception):
obj.select_set(False)
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
if mode in ("replace", "add"):
obj.select_set(True)
elif mode == "remove":
obj.select_set(False)
if active:
a = bpy.data.objects.get(active)
if a is None:
raise ValueError(f"Active object not found: {active}")
bpy.context.view_layer.objects.active = a
return {"changed_object": active or (names[0] if names else None)}
if op_type == "create_primitive":
primitive = op.get("primitive")
size = float(op.get("size", 1.0))
location = _validate_vec3(op.get("location"), path="location") or (0.0, 0.0, 0.0)
rotation = _validate_vec3(op.get("rotation"), path="rotation")
scale = _validate_vec3(op.get("scale"), path="scale")
name = op.get("name")
with suppress(Exception):
bpy.ops.object.mode_set(mode="OBJECT")
if primitive == "cube":
bpy.ops.mesh.primitive_cube_add(size=size, location=location)
elif primitive == "plane":
bpy.ops.mesh.primitive_plane_add(size=size, location=location)
elif primitive == "uv_sphere":
bpy.ops.mesh.primitive_uv_sphere_add(radius=size / 2.0, location=location)
elif primitive == "ico_sphere":
bpy.ops.mesh.primitive_ico_sphere_add(radius=size / 2.0, location=location)
elif primitive == "cylinder":
bpy.ops.mesh.primitive_cylinder_add(radius=size / 2.0, depth=size, location=location)
elif primitive == "cone":
bpy.ops.mesh.primitive_cone_add(radius1=size / 2.0, depth=size, location=location)
elif primitive == "torus":
bpy.ops.mesh.primitive_torus_add(major_radius=size / 2.0, minor_radius=size / 4.0, location=location)
else:
raise ValueError(f"Unsupported primitive: {primitive}")
obj = bpy.context.active_object
if obj is None:
raise RuntimeError("Failed to create primitive")
if name:
obj.name = name
if rotation is not None:
obj.rotation_euler = rotation
if scale is not None:
obj.scale = scale
return {"created_object": obj.name}
if op_type == "delete":
names = op.get("names") or []
deleted = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
bpy.data.objects.remove(obj, do_unlink=True)
deleted.append(n)
return {"deleted_objects": deleted}
if op_type == "duplicate":
names = op.get("names") or []
linked = bool(op.get("linked", False))
new_names = op.get("new_names") or []
if new_names and len(new_names) != len(names):
raise ValueError("new_names length must match names length")
duped = []
for i, n in enumerate(names):
src = bpy.data.objects.get(n)
if src is None:
raise ValueError(f"Object not found: {n}")
obj = src.copy()
if src.data and not linked:
obj.data = src.data.copy()
bpy.context.scene.collection.objects.link(obj)
if new_names:
obj.name = new_names[i]
duped.append(obj.name)
return {"created_objects": duped}
if op_type == "rename":
src = op.get("from")
dst = op.get("to")
obj = bpy.data.objects.get(src)
if obj is None:
raise ValueError(f"Object not found: {src}")
obj.name = dst
return {"changed_object": dst}
if op_type == "set_transform":
name = op.get("name")
obj = bpy.data.objects.get(name)
if obj is None:
raise ValueError(f"Object not found: {name}")
location = _validate_vec3(op.get("location"), path="location")
rotation = _validate_vec3(op.get("rotation"), path="rotation")
scale = _validate_vec3(op.get("scale"), path="scale")
if location is not None:
obj.location = location
if rotation is not None:
obj.rotation_euler = rotation
if scale is not None:
obj.scale = scale
return {"changed_object": obj.name}
if op_type == "apply_transform":
name = op.get("name")
obj = bpy.data.objects.get(name)
if obj is None:
raise ValueError(f"Object not found: {name}")
with suppress(Exception):
bpy.ops.object.mode_set(mode="OBJECT")
for o in bpy.context.selected_objects:
with suppress(Exception):
o.select_set(False)
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.transform_apply(
location=bool(op.get("location", False)),
rotation=bool(op.get("rotation", False)),
scale=bool(op.get("scale", False)),
)
return {"changed_object": obj.name}
if op_type == "set_shading":
names = op.get("names") or []
shade = op.get("shade")
auto_smooth = op.get("auto_smooth")
auto_smooth_angle = op.get("auto_smooth_angle")
changed = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
if obj.type != "MESH" or obj.data is None:
continue
me = obj.data
smooth = shade == "smooth"
for p in me.polygons:
p.use_smooth = smooth
if auto_smooth is not None:
with suppress(Exception):
me.use_auto_smooth = bool(auto_smooth)
if auto_smooth_angle is not None and _is_num(auto_smooth_angle):
with suppress(Exception):
me.auto_smooth_angle = math.radians(float(auto_smooth_angle))
changed.append(obj.name)
return {"changed_objects": changed}
if op_type == "recalculate_normals":
names = op.get("names") or []
inside = bool(op.get("inside", False))
changed = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
if obj.type != "MESH" or obj.data is None:
continue
me = obj.data
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.recalc_face_normals(bm, faces=bm.faces)
if inside:
bmesh.ops.reverse_faces(bm, faces=bm.faces)
bm.to_mesh(me)
bm.free()
with suppress(Exception):
me.update()
changed.append(obj.name)
return {"changed_objects": changed}
if op_type == "merge_by_distance":
names = op.get("names") or []
dist = float(op.get("distance", 0.0))
changed = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
if obj.type != "MESH" or obj.data is None:
continue
me = obj.data
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=dist)
bm.to_mesh(me)
bm.free()
with suppress(Exception):
me.update()
changed.append(obj.name)
return {"changed_objects": changed}
if op_type == "triangulate":
names = op.get("names") or []
quad_method = op.get("quad_method", "BEAUTY")
ngon_method = op.get("ngon_method", "BEAUTY")
changed = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
if obj.type != "MESH" or obj.data is None:
continue
me = obj.data
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.triangulate(bm, faces=bm.faces, quad_method=quad_method, ngon_method=ngon_method)
bm.to_mesh(me)
bm.free()
with suppress(Exception):
me.update()
changed.append(obj.name)
return {"changed_objects": changed}
if op_type == "join_objects":
names = op.get("names") or []
active_name = op.get("active")
new_name = op.get("new_name")
if len(names) < 2:
raise ValueError("join_objects requires at least 2 objects")
objs = []
for n in names:
o = bpy.data.objects.get(n)
if o is None:
raise ValueError(f"Object not found: {n}")
objs.append(o)
active = bpy.data.objects.get(active_name) if active_name else objs[0]
if active is None:
raise ValueError(f"Active object not found: {active_name}")
_ensure_object_mode()
_deselect_all_selected()
for o in objs:
_select(o, True)
_set_active(active)
bpy.ops.object.join()
out = bpy.context.view_layer.objects.active
if out is None:
raise RuntimeError("join_objects failed (no active object after join)")
if new_name:
out.name = new_name
return {"changed_object": out.name, "joined_objects": names}
if op_type == "separate_mesh":
name = op.get("name")
mode = op.get("mode")
obj = bpy.data.objects.get(name)
if obj is None:
raise ValueError(f"Object not found: {name}")
if obj.type != "MESH":
raise ValueError("separate_mesh only supports MESH objects")
_ensure_object_mode()
_deselect_all_selected()
_select(obj, True)
_set_active(obj)
bpy.ops.object.mode_set(mode="EDIT")
with suppress(Exception):
bpy.ops.mesh.select_all(action="SELECT")
sep_type = {"loose": "LOOSE", "material": "MATERIAL", "selected": "SELECTED"}.get(mode)
if sep_type is None:
raise ValueError(f"Unsupported separate mode: {mode}")
bpy.ops.mesh.separate(type=sep_type)
bpy.ops.object.mode_set(mode="OBJECT")
created = [o.name for o in bpy.context.selected_objects if o.name != name]
return {"changed_object": obj.name, "created_objects": created}
if op_type == "convert_to_mesh":
names = op.get("names") or []
keep_original = bool(op.get("keep_original", False))
_ensure_object_mode()
converted = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
_deselect_all_selected()
_select(obj, True)
_set_active(obj)
try:
bpy.ops.object.convert(target="MESH", keep_original=keep_original)
except TypeError:
bpy.ops.object.convert(target="MESH")
converted.append(obj.name)
return {"changed_objects": converted}
if op_type == "set_visibility":
names = op.get("names") or []
viewport = op.get("viewport")
render = op.get("render")
selectable = op.get("selectable")
changed = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
if viewport is not None:
v = bool(viewport)
with suppress(Exception):
obj.hide_set(not v)
with suppress(Exception):
obj.hide_viewport = not v
if render is not None:
obj.hide_render = not bool(render)
if selectable is not None:
obj.hide_select = not bool(selectable)
changed.append(obj.name)
return {"changed_objects": changed}
if op_type == "set_collection_visibility":
col_name = op.get("collection")
viewport = op.get("viewport")
render = op.get("render")
col = bpy.data.collections.get(col_name)
if col is None:
raise ValueError(f"Collection not found: {col_name}")
def _find_layer_collection(lc):
if lc.collection == col:
return lc
for child in lc.children:
found = _find_layer_collection(child)
if found is not None:
return found
return None
lc = _find_layer_collection(bpy.context.view_layer.layer_collection)
if lc is None:
raise ValueError(f"Layer collection not found in view layer: {col_name}")
if viewport is not None:
lc.hide_viewport = not bool(viewport)
if render is not None:
lc.hide_render = not bool(render)
return {"changed_object": None, "collection": col_name}
if op_type == "isolate_objects":
mode = op.get("mode")
names = op.get("names") or []
include_children = bool(op.get("include_children", True))
render = op.get("render")
if mode == "clear":
changed = []
for obj in bpy.context.scene.objects:
with suppress(Exception):
obj.hide_set(False)
with suppress(Exception):
obj.hide_viewport = False
if render is not None:
obj.hide_render = False
changed.append(obj.name)
return {"changed_objects": changed, "mode": "clear"}
if mode != "isolate":
raise ValueError(f"Unsupported isolate mode: {mode}")
keep = set()
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
keep.add(obj)
if include_children:
for ch in obj.children_recursive:
keep.add(ch)
changed = []
for obj in bpy.context.scene.objects:
visible = obj in keep
with suppress(Exception):
obj.hide_set(not visible)
with suppress(Exception):
obj.hide_viewport = not visible
if render is not None:
obj.hide_render = not (bool(render) and visible)
changed.append(obj.name)
return {"changed_objects": changed, "mode": "isolate", "kept": [o.name for o in keep]}
if op_type in {"uv_smart_project", "uv_unwrap", "uv_pack_islands"}:
names = op.get("names") or []
params = op.get("params") or {}
if not isinstance(params, dict):
raise ValueError("params must be an object")
_ensure_object_mode()
changed = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
if obj.type != "MESH":
continue
_deselect_all_selected()
_select(obj, True)
_set_active(obj)
bpy.ops.object.mode_set(mode="EDIT")
with suppress(Exception):
bpy.ops.mesh.select_all(action="SELECT")
def _run_uv():
if op_type == "uv_smart_project":
return bpy.ops.uv.smart_project(**params)
if op_type == "uv_unwrap":
return bpy.ops.uv.unwrap(**params)
return bpy.ops.uv.pack_islands(**params)
try:
_run_uv()
except Exception:
ov = _override_any_area()
if ov is None:
raise
with bpy.context.temp_override(**ov):
_run_uv()
bpy.ops.object.mode_set(mode="OBJECT")
changed.append(obj.name)
return {"changed_objects": changed, "uv_op": op_type}
if op_type == "bake_maps":
name = op.get("name")
bake_type = op.get("bake_type")
output_path = op.get("output_path")
resolution = int(op.get("resolution", 1024))
margin = int(op.get("margin", 16))
samples = int(op.get("samples", 64))
use_selected_to_active = bool(op.get("use_selected_to_active", False))
cage_extrusion = op.get("cage_extrusion")
if not self._is_path_allowed(output_path, kind="write"):
raise ValueError("Bake output_path is not allowed by addon path policy")
out_abs = os.path.abspath(output_path)
os.makedirs(os.path.dirname(out_abs), exist_ok=True)
obj = bpy.data.objects.get(name)
if obj is None:
raise ValueError(f"Object not found: {name}")
if obj.type != "MESH":
raise ValueError("bake_maps only supports MESH objects")
_ensure_object_mode()
if not use_selected_to_active:
_deselect_all_selected()
_select(obj, True)
_set_active(obj)
scene = bpy.context.scene
with suppress(Exception):
scene.render.engine = "CYCLES"
if hasattr(scene, "cycles") and hasattr(scene.cycles, "samples"):
with suppress(Exception):
scene.cycles.samples = samples
bake = scene.render.bake
bake.use_clear = True
bake.margin = margin
bake.use_selected_to_active = use_selected_to_active
if cage_extrusion is not None and _is_num(cage_extrusion):
with suppress(Exception):
bake.cage_extrusion = float(cage_extrusion)
if not obj.data.materials or obj.data.materials[0] is None:
mat = bpy.data.materials.new(name=f"{obj.name}_Bake")
mat.use_nodes = True
obj.data.materials.append(mat)
mat = obj.data.materials[0]
mat.use_nodes = True
nt = mat.node_tree
if nt is None:
raise RuntimeError("Material node_tree missing")
img = bpy.data.images.new(name=f"bake_{obj.name}_{bake_type}", width=resolution, height=resolution, alpha=True)
img.filepath_raw = out_abs
img.file_format = "PNG"
tex = nt.nodes.new(type="ShaderNodeTexImage")
tex.image = img
tex.select = True
nt.nodes.active = tex
try:
bpy.ops.object.bake(type=bake_type)
except Exception:
ov = _override_any_area()
if ov is None:
raise
with bpy.context.temp_override(**ov):
bpy.ops.object.bake(type=bake_type)
with suppress(Exception):
img.save()
return {"changed_object": obj.name, "output_path": out_abs, "bake_type": bake_type}
if op_type == "camera_look_at":
cam_name = op.get("camera")
target = op.get("target")
roll_deg = float(op.get("roll", 0.0))
cam = bpy.data.objects.get(cam_name)
if cam is None or cam.type != "CAMERA":
raise ValueError(f"Camera not found: {cam_name}")
t = _validate_vec3(target, path="target")
if t is None:
raise ValueError("target must be [x,y,z]")
target_v = mathutils.Vector(t)
direction = target_v - cam.location
if direction.length < 1e-9:
raise ValueError("camera_look_at target is at the camera location")
quat = direction.to_track_quat("-Z", "Y")
if abs(roll_deg) > 1e-9:
roll_q = mathutils.Quaternion(direction.normalized(), math.radians(roll_deg))
quat = roll_q @ quat
cam.rotation_euler = quat.to_euler()
return {"changed_object": cam.name}
if op_type == "create_turntable_animation":
name = op.get("name")
frame_start = int(op.get("frame_start"))
frame_end = int(op.get("frame_end"))
axis = op.get("axis", "Z")
revolutions = float(op.get("revolutions", 1.0))
rig_name = op.get("rig_name") or f"Turntable_{name}"
obj = bpy.data.objects.get(name)
if obj is None:
raise ValueError(f"Object not found: {name}")
scene = bpy.context.scene
scene.frame_start = frame_start
scene.frame_end = frame_end
rig = bpy.data.objects.get(rig_name)
if rig is None:
rig = bpy.data.objects.new(rig_name, None)
rig.empty_display_type = "PLAIN_AXES"
bpy.context.scene.collection.objects.link(rig)
rig.location = obj.location
rig.rotation_euler = (0.0, 0.0, 0.0)
rig_mw = rig.matrix_world.copy()
obj_mw = obj.matrix_world.copy()
obj.parent = rig
obj.matrix_parent_inverse = rig_mw.inverted()
obj.matrix_world = obj_mw
rig.keyframe_insert(data_path="rotation_euler", frame=frame_start)
rot = list(rig.rotation_euler)
angle = 2.0 * math.pi * revolutions
idx = {"X": 0, "Y": 1, "Z": 2}.get(axis, 2)
rot[idx] = angle
rig.rotation_euler = tuple(rot)
rig.keyframe_insert(data_path="rotation_euler", frame=frame_end)
return {"created_object": rig.name, "changed_object": obj.name}
if op_type == "boolean_operation":
target_name = op.get("target")
cutter_name = op.get("cutter")
operation = op.get("operation", "DIFFERENCE")
solver = op.get("solver", "FAST")
apply = bool(op.get("apply", True))
remove_cutter = bool(op.get("remove_cutter", False))
target = bpy.data.objects.get(target_name)
cutter = bpy.data.objects.get(cutter_name)
if target is None:
raise ValueError(f"Object not found: {target_name}")
if cutter is None:
raise ValueError(f"Object not found: {cutter_name}")
if target.type != "MESH" or cutter.type != "MESH":
raise ValueError("boolean_operation requires MESH target and cutter")
mod = target.modifiers.new(name="BOOLEAN", type="BOOLEAN")
mod.operation = operation
mod.object = cutter
with suppress(Exception):
mod.solver = solver
if apply:
_ensure_object_mode()
_deselect_all_selected()
_select(target, True)
_set_active(target)
bpy.ops.object.modifier_apply(modifier=mod.name)
if remove_cutter:
bpy.data.objects.remove(cutter, do_unlink=True)
return {"changed_object": target.name, "modifier_name": mod.name}
if op_type == "purge_orphans":
purged = 0
for _ in range(10):
try:
res = bpy.ops.outliner.orphans_purge(do_recursive=True)
except Exception:
ov = _override_any_area()
if ov is None:
break
with bpy.context.temp_override(**ov):
res = bpy.ops.outliner.orphans_purge(do_recursive=True)
if "FINISHED" in res:
purged += 1
continue
break
return {"changed_object": None, "purge_passes": purged}
if op_type == "pack_external_data":
try:
bpy.ops.file.pack_all()
except Exception:
ov = _override_any_area()
if ov is None:
raise
with bpy.context.temp_override(**ov):
bpy.ops.file.pack_all()
return {"changed_object": None}
if op_type == "save_blend":
path = op.get("path")
compress = bool(op.get("compress", False))
copy = bool(op.get("copy", False))
if not self._is_path_allowed(path, kind="write"):
raise ValueError("save_blend path is not allowed by addon path policy")
abs_path = os.path.abspath(path)
os.makedirs(os.path.dirname(abs_path), exist_ok=True)
try:
bpy.ops.wm.save_as_mainfile(filepath=abs_path, compress=compress, copy=copy)
except TypeError:
bpy.ops.wm.save_as_mainfile(filepath=abs_path)
return {"changed_object": None, "path": abs_path}
if op_type == "snap_to_ground":
names = op.get("names") or []
moved = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
if obj.type != "MESH":
continue
corners = [obj.matrix_world @ mathutils.Vector(c) for c in obj.bound_box]
min_z = min(v.z for v in corners)
obj.location.z -= min_z
moved.append(obj.name)
return {"changed_objects": moved}
if op_type == "set_origin":
name = op.get("name")
mode = op.get("mode")
obj = bpy.data.objects.get(name)
if obj is None:
raise ValueError(f"Object not found: {name}")
with suppress(Exception):
bpy.ops.object.mode_set(mode="OBJECT")
for o in bpy.context.selected_objects:
with suppress(Exception):
o.select_set(False)
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
if mode == "origin_to_geometry":
bpy.ops.object.origin_set(type="ORIGIN_GEOMETRY", center="MEDIAN")
elif mode == "geometry_to_origin":
bpy.ops.object.origin_set(type="GEOMETRY_ORIGIN", center="MEDIAN")
elif mode == "origin_to_3d_cursor":
bpy.ops.object.origin_set(type="ORIGIN_CURSOR", center="MEDIAN")
else:
raise ValueError(f"Unsupported origin mode: {mode}")
return {"changed_object": obj.name}
if op_type == "ensure_collection":
name = op.get("name")
if bpy.data.collections.get(name) is None:
col = bpy.data.collections.new(name)
bpy.context.scene.collection.children.link(col)
return {"changed_object": None}
if op_type == "move_to_collection":
obj_name = op.get("object")
col_name = op.get("collection")
obj = bpy.data.objects.get(obj_name)
if obj is None:
raise ValueError(f"Object not found: {obj_name}")
col = bpy.data.collections.get(col_name)
if col is None:
col = bpy.data.collections.new(col_name)
bpy.context.scene.collection.children.link(col)
if obj.name not in col.objects:
col.objects.link(obj)
return {"changed_object": obj.name}
if op_type == "set_parent":
child_name = op.get("child")
parent_name = op.get("parent")
child = bpy.data.objects.get(child_name)
parent = bpy.data.objects.get(parent_name)
if child is None:
raise ValueError(f"Object not found: {child_name}")
if parent is None:
raise ValueError(f"Object not found: {parent_name}")
child.parent = parent
return {"changed_object": child.name}
if op_type == "clear_parent":
names = op.get("names") or []
out = []
for n in names:
obj = bpy.data.objects.get(n)
if obj is None:
raise ValueError(f"Object not found: {n}")
obj.parent = None
out.append(obj.name)
return {"changed_objects": out}
if op_type == "ensure_material":
name = op.get("name")
model = op.get("model", "pbr")
mat = bpy.data.materials.get(name)
if mat is None:
mat = bpy.data.materials.new(name=name)
mat.use_nodes = True
nt = mat.node_tree
if nt is None:
raise RuntimeError("Material node_tree missing")
nt.nodes.clear()
out_node = nt.nodes.new(type="ShaderNodeOutputMaterial")
out_node.location = (300, 0)
if model == "pbr":
bsdf = nt.nodes.new(type="ShaderNodeBsdfPrincipled")
bsdf.location = (0, 0)
nt.links.new(bsdf.outputs.get("BSDF"), out_node.inputs.get("Surface"))
else:
em = nt.nodes.new(type="ShaderNodeEmission")
em.location = (0, 0)
nt.links.new(em.outputs.get("Emission"), out_node.inputs.get("Surface"))
return {"changed_object": None}
if op_type == "set_material_params":
mat_name = op.get("material")
params = op.get("params") or {}
mat = bpy.data.materials.get(mat_name)
if mat is None or not mat.use_nodes or mat.node_tree is None:
raise ValueError(f"Material not found or has no nodes: {mat_name}")
nt = mat.node_tree
bsdf = next((n for n in nt.nodes if n.type == "BSDF_PRINCIPLED"), None)
emission = next((n for n in nt.nodes if n.type == "EMISSION"), None)
node = bsdf or emission
if node is None:
raise ValueError("No Principled BSDF or Emission node found")
if "baseColor" in params and hasattr(node.inputs, "get"):
inp = node.inputs.get("Base Color") or node.inputs.get("Color")
if inp:
rgba = _validate_rgba(params["baseColor"], path="params.baseColor") or (1, 1, 1, 1)
inp.default_value = rgba
if "metallic" in params and bsdf:
inp = bsdf.inputs.get("Metallic")
if inp:
inp.default_value = float(params["metallic"])
if "roughness" in params and bsdf:
inp = bsdf.inputs.get("Roughness")
if inp:
inp.default_value = float(params["roughness"])
if "emissionColor" in params and bsdf:
inp = bsdf.inputs.get("Emission")
if inp:
rgba = _validate_rgba(params["emissionColor"], path="params.emissionColor") or (0, 0, 0, 1)
inp.default_value = rgba
if "emissionStrength" in params and bsdf:
inp = bsdf.inputs.get("Emission Strength")
if inp:
inp.default_value = float(params["emissionStrength"])
if "alpha" in params and bsdf:
inp = bsdf.inputs.get("Alpha")
if inp:
a = float(params["alpha"])
inp.default_value = a
if a < 1.0:
mat.blend_method = "BLEND"
return {"changed_object": None}
if op_type == "assign_material":
obj_name = op.get("object")
mat_name = op.get("material")
slot = int(op.get("slot", 0))
obj = bpy.data.objects.get(obj_name)
if obj is None:
raise ValueError(f"Object not found: {obj_name}")
mat = bpy.data.materials.get(mat_name)
if mat is None:
raise ValueError(f"Material not found: {mat_name}")
if obj.type != "MESH":
raise ValueError("assign_material only supports MESH objects")
while len(obj.data.materials) <= slot:
obj.data.materials.append(None)
obj.data.materials[slot] = mat
return {"changed_object": obj.name}
if op_type == "set_texture_maps":
mat_name = op.get("material")
maps = op.get("maps") or {}
if not isinstance(maps, dict):
raise ValueError("maps must be an object")
mat = bpy.data.materials.get(mat_name)
if mat is None:
raise ValueError(f"Material not found: {mat_name}")
mat.use_nodes = True
nt = mat.node_tree
if nt is None:
raise RuntimeError("Material node_tree missing")
bsdf = next((n for n in nt.nodes if n.type == "BSDF_PRINCIPLED"), None)
if bsdf is None:
bsdf = nt.nodes.new(type="ShaderNodeBsdfPrincipled")
bsdf.location = (0, 0)
out = next((n for n in nt.nodes if n.type == "OUTPUT_MATERIAL"), None)
if out is None:
out = nt.nodes.new(type="ShaderNodeOutputMaterial")
out.location = (300, 0)
nt.links.new(bsdf.outputs.get("BSDF"), out.inputs.get("Surface"))
def _load_image(p: str):
if not self._is_path_allowed(p, kind="read"):
raise ValueError(f"Texture path not allowed: {p}")
return bpy.data.images.load(p, check_existing=True)
texcoord = next((n for n in nt.nodes if n.type == "TEX_COORD"), None)
if texcoord is None:
texcoord = nt.nodes.new(type="ShaderNodeTexCoord")
texcoord.location = (-900, 0)
mapping = next((n for n in nt.nodes if n.type == "MAPPING"), None)
if mapping is None:
mapping = nt.nodes.new(type="ShaderNodeMapping")
mapping.location = (-700, 0)
nt.links.new(texcoord.outputs.get("UV"), mapping.inputs.get("Vector"))
def _ensure_tex_node(label: str, x: int, y: int):
node = nt.nodes.new(type="ShaderNodeTexImage")
node.label = label
node.location = (x, y)
nt.links.new(mapping.outputs.get("Vector"), node.inputs.get("Vector"))
return node
if "basecolor" in maps and maps["basecolor"]:
img = _load_image(maps["basecolor"])
tex = _ensure_tex_node("BaseColor", -450, 250)
tex.image = img
tex.image.colorspace_settings.name = "sRGB"
nt.links.new(tex.outputs.get("Color"), bsdf.inputs.get("Base Color"))
if "roughness" in maps and maps["roughness"]:
img = _load_image(maps["roughness"])
tex = _ensure_tex_node("Roughness", -450, 0)
tex.image = img
tex.image.colorspace_settings.name = "Non-Color"
nt.links.new(tex.outputs.get("Color"), bsdf.inputs.get("Roughness"))
if "metallic" in maps and maps["metallic"]:
img = _load_image(maps["metallic"])
tex = _ensure_tex_node("Metallic", -450, -250)
tex.image = img
tex.image.colorspace_settings.name = "Non-Color"
nt.links.new(tex.outputs.get("Color"), bsdf.inputs.get("Metallic"))
if "ao" in maps and maps["ao"]:
img = _load_image(maps["ao"])
tex = _ensure_tex_node("AO", -450, 500)
tex.image = img
tex.image.colorspace_settings.name = "Non-Color"
# AO multiply into base color if base color link exists
mix = nt.nodes.new(type="ShaderNodeMixRGB")
mix.blend_type = "MULTIPLY"
mix.inputs.get("Fac").default_value = 1.0
mix.location = (-150, 400)
nt.links.new(tex.outputs.get("Color"), mix.inputs.get("Color2"))
# If base color is linked, reroute through mix; otherwise just drive base color
base_in = bsdf.inputs.get("Base Color")
if base_in.is_linked:
base_link = base_in.links[0]
nt.links.remove(base_link)
nt.links.new(base_link.from_socket, mix.inputs.get("Color1"))
nt.links.new(mix.outputs.get("Color"), base_in)
if "normal" in maps and maps["normal"]:
img = _load_image(maps["normal"])
tex = _ensure_tex_node("Normal", -450, -500)
tex.image = img
tex.image.colorspace_settings.name = "Non-Color"
normal_map = nt.nodes.new(type="ShaderNodeNormalMap")
normal_map.location = (-150, -500)
nt.links.new(tex.outputs.get("Color"), normal_map.inputs.get("Color"))
nt.links.new(normal_map.outputs.get("Normal"), bsdf.inputs.get("Normal"))
return {"changed_object": None}
if op_type == "create_light":
light_type = op.get("light")
name = op.get("name") or "Light"
location = _validate_vec3(op.get("location"), path="location") or (0.0, 0.0, 0.0)
params = op.get("params") or {}
data = bpy.data.lights.new(name=name, type=light_type.upper())
obj = bpy.data.objects.new(name=name, object_data=data)
bpy.context.scene.collection.objects.link(obj)
obj.location = location
self._apply_light_params(data, params)
return {"created_object": obj.name}
if op_type == "set_light_params":
name = op.get("name")
params = op.get("params") or {}
obj = bpy.data.objects.get(name)
if obj is None or obj.type != "LIGHT":
raise ValueError(f"Light object not found: {name}")
self._apply_light_params(obj.data, params)
return {"changed_object": obj.name}
if op_type == "create_camera":
name = op.get("name") or "Camera"
location = _validate_vec3(op.get("location"), path="location") or (0.0, -5.0, 2.0)
rotation = _validate_vec3(op.get("rotation"), path="rotation")
params = op.get("params") or {}
data = bpy.data.cameras.new(name=name)
obj = bpy.data.objects.new(name=name, object_data=data)
bpy.context.scene.collection.objects.link(obj)
obj.location = location
if rotation is not None:
obj.rotation_euler = rotation
self._apply_camera_params(data, params)
return {"created_object": obj.name}
if op_type == "set_camera_params":
name = op.get("name")
params = op.get("params") or {}
cam = bpy.data.objects.get(name)
if cam is None or cam.type != "CAMERA":
raise ValueError(f"Camera not found: {name}")
self._apply_camera_params(cam.data, params)
return {"changed_object": cam.name}
if op_type == "set_active_camera":
name = op.get("name")
cam = bpy.data.objects.get(name)
if cam is None or cam.type != "CAMERA":
raise ValueError(f"Camera not found: {name}")
bpy.context.scene.camera = cam
return {"changed_object": cam.name}
if op_type == "frame_camera":
cam_name = op.get("camera")
object_names = op.get("objects") or []
margin = float(op.get("margin", 0.1))
cam = bpy.data.objects.get(cam_name)
if cam is None or cam.type != "CAMERA":
raise ValueError(f"Camera not found: {cam_name}")
objs = []
for n in object_names:
o = bpy.data.objects.get(n)
if o is None:
raise ValueError(f"Object not found: {n}")
objs.append(o)
if not objs:
raise ValueError("No objects to frame")
# Compute combined AABB in world space
min_v = mathutils.Vector((1e18, 1e18, 1e18))
max_v = mathutils.Vector((-1e18, -1e18, -1e18))
for o in objs:
if o.type == "MESH":
corners = [o.matrix_world @ mathutils.Vector(c) for c in o.bound_box]
else:
corners = [o.matrix_world.translation]
for c in corners:
min_v.x = min(min_v.x, c.x)
min_v.y = min(min_v.y, c.y)
min_v.z = min(min_v.z, c.z)
max_v.x = max(max_v.x, c.x)
max_v.y = max(max_v.y, c.y)
max_v.z = max(max_v.z, c.z)
center = (min_v + max_v) * 0.5
extent = (max_v - min_v) * 0.5
radius = max(extent.x, extent.y, extent.z) * (1.0 + margin)
# Point camera at center and move it back along its local -Z
cam.location = center + cam.matrix_world.to_quaternion() @ mathutils.Vector((0.0, 0.0, radius * 3.0))
direction = center - cam.location
cam.rotation_euler = direction.to_track_quat("-Z", "Y").to_euler()
return {"changed_object": cam.name}
if op_type == "set_world_background":
color = _validate_rgba(op.get("color"), path="color")
strength = op.get("strength")
self._set_world_background(color=color, strength=strength)
return {"changed_object": None}
if op_type == "set_world_hdri":
source = op.get("source") or {}
strength = float(op.get("strength", 1.0))
rotation = _validate_vec3(op.get("rotation"), path="rotation") or (0.0, 0.0, 0.0)
path = source.get("path")
url = source.get("url")
poly_id = source.get("polyhaven_id")
if poly_id:
if not getattr(bpy.context.scene, "blendermcp_use_polyhaven", False):
raise ValueError("PolyHaven integration is disabled; enable it in Blender to use polyhaven_id")
result = self.download_polyhaven_asset(asset_id=poly_id, asset_type="hdris", resolution="1k", file_format=None)
if "error" in result:
raise ValueError(result["error"])
return {"changed_object": None, "note": "HDRI set via PolyHaven download"}
if url:
_require_requests("HDRI download")
tmp_dir = tempfile.gettempdir()
tmp_path = os.path.join(tmp_dir, f"blender_mcp_hdri_{secrets.token_hex(8)}.hdr")
resp = requests.get(url, headers=_get_req_headers(), stream=True, timeout=60)
resp.raise_for_status()
with open(tmp_path, "wb") as f:
for chunk in resp.iter_content(chunk_size=1024 * 1024):
if chunk:
f.write(chunk)
path = tmp_path
if not path:
raise ValueError("set_world_hdri requires source.path, source.url, or source.polyhaven_id")
if not self._is_path_allowed(path, kind="read"):
raise ValueError("HDRI path is not allowed by addon path policy")
self._set_world_hdri(filepath=path, strength=strength, rotation=rotation)
return {"changed_object": None}
if op_type == "import_model":
path = op.get("path")
fmt = op.get("format")
options = op.get("options") or {}
if not self._is_path_allowed(path, kind="read"):
raise ValueError("Import path is not allowed by addon path policy")
return self._import_model(path, fmt, options)
if op_type == "export_scene":
path = op.get("path")
fmt = op.get("format")
options = op.get("options") or {}
if not self._is_path_allowed(path, kind="write"):
raise ValueError("Export path is not allowed by addon path policy")
return self._export_scene(path, fmt, options)
if op_type == "add_modifier":
obj_name = op.get("name")
mod_type = op.get("modifier_type")
params = op.get("params") or {}
obj = bpy.data.objects.get(obj_name)
if obj is None:
raise ValueError(f"Object not found: {obj_name}")
if obj.type != "MESH":
raise ValueError("Modifiers are only supported on MESH objects")
type_map = {
"subdivision": "SUBSURF",
"bevel": "BEVEL",
"solidify": "SOLIDIFY",
"mirror": "MIRROR",
"array": "ARRAY",
"decimate": "DECIMATE",
"boolean": "BOOLEAN",
"ocean": "OCEAN",
}
bpy_type = type_map.get(mod_type)
if bpy_type is None:
raise ValueError(f"Unsupported modifier type: {mod_type}")
mod = obj.modifiers.new(name=bpy_type, type=bpy_type)
if isinstance(params, dict):
for k, v in params.items():
with suppress(Exception):
setattr(mod, k, v)
return {"changed_object": obj.name, "modifier_name": mod.name}
if op_type == "apply_modifier":
obj_name = op.get("name")
mod_name = op.get("modifier_name")
obj = bpy.data.objects.get(obj_name)
if obj is None:
raise ValueError(f"Object not found: {obj_name}")
mod = obj.modifiers.get(mod_name)
if mod is None:
raise ValueError(f"Modifier not found: {mod_name}")
with suppress(Exception):
bpy.ops.object.mode_set(mode="OBJECT")
for o in bpy.context.selected_objects:
with suppress(Exception):
o.select_set(False)
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.modifier_apply(modifier=mod.name)
return {"changed_object": obj.name, "modifier_name": mod.name}
if op_type == "remove_modifier":
obj_name = op.get("name")
mod_name = op.get("modifier_name")
obj = bpy.data.objects.get(obj_name)
if obj is None:
raise ValueError(f"Object not found: {obj_name}")
mod = obj.modifiers.get(mod_name)
if mod is None:
raise ValueError(f"Modifier not found: {mod_name}")
obj.modifiers.remove(mod)
return {"changed_object": obj.name, "modifier_name": mod_name}
if op_type == "set_render_settings":
return self._set_render_settings(op)
if op_type == "render_still":
output_path = op.get("output_path")
fmt = op.get("format", "PNG")
return self._render_still(output_path=output_path, file_format=fmt)
if op_type == "render_animation":
return self._render_animation(op)
raise ValueError(f"Unsupported op type: {op_type}")
def _get_allowed_path_roots(self):
roots = set()
with suppress(Exception):
roots.add(os.path.abspath(tempfile.gettempdir()))
with suppress(Exception):
roots.add(os.path.abspath(os.path.expanduser("~")))
with suppress(Exception):
fp = bpy.data.filepath
if fp:
roots.add(os.path.abspath(os.path.dirname(fp)))
return sorted(roots)
def _is_path_allowed(self, path: str, *, kind: str) -> bool:
if not path or not isinstance(path, str):
return False
abs_path = os.path.abspath(path)
roots = self._get_allowed_path_roots()
for r in roots:
try:
if abs_path == r or abs_path.startswith(r + os.sep):
return True
except Exception:
continue
return False
def _apply_light_params(self, light_data, params: Dict[str, Any]):
    """Apply a parameter dict onto a Blender light datablock.

    Supported keys: "energy" (number), "color" ([r,g,b,a], alpha ignored),
    and the type-dependent "angle", "size" (soft shadow size), "spot_size".
    Non-dict *params* is ignored; an invalid "color" raises ValueError via
    _validate_rgba.
    """
    if not isinstance(params, dict):
        return
    if "energy" in params and _is_num(params["energy"]):
        light_data.energy = float(params["energy"])
    if "color" in params:
        rgba = _validate_rgba(params["color"], path="params.color")
        if rgba is not None:
            # Lights take RGB only; drop the alpha component.
            light_data.color = (rgba[0], rgba[1], rgba[2])
    # Numeric attributes that only exist on some light types; hasattr guards
    # keep e.g. "spot_size" from failing on a SUN light.
    for key, attr in (
        ("angle", "angle"),
        ("size", "shadow_soft_size"),
        ("spot_size", "spot_size"),
    ):
        if key in params and hasattr(light_data, attr) and _is_num(params[key]):
            setattr(light_data, attr, float(params[key]))
def _apply_camera_params(self, camera_data, params: Dict[str, Any]):
    """Apply a parameter dict onto a Blender camera datablock.

    Supported keys: "lens", "clip_start", "clip_end" (numbers), and a nested
    "dof" dict with "focus_distance" and "aperture_fstop". Non-dict *params*
    is ignored; non-numeric values are skipped.
    """
    if not isinstance(params, dict):
        return
    for key in ("lens", "clip_start", "clip_end"):
        if key in params and _is_num(params[key]):
            setattr(camera_data, key, float(params[key]))
    dof_settings = getattr(camera_data, "dof", None)
    dof_params = params.get("dof")
    if dof_settings and isinstance(dof_params, dict):
        if "focus_distance" in dof_params and _is_num(dof_params["focus_distance"]):
            dof_settings.focus_distance = float(dof_params["focus_distance"])
        # aperture_fstop may not exist on older camera DOF settings.
        if (
            "aperture_fstop" in dof_params
            and hasattr(dof_settings, "aperture_fstop")
            and _is_num(dof_params["aperture_fstop"])
        ):
            dof_settings.aperture_fstop = float(dof_params["aperture_fstop"])
def _ensure_world_nodes(self):
    """Guarantee the scene has a node-based world.

    Creates a new World datablock when the scene has none, enables nodes,
    and returns the (world, node_tree) pair. Raises RuntimeError if the
    node tree is unexpectedly missing after enabling nodes.
    """
    scene = bpy.context.scene
    if scene.world is None:
        scene.world = bpy.data.worlds.new("World")
    world = scene.world
    world.use_nodes = True
    tree = world.node_tree
    if tree is None:
        raise RuntimeError("World node_tree missing")
    return world, tree
def _set_world_background(self, *, color=None, strength=None):
    """Drive the world Background node's color and/or strength.

    Reuses existing Output/Background nodes when present, creating them
    otherwise, and links Background -> Surface only if nothing is wired
    into the output node yet. None arguments leave that setting untouched.
    """
    _, tree = self._ensure_world_nodes()

    def _get_or_add(type_id: str, bl_idname: str, x: int):
        # Find a node by its enum type, or create one at the given x offset.
        node = next((n for n in tree.nodes if n.type == type_id), None)
        if node is None:
            node = tree.nodes.new(type=bl_idname)
            node.location = (x, 0)
        return node

    out_node = _get_or_add("OUTPUT_WORLD", "ShaderNodeOutputWorld", 400)
    bg_node = _get_or_add("BACKGROUND", "ShaderNodeBackground", 0)
    if not any(link.to_node == out_node for link in tree.links):
        tree.links.new(bg_node.outputs.get("Background"), out_node.inputs.get("Surface"))
    if color is not None:
        bg_node.inputs.get("Color").default_value = color
    if strength is not None and _is_num(strength):
        bg_node.inputs.get("Strength").default_value = float(strength)
def _set_world_hdri(self, *, filepath: str, strength: float, rotation: Tuple[float, float, float]):
    """Rebuild the world shader to light the scene with an HDRI image.

    Clears the world node tree and rebuilds the chain
    TexCoord.Generated -> Mapping (rotation) -> Environment Texture ->
    Background (strength) -> World Output.
    """
    world, tree = self._ensure_world_nodes()
    tree.nodes.clear()
    # Build the chain left to right.
    coord_node = tree.nodes.new(type="ShaderNodeTexCoord")
    coord_node.location = (-300, -200)
    map_node = tree.nodes.new(type="ShaderNodeMapping")
    map_node.location = (0, -200)
    map_node.inputs.get("Rotation").default_value = rotation
    env_node = tree.nodes.new(type="ShaderNodeTexEnvironment")
    env_node.location = (0, 0)
    # check_existing reuses an already-loaded image datablock for this path.
    env_node.image = bpy.data.images.load(filepath, check_existing=True)
    bg_node = tree.nodes.new(type="ShaderNodeBackground")
    bg_node.location = (300, 0)
    bg_node.inputs.get("Strength").default_value = strength
    out_node = tree.nodes.new(type="ShaderNodeOutputWorld")
    out_node.location = (600, 0)
    tree.links.new(coord_node.outputs.get("Generated"), map_node.inputs.get("Vector"))
    tree.links.new(map_node.outputs.get("Vector"), env_node.inputs.get("Vector"))
    tree.links.new(env_node.outputs.get("Color"), bg_node.inputs.get("Color"))
    tree.links.new(bg_node.outputs.get("Background"), out_node.inputs.get("Surface"))
    world.use_nodes = True
def _import_model(self, path: str, fmt: str, options: Dict[str, Any]):
    """Import a model file into the scene.

    Supports gltf/glb, fbx, and obj (with a fallback to the newer
    bpy.ops.wm.obj_import operator). *options* is currently unused.
    Returns {"imported_objects": [...names of the selection after import]}.
    Raises RuntimeError when the needed operator is unavailable and
    ValueError for an unsupported format.
    """
    with suppress(Exception):
        bpy.ops.object.mode_set(mode="OBJECT")
    if fmt in ("gltf", "glb"):
        if not hasattr(bpy.ops.import_scene, "gltf"):
            raise RuntimeError("GLTF import operator not available")
        bpy.ops.import_scene.gltf(filepath=path)
    elif fmt == "fbx":
        if not hasattr(bpy.ops.import_scene, "fbx"):
            raise RuntimeError("FBX import operator not available")
        bpy.ops.import_scene.fbx(filepath=path)
    elif fmt == "obj":
        # Legacy operator first; Blender 4.x only ships wm.obj_import.
        if hasattr(bpy.ops.import_scene, "obj"):
            bpy.ops.import_scene.obj(filepath=path)
        elif hasattr(bpy.ops.wm, "obj_import"):
            bpy.ops.wm.obj_import(filepath=path)
        else:
            raise RuntimeError("OBJ import operator not available")
    else:
        raise ValueError(f"Unsupported import format: {fmt}")
    selection = bpy.context.selected_objects
    return {"imported_objects": [o.name for o in selection] if selection else []}
def _export_scene(self, path: str, fmt: str, options: Dict[str, Any]):
    """Export the scene to *path* in the requested format.

    Supports gltf/glb (GLTF_SEPARATE vs single-file GLB), fbx, and obj
    (falling back to bpy.ops.wm.obj_export). *options* is currently unused.
    Creates the destination directory if needed and returns
    {"exported_path": path}. Raises RuntimeError when the needed operator
    is unavailable and ValueError for an unsupported format.
    """
    with suppress(Exception):
        bpy.ops.object.mode_set(mode="OBJECT")
    os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
    if fmt in ("gltf", "glb"):
        if not hasattr(bpy.ops.export_scene, "gltf"):
            raise RuntimeError("GLTF export operator not available")
        gltf_mode = "GLB" if fmt == "glb" else "GLTF_SEPARATE"
        bpy.ops.export_scene.gltf(filepath=path, export_format=gltf_mode)
    elif fmt == "fbx":
        if not hasattr(bpy.ops.export_scene, "fbx"):
            raise RuntimeError("FBX export operator not available")
        bpy.ops.export_scene.fbx(filepath=path)
    elif fmt == "obj":
        # Legacy operator first; Blender 4.x only ships wm.obj_export.
        if hasattr(bpy.ops.export_scene, "obj"):
            bpy.ops.export_scene.obj(filepath=path)
        elif hasattr(bpy.ops.wm, "obj_export"):
            bpy.ops.wm.obj_export(filepath=path)
        else:
            raise RuntimeError("OBJ export operator not available")
    else:
        raise ValueError(f"Unsupported export format: {fmt}")
    return {"exported_path": path}
def _set_render_settings(self, op: Dict[str, Any]):
    """Apply render settings from an op dict onto the current scene.

    Recognized keys: "engine" (e.g. "CYCLES"), "resolution" ([w, h]),
    "samples", "denoise", and a "color_management" dict with
    "view_transform", "look", "exposure", "gamma".

    Fixes over the previous version: a missing/None "engine" no longer
    gets assigned to scene.render.engine (which raises a TypeError), and
    the samples/denoise branches key on the scene's *actual* engine after
    any switch, so "samples" without an explicit "engine" targets the
    engine that will really render.

    Returns {"ok": True}.
    """
    scene = bpy.context.scene
    engine = op.get("engine")
    if engine:
        # Only switch engines when one was requested; None is not a valid enum.
        scene.render.engine = engine
    res = op.get("resolution")
    if isinstance(res, (list, tuple)) and len(res) == 2:
        scene.render.resolution_x = int(res[0])
        scene.render.resolution_y = int(res[1])
    # Route sampling/denoise settings to the engine actually in effect.
    active_engine = scene.render.engine
    samples = op.get("samples")
    if samples is not None:
        if active_engine == "CYCLES" and hasattr(scene, "cycles"):
            scene.cycles.samples = int(samples)
        elif hasattr(scene, "eevee"):
            with suppress(Exception):
                scene.eevee.taa_render_samples = int(samples)
    denoise = op.get("denoise")
    if denoise is not None and active_engine == "CYCLES" and hasattr(scene, "cycles"):
        with suppress(Exception):
            scene.cycles.use_denoising = bool(denoise)
    cm = op.get("color_management") or {}
    if isinstance(cm, dict):
        if "view_transform" in cm and isinstance(cm["view_transform"], str):
            with suppress(Exception):
                scene.view_settings.view_transform = cm["view_transform"]
        if "look" in cm and isinstance(cm["look"], str):
            with suppress(Exception):
                scene.view_settings.look = cm["look"]
        if "exposure" in cm and _is_num(cm["exposure"]):
            with suppress(Exception):
                scene.view_settings.exposure = float(cm["exposure"])
        if "gamma" in cm and _is_num(cm["gamma"]):
            with suppress(Exception):
                scene.view_settings.gamma = float(cm["gamma"])
    return {"ok": True}
def _render_still(self, *, output_path: Optional[str], file_format: str):
    """Render the current scene to a single still image.

    When *output_path* is None, a randomly named PNG under the system temp
    directory is used. Raises ValueError if the target path is rejected by
    the addon path policy. Returns {"output_path": <path used>}.
    """
    if output_path is None:
        token = secrets.token_hex(8)
        output_path = os.path.join(tempfile.gettempdir(), f"blender_mcp_render_{token}.png")
    if not self._is_path_allowed(output_path, kind="write"):
        raise ValueError("Render output path is not allowed by addon path policy")
    # Ensure the destination directory exists before Blender writes to it.
    target_dir = os.path.dirname(os.path.abspath(output_path))
    os.makedirs(target_dir, exist_ok=True)
    scene = bpy.context.scene
    scene.render.filepath = output_path
    scene.render.image_settings.file_format = file_format
    bpy.ops.render.render(write_still=True)
    return {"output_path": output_path}
def _render_animation(self, op: Dict[str, Any]):
    """Render the scene's animation frames into op["output_dir"].

    Required op keys: "output_dir" (str), "frame_start", "frame_end" (ints).
    Frames are written as "frame_<number>" files in the directory.
    Raises ValueError on missing/invalid values or a disallowed directory.
    """
    scene = bpy.context.scene
    out_dir = op.get("output_dir")
    if not isinstance(out_dir, str) or not out_dir:
        raise ValueError("render_animation requires a non-empty string 'output_dir'")
    frame_start = op.get("frame_start")
    frame_end = op.get("frame_end")
    # Bug fix: int(None) raised an unhelpful TypeError when a frame bound was
    # missing; validate explicitly so the caller gets a clear message.
    if frame_start is None or frame_end is None:
        raise ValueError("render_animation requires 'frame_start' and 'frame_end'")
    fs = int(frame_start)
    fe = int(frame_end)
    if fe < fs:
        raise ValueError("frame_end must be >= frame_start")
    if not self._is_path_allowed(out_dir, kind="write"):
        raise ValueError("Animation output dir is not allowed by addon path policy")
    os.makedirs(os.path.abspath(out_dir), exist_ok=True)
    scene.frame_start = fs
    scene.frame_end = fe
    scene.render.filepath = os.path.join(os.path.abspath(out_dir), "frame_")
    bpy.ops.render.render(animation=True)
    return {"output_dir": out_dir, "frame_start": fs, "frame_end": fe}
def get_polyhaven_categories(self, asset_type):
    """Get categories for a specific asset type from Polyhaven.

    asset_type must be one of "hdris", "textures", "models", "all".
    Returns {"categories": <dict>} on success or {"error": <message>}.
    """
    try:
        req_headers = _get_req_headers()
        if asset_type not in ["hdris", "textures", "models", "all"]:
            return {"error": f"Invalid asset type: {asset_type}. Must be one of: hdris, textures, models, all"}
        # Fix: add an explicit timeout (consistent with the Sketchfab calls in
        # this file); without it a stalled connection hangs Blender's handler.
        response = requests.get(
            f"https://api.polyhaven.com/categories/{asset_type}",
            headers=req_headers,
            timeout=30,
        )
        if response.status_code == 200:
            return {"categories": response.json()}
        else:
            return {"error": f"API request failed with status code {response.status_code}"}
    except Exception as e:
        return {"error": str(e)}
def search_polyhaven_assets(self, asset_type=None, categories=None):
    """Search for assets from Polyhaven with optional filtering.

    asset_type: "hdris" | "textures" | "models" | "all" | None.
    categories: comma-separated category filter passed straight to the API.
    Returns at most 20 assets plus total/returned counts, or {"error": ...}.
    """
    try:
        from itertools import islice  # local import: only needed here

        req_headers = _get_req_headers()
        url = "https://api.polyhaven.com/assets"
        params = {}
        if asset_type and asset_type != "all":
            if asset_type not in ["hdris", "textures", "models"]:
                return {"error": f"Invalid asset type: {asset_type}. Must be one of: hdris, textures, models, all"}
            params["type"] = asset_type
        if categories:
            params["categories"] = categories
        # Fix: explicit timeout so a stalled API call cannot hang Blender.
        response = requests.get(url, params=params, headers=req_headers, timeout=30)
        if response.status_code == 200:
            # Limit the response size to avoid overwhelming Blender:
            # keep only the first 20 assets.
            assets = response.json()
            limited_assets = dict(islice(assets.items(), 20))
            return {"assets": limited_assets, "total_count": len(assets), "returned_count": len(limited_assets)}
        else:
            return {"error": f"API request failed with status code {response.status_code}"}
    except Exception as e:
        return {"error": str(e)}
def download_polyhaven_asset(self, asset_id, asset_type, resolution="1k", file_format=None):
    """Download a Poly Haven asset and bring it into the current scene.

    asset_type selects the import path:
      - "hdris": the HDR/EXR is loaded as the world environment texture.
      - "textures": each texture map is downloaded, packed into the .blend,
        and wired into a new Principled BSDF material named after asset_id.
      - "models": the model file plus its "include" dependencies are
        downloaded to a temp dir and imported.

    Returns a dict with "success" and details on success, {"error": ...}
    otherwise. NOTE(review): the requests calls here pass no timeout, so a
    stalled download can block the handler — consider adding one.
    """
    try:
        req_headers = _get_req_headers()
        # First get the files information
        files_response = requests.get(f"https://api.polyhaven.com/files/{asset_id}", headers=req_headers)
        if files_response.status_code != 200:
            return {"error": f"Failed to get asset files: {files_response.status_code}"}
        files_data = files_response.json()
        # Handle different asset types
        if asset_type == "hdris":
            # For HDRIs, download the .hdr or .exr file
            if not file_format:
                file_format = "hdr"  # Default format for HDRIs
            if "hdri" in files_data and resolution in files_data["hdri"] and file_format in files_data["hdri"][resolution]:
                file_info = files_data["hdri"][resolution][file_format]
                file_url = file_info["url"]
                # For HDRIs, we need to save to a temporary file first
                # since Blender can't properly load HDR data directly from memory
                with tempfile.NamedTemporaryFile(suffix=f".{file_format}", delete=False) as tmp_file:
                    # Download the file
                    response = requests.get(file_url, headers=req_headers)
                    if response.status_code != 200:
                        return {"error": f"Failed to download HDRI: {response.status_code}"}
                    tmp_file.write(response.content)
                    tmp_path = tmp_file.name
                try:
                    # Create a new world if none exists
                    if not bpy.data.worlds:
                        bpy.data.worlds.new("World")
                    world = bpy.data.worlds[0]
                    world.use_nodes = True
                    node_tree = world.node_tree
                    # Clear existing nodes
                    for node in node_tree.nodes:
                        node_tree.nodes.remove(node)
                    # Create nodes: TexCoord -> Mapping -> Environment -> Background -> Output
                    tex_coord = node_tree.nodes.new(type='ShaderNodeTexCoord')
                    tex_coord.location = (-800, 0)
                    mapping = node_tree.nodes.new(type='ShaderNodeMapping')
                    mapping.location = (-600, 0)
                    # Load the image from the temporary file
                    env_tex = node_tree.nodes.new(type='ShaderNodeTexEnvironment')
                    env_tex.location = (-400, 0)
                    env_tex.image = bpy.data.images.load(tmp_path)
                    # Use a color space that exists in all Blender versions
                    if file_format.lower() == 'exr':
                        # Try to use Linear color space for EXR files
                        try:
                            env_tex.image.colorspace_settings.name = 'Linear'
                        except:
                            # Fallback to Non-Color if Linear isn't available
                            env_tex.image.colorspace_settings.name = 'Non-Color'
                    else:  # hdr
                        # For HDR files, try these options in order
                        for color_space in ['Linear', 'Linear Rec.709', 'Non-Color']:
                            try:
                                env_tex.image.colorspace_settings.name = color_space
                                break  # Stop if we successfully set a color space
                            except:
                                continue
                    background = node_tree.nodes.new(type='ShaderNodeBackground')
                    background.location = (-200, 0)
                    output = node_tree.nodes.new(type='ShaderNodeOutputWorld')
                    output.location = (0, 0)
                    # Connect nodes
                    node_tree.links.new(tex_coord.outputs['Generated'], mapping.inputs['Vector'])
                    node_tree.links.new(mapping.outputs['Vector'], env_tex.inputs['Vector'])
                    node_tree.links.new(env_tex.outputs['Color'], background.inputs['Color'])
                    node_tree.links.new(background.outputs['Background'], output.inputs['Surface'])
                    # Set as active world
                    bpy.context.scene.world = world
                    # Try to remove the temp file (Blender keeps the image data in memory)
                    with suppress(Exception):
                        os.unlink(tmp_path)
                    return {
                        "success": True,
                        "message": f"HDRI {asset_id} imported successfully",
                        "image_name": env_tex.image.name
                    }
                except Exception as e:
                    return {"error": f"Failed to set up HDRI in Blender: {str(e)}"}
            else:
                return {"error": f"Requested resolution or format not available for this HDRI"}
        elif asset_type == "textures":
            if not file_format:
                file_format = "jpg"  # Default format for textures
            downloaded_maps = {}
            try:
                for map_type in files_data:
                    if map_type not in ["blend", "gltf"]:  # Skip non-texture files
                        if resolution in files_data[map_type] and file_format in files_data[map_type][resolution]:
                            file_info = files_data[map_type][resolution][file_format]
                            file_url = file_info["url"]
                            # Use NamedTemporaryFile like we do for HDRIs
                            with tempfile.NamedTemporaryFile(suffix=f".{file_format}", delete=False) as tmp_file:
                                # Download the file
                                response = requests.get(file_url, headers=req_headers)
                                if response.status_code == 200:
                                    tmp_file.write(response.content)
                                    tmp_path = tmp_file.name
                                    # Load image from temporary file
                                    image = bpy.data.images.load(tmp_path)
                                    image.name = f"{asset_id}_{map_type}.{file_format}"
                                    # Pack the image into .blend file
                                    image.pack()
                                    # Set color space based on map type: color data
                                    # is sRGB, everything else (normal/rough/etc.) is Non-Color
                                    if map_type in ['color', 'diffuse', 'albedo']:
                                        try:
                                            image.colorspace_settings.name = 'sRGB'
                                        except:
                                            pass
                                    else:
                                        try:
                                            image.colorspace_settings.name = 'Non-Color'
                                        except:
                                            pass
                                    downloaded_maps[map_type] = image
                                    # Clean up temporary file
                                    try:
                                        os.unlink(tmp_path)
                                    except:
                                        pass
                if not downloaded_maps:
                    return {"error": f"No texture maps found for the requested resolution and format"}
                # Create a new material with the downloaded textures
                mat = bpy.data.materials.new(name=asset_id)
                mat.use_nodes = True
                nodes = mat.node_tree.nodes
                links = mat.node_tree.links
                # Clear default nodes
                for node in nodes:
                    nodes.remove(node)
                # Create output node
                output = nodes.new(type='ShaderNodeOutputMaterial')
                output.location = (300, 0)
                # Create principled BSDF node
                principled = nodes.new(type='ShaderNodeBsdfPrincipled')
                principled.location = (0, 0)
                links.new(principled.outputs[0], output.inputs[0])
                # Add texture nodes based on available maps
                tex_coord = nodes.new(type='ShaderNodeTexCoord')
                tex_coord.location = (-800, 0)
                mapping = nodes.new(type='ShaderNodeMapping')
                mapping.location = (-600, 0)
                mapping.vector_type = 'TEXTURE'  # Changed from default 'POINT' to 'TEXTURE'
                links.new(tex_coord.outputs['UV'], mapping.inputs['Vector'])
                # Position offset for texture nodes
                x_pos = -400
                y_pos = 300
                # Connect different texture maps
                for map_type, image in downloaded_maps.items():
                    tex_node = nodes.new(type='ShaderNodeTexImage')
                    tex_node.location = (x_pos, y_pos)
                    tex_node.image = image
                    # Set color space based on map type
                    if map_type.lower() in ['color', 'diffuse', 'albedo']:
                        try:
                            tex_node.image.colorspace_settings.name = 'sRGB'
                        except:
                            pass  # Use default if sRGB not available
                    else:
                        try:
                            tex_node.image.colorspace_settings.name = 'Non-Color'
                        except:
                            pass  # Use default if Non-Color not available
                    links.new(mapping.outputs['Vector'], tex_node.inputs['Vector'])
                    # Connect to appropriate input on Principled BSDF
                    if map_type.lower() in ['color', 'diffuse', 'albedo']:
                        links.new(tex_node.outputs['Color'], principled.inputs['Base Color'])
                    elif map_type.lower() in ['roughness', 'rough']:
                        links.new(tex_node.outputs['Color'], principled.inputs['Roughness'])
                    elif map_type.lower() in ['metallic', 'metalness', 'metal']:
                        links.new(tex_node.outputs['Color'], principled.inputs['Metallic'])
                    elif map_type.lower() in ['normal', 'nor']:
                        # Add normal map node
                        normal_map = nodes.new(type='ShaderNodeNormalMap')
                        normal_map.location = (x_pos + 200, y_pos)
                        links.new(tex_node.outputs['Color'], normal_map.inputs['Color'])
                        links.new(normal_map.outputs['Normal'], principled.inputs['Normal'])
                    elif map_type in ['displacement', 'disp', 'height']:
                        # Add displacement node
                        disp_node = nodes.new(type='ShaderNodeDisplacement')
                        disp_node.location = (x_pos + 200, y_pos - 200)
                        links.new(tex_node.outputs['Color'], disp_node.inputs['Height'])
                        links.new(disp_node.outputs['Displacement'], output.inputs['Displacement'])
                    y_pos -= 250
                return {
                    "success": True,
                    "message": f"Texture {asset_id} imported as material",
                    "material": mat.name,
                    "maps": list(downloaded_maps.keys())
                }
            except Exception as e:
                return {"error": f"Failed to process textures: {str(e)}"}
        elif asset_type == "models":
            # For models, prefer glTF format if available
            if not file_format:
                file_format = "gltf"  # Default format for models
            if file_format in files_data and resolution in files_data[file_format]:
                file_info = files_data[file_format][resolution][file_format]
                file_url = file_info["url"]
                # Create a temporary directory to store the model and its dependencies
                temp_dir = tempfile.mkdtemp()
                main_file_path = ""
                try:
                    # Download the main model file
                    main_file_name = file_url.split("/")[-1]
                    main_file_path = os.path.join(temp_dir, main_file_name)
                    response = requests.get(file_url, headers=req_headers)
                    if response.status_code != 200:
                        return {"error": f"Failed to download model: {response.status_code}"}
                    with open(main_file_path, "wb") as f:
                        f.write(response.content)
                    # Check for included files (textures, .bin buffers) and download them
                    if "include" in file_info and file_info["include"]:
                        for include_path, include_info in file_info["include"].items():
                            # Get the URL for the included file - this is the fix
                            include_url = include_info["url"]
                            # Create the directory structure for the included file
                            include_file_path = os.path.join(temp_dir, include_path)
                            os.makedirs(os.path.dirname(include_file_path), exist_ok=True)
                            # Download the included file
                            include_response = requests.get(include_url, headers=req_headers)
                            if include_response.status_code == 200:
                                with open(include_file_path, "wb") as f:
                                    f.write(include_response.content)
                            else:
                                print(f"Failed to download included file: {include_path}")
                    # Import the model into Blender
                    if file_format == "gltf" or file_format == "glb":
                        bpy.ops.import_scene.gltf(filepath=main_file_path)
                    elif file_format == "fbx":
                        bpy.ops.import_scene.fbx(filepath=main_file_path)
                    elif file_format == "obj":
                        bpy.ops.import_scene.obj(filepath=main_file_path)
                    elif file_format == "blend":
                        # For blend files, we need to append or link
                        with bpy.data.libraries.load(main_file_path, link=False) as (data_from, data_to):
                            data_to.objects = data_from.objects
                        # Link the objects to the scene
                        for obj in data_to.objects:
                            if obj is not None:
                                bpy.context.collection.objects.link(obj)
                    else:
                        return {"error": f"Unsupported model format: {file_format}"}
                    # Get the names of imported objects (importers leave them selected)
                    imported_objects = [obj.name for obj in bpy.context.selected_objects]
                    return {
                        "success": True,
                        "message": f"Model {asset_id} imported successfully",
                        "imported_objects": imported_objects
                    }
                except Exception as e:
                    return {"error": f"Failed to import model: {str(e)}"}
                finally:
                    # Clean up temporary directory
                    with suppress(Exception):
                        shutil.rmtree(temp_dir)
            else:
                return {"error": f"Requested format or resolution not available for this model"}
        else:
            return {"error": f"Unsupported asset type: {asset_type}"}
    except Exception as e:
        return {"error": f"Failed to download asset: {str(e)}"}
def set_texture(self, object_name, texture_id):
    """Apply a previously downloaded Polyhaven texture to an object by creating a new material.

    Looks up all packed images named "<texture_id>_<map>.<ext>", builds a
    fresh Principled BSDF node graph wiring base color (optionally AO/ARM
    multiplied), roughness, metallic, normal and displacement maps, then
    replaces ALL of the object's existing materials with the new one.
    Returns a result dict with material/debug info, or {"error": ...}.
    """
    try:
        # Get the object
        obj = bpy.data.objects.get(object_name)
        if not obj:
            return {"error": f"Object not found: {object_name}"}
        # Make sure object can accept materials
        if not hasattr(obj, 'data') or not hasattr(obj.data, 'materials'):
            return {"error": f"Object {object_name} cannot accept materials"}
        # Find all images related to this texture and ensure they're packed
        texture_images: dict[str, bpy.types.Image] = {}
        for img in bpy.data.images:
            if img.name.startswith(texture_id + "_"):
                # Map type is the trailing "<map>" token of "<id>_<map>.<ext>"
                map_type = img.name.split('_')[-1].split('.')[0].lower()
                with suppress(Exception):
                    img.reload()
                if not img.packed_file:
                    with suppress(Exception):
                        img.pack()
                texture_images[map_type] = img
        if not texture_images:
            return {"error": f"No texture images found for: {texture_id}. Please download the texture first."}
        # One material per (texture, object) pair; rebuild from scratch if it exists
        new_mat_name = f"{texture_id}_material_{object_name}"
        existing_mat = bpy.data.materials.get(new_mat_name)
        if existing_mat:
            bpy.data.materials.remove(existing_mat)
        new_mat = bpy.data.materials.new(name=new_mat_name)
        new_mat.use_nodes = True
        nodes = new_mat.node_tree.nodes
        links = new_mat.node_tree.links
        nodes.clear()
        output = nodes.new(type='ShaderNodeOutputMaterial')
        output.location = (700, 0)
        principled = nodes.new(type='ShaderNodeBsdfPrincipled')
        principled.location = (350, 0)
        links.new(principled.outputs['BSDF'], output.inputs['Surface'])
        tex_coord = nodes.new(type='ShaderNodeTexCoord')
        tex_coord.location = (-900, 0)
        mapping = nodes.new(type='ShaderNodeMapping')
        mapping.location = (-700, 0)
        mapping.vector_type = 'TEXTURE'
        links.new(tex_coord.outputs['UV'], mapping.inputs['Vector'])
        # Create one image-texture node per downloaded map, stacked vertically
        texture_nodes: dict[str, bpy.types.Node] = {}
        x_pos = -450
        y_pos = 350
        for map_type, image in texture_images.items():
            tex_node = nodes.new(type='ShaderNodeTexImage')
            tex_node.location = (x_pos, y_pos)
            tex_node.image = image
            # Color data is sRGB; all data maps (normal/rough/...) are Non-Color
            if map_type in ['color', 'diffuse', 'albedo']:
                with suppress(Exception):
                    tex_node.image.colorspace_settings.name = 'sRGB'
            else:
                with suppress(Exception):
                    tex_node.image.colorspace_settings.name = 'Non-Color'
            links.new(mapping.outputs['Vector'], tex_node.inputs['Vector'])
            texture_nodes[map_type] = tex_node
            y_pos -= 250
        # Return the first texture node whose map type matches one of *names*
        def _first_node(names: list[str]):
            for n in names:
                if n in texture_nodes:
                    return texture_nodes[n]
            return None
        base_color_node = _first_node(['color', 'diffuse', 'albedo'])
        rough_node = _first_node(['roughness', 'rough'])
        metal_node = _first_node(['metallic', 'metalness', 'metal'])
        normal_node = _first_node(['normal', 'nor', 'gl', 'dx'])
        disp_node_img = _first_node(['displacement', 'disp', 'height'])
        ao_node = _first_node(['ao'])
        arm_node = _first_node(['arm'])
        # Base color (+ optional AO multiply)
        if base_color_node:
            if ao_node or arm_node:
                ao_source = None
                if ao_node:
                    ao_source = ao_node.outputs['Color']
                elif arm_node:
                    # ARM packs AO/Rough/Metal into R/G/B; take the R channel
                    sep = nodes.new(type='ShaderNodeSeparateRGB')
                    sep.location = (-150, -150)
                    links.new(arm_node.outputs['Color'], sep.inputs['Image'])
                    ao_source = sep.outputs['R']
                mix = nodes.new(type='ShaderNodeMixRGB')
                mix.location = (120, 180)
                mix.blend_type = 'MULTIPLY'
                mix.inputs['Fac'].default_value = 0.8
                links.new(base_color_node.outputs['Color'], mix.inputs[1])
                links.new(ao_source, mix.inputs[2])
                links.new(mix.outputs['Color'], principled.inputs['Base Color'])
            else:
                links.new(base_color_node.outputs['Color'], principled.inputs['Base Color'])
        # Roughness / Metallic (fallback to ARM channels)
        if rough_node:
            links.new(rough_node.outputs['Color'], principled.inputs['Roughness'])
        if metal_node:
            links.new(metal_node.outputs['Color'], principled.inputs['Metallic'])
        if arm_node and (not rough_node or not metal_node):
            sep = nodes.new(type='ShaderNodeSeparateRGB')
            sep.location = (-150, -350)
            links.new(arm_node.outputs['Color'], sep.inputs['Image'])
            if not rough_node:
                links.new(sep.outputs['G'], principled.inputs['Roughness'])
            if not metal_node:
                links.new(sep.outputs['B'], principled.inputs['Metallic'])
        # Normal map
        if normal_node:
            nm = nodes.new(type='ShaderNodeNormalMap')
            nm.location = (120, -50)
            links.new(normal_node.outputs['Color'], nm.inputs['Color'])
            links.new(nm.outputs['Normal'], principled.inputs['Normal'])
        # Displacement
        if disp_node_img:
            disp = nodes.new(type='ShaderNodeDisplacement')
            disp.location = (350, -220)
            disp.inputs['Scale'].default_value = 0.1
            links.new(disp_node_img.outputs['Color'], disp.inputs['Height'])
            links.new(disp.outputs['Displacement'], output.inputs['Displacement'])
        # CRITICAL: Make sure to clear all existing materials from the object
        while len(obj.data.materials) > 0:
            obj.data.materials.pop(index=0)
        # Assign the new material to the object
        obj.data.materials.append(new_mat)
        # CRITICAL: Make the object active and select it
        bpy.context.view_layer.objects.active = obj
        obj.select_set(True)
        # CRITICAL: Force Blender to update the material
        bpy.context.view_layer.update()
        # Get the list of texture maps
        texture_maps = sorted(texture_images.keys())
        # Get info about texture nodes for debugging
        material_info = {
            "name": new_mat.name,
            "has_nodes": new_mat.use_nodes,
            "node_count": len(new_mat.node_tree.nodes),
            "texture_nodes": []
        }
        for node in new_mat.node_tree.nodes:
            if node.type == 'TEX_IMAGE' and node.image:
                connections = []
                for output in node.outputs:
                    for link in output.links:
                        connections.append(f"{output.name} → {link.to_node.name}.{link.to_socket.name}")
                material_info["texture_nodes"].append({
                    "name": node.name,
                    "image": node.image.name,
                    "colorspace": node.image.colorspace_settings.name,
                    "connections": connections
                })
        return {
            "success": True,
            "message": f"Created new material and applied texture {texture_id} to {object_name}",
            "material": new_mat.name,
            "maps": texture_maps,
            "material_info": material_info
        }
    except Exception as e:
        print(f"Error in set_texture: {str(e)}")
        traceback.print_exc()
        return {"error": f"Failed to apply texture: {str(e)}"}
def get_polyhaven_status(self):
    """Report whether the PolyHaven integration checkbox is enabled."""
    if bpy.context.scene.blendermcp_use_polyhaven:
        return {"enabled": True, "message": "PolyHaven integration is enabled and ready to use."}
    return {
        "enabled": False,
        "message": """PolyHaven integration is currently disabled. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Check the 'Use assets from Poly Haven' checkbox
3. Restart the connection to Claude"""
    }
#region Hyper3D
def get_hyper3d_status(self):
    """Get the current status of Hyper3D Rodin integration.

    Returns {"enabled": bool, "message": str}; "enabled" is True only when
    the checkbox is on AND an API key is configured.
    """
    enabled = bpy.context.scene.blendermcp_use_hyper3d
    if enabled:
        prefs = _get_prefs()
        api_key = prefs.hyper3d_api_key if prefs else ""
        if not api_key:
            # Fix: corrected "plaform" -> "platform" in the user-facing message.
            return {
                "enabled": False,
                "message": """Hyper3D Rodin integration is currently enabled, but API key is not given. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Keep the 'Use Hyper3D Rodin 3D model generation' checkbox checked
3. Choose the right platform and fill in the API Key
4. Restart the connection to Claude"""
            }
        mode = bpy.context.scene.blendermcp_hyper3d_mode
        message = f"Hyper3D Rodin integration is enabled and ready to use. Mode: {mode}. " + \
            f"Key type: {'private' if api_key != RODIN_FREE_TRIAL_KEY else 'free_trial'}"
        return {
            "enabled": True,
            "message": message
        }
    else:
        return {
            "enabled": False,
            "message": """Hyper3D Rodin integration is currently disabled. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Check the 'Use Hyper3D Rodin 3D model generation' checkbox
3. Restart the connection to Claude"""
        }
def create_rodin_job(self, *args, **kwargs):
    """Dispatch Rodin job creation to the handler for the configured mode."""
    mode = bpy.context.scene.blendermcp_hyper3d_mode
    if mode == "MAIN_SITE":
        return self.create_rodin_job_main_site(*args, **kwargs)
    if mode == "FAL_AI":
        return self.create_rodin_job_fal_ai(*args, **kwargs)
    return f"Error: Unknown Hyper3D Rodin mode!"
def create_rodin_job_main_site(
    self,
    text_prompt: str=None,
    images: list[tuple[str, str]]=None,
    bbox_condition=None
):
    """Call the Rodin main-site API to create a generation job.

    images is a list of (suffix, bytes/content) pairs uploaded as files.
    Returns the parsed JSON response (job uuid and subscription key) or
    {"error": ...} on failure. (The stray mid-body string that served as a
    comment was promoted to this docstring.)
    """
    try:
        _require_requests("Hyper3D Rodin")
        prefs = _get_prefs()
        api_key = prefs.hyper3d_api_key if prefs else ""
        if images is None:
            images = []
        files = [
            *[("images", (f"{i:04d}{img_suffix}", img)) for i, (img_suffix, img) in enumerate(images)],
            ("tier", (None, "Sketch")),
            ("mesh_mode", (None, "Raw")),
        ]
        if text_prompt:
            files.append(("prompt", (None, text_prompt)))
        if bbox_condition:
            files.append(("bbox_condition", (None, json.dumps(bbox_condition))))
        response = requests.post(
            "https://hyperhuman.deemos.com/api/v2/rodin",
            headers={
                "Authorization": f"Bearer {api_key}",
            },
            files=files,
            # Fix: bound the wait instead of hanging Blender forever; generous
            # because image uploads can be slow.
            timeout=120,
        )
        data = response.json()
        return data
    except Exception as e:
        return {"error": str(e)}
def create_rodin_job_fal_ai(
    self,
    text_prompt: str=None,
    images: list[tuple[str, str]]=None,
    bbox_condition=None
):
    """Call the fal.ai-hosted Rodin queue API to create a generation job.

    images is a list of image URLs here (unlike the main-site variant).
    Returns the parsed JSON response or {"error": ...} on failure.
    """
    try:
        _require_requests("Hyper3D Rodin")
        prefs = _get_prefs()
        api_key = prefs.hyper3d_api_key if prefs else ""
        req_data = {
            "tier": "Sketch",
        }
        if images:
            req_data["input_image_urls"] = images
        if text_prompt:
            req_data["prompt"] = text_prompt
        if bbox_condition:
            req_data["bbox_condition"] = bbox_condition
        response = requests.post(
            "https://queue.fal.run/fal-ai/hyper3d/rodin",
            headers={
                "Authorization": f"Key {api_key}",
                "Content-Type": "application/json",
            },
            json=req_data,
            timeout=60,  # Fix: bound the wait instead of hanging Blender forever
        )
        data = response.json()
        return data
    except Exception as e:
        return {"error": str(e)}
def poll_rodin_job_status(self, *args, **kwargs):
    """Dispatch Rodin job-status polling to the handler for the configured mode."""
    mode = bpy.context.scene.blendermcp_hyper3d_mode
    if mode == "MAIN_SITE":
        return self.poll_rodin_job_status_main_site(*args, **kwargs)
    if mode == "FAL_AI":
        return self.poll_rodin_job_status_fal_ai(*args, **kwargs)
    return f"Error: Unknown Hyper3D Rodin mode!"
def poll_rodin_job_status_main_site(self, subscription_key: str):
    """Call the main-site job status API; return {"status_list": [...]}.

    Exceptions (network errors, unexpected payloads) propagate to the
    caller, which wraps handler errors uniformly.
    """
    _require_requests("Hyper3D Rodin")
    prefs = _get_prefs()
    api_key = prefs.hyper3d_api_key if prefs else ""
    response = requests.post(
        "https://hyperhuman.deemos.com/api/v2/status",
        headers={
            "Authorization": f"Bearer {api_key}",
        },
        json={
            "subscription_key": subscription_key,
        },
        timeout=30,  # Fix: status polls must not hang Blender indefinitely
    )
    data = response.json()
    return {
        "status_list": [i["status"] for i in data["jobs"]]
    }
def poll_rodin_job_status_fal_ai(self, request_id: str):
    """Call the fal.ai queue status API; return its parsed JSON response.

    Exceptions propagate to the caller, which wraps handler errors uniformly.
    """
    _require_requests("Hyper3D Rodin")
    prefs = _get_prefs()
    api_key = prefs.hyper3d_api_key if prefs else ""
    response = requests.get(
        f"https://queue.fal.run/fal-ai/hyper3d/requests/{request_id}/status",
        headers={
            "Authorization": f"Key {api_key}",
        },
        timeout=30,  # Fix: status polls must not hang Blender indefinitely
    )
    data = response.json()
    return data
@staticmethod
def _clean_imported_glb(filepath, mesh_name=None):
    """Import a GLB file and normalize it to a single mesh object.

    Handles two layouts: a lone mesh, or an empty parent with exactly one
    mesh child (the empty is removed and the mesh unparented). Optionally
    renames the mesh (and its data) to *mesh_name*. Returns the mesh object,
    or None when the imported structure is not one of the two layouts
    (errors are printed, not raised).
    """
    # Get the set of existing objects before import
    existing_objects = set(bpy.data.objects)
    # Import the GLB file
    bpy.ops.import_scene.gltf(filepath=filepath)
    # Ensure the context is updated
    bpy.context.view_layer.update()
    # Get all imported objects (set difference against the pre-import snapshot)
    imported_objects = list(set(bpy.data.objects) - existing_objects)
    # imported_objects = [obj for obj in bpy.context.view_layer.objects if obj.select_get()]
    if not imported_objects:
        print("Error: No objects were imported.")
        return
    # Identify the mesh object
    mesh_obj = None
    if len(imported_objects) == 1 and imported_objects[0].type == 'MESH':
        mesh_obj = imported_objects[0]
        print("Single mesh imported, no cleanup needed.")
    else:
        if len(imported_objects) == 2:
            empty_objs = [i for i in imported_objects if i.type == "EMPTY"]
            if len(empty_objs) != 1:
                print("Error: Expected an empty node with one mesh child or a single mesh object.")
                return
            parent_obj = empty_objs.pop()
            if len(parent_obj.children) == 1:
                potential_mesh = parent_obj.children[0]
                if potential_mesh.type == 'MESH':
                    print("GLB structure confirmed: Empty node with one mesh child.")
                    # Unparent the mesh from the empty node
                    potential_mesh.parent = None
                    # Remove the empty node
                    bpy.data.objects.remove(parent_obj)
                    print("Removed empty node, keeping only the mesh.")
                    mesh_obj = potential_mesh
                else:
                    print("Error: Child is not a mesh object.")
                    return
            else:
                print("Error: Expected an empty node with one mesh child or a single mesh object.")
                return
        else:
            print("Error: Expected an empty node with one mesh child or a single mesh object.")
            return
    # Rename the mesh if needed (best effort; failures are non-fatal)
    try:
        if mesh_obj and mesh_obj.name is not None and mesh_name:
            mesh_obj.name = mesh_name
            if mesh_obj.data.name is not None:
                mesh_obj.data.name = mesh_name
            print(f"Mesh renamed to: {mesh_name}")
    except Exception as e:
        print("Having issue with renaming, give up renaming.")
    return mesh_obj
def import_generated_asset(self, *args, **kwargs):
    """Dispatch generated-asset import to the handler for the configured mode."""
    mode = bpy.context.scene.blendermcp_hyper3d_mode
    if mode == "MAIN_SITE":
        return self.import_generated_asset_main_site(*args, **kwargs)
    if mode == "FAL_AI":
        return self.import_generated_asset_fal_ai(*args, **kwargs)
    return f"Error: Unknown Hyper3D Rodin mode!"
def import_generated_asset_main_site(self, task_uuid: str, name: str):
    """Fetch the generated asset from the Rodin main site and import it into Blender.

    Downloads the first .glb listed for *task_uuid* to a temp file, imports
    and normalizes it via _clean_imported_glb, and returns transform +
    bounding-box info. Returns {"succeed": False, "error": ...} on failure;
    the temp file is always removed in the finally block.
    """
    _require_requests("Hyper3D Rodin")
    prefs = _get_prefs()
    api_key = prefs.hyper3d_api_key if prefs else ""
    response = requests.post(
        "https://hyperhuman.deemos.com/api/v2/download",
        headers={
            "Authorization": f"Bearer {api_key}",
        },
        json={
            'task_uuid': task_uuid
        }
    )
    data_ = response.json()
    temp_file = None
    temp_path = None
    for i in data_["list"]:
        if i["name"].endswith(".glb"):
            temp_file = tempfile.NamedTemporaryFile(
                delete=False,
                prefix=task_uuid,
                suffix=".glb",
            )
            try:
                # Download the content
                response = requests.get(i["url"], stream=True)
                response.raise_for_status()  # Raise an exception for HTTP errors
                # Write the content to the temporary file
                for chunk in response.iter_content(chunk_size=8192):
                    temp_file.write(chunk)
                # Close the file
                temp_path = temp_file.name
                temp_file.close()
            except Exception as e:
                # Clean up the file if there's an error
                temp_file.close()
                with suppress(Exception):
                    os.unlink(temp_file.name)
                return {"succeed": False, "error": str(e)}
            break
    else:
        # for/else: no .glb entry was found in the task's file list
        return {"succeed": False, "error": "Generation failed. Please first make sure that all jobs of the task are done and then try again later."}
    try:
        obj = self._clean_imported_glb(
            filepath=temp_path,
            mesh_name=name
        )
        result = {
            "name": obj.name,
            "type": obj.type,
            "location": [obj.location.x, obj.location.y, obj.location.z],
            "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z],
            "scale": [obj.scale.x, obj.scale.y, obj.scale.z],
        }
        if obj.type == "MESH":
            bounding_box = self._get_aabb(obj)
            result["world_bounding_box"] = bounding_box
        return {
            "succeed": True, **result
        }
    except Exception as e:
        return {"succeed": False, "error": str(e)}
    finally:
        if temp_path:
            with suppress(Exception):
                os.unlink(temp_path)
def import_generated_asset_fal_ai(self, request_id: str, name: str):
    """Fetch the generated asset from the fal.ai queue and import it into Blender.

    Downloads data_["model_mesh"]["url"] to a temp .glb, imports and
    normalizes it via _clean_imported_glb, and returns transform +
    bounding-box info. Returns {"succeed": False, "error": ...} on failure;
    the temp file is always removed in the finally block.
    """
    _require_requests("Hyper3D Rodin")
    prefs = _get_prefs()
    api_key = prefs.hyper3d_api_key if prefs else ""
    response = requests.get(
        f"https://queue.fal.run/fal-ai/hyper3d/requests/{request_id}",
        headers={
            "Authorization": f"Key {api_key}",
        }
    )
    data_ = response.json()
    temp_file = None
    temp_path = None
    temp_file = tempfile.NamedTemporaryFile(
        delete=False,
        prefix=request_id,
        suffix=".glb",
    )
    try:
        # Download the content
        response = requests.get(data_["model_mesh"]["url"], stream=True)
        response.raise_for_status()  # Raise an exception for HTTP errors
        # Write the content to the temporary file
        for chunk in response.iter_content(chunk_size=8192):
            temp_file.write(chunk)
        # Close the file
        temp_path = temp_file.name
        temp_file.close()
    except Exception as e:
        # Clean up the file if there's an error
        temp_file.close()
        with suppress(Exception):
            os.unlink(temp_file.name)
        return {"succeed": False, "error": str(e)}
    try:
        obj = self._clean_imported_glb(
            filepath=temp_path,
            mesh_name=name
        )
        result = {
            "name": obj.name,
            "type": obj.type,
            "location": [obj.location.x, obj.location.y, obj.location.z],
            "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z],
            "scale": [obj.scale.x, obj.scale.y, obj.scale.z],
        }
        if obj.type == "MESH":
            bounding_box = self._get_aabb(obj)
            result["world_bounding_box"] = bounding_box
        return {
            "succeed": True, **result
        }
    except Exception as e:
        return {"succeed": False, "error": str(e)}
    finally:
        if temp_path:
            with suppress(Exception):
                os.unlink(temp_path)
#endregion
#region Sketchfab API
def get_sketchfab_status(self):
    """Get the current status of Sketchfab integration.

    Bug fix: the original returned "enabled: True" based solely on a valid
    API key, ignoring the integration checkbox (and left the subsequent
    `enabled and api_key` branch unreachable). The checkbox is now checked
    first; the key is only validated against /me when the feature is on.
    """
    enabled = bpy.context.scene.blendermcp_use_sketchfab
    prefs = _get_prefs()
    api_key = prefs.sketchfab_api_key if prefs else ""
    if not enabled:
        return {
            "enabled": False,
            "message": """Sketchfab integration is currently disabled. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Check the 'Use assets from Sketchfab' checkbox
3. Enter your Sketchfab API Key
4. Restart the connection to Claude"""
        }
    if not api_key:
        return {
            "enabled": False,
            "message": """Sketchfab integration is currently enabled, but API key is not given. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Keep the 'Use Sketchfab' checkbox checked
3. Enter your Sketchfab API Key
4. Restart the connection to Claude"""
        }
    # Feature is on and a key is configured: validate it against the API.
    try:
        _require_requests("Sketchfab")
        headers = {
            "Authorization": f"Token {api_key}"
        }
        response = requests.get(
            "https://api.sketchfab.com/v3/me",
            headers=headers,
            timeout=30  # Add timeout of 30 seconds
        )
        if response.status_code == 200:
            user_data = response.json()
            username = user_data.get("username", "Unknown user")
            return {
                "enabled": True,
                "message": f"Sketchfab integration is enabled and ready to use. Logged in as: {username}"
            }
        return {
            "enabled": False,
            "message": f"Sketchfab API key seems invalid. Status code: {response.status_code}"
        }
    except requests.exceptions.Timeout:
        return {
            "enabled": False,
            "message": "Timeout connecting to Sketchfab API. Check your internet connection."
        }
    except Exception as e:
        return {
            "enabled": False,
            "message": f"Error testing Sketchfab API key: {str(e)}"
        }
def search_sketchfab_models(self, query, categories=None, count=20, downloadable=True):
    """Search for models on Sketchfab based on query and optional filters.

    Args:
        query: free-text search string.
        categories: optional comma-separated Sketchfab category slugs.
        count: maximum number of results to request.
        downloadable: when True, restrict results to downloadable models.

    Returns:
        The raw Sketchfab search response dict, or {"error": ...} on failure.
    """
    try:
        _require_requests("Sketchfab")
        prefs = _get_prefs()
        api_key = prefs.sketchfab_api_key if prefs else ""
        if not api_key:
            return {"error": "Sketchfab API key is not configured"}
        # Build search parameters with exact fields from Sketchfab API docs
        params = {
            "type": "models",
            "q": query,
            "count": count,
            "downloadable": downloadable,
            "archives_flavours": False
        }
        if categories:
            params["categories"] = categories
        # API-key auth uses the "Token" scheme per Sketchfab API docs.
        headers = {
            "Authorization": f"Token {api_key}"
        }
        # Use the search endpoint as specified in the API documentation
        response = requests.get(
            "https://api.sketchfab.com/v3/search",
            headers=headers,
            params=params,
            timeout=30  # Add timeout of 30 seconds
        )
        if response.status_code == 401:
            return {"error": "Authentication failed (401). Check your API key."}
        if response.status_code != 200:
            return {"error": f"API request failed with status code {response.status_code}"}
        response_data = response.json()
        # Safety check on the response structure
        if response_data is None:
            return {"error": "Received empty response from Sketchfab API"}
        # Handle 'results' potentially missing from response
        results = response_data.get("results", [])
        if not isinstance(results, list):
            return {"error": f"Unexpected response format from Sketchfab API: {response_data}"}
        return response_data
    except requests.exceptions.Timeout:
        return {"error": "Request timed out. Check your internet connection."}
    except json.JSONDecodeError as e:
        return {"error": f"Invalid JSON response from Sketchfab API: {str(e)}"}
    except Exception as e:
        # traceback is imported at module level; the previous local re-import
        # was redundant.
        traceback.print_exc()
        return {"error": str(e)}
def download_sketchfab_model(self, uid):
    """Download a model from Sketchfab by its UID, extract it, and import the glTF.

    Args:
        uid: Sketchfab model UID (must be downloadable with the configured key).

    Returns:
        {"success": True, "imported_objects": [...]} on success,
        or {"error": ...} on any failure.
    """
    try:
        _require_requests("Sketchfab")
        prefs = _get_prefs()
        api_key = prefs.sketchfab_api_key if prefs else ""
        if not api_key:
            return {"error": "Sketchfab API key is not configured"}
        # Use proper authorization header for API key auth
        headers = {
            "Authorization": f"Token {api_key}"
        }
        # Request download URL using the exact endpoint from the documentation
        download_endpoint = f"https://api.sketchfab.com/v3/models/{uid}/download"
        response = requests.get(
            download_endpoint,
            headers=headers,
            timeout=30  # Add timeout of 30 seconds
        )
        if response.status_code == 401:
            return {"error": "Authentication failed (401). Check your API key."}
        if response.status_code != 200:
            return {"error": f"Download request failed with status code {response.status_code}"}
        data = response.json()
        # Safety check for None data
        if data is None:
            return {"error": "Received empty response from Sketchfab API for download request"}
        # Extract download URL with safety checks
        gltf_data = data.get("gltf")
        if not gltf_data:
            return {"error": "No gltf download URL available for this model. Response: " + str(data)}
        download_url = gltf_data.get("url")
        if not download_url:
            return {"error": "No download URL available for this model. Make sure the model is downloadable and you have access."}
        # Download the model archive (60 second timeout).
        model_response = requests.get(download_url, timeout=60)
        if model_response.status_code != 200:
            return {"error": f"Model download failed with status code {model_response.status_code}"}
        # Save to temporary file
        temp_dir = tempfile.mkdtemp()
        zip_file_path = os.path.join(temp_dir, f"{uid}.zip")
        with open(zip_file_path, "wb") as f:
            f.write(model_response.content)
        # Extract the zip file, rejecting any entry that would escape temp_dir
        # ("zip slip").
        with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
            abs_temp_dir = os.path.abspath(temp_dir)
            for file_info in zip_ref.infolist():
                file_path = file_info.filename
                # Normalize separators (zip entries may use / or \).
                target_path = os.path.join(temp_dir, os.path.normpath(file_path))
                abs_target_path = os.path.abspath(target_path)
                # Bug fix: compare with a trailing separator so a sibling path
                # that merely shares the prefix (e.g. /tmp/abcX vs /tmp/abc)
                # cannot pass the containment check.
                if abs_target_path != abs_temp_dir and not abs_target_path.startswith(abs_temp_dir + os.sep):
                    with suppress(Exception):
                        shutil.rmtree(temp_dir)
                    return {"error": "Security issue: Zip contains files with path traversal attempt"}
                # Additional explicit check for directory traversal
                if ".." in file_path:
                    with suppress(Exception):
                        shutil.rmtree(temp_dir)
                    return {"error": "Security issue: Zip contains files with directory traversal sequence"}
            # If all files passed security checks, extract them
            zip_ref.extractall(temp_dir)
        # Find the main glTF file
        gltf_files = [f for f in os.listdir(temp_dir) if f.endswith('.gltf') or f.endswith('.glb')]
        if not gltf_files:
            with suppress(Exception):
                shutil.rmtree(temp_dir)
            return {"error": "No glTF file found in the downloaded model"}
        main_file = os.path.join(temp_dir, gltf_files[0])
        # Import the model
        bpy.ops.import_scene.gltf(filepath=main_file)
        # Get the names of imported objects
        imported_objects = [obj.name for obj in bpy.context.selected_objects]
        # Clean up temporary files
        with suppress(Exception):
            shutil.rmtree(temp_dir)
        return {
            "success": True,
            "message": "Model imported successfully",
            "imported_objects": imported_objects
        }
    except requests.exceptions.Timeout:
        return {"error": "Request timed out. Check your internet connection and try again with a simpler model."}
    except json.JSONDecodeError as e:
        return {"error": f"Invalid JSON response from Sketchfab API: {str(e)}"}
    except Exception as e:
        # traceback is imported at module level; no local re-import needed.
        traceback.print_exc()
        return {"error": f"Failed to download model: {str(e)}"}
#endregion
#region Hunyuan3D
def get_hunyuan3d_status(self):
"""Get the current status of Hunyuan3D integration"""
enabled = bpy.context.scene.blendermcp_use_hunyuan3d
hunyuan3d_mode = bpy.context.scene.blendermcp_hunyuan3d_mode
prefs = _get_prefs()
if enabled:
match hunyuan3d_mode:
case "OFFICIAL_API":
secret_id = prefs.hunyuan3d_secret_id if prefs else ""
secret_key = prefs.hunyuan3d_secret_key if prefs else ""
if not secret_id or not secret_key:
return {
"enabled": False,
"mode": hunyuan3d_mode,
"message": """Hunyuan3D integration is currently enabled, but SecretId or SecretKey is not given. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Keep the 'Use Tencent Hunyuan 3D model generation' checkbox checked
3. Choose the right platform and fill in the SecretId and SecretKey
4. Restart the connection to Claude"""
}
case "LOCAL_API":
api_url = prefs.hunyuan3d_api_url if prefs else ""
if not api_url:
return {
"enabled": False,
"mode": hunyuan3d_mode,
"message": """Hunyuan3D integration is currently enabled, but API URL is not given. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Keep the 'Use Tencent Hunyuan 3D model generation' checkbox checked
3. Choose the right platform and fill in the API URL
4. Restart the connection to Claude"""
}
case _:
return {
"enabled": False,
"message": "Hunyuan3D integration is enabled and mode is not supported."
}
return {
"enabled": True,
"mode": hunyuan3d_mode,
"message": "Hunyuan3D integration is enabled and ready to use."
}
return {
"enabled": False,
"message": """Hunyuan3D integration is currently disabled. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Check the 'Use Tencent Hunyuan 3D model generation' checkbox
3. Restart the connection to Claude"""
}
@staticmethod
def get_tencent_cloud_sign_headers(
method: str,
path: str,
headParams: dict,
data: dict,
service: str,
region: str,
secret_id: str,
secret_key: str,
host: str = None
):
"""Generate the signature header required for Tencent Cloud API requests headers"""
# Generate timestamp
timestamp = int(time.time())
date = datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%d")
# If host is not provided, it is generated based on service and region.
if not host:
host = f"{service}.tencentcloudapi.com"
endpoint = f"https://{host}"
# Constructing the request body
payload_str = json.dumps(data)
# ************* Step 1: Concatenate the canonical request string *************
canonical_uri = path
canonical_querystring = ""
ct = "application/json; charset=utf-8"
canonical_headers = f"content-type:{ct}\nhost:{host}\nx-tc-action:{headParams.get('Action', '').lower()}\n"
signed_headers = "content-type;host;x-tc-action"
hashed_request_payload = hashlib.sha256(payload_str.encode("utf-8")).hexdigest()
canonical_request = (method + "\n" +
canonical_uri + "\n" +
canonical_querystring + "\n" +
canonical_headers + "\n" +
signed_headers + "\n" +
hashed_request_payload)
# ************* Step 2: Construct the reception signature string *************
credential_scope = f"{date}/{service}/tc3_request"
hashed_canonical_request = hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()
string_to_sign = ("TC3-HMAC-SHA256" + "\n" +
str(timestamp) + "\n" +
credential_scope + "\n" +
hashed_canonical_request)
# ************* Step 3: Calculate the signature *************
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
secret_date = sign(("TC3" + secret_key).encode("utf-8"), date)
secret_service = sign(secret_date, service)
secret_signing = sign(secret_service, "tc3_request")
signature = hmac.new(
secret_signing,
string_to_sign.encode("utf-8"),
hashlib.sha256
).hexdigest()
# ************* Step 4: Connect Authorization *************
authorization = ("TC3-HMAC-SHA256" + " " +
"Credential=" + secret_id + "/" + credential_scope + ", " +
"SignedHeaders=" + signed_headers + ", " +
"Signature=" + signature)
# Constructing request headers
headers = {
"Authorization": authorization,
"Content-Type": "application/json; charset=utf-8",
"Host": host,
"X-TC-Action": headParams.get("Action", ""),
"X-TC-Timestamp": str(timestamp),
"X-TC-Version": headParams.get("Version", ""),
"X-TC-Region": region
}
return headers, endpoint
def create_hunyuan_job(self, *args, **kwargs):
match bpy.context.scene.blendermcp_hunyuan3d_mode:
case "OFFICIAL_API":
return self.create_hunyuan_job_main_site(*args, **kwargs)
case "LOCAL_API":
return self.create_hunyuan_job_local_site(*args, **kwargs)
case _:
return f"Error: Unknown Hunyuan3D mode!"
def create_hunyuan_job_main_site(
    self,
    text_prompt: str = None,
    image: str = None
):
    """Submit a Hunyuan3D generation job to the official Tencent Cloud API.

    Exactly one of text_prompt or image must be provided. image may be an
    http(s) URL (passed through) or a local file path (base64-encoded).

    Returns:
        The parsed API response dict, or {"error": ...} on failure.
    """
    try:
        _require_requests("Hunyuan3D")
        prefs = _get_prefs()
        secret_id = prefs.hunyuan3d_secret_id if prefs else ""
        secret_key = prefs.hunyuan3d_secret_key if prefs else ""
        if not secret_id or not secret_key:
            return {"error": "SecretId or SecretKey is not given"}
        # Parameter verification: the API accepts exactly one input modality.
        if not text_prompt and not image:
            return {"error": "Prompt or Image is required"}
        if text_prompt and image:
            return {"error": "Prompt and Image cannot be provided simultaneously"}
        # Fixed parameter configuration
        service = "hunyuan"
        action = "SubmitHunyuanTo3DJob"
        version = "2023-09-01"
        region = "ap-guangzhou"
        headParams = {
            "Action": action,
            "Version": version,
            "Region": region,
        }
        # Constructing request parameters
        data = {
            "Num": 1  # The current API limit is only 1
        }
        # Handling text prompts
        if text_prompt:
            if len(text_prompt) > 200:
                return {"error": "Prompt exceeds 200 characters limit"}
            data["Prompt"] = text_prompt
        # Handling image
        if image:
            if re.match(r'^https?://', image, re.IGNORECASE) is not None:
                data["ImageUrl"] = image
            else:
                try:
                    # Local file path: convert to Base64 format
                    with open(image, "rb") as f:
                        image_base64 = base64.b64encode(f.read()).decode("ascii")
                    data["ImageBase64"] = image_base64
                except Exception as e:
                    return {"error": f"Image encoding failed: {str(e)}"}
        # Get signed headers (the signature covers json.dumps(data) exactly).
        headers, endpoint = self.get_tencent_cloud_sign_headers("POST", "/", headParams, data, service, region, secret_id, secret_key)
        response = requests.post(
            endpoint,
            headers=headers,
            data=json.dumps(data),
            timeout=30  # Bug fix: was missing, inconsistent with other HTTP calls
        )
        if response.status_code == 200:
            return response.json()
        return {
            # Bug fix: interpolate the body, not the Response object's repr.
            "error": f"API request failed with status {response.status_code}: {response.text}"
        }
    except Exception as e:
        return {"error": str(e)}
def create_hunyuan_job_local_site(
    self,
    text_prompt: str = None,
    image: str = None):
    """Submit a generation request to a self-hosted Hunyuan3D API and import the result.

    The local endpoint returns the generated GLB directly in the response
    body; it is written to a temp file and imported on Blender's main thread
    via a bpy timer (bpy.ops is not safe from worker threads).

    Returns:
        {"status": "DONE", ...} on success, or {"error": ...} on failure.
    """
    try:
        _require_requests("Hunyuan3D")
        prefs = _get_prefs()
        base_url = (prefs.hunyuan3d_api_url if prefs else "").rstrip('/')
        octree_resolution = prefs.hunyuan3d_octree_resolution if prefs else 256
        num_inference_steps = prefs.hunyuan3d_num_inference_steps if prefs else 20
        guidance_scale = prefs.hunyuan3d_guidance_scale if prefs else 5.5
        texture = prefs.hunyuan3d_texture if prefs else False
        if not base_url:
            return {"error": "API URL is not given"}
        # Parameter verification
        if not text_prompt and not image:
            return {"error": "Prompt or Image is required"}
        # Constructing request parameters
        data = {
            "octree_resolution": octree_resolution,
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale,
            "texture": texture,
        }
        # Handling text prompts
        if text_prompt:
            data["text"] = text_prompt
        # Handling image
        if image:
            if re.match(r'^https?://', image, re.IGNORECASE) is not None:
                try:
                    # Remote image: fetch it and forward as base64.
                    resImg = requests.get(image, timeout=30)  # Bug fix: was missing a timeout
                    resImg.raise_for_status()
                    image_base64 = base64.b64encode(resImg.content).decode("ascii")
                    data["image"] = image_base64
                except Exception as e:
                    return {"error": f"Failed to download or encode image: {str(e)}"}
            else:
                try:
                    # Local file path: convert to Base64 format
                    with open(image, "rb") as f:
                        image_base64 = base64.b64encode(f.read()).decode("ascii")
                    data["image"] = image_base64
                except Exception as e:
                    return {"error": f"Image encoding failed: {str(e)}"}
        # NOTE(review): deliberately no timeout here — local generation can
        # take several minutes depending on the inference settings.
        response = requests.post(
            f"{base_url}/generate",
            json=data,
        )
        if response.status_code != 200:
            return {
                "error": f"Generation failed: {response.text}"
            }
        # The response body is the binary GLB itself; save it to a temp file.
        # (The old comment claimed base64 decoding, which never happened.)
        with tempfile.NamedTemporaryFile(delete=False, suffix=".glb") as temp_file:
            temp_file.write(response.content)
            temp_file_name = temp_file.name
        # Import the GLB file in the main thread
        def import_handler():
            bpy.ops.import_scene.gltf(filepath=temp_file_name)
            with suppress(Exception):
                os.unlink(temp_file_name)
            return None  # returning None makes this a one-shot timer
        bpy.app.timers.register(import_handler)
        return {
            "status": "DONE",
            "message": "Generation and Import glb succeeded"
        }
    except Exception as e:
        print(f"An error occurred: {e}")
        return {"error": str(e)}
def poll_hunyuan_job_status(self, *a, **kw):
    # Thin public entry point: forward everything to the AI-API implementation.
    return self.poll_hunyuan_job_status_ai(*a, **kw)
def poll_hunyuan_job_status_ai(self, job_id: str):
    """Query the status of a submitted Hunyuan3D job on the official API.

    Args:
        job_id: job identifier, optionally prefixed with "job_".

    Returns:
        The parsed API response dict, or {"error": ...} on failure.
    """
    # Bug fix: removed a stray debug print(job_id) left in the hot path.
    try:
        _require_requests("Hunyuan3D")
        prefs = _get_prefs()
        secret_id = prefs.hunyuan3d_secret_id if prefs else ""
        secret_key = prefs.hunyuan3d_secret_key if prefs else ""
        if not secret_id or not secret_key:
            return {"error": "SecretId or SecretKey is not given"}
        if not job_id:
            return {"error": "JobId is required"}
        service = "hunyuan"
        action = "QueryHunyuanTo3DJob"
        version = "2023-09-01"
        region = "ap-guangzhou"
        headParams = {
            "Action": action,
            "Version": version,
            "Region": region,
        }
        # Clients may pass ids as "job_<id>"; the API expects the bare id.
        clean_job_id = job_id.removeprefix("job_")
        data = {
            "JobId": clean_job_id
        }
        headers, endpoint = self.get_tencent_cloud_sign_headers("POST", "/", headParams, data, service, region, secret_id, secret_key)
        response = requests.post(
            endpoint,
            headers=headers,
            data=json.dumps(data),
            timeout=30  # Bug fix: was missing, inconsistent with other HTTP calls
        )
        if response.status_code == 200:
            return response.json()
        return {
            # Bug fix: interpolate the body, not the Response object's repr.
            "error": f"API request failed with status {response.status_code}: {response.text}"
        }
    except Exception as e:
        return {"error": str(e)}
def import_generated_asset_hunyuan(self, *a, **kw):
    # Thin public entry point: forward everything to the AI-API implementation.
    return self.import_generated_asset_hunyuan_ai(*a, **kw)
def import_generated_asset_hunyuan_ai(self, name: str, zip_file_url: str):
    """Download a Hunyuan job result ZIP, extract it, and import the OBJ model.

    Args:
        name: optional new name for the imported mesh object.
        zip_file_url: http(s) URL of the result archive.

    Returns:
        {"succeed": True, ...object info...} on success,
        {"succeed": False, "error": ...} or {"error": ...} on failure.
    """
    if not zip_file_url:
        return {"error": "Zip file not found"}
    # Validate URL
    if not re.match(r'^https?://', zip_file_url, re.IGNORECASE):
        return {"error": "Invalid URL format. Must start with http:// or https://"}
    # Create a temporary directory
    temp_dir = tempfile.mkdtemp(prefix="tencent_obj_")
    zip_file_path = osp.join(temp_dir, "model.zip")
    obj_file_path = osp.join(temp_dir, "model.obj")
    # (Removed an unused mtl_file_path local; the .mtl is resolved by the importer.)
    try:
        # Download ZIP file, streamed so the archive is never fully in memory.
        zip_response = requests.get(zip_file_url, stream=True, timeout=60)  # Bug fix: was missing a timeout
        zip_response.raise_for_status()
        with open(zip_file_path, "wb") as f:
            for chunk in zip_response.iter_content(chunk_size=8192):
                f.write(chunk)
        # Unzip safely (helper rejects path-traversal entries).
        with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
            _safe_extract_zip(zip_ref, temp_dir)
        # Find the .obj file (there may be multiple; the last one found wins,
        # falling back to the default model.obj path).
        for file in os.listdir(temp_dir):
            if file.endswith(".obj"):
                obj_file_path = osp.join(temp_dir, file)
        if not osp.exists(obj_file_path):
            return {"succeed": False, "error": "OBJ file not found after extraction"}
        # Import obj file; Blender 4.x renamed the OBJ importer operator.
        if bpy.app.version >= (4, 0, 0):
            bpy.ops.wm.obj_import(filepath=obj_file_path)
        else:
            bpy.ops.import_scene.obj(filepath=obj_file_path)
        imported_objs = [obj for obj in bpy.context.selected_objects if obj.type == 'MESH']
        if not imported_objs:
            return {"succeed": False, "error": "No mesh objects imported"}
        obj = imported_objs[0]
        if name:
            obj.name = name
        result = {
            "name": obj.name,
            "type": obj.type,
            "location": [obj.location.x, obj.location.y, obj.location.z],
            "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z],
            "scale": [obj.scale.x, obj.scale.y, obj.scale.z],
        }
        if obj.type == "MESH":
            bounding_box = self._get_aabb(obj)
            result["world_bounding_box"] = bounding_box
        # Pack any textures loaded from the temp folder so we can safely delete it
        abs_temp_dir = os.path.abspath(temp_dir)
        for img in bpy.data.images:
            with suppress(Exception):
                img_path = bpy.path.abspath(img.filepath) if img.filepath else ""
                if img_path and os.path.abspath(img_path).startswith(abs_temp_dir + os.sep):
                    if not img.packed_file:
                        img.pack()
        return {"succeed": True, **result}
    except Exception as e:
        return {"succeed": False, "error": str(e)}
    finally:
        # Clean up temporary directory
        with suppress(Exception):
            shutil.rmtree(temp_dir)
#endregion
# Blender UI Panel
class BLENDERMCP_AddonPreferences(bpy.types.AddonPreferences):
    # Persistent per-user settings for the add-on. Stored in Blender's user
    # preferences rather than the .blend file, which keeps API keys and the
    # auth token out of shared scene files. bl_idname must match the add-on
    # module name for Blender to associate these preferences with it.
    bl_idname = ADDON_IDNAME
    # Bind address for the local MCP socket server (see the start operator).
    host: bpy.props.StringProperty(
        name="Host",
        description="Host for the BlenderMCP socket server",
        default="localhost",
    )
    # TCP port for the socket server; restricted to the non-privileged range.
    port: bpy.props.IntProperty(
        name="Port",
        description="Port for the BlenderMCP socket server",
        default=9876,
        min=1024,
        max=65535,
    )
    # Shared secret clients must present; can be regenerated with the
    # blendermcp.generate_auth_token operator.
    auth_token: bpy.props.StringProperty(
        name="Auth Token",
        subtype="PASSWORD",
        description="Shared secret required for clients to control Blender (recommended for security)",
        default="",
    )
    # Credential for the Hyper3D Rodin generation service.
    hyper3d_api_key: bpy.props.StringProperty(
        name="Hyper3D API Key",
        subtype="PASSWORD",
        description="API Key provided by Hyper3D",
        default="",
    )
    # Token for the Sketchfab search/download API.
    sketchfab_api_key: bpy.props.StringProperty(
        name="Sketchfab API Key",
        subtype="PASSWORD",
        description="API Key provided by Sketchfab",
        default="",
    )
    # Tencent Cloud credentials used for TC3-HMAC-SHA256 request signing
    # (OFFICIAL_API mode of the Hunyuan3D integration).
    hunyuan3d_secret_id: bpy.props.StringProperty(
        name="Hunyuan 3D SecretId",
        description="SecretId provided by Hunyuan 3D",
        default="",
    )
    hunyuan3d_secret_key: bpy.props.StringProperty(
        name="Hunyuan 3D SecretKey",
        subtype="PASSWORD",
        description="SecretKey provided by Hunyuan 3D",
        default="",
    )
    # Base URL of a self-hosted Hunyuan3D server (LOCAL_API mode).
    hunyuan3d_api_url: bpy.props.StringProperty(
        name="API URL",
        description="URL of the Hunyuan 3D API service (for LOCAL_API mode)",
        default="http://localhost:8081",
    )
    # Generation-quality knobs forwarded verbatim to the local Hunyuan3D server.
    hunyuan3d_octree_resolution: bpy.props.IntProperty(
        name="Octree Resolution",
        description="Octree resolution for the 3D generation",
        default=256,
        min=128,
        max=512,
    )
    hunyuan3d_num_inference_steps: bpy.props.IntProperty(
        name="Number of Inference Steps",
        description="Number of inference steps for the 3D generation",
        default=20,
        min=20,
        max=50,
    )
    hunyuan3d_guidance_scale: bpy.props.FloatProperty(
        name="Guidance Scale",
        description="Guidance scale for the 3D generation",
        default=5.5,
        min=1.0,
        max=10.0,
    )
    hunyuan3d_texture: bpy.props.BoolProperty(
        name="Generate Texture",
        description="Whether to generate texture for the 3D model",
        default=False,
    )
class BLENDERMCP_OT_GenerateAuthToken(bpy.types.Operator):
    # Fills the auth_token preference with a fresh URL-safe random secret.
    # (Deliberately no class docstring: bpy would surface it as the tooltip.)
    bl_idname = "blendermcp.generate_auth_token"
    bl_label = "Generate Auth Token"

    def execute(self, context):
        preferences = _get_prefs()
        if preferences is not None:
            preferences.auth_token = secrets.token_urlsafe(32)
            self.report({'INFO'}, "Auth token generated")
            return {'FINISHED'}
        return {'CANCELLED'}
class BLENDERMCP_PT_Panel(bpy.types.Panel):
    # Sidebar panel (N-panel) in the 3D Viewport under the "BlenderMCP" tab.
    bl_label = "Blender MCP"
    bl_idname = "BLENDERMCP_PT_Panel"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'BlenderMCP'
    def draw(self, context):
        # Statement order defines the top-to-bottom row order in the panel.
        layout = self.layout
        scene = context.scene
        prefs = _get_prefs()
        # Connection settings live in the add-on preferences; only draw them
        # when the preferences object is available.
        if prefs:
            box = layout.box()
            box.label(text="Connection")
            box.prop(prefs, "host")
            box.prop(prefs, "port")
            box.prop(prefs, "auth_token")
            box.operator("blendermcp.generate_auth_token", text="Generate Auth Token")
        layout.prop(scene, "blendermcp_use_polyhaven", text="Use assets from Poly Haven")
        layout.prop(scene, "blendermcp_use_hyper3d", text="Use Hyper3D Rodin 3D model generation")
        # Each integration's extra options are revealed only when it is enabled.
        if scene.blendermcp_use_hyper3d:
            layout.prop(scene, "blendermcp_hyper3d_mode", text="Rodin Mode")
            if prefs:
                layout.prop(prefs, "hyper3d_api_key", text="API Key")
            layout.operator("blendermcp.set_hyper3d_free_trial_api_key", text="Set Free Trial API Key")
        layout.prop(scene, "blendermcp_use_sketchfab", text="Use assets from Sketchfab")
        if scene.blendermcp_use_sketchfab:
            if prefs:
                layout.prop(prefs, "sketchfab_api_key", text="API Key")
        layout.prop(scene, "blendermcp_use_hunyuan3d", text="Use Tencent Hunyuan 3D model generation")
        if scene.blendermcp_use_hunyuan3d:
            layout.prop(scene, "blendermcp_hunyuan3d_mode", text="Hunyuan3D Mode")
            # OFFICIAL_API needs Tencent Cloud credentials; LOCAL_API needs a
            # URL plus the generation-quality settings.
            if scene.blendermcp_hunyuan3d_mode == 'OFFICIAL_API':
                if prefs:
                    layout.prop(prefs, "hunyuan3d_secret_id", text="SecretId")
                    layout.prop(prefs, "hunyuan3d_secret_key", text="SecretKey")
            if scene.blendermcp_hunyuan3d_mode == 'LOCAL_API':
                if prefs:
                    layout.prop(prefs, "hunyuan3d_api_url", text="API URL")
                    layout.prop(prefs, "hunyuan3d_octree_resolution", text="Octree Resolution")
                    layout.prop(prefs, "hunyuan3d_num_inference_steps", text="Number of Inference Steps")
                    layout.prop(prefs, "hunyuan3d_guidance_scale", text="Guidance Scale")
                    layout.prop(prefs, "hunyuan3d_texture", text="Generate Texture")
        # Server start/stop toggle plus the current bind address.
        if not scene.blendermcp_server_running:
            layout.operator("blendermcp.start_server", text="Connect to MCP server")
        else:
            layout.operator("blendermcp.stop_server", text="Disconnect from MCP server")
        if prefs:
            layout.label(text=f"Running on {prefs.host}:{prefs.port}")
# Operator to set Hyper3D API Key
class BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey(bpy.types.Operator):
    # Stores the bundled free-trial Rodin key and switches to MAIN_SITE mode.
    bl_idname = "blendermcp.set_hyper3d_free_trial_api_key"
    bl_label = "Set Free Trial API Key"

    def execute(self, context):
        prefs = _get_prefs()
        if prefs is None:
            # Bug fix: previously reported "API Key set successfully!" and
            # returned FINISHED even when preferences were unavailable and
            # nothing had been stored.
            self.report({'ERROR'}, "Add-on preferences not available")
            return {'CANCELLED'}
        prefs.hyper3d_api_key = RODIN_FREE_TRIAL_KEY
        context.scene.blendermcp_hyper3d_mode = 'MAIN_SITE'
        self.report({'INFO'}, "API Key set successfully!")
        return {'FINISHED'}
# Operator to start the server
class BLENDERMCP_OT_StartServer(bpy.types.Operator):
    # Starts (or resumes) the BlenderMCP socket server and records the
    # running state on the scene so the panel can flip its button.
    bl_idname = "blendermcp.start_server"
    bl_label = "Connect to Claude"
    bl_description = "Start the BlenderMCP server to connect with Claude"

    def execute(self, context):
        prefs = _get_prefs()
        host = prefs.host if prefs else "localhost"
        port = prefs.port if prefs else 9876
        # Reuse a server instance already attached to bpy.types, creating a
        # fresh one only when none exists.
        server = getattr(bpy.types, "blendermcp_server", None)
        if not server:
            server = BlenderMCPServer(host=host, port=port)
            bpy.types.blendermcp_server = server
        server.start()
        context.scene.blendermcp_server_running = True
        return {'FINISHED'}
# Operator to stop the server
class BLENDERMCP_OT_StopServer(bpy.types.Operator):
    # Stops the socket server (if any) and clears the running flag.
    bl_idname = "blendermcp.stop_server"
    bl_label = "Stop the connection to Claude"
    bl_description = "Stop the connection to Claude"

    def execute(self, context):
        server = getattr(bpy.types, "blendermcp_server", None)
        if server:
            server.stop()
            del bpy.types.blendermcp_server
        context.scene.blendermcp_server_running = False
        return {'FINISHED'}
# Registration functions
def register():
    """Register Scene properties, the preferences/panel classes, and operators."""
    # Scene-level toggles: saved with the .blend file, unlike the preferences.
    bpy.types.Scene.blendermcp_server_running = bpy.props.BoolProperty(
        name="Server Running",
        default=False
    )
    bpy.types.Scene.blendermcp_use_polyhaven = bpy.props.BoolProperty(
        name="Use Poly Haven",
        description="Enable Poly Haven asset integration",
        default=False
    )
    bpy.types.Scene.blendermcp_use_hyper3d = bpy.props.BoolProperty(
        name="Use Hyper3D Rodin",
        # Typo fix: was "generatino integration".
        description="Enable Hyper3D Rodin generation integration",
        default=False
    )
    bpy.types.Scene.blendermcp_hyper3d_mode = bpy.props.EnumProperty(
        name="Rodin Mode",
        description="Choose the platform used to call Rodin APIs",
        items=[
            ("MAIN_SITE", "hyper3d.ai", "hyper3d.ai"),
            ("FAL_AI", "fal.ai", "fal.ai"),
        ],
        default="MAIN_SITE"
    )
    bpy.types.Scene.blendermcp_use_hunyuan3d = bpy.props.BoolProperty(
        name="Use Hunyuan 3D",
        description="Enable Hunyuan asset integration",
        default=False
    )
    bpy.types.Scene.blendermcp_hunyuan3d_mode = bpy.props.EnumProperty(
        name="Hunyuan3D Mode",
        description="Choose a local or official APIs",
        items=[
            ("LOCAL_API", "local api", "local api"),
            ("OFFICIAL_API", "official api", "official api"),
        ],
        default="LOCAL_API"
    )
    bpy.types.Scene.blendermcp_use_sketchfab = bpy.props.BoolProperty(
        name="Use Sketchfab",
        description="Enable Sketchfab asset integration",
        default=False
    )
    # UI classes and operators (mirrored in unregister, in reverse order).
    bpy.utils.register_class(BLENDERMCP_AddonPreferences)
    bpy.utils.register_class(BLENDERMCP_PT_Panel)
    bpy.utils.register_class(BLENDERMCP_OT_GenerateAuthToken)
    bpy.utils.register_class(BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey)
    bpy.utils.register_class(BLENDERMCP_OT_StartServer)
    bpy.utils.register_class(BLENDERMCP_OT_StopServer)
    print("BlenderMCP addon registered")
def unregister():
    """Tear down the addon: stop the server, then remove classes and Scene properties."""
    # Stop the server if it's running
    if hasattr(bpy.types, "blendermcp_server") and bpy.types.blendermcp_server:
        bpy.types.blendermcp_server.stop()
        del bpy.types.blendermcp_server
    # Unregister classes in reverse registration order.
    for cls in (
        BLENDERMCP_OT_StopServer,
        BLENDERMCP_OT_StartServer,
        BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey,
        BLENDERMCP_OT_GenerateAuthToken,
        BLENDERMCP_PT_Panel,
        BLENDERMCP_AddonPreferences,
    ):
        bpy.utils.unregister_class(cls)
    # Drop the Scene properties installed by register().
    for prop_name in (
        "blendermcp_server_running",
        "blendermcp_use_polyhaven",
        "blendermcp_use_hyper3d",
        "blendermcp_hyper3d_mode",
        "blendermcp_use_sketchfab",
        "blendermcp_use_hunyuan3d",
        "blendermcp_hunyuan3d_mode",
    ):
        delattr(bpy.types.Scene, prop_name)
    print("BlenderMCP addon unregistered")
# Allow registering the add-on when the file is run directly from
# Blender's text editor (normal installs call register() via the add-on system).
if __name__ == "__main__":
    register()