# Code created by Siddharth Ahuja: www.github.com/ahujasid © 2025
import re
import bpy
import mathutils
import json
import threading
import socket
import time
import requests
import tempfile
import traceback
import os
import shutil
import zipfile
from bpy.props import IntProperty
import io
from datetime import datetime
import hashlib, hmac, base64
import os.path as osp
from contextlib import redirect_stdout, suppress
# Add-on metadata displayed in Blender's Preferences > Add-ons list.
bl_info = {
    "name": "Blender MCP",
    "author": "BlenderMCP",
    "version": (1, 2),
    "blender": (3, 0, 0),  # minimum supported Blender version
    "location": "View3D > Sidebar > BlenderMCP",
    "description": "Connect Blender to Claude via MCP",
    "category": "Interface",
}
# Shared trial key for the Hyper3D Rodin service.
# NOTE(review): shipping an API key in source is presumably intentional for
# the free-trial tier — confirm it carries no account-level privileges.
RODIN_FREE_TRIAL_KEY = "k9TcfFoEhNd9cCPP2guHAHHHkctZHIRhZDywZ1euGUXwihbYLpOjQhofby80NJez"
# Add User-Agent as required by Poly Haven API
REQ_HEADERS = requests.utils.default_headers()
REQ_HEADERS.update({"User-Agent": "blender-mcp"})
class BlenderMCPServer:
def __init__(self, host='localhost', port=9876):
self.host = host
self.port = port
self.running = False
self.socket = None
self.server_thread = None
def start(self):
if self.running:
print("Server is already running")
return
self.running = True
try:
# Create socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((self.host, self.port))
self.socket.listen(1)
# Start server thread
self.server_thread = threading.Thread(target=self._server_loop)
self.server_thread.daemon = True
self.server_thread.start()
print(f"BlenderMCP server started on {self.host}:{self.port}")
except Exception as e:
print(f"Failed to start server: {str(e)}")
self.stop()
def stop(self):
self.running = False
# Close socket
if self.socket:
try:
self.socket.close()
except:
pass
self.socket = None
# Wait for thread to finish
if self.server_thread:
try:
if self.server_thread.is_alive():
self.server_thread.join(timeout=1.0)
except:
pass
self.server_thread = None
print("BlenderMCP server stopped")
    def _server_loop(self):
        """Main server loop in a separate thread.

        Accepts one client at a time (listen backlog of 1) and hands each
        connection to its own daemon thread via _handle_client. The loop
        exits when self.running is cleared by stop().
        """
        print("Server thread started")
        self.socket.settimeout(1.0)  # Timeout to allow for stopping
        while self.running:
            try:
                # Accept new connection
                try:
                    client, address = self.socket.accept()
                    print(f"Connected to client: {address}")
                    # Handle client in a separate thread
                    client_thread = threading.Thread(
                        target=self._handle_client,
                        args=(client,)
                    )
                    client_thread.daemon = True
                    client_thread.start()
                except socket.timeout:
                    # Just check running condition
                    continue
                except Exception as e:
                    print(f"Error accepting connection: {str(e)}")
                    # Back off briefly so a persistent accept error
                    # does not spin the loop at full speed.
                    time.sleep(0.5)
            except Exception as e:
                print(f"Error in server loop: {str(e)}")
                if not self.running:
                    break
                time.sleep(0.5)
        print("Server thread stopped")
    def _handle_client(self, client):
        """Handle connected client with improved buffering and stability.

        Runs on its own daemon thread. Bytes are accumulated in `buffer`
        until they parse as a complete JSON command; the command is then
        executed on Blender's main thread (via bpy.app.timers) and the JSON
        response is written back on the same socket. One request is handled
        at a time per connection.
        """
        print("Client handler started")
        # Configure socket for better stability
        client.settimeout(300.0)  # 5 minute timeout for long operations
        client.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        # Large buffer for handling big messages (scripts, etc.)
        RECV_BUFFER_SIZE = 65536  # 64KB buffer
        MAX_MESSAGE_SIZE = 10 * 1024 * 1024  # 10MB max message size
        buffer = b''
        # Event + shared dict used to hand the result back from the main
        # thread to this socket thread.
        response_event = threading.Event()
        response_result = {'data': None, 'error': None}
        try:
            while self.running:
                # Receive data with larger buffer
                try:
                    data = client.recv(RECV_BUFFER_SIZE)
                    if not data:
                        print("Client disconnected (no data)")
                        break
                    buffer += data
                    # Safety check: prevent memory exhaustion
                    if len(buffer) > MAX_MESSAGE_SIZE:
                        print(f"Message too large ({len(buffer)} bytes), rejecting")
                        buffer = b''
                        error_response = {
                            "status": "error",
                            "message": f"Message exceeds maximum size of {MAX_MESSAGE_SIZE} bytes"
                        }
                        client.sendall(json.dumps(error_response).encode('utf-8'))
                        continue
                    # Try to parse complete JSON message
                    try:
                        message_str = buffer.decode('utf-8')
                        command = json.loads(message_str)
                        buffer = b''  # Clear buffer after successful parse
                        print(f"Received command: {command.get('type', 'unknown')} ({len(message_str)} bytes)")
                        # Reset event for this request
                        response_event.clear()
                        response_result['data'] = None
                        response_result['error'] = None
                        # Execute command in Blender's main thread
                        def execute_wrapper():
                            # Runs on the main thread via bpy.app.timers;
                            # returning None unregisters the timer after one call.
                            try:
                                response = self.execute_command(command)
                                response_result['data'] = response
                            except Exception as e:
                                print(f"Error executing command: {str(e)}")
                                traceback.print_exc()
                                response_result['error'] = str(e)
                            finally:
                                # Always wake the socket thread, even on failure.
                                response_event.set()
                            return None
                        # Schedule execution in main thread
                        bpy.app.timers.register(execute_wrapper, first_interval=0.0)
                        # Wait for execution to complete (with timeout)
                        if response_event.wait(timeout=300.0):  # 5 minute timeout
                            if response_result['error']:
                                response_json = json.dumps({
                                    "status": "error",
                                    "message": response_result['error']
                                })
                            else:
                                response_json = json.dumps(response_result['data'])
                            # Send response in chunks if large
                            response_bytes = response_json.encode('utf-8')
                            try:
                                client.sendall(response_bytes)
                                print(f"Sent response ({len(response_bytes)} bytes)")
                            except Exception as send_err:
                                print(f"Failed to send response: {send_err}")
                                break
                        else:
                            # Timeout waiting for execution
                            print("Command execution timeout (300s)")
                            timeout_response = json.dumps({
                                "status": "error",
                                "message": "Command execution timeout (300 seconds)"
                            })
                            try:
                                client.sendall(timeout_response.encode('utf-8'))
                            except:
                                pass
                    except json.JSONDecodeError:
                        # Incomplete JSON data, wait for more chunks
                        # This is normal for large messages
                        pass
                    except UnicodeDecodeError as ude:
                        print(f"Unicode decode error: {ude}")
                        buffer = b''  # Reset buffer on decode error
                except socket.timeout:
                    # Socket timeout - check if we should continue
                    if not self.running:
                        break
                    # Otherwise continue waiting
                    continue
                except ConnectionResetError:
                    print("Connection reset by client")
                    break
                except BrokenPipeError:
                    print("Broken pipe - client disconnected")
                    break
                except Exception as e:
                    print(f"Error receiving data: {str(e)}")
                    traceback.print_exc()
                    break
        except Exception as e:
            print(f"Error in client handler: {str(e)}")
            traceback.print_exc()
        finally:
            try:
                client.close()
            except:
                pass
            print("Client handler stopped")
def execute_command(self, command):
"""Execute a command in the main Blender thread"""
try:
return self._execute_command_internal(command)
except Exception as e:
print(f"Error executing command: {str(e)}")
traceback.print_exc()
return {"status": "error", "message": str(e)}
def _execute_command_internal(self, command):
"""Internal command execution with proper context"""
cmd_type = command.get("type")
params = command.get("params", {})
# Add a handler for checking PolyHaven status
if cmd_type == "get_polyhaven_status":
return {"status": "success", "result": self.get_polyhaven_status()}
# Base handlers that are always available
handlers = {
# Scene/Object info
"get_scene_info": self.get_scene_info,
"get_object_info": self.get_object_info,
"get_viewport_screenshot": self.get_viewport_screenshot,
"execute_code": self.execute_code,
"execute_blender_code": self.execute_code, # Alias for MCP compatibility
# Object manipulation (P0/P1 priority)
"create_primitive": self.create_primitive,
"modify_object": self.modify_object,
"delete_object": self.delete_object,
# Material system (P2 priority)
"create_material": self.create_material,
"apply_material": self.apply_material,
"set_material_property": self.set_material_property,
# Collection management
"create_collection": self.create_collection,
"add_to_collection": self.add_to_collection,
"list_collections": self.list_collections,
# File operations
"save_file": self.save_file,
"render_scene": self.render_scene,
# File system operations
"get_project_directory": self.get_project_directory,
"list_files": self.list_files,
"create_directory": self.create_directory,
# Import/Export operations
"import_asset": self.import_asset,
"export_asset": self.export_asset,
"get_supported_formats": self.get_supported_formats,
"optimize_asset": self.optimize_asset,
"organize_assets_by_type": self.organize_assets_by_type,
# Status checks
"get_polyhaven_status": self.get_polyhaven_status,
"get_hyper3d_status": self.get_hyper3d_status,
"get_sketchfab_status": self.get_sketchfab_status,
"get_hunyuan3d_status": self.get_hunyuan3d_status,
}
# Add Polyhaven handlers only if enabled
if bpy.context.scene.blendermcp_use_polyhaven:
polyhaven_handlers = {
"get_polyhaven_categories": self.get_polyhaven_categories,
"search_polyhaven_assets": self.search_polyhaven_assets,
"download_polyhaven_asset": self.download_polyhaven_asset,
"set_texture": self.set_texture,
}
handlers.update(polyhaven_handlers)
# Add Hyper3d handlers only if enabled
if bpy.context.scene.blendermcp_use_hyper3d:
polyhaven_handlers = {
"create_rodin_job": self.create_rodin_job,
"poll_rodin_job_status": self.poll_rodin_job_status,
"import_generated_asset": self.import_generated_asset,
}
handlers.update(polyhaven_handlers)
# Add Sketchfab handlers only if enabled
if bpy.context.scene.blendermcp_use_sketchfab:
sketchfab_handlers = {
"search_sketchfab_models": self.search_sketchfab_models,
"download_sketchfab_model": self.download_sketchfab_model,
}
handlers.update(sketchfab_handlers)
# Add Hunyuan3d handlers only if enabled
if bpy.context.scene.blendermcp_use_hunyuan3d:
hunyuan_handlers = {
"create_hunyuan_job": self.create_hunyuan_job,
"poll_hunyuan_job_status": self.poll_hunyuan_job_status,
"import_generated_asset_hunyuan": self.import_generated_asset_hunyuan
}
handlers.update(hunyuan_handlers)
handler = handlers.get(cmd_type)
if handler:
try:
print(f"Executing handler for {cmd_type}")
result = handler(**params)
print(f"Handler execution complete")
return {"status": "success", "result": result}
except Exception as e:
print(f"Error in handler: {str(e)}")
traceback.print_exc()
return {"status": "error", "message": str(e)}
else:
return {"status": "error", "message": f"Unknown command type: {cmd_type}"}
def get_scene_info(self):
"""Get information about the current Blender scene"""
try:
print("Getting scene info...")
# Simplify the scene info to reduce data size
scene_info = {
"name": bpy.context.scene.name,
"object_count": len(bpy.context.scene.objects),
"objects": [],
"materials_count": len(bpy.data.materials),
}
# Collect minimal object information (limit to first 10 objects)
for i, obj in enumerate(bpy.context.scene.objects):
if i >= 10: # Reduced from 20 to 10
break
obj_info = {
"name": obj.name,
"type": obj.type,
# Only include basic location data
"location": [round(float(obj.location.x), 2),
round(float(obj.location.y), 2),
round(float(obj.location.z), 2)],
}
scene_info["objects"].append(obj_info)
print(f"Scene info collected: {len(scene_info['objects'])} objects")
return scene_info
except Exception as e:
print(f"Error in get_scene_info: {str(e)}")
traceback.print_exc()
return {"error": str(e)}
@staticmethod
def _get_aabb(obj):
""" Returns the world-space axis-aligned bounding box (AABB) of an object. """
if obj.type != 'MESH':
raise TypeError("Object must be a mesh")
# Get the bounding box corners in local space
local_bbox_corners = [mathutils.Vector(corner) for corner in obj.bound_box]
# Convert to world coordinates
world_bbox_corners = [obj.matrix_world @ corner for corner in local_bbox_corners]
# Compute axis-aligned min/max coordinates
min_corner = mathutils.Vector(map(min, zip(*world_bbox_corners)))
max_corner = mathutils.Vector(map(max, zip(*world_bbox_corners)))
return [
[*min_corner], [*max_corner]
]
def get_object_info(self, name):
"""Get detailed information about a specific object"""
obj = bpy.data.objects.get(name)
if not obj:
raise ValueError(f"Object not found: {name}")
# Basic object info
obj_info = {
"name": obj.name,
"type": obj.type,
"location": [obj.location.x, obj.location.y, obj.location.z],
"rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z],
"scale": [obj.scale.x, obj.scale.y, obj.scale.z],
"visible": obj.visible_get(),
"materials": [],
}
if obj.type == "MESH":
bounding_box = self._get_aabb(obj)
obj_info["world_bounding_box"] = bounding_box
# Add material slots
for slot in obj.material_slots:
if slot.material:
obj_info["materials"].append(slot.material.name)
# Add mesh data if applicable
if obj.type == 'MESH' and obj.data:
mesh = obj.data
obj_info["mesh"] = {
"vertices": len(mesh.vertices),
"edges": len(mesh.edges),
"polygons": len(mesh.polygons),
}
return obj_info
def get_viewport_screenshot(self, max_size=800, filepath=None, format="png"):
"""
Capture a screenshot of the current 3D viewport and save it to the specified path.
Parameters:
- max_size: Maximum size in pixels for the largest dimension of the image
- filepath: Path where to save the screenshot file
- format: Image format (png, jpg, etc.)
Returns success/error status
"""
try:
if not filepath:
return {"error": "No filepath provided"}
# Find the active 3D viewport
area = None
for a in bpy.context.screen.areas:
if a.type == 'VIEW_3D':
area = a
break
if not area:
return {"error": "No 3D viewport found"}
# Take screenshot with proper context override
with bpy.context.temp_override(area=area):
bpy.ops.screen.screenshot_area(filepath=filepath)
# Load and resize if needed
img = bpy.data.images.load(filepath)
width, height = img.size
if max(width, height) > max_size:
scale = max_size / max(width, height)
new_width = int(width * scale)
new_height = int(height * scale)
img.scale(new_width, new_height)
# Set format and save
img.file_format = format.upper()
img.save()
width, height = new_width, new_height
# Cleanup Blender image data
bpy.data.images.remove(img)
return {
"success": True,
"width": width,
"height": height,
"filepath": filepath
}
except Exception as e:
return {"error": str(e)}
def execute_code(self, code):
"""Execute arbitrary Blender Python code"""
# This is powerful but potentially dangerous - use with caution
try:
# Create a local namespace for execution
namespace = {"bpy": bpy}
# Capture stdout during execution, and return it as result
capture_buffer = io.StringIO()
with redirect_stdout(capture_buffer):
exec(code, namespace)
captured_output = capture_buffer.getvalue()
return {"executed": True, "result": captured_output}
except Exception as e:
raise Exception(f"Code execution error: {str(e)}")
# ========================================
# OBJECT MANIPULATION HANDLERS (P0/P1)
# ========================================
def create_primitive(self, primitive_type, name=None, location=None, scale=None):
"""Create a primitive mesh object in Blender"""
try:
# Map primitive types to Blender operators
primitive_ops = {
'CUBE': bpy.ops.mesh.primitive_cube_add,
'SPHERE': bpy.ops.mesh.primitive_uv_sphere_add,
'UV_SPHERE': bpy.ops.mesh.primitive_uv_sphere_add,
'ICO_SPHERE': bpy.ops.mesh.primitive_ico_sphere_add,
'CYLINDER': bpy.ops.mesh.primitive_cylinder_add,
'CONE': bpy.ops.mesh.primitive_cone_add,
'TORUS': bpy.ops.mesh.primitive_torus_add,
'PLANE': bpy.ops.mesh.primitive_plane_add,
'MONKEY': bpy.ops.mesh.primitive_monkey_add,
}
primitive_type_upper = primitive_type.upper()
if primitive_type_upper not in primitive_ops:
raise ValueError(f"Unknown primitive type: {primitive_type}. Supported: {list(primitive_ops.keys())}")
# Prepare location
loc = (0, 0, 0)
if location:
loc = tuple(location) if isinstance(location, (list, tuple)) else (location, location, location)
# Create the primitive
op = primitive_ops[primitive_type_upper]
op(location=loc)
# Get the newly created object (it's automatically selected)
obj = bpy.context.active_object
# Apply custom name if provided
if name:
obj.name = name
# Apply scale if provided
if scale:
if isinstance(scale, (list, tuple)):
obj.scale = tuple(scale)
else:
obj.scale = (scale, scale, scale)
return {
"created": True,
"object_name": obj.name,
"type": primitive_type_upper,
"location": list(obj.location),
"scale": list(obj.scale)
}
except Exception as e:
raise Exception(f"Failed to create primitive: {str(e)}")
def modify_object(self, object_name, location=None, rotation=None, scale=None):
"""Modify an existing object's transforms"""
try:
obj = bpy.data.objects.get(object_name)
if not obj:
raise ValueError(f"Object not found: {object_name}")
modified = []
if location is not None:
obj.location = tuple(location) if isinstance(location, (list, tuple)) else (location, location, location)
modified.append("location")
if rotation is not None:
obj.rotation_euler = tuple(rotation) if isinstance(rotation, (list, tuple)) else (rotation, rotation, rotation)
modified.append("rotation")
if scale is not None:
obj.scale = tuple(scale) if isinstance(scale, (list, tuple)) else (scale, scale, scale)
modified.append("scale")
if not modified:
raise ValueError("No transform properties provided. Specify at least one of: location, rotation, scale")
return {
"modified": True,
"object_name": obj.name,
"properties_changed": modified,
"location": list(obj.location),
"rotation": list(obj.rotation_euler),
"scale": list(obj.scale)
}
except Exception as e:
raise Exception(f"Failed to modify object: {str(e)}")
def delete_object(self, object_name):
"""Delete an object from the scene"""
try:
obj = bpy.data.objects.get(object_name)
if not obj:
raise ValueError(f"Object not found: {object_name}")
# Store name before deletion
deleted_name = obj.name
obj_type = obj.type
# Remove from all collections first
for collection in obj.users_collection:
collection.objects.unlink(obj)
# Delete the object data if it exists and is not used elsewhere
obj_data = obj.data
bpy.data.objects.remove(obj, do_unlink=True)
# Clean up orphaned mesh data
if obj_data and obj_data.users == 0:
if obj_type == 'MESH':
bpy.data.meshes.remove(obj_data)
elif obj_type == 'CURVE':
bpy.data.curves.remove(obj_data)
elif obj_type == 'LIGHT':
bpy.data.lights.remove(obj_data)
elif obj_type == 'CAMERA':
bpy.data.cameras.remove(obj_data)
return {
"deleted": True,
"object_name": deleted_name,
"type": obj_type
}
except Exception as e:
raise Exception(f"Failed to delete object: {str(e)}")
# ========================================
# MATERIAL SYSTEM HANDLERS (P2)
# ========================================
    def create_material(self, material_name, base_color=None, metallic=None, roughness=None,
                        emission_color=None, emission_strength=None):
        """Create (or update) a PBR material using a Principled BSDF shader.

        Parameters:
        - material_name: material name; an existing material is reused
        - base_color: RGB or RGBA sequence (alpha defaults to 1.0)
        - metallic / roughness: scalar values
        - emission_color: RGB or RGBA sequence
        - emission_strength: scalar emission multiplier

        Returns {"created", "material_name", "has_nodes"}; raises on failure.
        """
        try:
            # Check if material already exists
            if material_name in bpy.data.materials:
                mat = bpy.data.materials[material_name]
            else:
                mat = bpy.data.materials.new(name=material_name)
            mat.use_nodes = True
            nodes = mat.node_tree.nodes
            # Get or create Principled BSDF node
            principled = None
            for node in nodes:
                if node.type == 'BSDF_PRINCIPLED':
                    principled = node
                    break
            if not principled:
                # Clear existing nodes and create new setup
                # (wipes any custom node graph the material had)
                nodes.clear()
                principled = nodes.new(type='ShaderNodeBsdfPrincipled')
                principled.location = (0, 0)
                output = nodes.new(type='ShaderNodeOutputMaterial')
                output.location = (300, 0)
                mat.node_tree.links.new(principled.outputs['BSDF'], output.inputs['Surface'])
            # Apply properties
            if base_color is not None:
                if len(base_color) == 3:
                    base_color = list(base_color) + [1.0]  # Add alpha
                principled.inputs['Base Color'].default_value = tuple(base_color)
            if metallic is not None:
                principled.inputs['Metallic'].default_value = float(metallic)
            if roughness is not None:
                principled.inputs['Roughness'].default_value = float(roughness)
            if emission_color is not None:
                if len(emission_color) == 3:
                    emission_color = list(emission_color) + [1.0]
                # Blender 4.0+ uses 'Emission Color', older versions use 'Emission'
                if 'Emission Color' in principled.inputs:
                    principled.inputs['Emission Color'].default_value = tuple(emission_color)
                elif 'Emission' in principled.inputs:
                    principled.inputs['Emission'].default_value = tuple(emission_color)
            if emission_strength is not None:
                # Input missing on very old Blender versions; skip silently then.
                if 'Emission Strength' in principled.inputs:
                    principled.inputs['Emission Strength'].default_value = float(emission_strength)
            return {
                "created": True,
                "material_name": mat.name,
                "has_nodes": mat.use_nodes
            }
        except Exception as e:
            raise Exception(f"Failed to create material: {str(e)}")
def apply_material(self, object_name, material_name):
"""Apply a material to an object"""
try:
obj = bpy.data.objects.get(object_name)
if not obj:
raise ValueError(f"Object not found: {object_name}")
mat = bpy.data.materials.get(material_name)
if not mat:
raise ValueError(f"Material not found: {material_name}")
# Check if object can have materials
if obj.type not in ['MESH', 'CURVE', 'SURFACE', 'META', 'FONT', 'GPENCIL']:
raise ValueError(f"Object type '{obj.type}' does not support materials")
# Apply material to first slot or create new slot
if obj.data.materials:
obj.data.materials[0] = mat
else:
obj.data.materials.append(mat)
return {
"applied": True,
"object_name": obj.name,
"material_name": mat.name
}
except Exception as e:
raise Exception(f"Failed to apply material: {str(e)}")
    def set_material_property(self, material_name, property, value):
        """Set one input on a material's Principled BSDF node.

        Parameters:
        - material_name: name of an existing node-based material
        - property: one of base_color, metallic, roughness, emission_color,
          emission_strength (`property` shadows the builtin, but the name is
          part of the wire protocol and must not be renamed)
        - value: scalar for float inputs, RGB/RGBA sequence for color inputs

        Raises ValueError for unknown materials, properties, or inputs
        unavailable in the running Blender version.
        """
        try:
            mat = bpy.data.materials.get(material_name)
            if not mat:
                raise ValueError(f"Material not found: {material_name}")
            if not mat.use_nodes:
                raise ValueError(f"Material '{material_name}' does not use nodes")
            # Find Principled BSDF
            principled = None
            for node in mat.node_tree.nodes:
                if node.type == 'BSDF_PRINCIPLED':
                    principled = node
                    break
            if not principled:
                raise ValueError(f"Material '{material_name}' has no Principled BSDF node")
            # Map property names to node inputs
            property_map = {
                'base_color': 'Base Color',
                'metallic': 'Metallic',
                'roughness': 'Roughness',
                'emission_color': 'Emission Color',
                'emission_strength': 'Emission Strength',
            }
            if property not in property_map:
                raise ValueError(f"Unknown property: {property}. Supported: {list(property_map.keys())}")
            input_name = property_map[property]
            # Handle fallback for older Blender versions
            # (pre-4.0 names the emission color input just 'Emission')
            if input_name == 'Emission Color' and input_name not in principled.inputs:
                input_name = 'Emission'
            if input_name not in principled.inputs:
                raise ValueError(f"Property '{property}' not available in this Blender version")
            # Set the value
            # Sequences are treated as colors (alpha padded to 1.0);
            # everything else is coerced to float.
            if isinstance(value, (list, tuple)):
                if len(value) == 3:
                    value = list(value) + [1.0]
                principled.inputs[input_name].default_value = tuple(value)
            else:
                principled.inputs[input_name].default_value = float(value)
            return {
                "set": True,
                "material_name": mat.name,
                "property": property,
                "value": value
            }
        except Exception as e:
            raise Exception(f"Failed to set material property: {str(e)}")
# ========================================
# COLLECTION MANAGEMENT HANDLERS (P3)
# ========================================
def create_collection(self, collection_name, parent_name=None):
"""Create a new collection"""
try:
# Check if collection already exists
if collection_name in bpy.data.collections:
return {
"created": False,
"collection_name": collection_name,
"message": "Collection already exists"
}
# Create new collection
new_collection = bpy.data.collections.new(collection_name)
# Link to parent or scene
if parent_name:
parent = bpy.data.collections.get(parent_name)
if not parent:
raise ValueError(f"Parent collection not found: {parent_name}")
parent.children.link(new_collection)
else:
bpy.context.scene.collection.children.link(new_collection)
return {
"created": True,
"collection_name": new_collection.name,
"parent": parent_name or "Scene Collection"
}
except Exception as e:
raise Exception(f"Failed to create collection: {str(e)}")
def add_to_collection(self, object_name, collection_name):
"""Add an object to a collection"""
try:
obj = bpy.data.objects.get(object_name)
if not obj:
raise ValueError(f"Object not found: {object_name}")
collection = bpy.data.collections.get(collection_name)
if not collection:
raise ValueError(f"Collection not found: {collection_name}")
# Check if already in collection
if obj.name in collection.objects:
return {
"added": False,
"object_name": obj.name,
"collection_name": collection.name,
"message": "Object already in collection"
}
collection.objects.link(obj)
return {
"added": True,
"object_name": obj.name,
"collection_name": collection.name
}
except Exception as e:
raise Exception(f"Failed to add to collection: {str(e)}")
def list_collections(self):
"""List all collections in the scene"""
try:
collections = []
def get_collection_info(coll, depth=0):
info = {
"name": coll.name,
"object_count": len(coll.objects),
"children_count": len(coll.children),
"depth": depth
}
collections.append(info)
for child in coll.children:
get_collection_info(child, depth + 1)
# Start from scene collection
get_collection_info(bpy.context.scene.collection)
return {
"collections": collections,
"total_count": len(collections)
}
except Exception as e:
raise Exception(f"Failed to list collections: {str(e)}")
# ========================================
# FILE OPERATIONS HANDLERS
# ========================================
def save_file(self, filepath=None):
"""Save the current Blender file"""
try:
if filepath:
# Ensure .blend extension
if not filepath.endswith('.blend'):
filepath += '.blend'
bpy.ops.wm.save_as_mainfile(filepath=filepath)
return {
"saved": True,
"filepath": filepath,
"method": "save_as"
}
else:
# Save to current file
if bpy.data.filepath:
bpy.ops.wm.save_mainfile()
return {
"saved": True,
"filepath": bpy.data.filepath,
"method": "save"
}
else:
raise ValueError("No filepath provided and file has never been saved. Please provide a filepath.")
except Exception as e:
raise Exception(f"Failed to save file: {str(e)}")
def render_scene(self, output_path=None, resolution_x=None, resolution_y=None,
samples=None, engine=None):
"""Render the current scene"""
try:
scene = bpy.context.scene
# Store original settings
original_path = scene.render.filepath
original_res_x = scene.render.resolution_x
original_res_y = scene.render.resolution_y
# Apply settings
if output_path:
scene.render.filepath = output_path
elif not scene.render.filepath:
# Default output path
scene.render.filepath = "//render_output"
if resolution_x:
scene.render.resolution_x = int(resolution_x)
if resolution_y:
scene.render.resolution_y = int(resolution_y)
if engine:
engine_map = {
'EEVEE': 'BLENDER_EEVEE',
'CYCLES': 'CYCLES',
'WORKBENCH': 'BLENDER_WORKBENCH'
}
scene.render.engine = engine_map.get(engine.upper(), engine)
if samples and scene.render.engine == 'CYCLES':
scene.cycles.samples = int(samples)
# Render
bpy.ops.render.render(write_still=True)
result = {
"rendered": True,
"output_path": scene.render.filepath,
"resolution": [scene.render.resolution_x, scene.render.resolution_y],
"engine": scene.render.engine
}
# Restore original settings if we changed them temporarily
if output_path:
scene.render.filepath = original_path
if resolution_x:
scene.render.resolution_x = original_res_x
if resolution_y:
scene.render.resolution_y = original_res_y
return result
except Exception as e:
raise Exception(f"Failed to render scene: {str(e)}")
# ========================================
# FILE SYSTEM OPERATIONS
# ========================================
def get_project_directory(self):
"""Get the current project directory (where .blend file is saved)"""
try:
filepath = bpy.data.filepath
if filepath:
project_dir = os.path.dirname(filepath)
return {
"project_directory": project_dir,
"blend_file": os.path.basename(filepath),
"exists": os.path.isdir(project_dir)
}
else:
# File not saved yet, return temp directory
temp_dir = tempfile.gettempdir()
return {
"project_directory": temp_dir,
"blend_file": None,
"exists": True,
"note": "File not saved. Using temp directory."
}
except Exception as e:
raise Exception(f"Failed to get project directory: {str(e)}")
def list_files(self, directory=None, pattern=None, recursive=False):
"""List files in a directory with optional filtering"""
try:
# Use project directory if not specified
if not directory:
filepath = bpy.data.filepath
if filepath:
directory = os.path.dirname(filepath)
else:
directory = tempfile.gettempdir()
if not os.path.isdir(directory):
raise ValueError(f"Directory not found: {directory}")
files = []
if recursive:
for root, dirs, filenames in os.walk(directory):
for filename in filenames:
if pattern:
import fnmatch
if fnmatch.fnmatch(filename.lower(), pattern.lower()):
files.append(os.path.join(root, filename))
else:
files.append(os.path.join(root, filename))
else:
for entry in os.listdir(directory):
full_path = os.path.join(directory, entry)
if os.path.isfile(full_path):
if pattern:
import fnmatch
if fnmatch.fnmatch(entry.lower(), pattern.lower()):
files.append(full_path)
else:
files.append(full_path)
# Return file info
file_info = []
for f in files[:100]: # Limit to 100 files
stat = os.stat(f)
file_info.append({
"path": f,
"name": os.path.basename(f),
"size": stat.st_size,
"extension": os.path.splitext(f)[1].lower()
})
return {
"directory": directory,
"files": file_info,
"total_count": len(files),
"returned_count": len(file_info)
}
except Exception as e:
raise Exception(f"Failed to list files: {str(e)}")
def create_directory(self, path, parents=True):
"""Create a directory"""
try:
if os.path.exists(path):
return {
"created": False,
"path": path,
"message": "Directory already exists"
}
if parents:
os.makedirs(path, exist_ok=True)
else:
os.mkdir(path)
return {
"created": True,
"path": path
}
except Exception as e:
raise Exception(f"Failed to create directory: {str(e)}")
# ========================================
# IMPORT/EXPORT OPERATIONS
# ========================================
    def import_asset(self, filepath, asset_type=None):
        """Import a 3D asset file into Blender.

        Parameters:
        - filepath: path to the asset file; format is chosen by extension
        - asset_type: accepted for protocol compatibility, not used here

        Returns the names of objects added to the scene by the import.
        NOTE(review): wm.obj_import / wm.stl_import / wm.ply_import are the
        newer built-in importers — presumably this targets recent Blender
        versions; older releases used import_scene.obj etc. Verify against
        the minimum version declared in bl_info.
        """
        try:
            if not os.path.exists(filepath):
                raise ValueError(f"File not found: {filepath}")
            ext = os.path.splitext(filepath)[1].lower()
            # Snapshot existing object names so the new ones can be identified.
            objects_before = set(bpy.data.objects.keys())
            # Import based on file extension
            if ext == '.obj':
                bpy.ops.wm.obj_import(filepath=filepath)
            elif ext == '.fbx':
                bpy.ops.import_scene.fbx(filepath=filepath)
            elif ext == '.gltf' or ext == '.glb':
                bpy.ops.import_scene.gltf(filepath=filepath)
            elif ext == '.stl':
                bpy.ops.wm.stl_import(filepath=filepath)
            elif ext == '.ply':
                bpy.ops.wm.ply_import(filepath=filepath)
            elif ext == '.dae':
                bpy.ops.wm.collada_import(filepath=filepath)
            elif ext == '.abc':
                bpy.ops.wm.alembic_import(filepath=filepath)
            elif ext == '.usd' or ext == '.usda' or ext == '.usdc' or ext == '.usdz':
                bpy.ops.wm.usd_import(filepath=filepath)
            elif ext == '.blend':
                # Append all objects from blend file
                with bpy.data.libraries.load(filepath) as (data_from, data_to):
                    data_to.objects = data_from.objects
                # Link objects to current scene
                for obj in data_to.objects:
                    if obj is not None:
                        bpy.context.scene.collection.objects.link(obj)
            else:
                raise ValueError(f"Unsupported file format: {ext}")
            # Find newly imported objects by diffing against the snapshot
            objects_after = set(bpy.data.objects.keys())
            new_objects = list(objects_after - objects_before)
            return {
                "imported": True,
                "filepath": filepath,
                "format": ext,
                "new_objects": new_objects,
                "object_count": len(new_objects)
            }
        except Exception as e:
            raise Exception(f"Failed to import asset: {str(e)}")
    def export_asset(self, filepath, objects=None, format=None):
        """Export objects (or the whole scene) to a file.

        Parameters:
        - filepath: destination path; the extension is appended when missing
        - objects: list of object names to export; None exports everything
        - format: optional explicit format overriding the path's extension

        Side effect: when `objects` is given the current selection is
        replaced and not restored afterwards.
        """
        try:
            # Determine format from filepath or parameter
            if format:
                ext = f".{format.lower().strip('.')}"
            else:
                ext = os.path.splitext(filepath)[1].lower()
            # Ensure filepath has correct extension
            if not filepath.lower().endswith(ext):
                filepath = filepath + ext
            # Select objects to export (exporters below honor the selection
            # only when `objects` was provided)
            if objects:
                bpy.ops.object.select_all(action='DESELECT')
                for obj_name in objects:
                    obj = bpy.data.objects.get(obj_name)
                    if obj:
                        obj.select_set(True)
                        bpy.context.view_layer.objects.active = obj
            # Export based on format
            if ext == '.obj':
                bpy.ops.wm.obj_export(filepath=filepath, export_selected_objects=bool(objects))
            elif ext == '.fbx':
                bpy.ops.export_scene.fbx(filepath=filepath, use_selection=bool(objects))
            elif ext == '.gltf':
                bpy.ops.export_scene.gltf(filepath=filepath, use_selection=bool(objects), export_format='GLTF_SEPARATE')
            elif ext == '.glb':
                bpy.ops.export_scene.gltf(filepath=filepath, use_selection=bool(objects), export_format='GLB')
            elif ext == '.stl':
                bpy.ops.wm.stl_export(filepath=filepath, export_selected_objects=bool(objects))
            elif ext == '.ply':
                bpy.ops.wm.ply_export(filepath=filepath, export_selected_objects=bool(objects))
            elif ext == '.dae':
                bpy.ops.wm.collada_export(filepath=filepath, selected=bool(objects))
            elif ext == '.abc':
                bpy.ops.wm.alembic_export(filepath=filepath, selected=bool(objects))
            elif ext == '.usd' or ext == '.usda' or ext == '.usdc':
                bpy.ops.wm.usd_export(filepath=filepath, selected_objects_only=bool(objects))
            else:
                raise ValueError(f"Unsupported export format: {ext}")
            return {
                "exported": True,
                "filepath": filepath,
                "format": ext,
                "objects_exported": objects if objects else "all"
            }
        except Exception as e:
            raise Exception(f"Failed to export asset: {str(e)}")
def get_supported_formats(self, operation="both"):
    """Describe the file formats available for import and/or export.

    Args:
        operation: "import" or "export" for a single table; any other
            value (default "both") returns both tables.

    Returns:
        Dict keyed by "import_formats" and/or "export_formats", each
        mapping a file extension to a human-readable format name.
    """
    try:
        import_formats = {
            ".obj": "Wavefront OBJ",
            ".fbx": "Autodesk FBX",
            ".gltf": "glTF 2.0",
            ".glb": "glTF 2.0 Binary",
            ".stl": "STL (Stereolithography)",
            ".ply": "Stanford PLY",
            ".dae": "Collada DAE",
            ".abc": "Alembic",
            ".usd": "Universal Scene Description",
            ".usda": "USD ASCII",
            ".usdc": "USD Crate",
            ".blend": "Blender File",
        }
        export_formats = {
            ".obj": "Wavefront OBJ",
            ".fbx": "Autodesk FBX",
            ".gltf": "glTF 2.0 Separate",
            ".glb": "glTF 2.0 Binary",
            ".stl": "STL (Stereolithography)",
            ".ply": "Stanford PLY",
            ".dae": "Collada DAE",
            ".abc": "Alembic",
            ".usd": "Universal Scene Description",
            ".usda": "USD ASCII",
            ".usdc": "USD Crate",
        }
        # Select the requested view; anything else falls through to both.
        by_operation = {
            "import": {"import_formats": import_formats},
            "export": {"export_formats": export_formats},
        }
        return by_operation.get(operation, {
            "import_formats": import_formats,
            "export_formats": export_formats,
        })
    except Exception as e:
        raise Exception(f"Failed to get supported formats: {str(e)}")
def optimize_asset(self, object_name, decimate_ratio=None, remove_doubles=True,
                   merge_distance=0.0001, apply_modifiers=False):
    """Reduce the geometry of a mesh object in place.

    Args:
        object_name: name of the mesh object to optimize.
        decimate_ratio: if in (0, 1), a Decimate modifier with that ratio
            is added and applied to reduce the face count.
        remove_doubles: merge vertices closer than ``merge_distance``.
        merge_distance: threshold for the merge-by-distance pass.
        apply_modifiers: kept for interface compatibility; the decimate
            modifier is always applied (see note below).

    Returns:
        Dict with before/after vertex and face counts.

    Raises:
        Exception: wrapping any failure (missing object, wrong type, or
            operator error).
    """
    try:
        obj = bpy.data.objects.get(object_name)
        if not obj:
            raise ValueError(f"Object not found: {object_name}")
        if obj.type != 'MESH':
            raise ValueError(f"Object '{object_name}' is not a mesh")
        original_verts = len(obj.data.vertices)
        original_faces = len(obj.data.polygons)
        # The operators below act on the active/selected object.
        bpy.context.view_layer.objects.active = obj
        obj.select_set(True)
        # Merge near-duplicate vertices in edit mode.
        bpy.ops.object.mode_set(mode='EDIT')
        if remove_doubles:
            bpy.ops.mesh.select_all(action='SELECT')
            bpy.ops.mesh.remove_doubles(threshold=merge_distance)
        bpy.ops.object.mode_set(mode='OBJECT')
        # Decimate.  NOTE: the original code branched on apply_modifiers
        # but both branches performed the same modifier_apply call, so the
        # conditional was dead; the modifier is always applied so that the
        # final counts below reflect the reduced geometry.
        if decimate_ratio and 0 < decimate_ratio < 1:
            decimate = obj.modifiers.new(name="Decimate", type='DECIMATE')
            decimate.ratio = decimate_ratio
            bpy.ops.object.modifier_apply(modifier="Decimate")
        final_verts = len(obj.data.vertices)
        final_faces = len(obj.data.polygons)
        return {
            "optimized": True,
            "object_name": obj.name,
            "original_vertices": original_verts,
            "final_vertices": final_verts,
            "original_faces": original_faces,
            "final_faces": final_faces,
            "vertices_reduced": original_verts - final_verts,
            "faces_reduced": original_faces - final_faces
        }
    except Exception as e:
        # Best effort: never leave Blender stuck in edit mode.
        try:
            bpy.ops.object.mode_set(mode='OBJECT')
        except Exception:
            pass
        raise Exception(f"Failed to optimize asset: {str(e)}")
def organize_assets_by_type(self):
    """Sort every scene object into a collection named after its type.

    Per-type collections are created on demand and linked under the
    scene's root collection; objects of unlisted types go to 'Other'.

    Returns a dict reporting created collections, moved objects, and the
    total object count.  Raises a wrapped Exception on failure.
    """
    try:
        # Human-readable collection name for each Blender object type.
        TYPE_TO_COLLECTION = {
            'MESH': 'Meshes',
            'CURVE': 'Curves',
            'SURFACE': 'Surfaces',
            'META': 'Metaballs',
            'FONT': 'Text',
            'ARMATURE': 'Armatures',
            'LATTICE': 'Lattices',
            'EMPTY': 'Empties',
            'GPENCIL': 'Grease Pencil',
            'CAMERA': 'Cameras',
            'LIGHT': 'Lights',
            'SPEAKER': 'Speakers',
            'LIGHT_PROBE': 'Light Probes',
        }
        created_collections = []
        moved_objects = {}
        for obj in bpy.context.scene.objects:
            target_name = TYPE_TO_COLLECTION.get(obj.type, 'Other')
            # Lazily create the destination collection.
            if target_name not in bpy.data.collections:
                fresh = bpy.data.collections.new(target_name)
                bpy.context.scene.collection.children.link(fresh)
                created_collections.append(target_name)
            destination = bpy.data.collections[target_name]
            # Only link objects not already in the collection.
            if obj.name not in destination.objects:
                destination.objects.link(obj)
                moved_objects.setdefault(target_name, []).append(obj.name)
        return {
            "organized": True,
            "collections_created": created_collections,
            "objects_organized": moved_objects,
            "total_objects": len(bpy.context.scene.objects)
        }
    except Exception as e:
        raise Exception(f"Failed to organize assets: {str(e)}")
def get_polyhaven_categories(self, asset_type):
    """Fetch the PolyHaven category list for one asset type.

    Valid types are hdris, textures, models, all.  Returns either
    {"categories": ...} or {"error": ...}; never raises.
    """
    try:
        if asset_type not in ("hdris", "textures", "models", "all"):
            return {"error": f"Invalid asset type: {asset_type}. Must be one of: hdris, textures, models, all"}
        response = requests.get(f"https://api.polyhaven.com/categories/{asset_type}", headers=REQ_HEADERS)
        if response.status_code != 200:
            return {"error": f"API request failed with status code {response.status_code}"}
        return {"categories": response.json()}
    except Exception as e:
        return {"error": str(e)}
def search_polyhaven_assets(self, asset_type=None, categories=None):
    """Query the PolyHaven asset index, optionally filtered.

    Only the first 20 matches are returned so the payload stays small;
    the total match count is reported alongside.  Returns an error dict
    instead of raising.
    """
    try:
        params = {}
        if asset_type and asset_type != "all":
            if asset_type not in ["hdris", "textures", "models"]:
                return {"error": f"Invalid asset type: {asset_type}. Must be one of: hdris, textures, models, all"}
            params["type"] = asset_type
        if categories:
            params["categories"] = categories
        response = requests.get("https://api.polyhaven.com/assets", params=params, headers=REQ_HEADERS)
        if response.status_code != 200:
            return {"error": f"API request failed with status code {response.status_code}"}
        assets = response.json()
        # Cap the result at the first 20 entries to keep the response
        # size manageable for the MCP channel.
        limited_assets = dict(list(assets.items())[:20])
        return {"assets": limited_assets, "total_count": len(assets), "returned_count": len(limited_assets)}
    except Exception as e:
        return {"error": str(e)}
def download_polyhaven_asset(self, asset_id, asset_type, resolution="1k", file_format=None):
    """Download a PolyHaven asset and install it into the current scene.

    Handling depends on ``asset_type``:
      - "hdris": the HDR/EXR is loaded into a freshly built world
        environment node tree and set as the scene world.
      - "textures": every texture map is downloaded, packed into the
        .blend file, and wired into a new Principled BSDF material named
        after the asset.
      - "models": the model file plus any included sidecar files are
        downloaded to a temp directory and imported.

    Args:
        asset_id: PolyHaven asset identifier.
        asset_type: "hdris", "textures", or "models".
        resolution: e.g. "1k", "2k" — must exist for the asset.
        file_format: optional; defaults to hdr / jpg / gltf per type.

    Returns a success dict or {"error": ...}; never raises.
    """
    try:
        # Ask the API which files exist for this asset.
        files_response = requests.get(f"https://api.polyhaven.com/files/{asset_id}", headers=REQ_HEADERS)
        if files_response.status_code != 200:
            return {"error": f"Failed to get asset files: {files_response.status_code}"}
        files_data = files_response.json()
        if asset_type == "hdris":
            if not file_format:
                file_format = "hdr"  # Default format for HDRIs
            if "hdri" in files_data and resolution in files_data["hdri"] and file_format in files_data["hdri"][resolution]:
                file_info = files_data["hdri"][resolution][file_format]
                file_url = file_info["url"]
                # Blender can't load HDR data directly from memory, so
                # spool the download to a temporary file first.
                with tempfile.NamedTemporaryFile(suffix=f".{file_format}", delete=False) as tmp_file:
                    response = requests.get(file_url, headers=REQ_HEADERS)
                    if response.status_code != 200:
                        return {"error": f"Failed to download HDRI: {response.status_code}"}
                    tmp_file.write(response.content)
                    tmp_path = tmp_file.name
                try:
                    # Build a fresh world node tree around the environment texture.
                    if not bpy.data.worlds:
                        bpy.data.worlds.new("World")
                    world = bpy.data.worlds[0]
                    world.use_nodes = True
                    node_tree = world.node_tree
                    for node in node_tree.nodes:
                        node_tree.nodes.remove(node)
                    tex_coord = node_tree.nodes.new(type='ShaderNodeTexCoord')
                    tex_coord.location = (-800, 0)
                    mapping = node_tree.nodes.new(type='ShaderNodeMapping')
                    mapping.location = (-600, 0)
                    env_tex = node_tree.nodes.new(type='ShaderNodeTexEnvironment')
                    env_tex.location = (-400, 0)
                    env_tex.image = bpy.data.images.load(tmp_path)
                    # Pick a color space name that exists in this Blender build.
                    if file_format.lower() == 'exr':
                        try:
                            env_tex.image.colorspace_settings.name = 'Linear'
                        except Exception:
                            # Fallback to Non-Color if Linear isn't available
                            env_tex.image.colorspace_settings.name = 'Non-Color'
                    else:  # hdr
                        for color_space in ['Linear', 'Linear Rec.709', 'Non-Color']:
                            try:
                                env_tex.image.colorspace_settings.name = color_space
                                break
                            except Exception:
                                continue
                    background = node_tree.nodes.new(type='ShaderNodeBackground')
                    background.location = (-200, 0)
                    output = node_tree.nodes.new(type='ShaderNodeOutputWorld')
                    output.location = (0, 0)
                    # Generated coords -> mapping -> env texture -> background -> output.
                    node_tree.links.new(tex_coord.outputs['Generated'], mapping.inputs['Vector'])
                    node_tree.links.new(mapping.outputs['Vector'], env_tex.inputs['Vector'])
                    node_tree.links.new(env_tex.outputs['Color'], background.inputs['Color'])
                    node_tree.links.new(background.outputs['Background'], output.inputs['Surface'])
                    bpy.context.scene.world = world
                    # Delete only our own temp file.  (The previous code
                    # called the private tempfile._cleanup(), which would
                    # also remove unrelated temp files still in use.)
                    with suppress(Exception):
                        os.unlink(tmp_path)
                    return {
                        "success": True,
                        "message": f"HDRI {asset_id} imported successfully",
                        "image_name": env_tex.image.name
                    }
                except Exception as e:
                    return {"error": f"Failed to set up HDRI in Blender: {str(e)}"}
            else:
                return {"error": "Requested resolution or format not available for this HDRI"}
        elif asset_type == "textures":
            if not file_format:
                file_format = "jpg"  # Default format for textures
            downloaded_maps = {}
            try:
                for map_type in files_data:
                    if map_type not in ["blend", "gltf"]:  # Skip non-texture files
                        if resolution in files_data[map_type] and file_format in files_data[map_type][resolution]:
                            file_info = files_data[map_type][resolution][file_format]
                            file_url = file_info["url"]
                            # Spool each map to a temp file, as for HDRIs.
                            with tempfile.NamedTemporaryFile(suffix=f".{file_format}", delete=False) as tmp_file:
                                response = requests.get(file_url, headers=REQ_HEADERS)
                                if response.status_code == 200:
                                    tmp_file.write(response.content)
                                    tmp_path = tmp_file.name
                                    image = bpy.data.images.load(tmp_path)
                                    image.name = f"{asset_id}_{map_type}.{file_format}"
                                    # Pack so the data survives temp-file deletion.
                                    image.pack()
                                    # Color maps are sRGB, data maps Non-Color.
                                    if map_type in ['color', 'diffuse', 'albedo']:
                                        with suppress(Exception):
                                            image.colorspace_settings.name = 'sRGB'
                                    else:
                                        with suppress(Exception):
                                            image.colorspace_settings.name = 'Non-Color'
                                    downloaded_maps[map_type] = image
                                    with suppress(Exception):
                                        os.unlink(tmp_path)
                if not downloaded_maps:
                    return {"error": "No texture maps found for the requested resolution and format"}
                # Build a material wiring the maps into a Principled BSDF.
                mat = bpy.data.materials.new(name=asset_id)
                mat.use_nodes = True
                nodes = mat.node_tree.nodes
                links = mat.node_tree.links
                for node in nodes:
                    nodes.remove(node)
                output = nodes.new(type='ShaderNodeOutputMaterial')
                output.location = (300, 0)
                principled = nodes.new(type='ShaderNodeBsdfPrincipled')
                principled.location = (0, 0)
                links.new(principled.outputs[0], output.inputs[0])
                tex_coord = nodes.new(type='ShaderNodeTexCoord')
                tex_coord.location = (-800, 0)
                mapping = nodes.new(type='ShaderNodeMapping')
                mapping.location = (-600, 0)
                mapping.vector_type = 'TEXTURE'  # Changed from default 'POINT' to 'TEXTURE'
                links.new(tex_coord.outputs['UV'], mapping.inputs['Vector'])
                # Stack the texture nodes vertically.
                x_pos = -400
                y_pos = 300
                for map_type, image in downloaded_maps.items():
                    tex_node = nodes.new(type='ShaderNodeTexImage')
                    tex_node.location = (x_pos, y_pos)
                    tex_node.image = image
                    if map_type.lower() in ['color', 'diffuse', 'albedo']:
                        with suppress(Exception):
                            tex_node.image.colorspace_settings.name = 'sRGB'
                    else:
                        with suppress(Exception):
                            tex_node.image.colorspace_settings.name = 'Non-Color'
                    links.new(mapping.outputs['Vector'], tex_node.inputs['Vector'])
                    # Route each map to the matching Principled input.
                    if map_type.lower() in ['color', 'diffuse', 'albedo']:
                        links.new(tex_node.outputs['Color'], principled.inputs['Base Color'])
                    elif map_type.lower() in ['roughness', 'rough']:
                        links.new(tex_node.outputs['Color'], principled.inputs['Roughness'])
                    elif map_type.lower() in ['metallic', 'metalness', 'metal']:
                        links.new(tex_node.outputs['Color'], principled.inputs['Metallic'])
                    elif map_type.lower() in ['normal', 'nor']:
                        normal_map = nodes.new(type='ShaderNodeNormalMap')
                        normal_map.location = (x_pos + 200, y_pos)
                        links.new(tex_node.outputs['Color'], normal_map.inputs['Color'])
                        links.new(normal_map.outputs['Normal'], principled.inputs['Normal'])
                    elif map_type in ['displacement', 'disp', 'height']:
                        disp_node = nodes.new(type='ShaderNodeDisplacement')
                        disp_node.location = (x_pos + 200, y_pos - 200)
                        links.new(tex_node.outputs['Color'], disp_node.inputs['Height'])
                        links.new(disp_node.outputs['Displacement'], output.inputs['Displacement'])
                    y_pos -= 250
                return {
                    "success": True,
                    "message": f"Texture {asset_id} imported as material",
                    "material": mat.name,
                    "maps": list(downloaded_maps.keys())
                }
            except Exception as e:
                return {"error": f"Failed to process textures: {str(e)}"}
        elif asset_type == "models":
            # For models, prefer glTF format if available.
            if not file_format:
                file_format = "gltf"  # Default format for models
            if file_format in files_data and resolution in files_data[file_format]:
                file_info = files_data[file_format][resolution][file_format]
                file_url = file_info["url"]
                # Temp directory holds the model plus its dependencies.
                temp_dir = tempfile.mkdtemp()
                main_file_path = ""
                try:
                    main_file_name = file_url.split("/")[-1]
                    main_file_path = os.path.join(temp_dir, main_file_name)
                    response = requests.get(file_url, headers=REQ_HEADERS)
                    if response.status_code != 200:
                        return {"error": f"Failed to download model: {response.status_code}"}
                    with open(main_file_path, "wb") as f:
                        f.write(response.content)
                    # Sidecar files (buffers, textures) referenced by the model.
                    if "include" in file_info and file_info["include"]:
                        for include_path, include_info in file_info["include"].items():
                            include_url = include_info["url"]
                            include_file_path = os.path.join(temp_dir, include_path)
                            os.makedirs(os.path.dirname(include_file_path), exist_ok=True)
                            include_response = requests.get(include_url, headers=REQ_HEADERS)
                            if include_response.status_code == 200:
                                with open(include_file_path, "wb") as f:
                                    f.write(include_response.content)
                            else:
                                print(f"Failed to download included file: {include_path}")
                    # Import the model into Blender.
                    if file_format == "gltf" or file_format == "glb":
                        bpy.ops.import_scene.gltf(filepath=main_file_path)
                    elif file_format == "fbx":
                        bpy.ops.import_scene.fbx(filepath=main_file_path)
                    elif file_format == "obj":
                        bpy.ops.import_scene.obj(filepath=main_file_path)
                    elif file_format == "blend":
                        # Blend files must be appended, then linked into the scene.
                        with bpy.data.libraries.load(main_file_path, link=False) as (data_from, data_to):
                            data_to.objects = data_from.objects
                        for obj in data_to.objects:
                            if obj is not None:
                                bpy.context.collection.objects.link(obj)
                    else:
                        return {"error": f"Unsupported model format: {file_format}"}
                    imported_objects = [obj.name for obj in bpy.context.selected_objects]
                    return {
                        "success": True,
                        "message": f"Model {asset_id} imported successfully",
                        "imported_objects": imported_objects
                    }
                except Exception as e:
                    return {"error": f"Failed to import model: {str(e)}"}
                finally:
                    # Always drop the temp directory, even on success.
                    with suppress(Exception):
                        shutil.rmtree(temp_dir)
            else:
                return {"error": "Requested format or resolution not available for this model"}
        else:
            return {"error": f"Unsupported asset type: {asset_type}"}
    except Exception as e:
        return {"error": f"Failed to download asset: {str(e)}"}
def set_texture(self, object_name, texture_id):
    """Apply a previously downloaded Polyhaven texture to an object by creating a new material.

    Gathers every image named ``{texture_id}_*`` (the naming convention
    used by download_polyhaven_asset), builds a fresh Principled BSDF
    material wiring those maps in, replaces all of the object's material
    slots with it, and returns a summary of the resulting node graph.

    Args:
        object_name: name of an object whose data accepts materials.
        texture_id: PolyHaven asset id the maps were downloaded under.

    Returns:
        Dict with success/material/maps/material_info, or {"error": ...}.

    NOTE(review): the maps are connected twice — once in the first loop
    below and again in the "second pass" — so normal/displacement
    handling creates duplicate helper nodes; presumably the later links
    take effect, but confirm before relying on exact node counts.
    """
    try:
        # Get the object
        obj = bpy.data.objects.get(object_name)
        if not obj:
            return {"error": f"Object not found: {object_name}"}
        # Make sure object can accept materials
        if not hasattr(obj, 'data') or not hasattr(obj.data, 'materials'):
            return {"error": f"Object {object_name} cannot accept materials"}
        # Find all images related to this texture and ensure they're properly loaded
        texture_images = {}
        for img in bpy.data.images:
            if img.name.startswith(texture_id + "_"):
                # Extract the map type from the image name
                map_type = img.name.split('_')[-1].split('.')[0]
                # Force a reload of the image
                img.reload()
                # Ensure proper color space (color maps sRGB, data maps Non-Color)
                if map_type.lower() in ['color', 'diffuse', 'albedo']:
                    try:
                        img.colorspace_settings.name = 'sRGB'
                    except:
                        pass
                else:
                    try:
                        img.colorspace_settings.name = 'Non-Color'
                    except:
                        pass
                # Ensure the image is packed into the .blend file
                if not img.packed_file:
                    img.pack()
                texture_images[map_type] = img
                print(f"Loaded texture map: {map_type} - {img.name}")
                # Debug info
                print(f"Image size: {img.size[0]}x{img.size[1]}")
                print(f"Color space: {img.colorspace_settings.name}")
                print(f"File format: {img.file_format}")
                print(f"Is packed: {bool(img.packed_file)}")
        if not texture_images:
            return {"error": f"No texture images found for: {texture_id}. Please download the texture first."}
        # Create a new material
        new_mat_name = f"{texture_id}_material_{object_name}"
        # Remove any existing material with this name to avoid conflicts
        existing_mat = bpy.data.materials.get(new_mat_name)
        if existing_mat:
            bpy.data.materials.remove(existing_mat)
        new_mat = bpy.data.materials.new(name=new_mat_name)
        new_mat.use_nodes = True
        # Set up the material nodes
        nodes = new_mat.node_tree.nodes
        links = new_mat.node_tree.links
        # Clear default nodes
        nodes.clear()
        # Create output node
        output = nodes.new(type='ShaderNodeOutputMaterial')
        output.location = (600, 0)
        # Create principled BSDF node
        principled = nodes.new(type='ShaderNodeBsdfPrincipled')
        principled.location = (300, 0)
        links.new(principled.outputs[0], output.inputs[0])
        # Add texture nodes based on available maps
        tex_coord = nodes.new(type='ShaderNodeTexCoord')
        tex_coord.location = (-800, 0)
        mapping = nodes.new(type='ShaderNodeMapping')
        mapping.location = (-600, 0)
        mapping.vector_type = 'TEXTURE'  # Changed from default 'POINT' to 'TEXTURE'
        links.new(tex_coord.outputs['UV'], mapping.inputs['Vector'])
        # Position offset for texture nodes
        x_pos = -400
        y_pos = 300
        # Connect different texture maps (first pass; see NOTE in docstring)
        for map_type, image in texture_images.items():
            tex_node = nodes.new(type='ShaderNodeTexImage')
            tex_node.location = (x_pos, y_pos)
            tex_node.image = image
            # Set color space based on map type
            if map_type.lower() in ['color', 'diffuse', 'albedo']:
                try:
                    tex_node.image.colorspace_settings.name = 'sRGB'
                except:
                    pass  # Use default if sRGB not available
            else:
                try:
                    tex_node.image.colorspace_settings.name = 'Non-Color'
                except:
                    pass  # Use default if Non-Color not available
            links.new(mapping.outputs['Vector'], tex_node.inputs['Vector'])
            # Connect to appropriate input on Principled BSDF
            if map_type.lower() in ['color', 'diffuse', 'albedo']:
                links.new(tex_node.outputs['Color'], principled.inputs['Base Color'])
            elif map_type.lower() in ['roughness', 'rough']:
                links.new(tex_node.outputs['Color'], principled.inputs['Roughness'])
            elif map_type.lower() in ['metallic', 'metalness', 'metal']:
                links.new(tex_node.outputs['Color'], principled.inputs['Metallic'])
            elif map_type.lower() in ['normal', 'nor', 'dx', 'gl']:
                # Add normal map node
                normal_map = nodes.new(type='ShaderNodeNormalMap')
                normal_map.location = (x_pos + 200, y_pos)
                links.new(tex_node.outputs['Color'], normal_map.inputs['Color'])
                links.new(normal_map.outputs['Normal'], principled.inputs['Normal'])
            elif map_type.lower() in ['displacement', 'disp', 'height']:
                # Add displacement node
                disp_node = nodes.new(type='ShaderNodeDisplacement')
                disp_node.location = (x_pos + 200, y_pos - 200)
                disp_node.inputs['Scale'].default_value = 0.1  # Reduce displacement strength
                links.new(tex_node.outputs['Color'], disp_node.inputs['Height'])
                links.new(disp_node.outputs['Displacement'], output.inputs['Displacement'])
            y_pos -= 250
        # Second pass: Connect nodes with proper handling for special cases
        texture_nodes = {}
        # First find all texture nodes and store them by map type
        for node in nodes:
            if node.type == 'TEX_IMAGE' and node.image:
                for map_type, image in texture_images.items():
                    if node.image == image:
                        texture_nodes[map_type] = node
                        break
        # Now connect everything using the nodes instead of images
        # Handle base color (diffuse)
        for map_name in ['color', 'diffuse', 'albedo']:
            if map_name in texture_nodes:
                links.new(texture_nodes[map_name].outputs['Color'], principled.inputs['Base Color'])
                print(f"Connected {map_name} to Base Color")
                break
        # Handle roughness
        for map_name in ['roughness', 'rough']:
            if map_name in texture_nodes:
                links.new(texture_nodes[map_name].outputs['Color'], principled.inputs['Roughness'])
                print(f"Connected {map_name} to Roughness")
                break
        # Handle metallic
        for map_name in ['metallic', 'metalness', 'metal']:
            if map_name in texture_nodes:
                links.new(texture_nodes[map_name].outputs['Color'], principled.inputs['Metallic'])
                print(f"Connected {map_name} to Metallic")
                break
        # Handle normal maps
        for map_name in ['gl', 'dx', 'nor']:
            if map_name in texture_nodes:
                normal_map_node = nodes.new(type='ShaderNodeNormalMap')
                normal_map_node.location = (100, 100)
                links.new(texture_nodes[map_name].outputs['Color'], normal_map_node.inputs['Color'])
                links.new(normal_map_node.outputs['Normal'], principled.inputs['Normal'])
                print(f"Connected {map_name} to Normal")
                break
        # Handle displacement
        for map_name in ['displacement', 'disp', 'height']:
            if map_name in texture_nodes:
                disp_node = nodes.new(type='ShaderNodeDisplacement')
                disp_node.location = (300, -200)
                disp_node.inputs['Scale'].default_value = 0.1  # Reduce displacement strength
                links.new(texture_nodes[map_name].outputs['Color'], disp_node.inputs['Height'])
                links.new(disp_node.outputs['Displacement'], output.inputs['Displacement'])
                print(f"Connected {map_name} to Displacement")
                break
        # Handle ARM texture (Ambient Occlusion, Roughness, Metallic packed in RGB)
        if 'arm' in texture_nodes:
            separate_rgb = nodes.new(type='ShaderNodeSeparateRGB')
            separate_rgb.location = (-200, -100)
            links.new(texture_nodes['arm'].outputs['Color'], separate_rgb.inputs['Image'])
            # Connect Roughness (G) if no dedicated roughness map
            if not any(map_name in texture_nodes for map_name in ['roughness', 'rough']):
                links.new(separate_rgb.outputs['G'], principled.inputs['Roughness'])
                print("Connected ARM.G to Roughness")
            # Connect Metallic (B) if no dedicated metallic map
            if not any(map_name in texture_nodes for map_name in ['metallic', 'metalness', 'metal']):
                links.new(separate_rgb.outputs['B'], principled.inputs['Metallic'])
                print("Connected ARM.B to Metallic")
            # For AO (R channel), multiply with base color if we have one
            base_color_node = None
            for map_name in ['color', 'diffuse', 'albedo']:
                if map_name in texture_nodes:
                    base_color_node = texture_nodes[map_name]
                    break
            if base_color_node:
                mix_node = nodes.new(type='ShaderNodeMixRGB')
                mix_node.location = (100, 200)
                mix_node.blend_type = 'MULTIPLY'
                mix_node.inputs['Fac'].default_value = 0.8  # 80% influence
                # Disconnect direct connection to base color
                for link in base_color_node.outputs['Color'].links:
                    if link.to_socket == principled.inputs['Base Color']:
                        links.remove(link)
                # Connect through the mix node
                links.new(base_color_node.outputs['Color'], mix_node.inputs[1])
                links.new(separate_rgb.outputs['R'], mix_node.inputs[2])
                links.new(mix_node.outputs['Color'], principled.inputs['Base Color'])
                print("Connected ARM.R to AO mix with Base Color")
        # Handle AO (Ambient Occlusion) if separate
        if 'ao' in texture_nodes:
            base_color_node = None
            for map_name in ['color', 'diffuse', 'albedo']:
                if map_name in texture_nodes:
                    base_color_node = texture_nodes[map_name]
                    break
            if base_color_node:
                mix_node = nodes.new(type='ShaderNodeMixRGB')
                mix_node.location = (100, 200)
                mix_node.blend_type = 'MULTIPLY'
                mix_node.inputs['Fac'].default_value = 0.8  # 80% influence
                # Disconnect direct connection to base color
                for link in base_color_node.outputs['Color'].links:
                    if link.to_socket == principled.inputs['Base Color']:
                        links.remove(link)
                # Connect through the mix node
                links.new(base_color_node.outputs['Color'], mix_node.inputs[1])
                links.new(texture_nodes['ao'].outputs['Color'], mix_node.inputs[2])
                links.new(mix_node.outputs['Color'], principled.inputs['Base Color'])
                print("Connected AO to mix with Base Color")
        # CRITICAL: Make sure to clear all existing materials from the object
        while len(obj.data.materials) > 0:
            obj.data.materials.pop(index=0)
        # Assign the new material to the object
        obj.data.materials.append(new_mat)
        # CRITICAL: Make the object active and select it
        bpy.context.view_layer.objects.active = obj
        obj.select_set(True)
        # CRITICAL: Force Blender to update the material
        bpy.context.view_layer.update()
        # Get the list of texture maps
        texture_maps = list(texture_images.keys())
        # Get info about texture nodes for debugging
        material_info = {
            "name": new_mat.name,
            "has_nodes": new_mat.use_nodes,
            "node_count": len(new_mat.node_tree.nodes),
            "texture_nodes": []
        }
        # NOTE(review): the loop variable 'output' below shadows the
        # material-output node created earlier; harmless here because the
        # node graph is already fully wired.
        for node in new_mat.node_tree.nodes:
            if node.type == 'TEX_IMAGE' and node.image:
                connections = []
                for output in node.outputs:
                    for link in output.links:
                        connections.append(f"{output.name} → {link.to_node.name}.{link.to_socket.name}")
                material_info["texture_nodes"].append({
                    "name": node.name,
                    "image": node.image.name,
                    "colorspace": node.image.colorspace_settings.name,
                    "connections": connections
                })
        return {
            "success": True,
            "message": f"Created new material and applied texture {texture_id} to {object_name}",
            "material": new_mat.name,
            "maps": texture_maps,
            "material_info": material_info
        }
    except Exception as e:
        print(f"Error in set_texture: {str(e)}")
        traceback.print_exc()
        return {"error": f"Failed to apply texture: {str(e)}"}
def get_polyhaven_status(self):
    """Report whether the PolyHaven integration toggle is enabled."""
    if bpy.context.scene.blendermcp_use_polyhaven:
        return {"enabled": True, "message": "PolyHaven integration is enabled and ready to use."}
    return {
        "enabled": False,
        "message": """PolyHaven integration is currently disabled. To enable it:
        1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
        2. Check the 'Use assets from Poly Haven' checkbox
        3. Restart the connection to Claude"""
    }
#region Hyper3D
def get_hyper3d_status(self):
    """Report whether Hyper3D Rodin generation is usable.

    Usable requires both the scene toggle and a non-empty API key; the
    success message also states the mode and whether the key is the
    bundled free-trial key or a private one.
    """
    scene = bpy.context.scene
    if not scene.blendermcp_use_hyper3d:
        return {
            "enabled": False,
            "message": """Hyper3D Rodin integration is currently disabled. To enable it:
            1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
            2. Check the 'Use Hyper3D Rodin 3D model generation' checkbox
            3. Restart the connection to Claude"""
        }
    if not scene.blendermcp_hyper3d_api_key:
        # Typo fix: the original message said "plaform".
        return {
            "enabled": False,
            "message": """Hyper3D Rodin integration is currently enabled, but API key is not given. To enable it:
            1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
            2. Keep the 'Use Hyper3D Rodin 3D model generation' checkbox checked
            3. Choose the right platform and fill in the API Key
            4. Restart the connection to Claude"""
        }
    mode = scene.blendermcp_hyper3d_mode
    key_type = 'private' if scene.blendermcp_hyper3d_api_key != RODIN_FREE_TRIAL_KEY else 'free_trial'
    message = f"Hyper3D Rodin integration is enabled and ready to use. Mode: {mode}. " + \
        f"Key type: {key_type}"
    return {
        "enabled": True,
        "message": message
    }
def create_rodin_job(self, *args, **kwargs):
match bpy.context.scene.blendermcp_hyper3d_mode:
case "MAIN_SITE":
return self.create_rodin_job_main_site(*args, **kwargs)
case "FAL_AI":
return self.create_rodin_job_fal_ai(*args, **kwargs)
case _:
return f"Error: Unknown Hyper3D Rodin mode!"
def create_rodin_job_main_site(
    self,
    text_prompt: str=None,
    images: list[tuple[str, str]]=None,
    bbox_condition=None
):
    """Call the Rodin main-site API to start a generation job.

    (The original had this docstring stranded mid-body as a bare string
    statement; it is now a proper docstring.)

    Args:
        text_prompt: optional text description of the model to generate.
        images: optional list of (suffix, data) pairs uploaded as
            numbered image files.
        bbox_condition: optional bounding-box condition, JSON-encoded.

    Returns:
        The API's JSON response (job uuid and subscription key), or
        {"error": ...} on failure.
    """
    try:
        if images is None:
            images = []
        # Multipart form: numbered image files plus fixed tier/mesh_mode.
        files = [
            *[("images", (f"{i:04d}{img_suffix}", img)) for i, (img_suffix, img) in enumerate(images)],
            ("tier", (None, "Sketch")),
            ("mesh_mode", (None, "Raw")),
        ]
        if text_prompt:
            files.append(("prompt", (None, text_prompt)))
        if bbox_condition:
            files.append(("bbox_condition", (None, json.dumps(bbox_condition))))
        response = requests.post(
            "https://hyperhuman.deemos.com/api/v2/rodin",
            headers={
                "Authorization": f"Bearer {bpy.context.scene.blendermcp_hyper3d_api_key}",
            },
            files=files
        )
        return response.json()
    except Exception as e:
        return {"error": str(e)}
def create_rodin_job_fal_ai(
    self,
    text_prompt: str=None,
    images: list[tuple[str, str]]=None,
    bbox_condition=None
):
    """Submit a Rodin generation job through the fal.ai queue API.

    Optional prompt, image URLs, and bbox condition are included in the
    JSON payload only when supplied.  Returns the API's JSON response or
    {"error": ...} on failure.
    """
    try:
        # Base payload; optional fields are added below.
        payload = {"tier": "Sketch"}
        if images:
            payload["input_image_urls"] = images
        if text_prompt:
            payload["prompt"] = text_prompt
        if bbox_condition:
            payload["bbox_condition"] = bbox_condition
        response = requests.post(
            "https://queue.fal.run/fal-ai/hyper3d/rodin",
            headers={
                "Authorization": f"Key {bpy.context.scene.blendermcp_hyper3d_api_key}",
                "Content-Type": "application/json",
            },
            json=payload,
        )
        return response.json()
    except Exception as e:
        return {"error": str(e)}
def poll_rodin_job_status(self, *args, **kwargs):
match bpy.context.scene.blendermcp_hyper3d_mode:
case "MAIN_SITE":
return self.poll_rodin_job_status_main_site(*args, **kwargs)
case "FAL_AI":
return self.poll_rodin_job_status_fal_ai(*args, **kwargs)
case _:
return f"Error: Unknown Hyper3D Rodin mode!"
def poll_rodin_job_status_main_site(self, subscription_key: str):
    """Poll the Rodin main-site status endpoint for all jobs in a task.

    Returns {"status_list": [...]} with one status string per job.
    """
    response = requests.post(
        "https://hyperhuman.deemos.com/api/v2/status",
        headers={
            "Authorization": f"Bearer {bpy.context.scene.blendermcp_hyper3d_api_key}",
        },
        json={
            "subscription_key": subscription_key,
        },
    )
    payload = response.json()
    statuses = [job["status"] for job in payload["jobs"]]
    return {"status_list": statuses}
def poll_rodin_job_status_fal_ai(self, request_id: str):
    """Poll the fal.ai queue for the status of one Rodin request.

    Returns the raw JSON status payload from the queue API.
    """
    # NOTE(review): the auth scheme here is "KEY" while the submit call
    # uses "Key" — presumably fal.ai accepts both; confirm before unifying.
    status_url = f"https://queue.fal.run/fal-ai/hyper3d/requests/{request_id}/status"
    auth_headers = {
        "Authorization": f"KEY {bpy.context.scene.blendermcp_hyper3d_api_key}",
    }
    response = requests.get(status_url, headers=auth_headers)
    return response.json()
@staticmethod
def _clean_imported_glb(filepath, mesh_name=None):
    """Import a GLB file and return its single mesh object.

    Handles the two layouts the generator produces: a lone mesh, or an
    empty parent with exactly one mesh child (in which case the mesh is
    unparented and the empty removed).  Optionally renames the mesh and
    its datablock to ``mesh_name``.  Returns None (after printing an
    error) on any unexpected structure.
    """
    # Diff the object set around the import to find what was added.
    existing_objects = set(bpy.data.objects)
    bpy.ops.import_scene.gltf(filepath=filepath)
    bpy.context.view_layer.update()
    imported_objects = list(set(bpy.data.objects) - existing_objects)
    if not imported_objects:
        print("Error: No objects were imported.")
        return
    mesh_obj = None
    if len(imported_objects) == 1 and imported_objects[0].type == 'MESH':
        mesh_obj = imported_objects[0]
        print("Single mesh imported, no cleanup needed.")
    elif len(imported_objects) == 2:
        # Expect exactly one empty wrapping one mesh child.
        empty_objs = [i for i in imported_objects if i.type == "EMPTY"]
        if len(empty_objs) != 1:
            print("Error: Expected an empty node with one mesh child or a single mesh object.")
            return
        parent_obj = empty_objs.pop()
        if len(parent_obj.children) != 1:
            print("Error: Expected an empty node with one mesh child or a single mesh object.")
            return
        potential_mesh = parent_obj.children[0]
        if potential_mesh.type != 'MESH':
            print("Error: Child is not a mesh object.")
            return
        print("GLB structure confirmed: Empty node with one mesh child.")
        # Unparent the mesh and drop the wrapper empty.
        potential_mesh.parent = None
        bpy.data.objects.remove(parent_obj)
        print("Removed empty node, keeping only the mesh.")
        mesh_obj = potential_mesh
    else:
        print("Error: Expected an empty node with one mesh child or a single mesh object.")
        return
    # Rename mesh and its datablock; best effort only.  (The original
    # also tested `mesh_obj.name is not None`, which is always true.)
    try:
        if mesh_obj and mesh_name:
            mesh_obj.name = mesh_name
            mesh_obj.data.name = mesh_name
            print(f"Mesh renamed to: {mesh_name}")
    except Exception:
        print("Having issue with renaming, give up renaming.")
    return mesh_obj
def import_generated_asset(self, *args, **kwargs):
    """Dispatch asset import to the handler for the active Hyper3D Rodin mode."""
    mode = bpy.context.scene.blendermcp_hyper3d_mode
    if mode == "MAIN_SITE":
        return self.import_generated_asset_main_site(*args, **kwargs)
    if mode == "FAL_AI":
        return self.import_generated_asset_fal_ai(*args, **kwargs)
    return "Error: Unknown Hyper3D Rodin mode!"
def import_generated_asset_main_site(self, task_uuid: str, name: str):
    """Fetch a finished Rodin (hyper3d.ai) task and import the GLB result.

    Downloads the first ``.glb`` entry of the task's result list to a
    temporary file, imports it via ``_clean_imported_glb``, and returns a
    dict describing the imported object, or ``{"succeed": False, "error": ...}``
    on failure.
    """
    response = requests.post(
        "https://hyperhuman.deemos.com/api/v2/download",
        headers={
            "Authorization": f"Bearer {bpy.context.scene.blendermcp_hyper3d_api_key}",
        },
        json={
            'task_uuid': task_uuid
        },
        timeout=30,  # bug fix: an unbounded request could hang Blender forever
    )
    data_ = response.json()
    temp_file = None
    for i in data_["list"]:
        if i["name"].endswith(".glb"):
            temp_file = tempfile.NamedTemporaryFile(
                delete=False,
                prefix=task_uuid,
                suffix=".glb",
            )
            try:
                # Stream the GLB payload into the temporary file.
                response = requests.get(i["url"], stream=True, timeout=60)
                response.raise_for_status()  # surface HTTP errors as exceptions
                for chunk in response.iter_content(chunk_size=8192):
                    temp_file.write(chunk)
                temp_file.close()
            except Exception as e:
                # Download failed: drop the partial file and report the error.
                temp_file.close()
                os.unlink(temp_file.name)
                return {"succeed": False, "error": str(e)}
            break
    else:
        # Loop ended without finding a .glb entry: the task is not done yet.
        return {"succeed": False, "error": "Generation failed. Please first make sure that all jobs of the task are done and then try again later."}
    try:
        obj = self._clean_imported_glb(
            filepath=temp_file.name,
            mesh_name=name
        )
        result = {
            "name": obj.name,
            "type": obj.type,
            "location": [obj.location.x, obj.location.y, obj.location.z],
            "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z],
            "scale": [obj.scale.x, obj.scale.y, obj.scale.z],
        }
        if obj.type == "MESH":
            result["world_bounding_box"] = self._get_aabb(obj)
        return {
            "succeed": True, **result
        }
    except Exception as e:
        return {"succeed": False, "error": str(e)}
    finally:
        # Bug fix: the temp file used to be leaked after a successful import.
        with suppress(OSError):
            os.unlink(temp_file.name)
def import_generated_asset_fal_ai(self, request_id: str, name: str):
    """Fetch a finished fal.ai Rodin request and import the GLB result.

    Downloads the request's ``model_mesh`` to a temporary file, imports it
    via ``_clean_imported_glb``, and returns a dict describing the imported
    object, or ``{"succeed": False, "error": ...}`` on failure.
    """
    response = requests.get(
        f"https://queue.fal.run/fal-ai/hyper3d/requests/{request_id}",
        headers={
            "Authorization": f"Key {bpy.context.scene.blendermcp_hyper3d_api_key}",
        },
        timeout=30,  # bug fix: an unbounded request could hang Blender forever
    )
    data_ = response.json()
    temp_file = tempfile.NamedTemporaryFile(
        delete=False,
        prefix=request_id,
        suffix=".glb",
    )
    try:
        # Stream the GLB payload into the temporary file.
        response = requests.get(data_["model_mesh"]["url"], stream=True, timeout=60)
        response.raise_for_status()  # surface HTTP errors as exceptions
        for chunk in response.iter_content(chunk_size=8192):
            temp_file.write(chunk)
        temp_file.close()
    except Exception as e:
        # Download failed: drop the partial file and report the error.
        temp_file.close()
        os.unlink(temp_file.name)
        return {"succeed": False, "error": str(e)}
    try:
        obj = self._clean_imported_glb(
            filepath=temp_file.name,
            mesh_name=name
        )
        result = {
            "name": obj.name,
            "type": obj.type,
            "location": [obj.location.x, obj.location.y, obj.location.z],
            "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z],
            "scale": [obj.scale.x, obj.scale.y, obj.scale.z],
        }
        if obj.type == "MESH":
            result["world_bounding_box"] = self._get_aabb(obj)
        return {
            "succeed": True, **result
        }
    except Exception as e:
        return {"succeed": False, "error": str(e)}
    finally:
        # Bug fix: the temp file used to be leaked after a successful import.
        with suppress(OSError):
            os.unlink(temp_file.name)
#endregion
#region Sketchfab API
def get_sketchfab_status(self):
    """Get the current status of Sketchfab integration.

    Bug fix: previously a configured API key was probed (and could report
    ``enabled: True``) even when the 'Use Sketchfab' checkbox was off.
    The checkbox is now honoured before any network call is made.
    """
    enabled = bpy.context.scene.blendermcp_use_sketchfab
    api_key = bpy.context.scene.blendermcp_sketchfab_api_key
    if not enabled:
        return {
            "enabled": False,
            "message": """Sketchfab integration is currently disabled. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Check the 'Use assets from Sketchfab' checkbox
3. Enter your Sketchfab API Key
4. Restart the connection to Claude"""
        }
    if not api_key:
        return {
            "enabled": False,
            "message": """Sketchfab integration is currently enabled, but API key is not given. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Keep the 'Use Sketchfab' checkbox checked
3. Enter your Sketchfab API Key
4. Restart the connection to Claude"""
        }
    # Validate the key against the authenticated /v3/me endpoint.
    try:
        headers = {
            "Authorization": f"Token {api_key}"
        }
        response = requests.get(
            "https://api.sketchfab.com/v3/me",
            headers=headers,
            timeout=30  # Add timeout of 30 seconds
        )
        if response.status_code == 200:
            user_data = response.json()
            username = user_data.get("username", "Unknown user")
            return {
                "enabled": True,
                "message": f"Sketchfab integration is enabled and ready to use. Logged in as: {username}"
            }
        return {
            "enabled": False,
            "message": f"Sketchfab API key seems invalid. Status code: {response.status_code}"
        }
    except requests.exceptions.Timeout:
        return {
            "enabled": False,
            "message": "Timeout connecting to Sketchfab API. Check your internet connection."
        }
    except Exception as e:
        return {
            "enabled": False,
            "message": f"Error testing Sketchfab API key: {str(e)}"
        }
def search_sketchfab_models(self, query, categories=None, count=20, downloadable=True):
    """Search Sketchfab for models matching *query*.

    *categories* is an optional comma-separated filter, *count* the number
    of results, and *downloadable* restricts results to downloadable
    models.  Returns the raw API response dict or ``{"error": ...}``.
    """
    try:
        api_key = bpy.context.scene.blendermcp_sketchfab_api_key
        if not api_key:
            return {"error": "Sketchfab API key is not configured"}
        # Search parameters use the exact field names from the Sketchfab API docs.
        params = {
            "type": "models",
            "q": query,
            "count": count,
            "downloadable": downloadable,
            "archives_flavours": False
        }
        if categories:
            params["categories"] = categories
        # Token auth header, per the Sketchfab API-key documentation.
        headers = {
            "Authorization": f"Token {api_key}"
        }
        response = requests.get(
            "https://api.sketchfab.com/v3/search",
            headers=headers,
            params=params,
            timeout=30  # Add timeout of 30 seconds
        )
        if response.status_code == 401:
            return {"error": "Authentication failed (401). Check your API key."}
        if response.status_code != 200:
            return {"error": f"API request failed with status code {response.status_code}"}
        response_data = response.json()
        # Safety checks on the response structure.
        if response_data is None:
            return {"error": "Received empty response from Sketchfab API"}
        # 'results' may be missing or malformed; validate before returning.
        results = response_data.get("results", [])
        if not isinstance(results, list):
            return {"error": f"Unexpected response format from Sketchfab API: {response_data}"}
        return response_data
    except requests.exceptions.Timeout:
        return {"error": "Request timed out. Check your internet connection."}
    except json.JSONDecodeError as e:
        return {"error": f"Invalid JSON response from Sketchfab API: {str(e)}"}
    except Exception as e:
        # Bug fix: use the module-level traceback import instead of re-importing it here.
        traceback.print_exc()
        return {"error": str(e)}
def download_sketchfab_model(self, uid):
    """Download a Sketchfab model by UID, extract the archive, and import the glTF.

    Returns ``{"success": True, "imported_objects": [...]}`` on success or
    ``{"error": ...}`` on any failure.  Extraction is guarded against
    zip-slip (path traversal) entries.
    """
    try:
        api_key = bpy.context.scene.blendermcp_sketchfab_api_key
        if not api_key:
            return {"error": "Sketchfab API key is not configured"}
        # Token auth header, per the Sketchfab API-key documentation.
        headers = {
            "Authorization": f"Token {api_key}"
        }
        # Ask Sketchfab for a short-lived download URL.
        download_endpoint = f"https://api.sketchfab.com/v3/models/{uid}/download"
        response = requests.get(
            download_endpoint,
            headers=headers,
            timeout=30  # Add timeout of 30 seconds
        )
        if response.status_code == 401:
            return {"error": "Authentication failed (401). Check your API key."}
        if response.status_code != 200:
            return {"error": f"Download request failed with status code {response.status_code}"}
        data = response.json()
        if data is None:
            return {"error": "Received empty response from Sketchfab API for download request"}
        # Extract the glTF download URL with safety checks.
        gltf_data = data.get("gltf")
        if not gltf_data:
            return {"error": "No gltf download URL available for this model. Response: " + str(data)}
        download_url = gltf_data.get("url")
        if not download_url:
            return {"error": "No download URL available for this model. Make sure the model is downloadable and you have access."}
        model_response = requests.get(download_url, timeout=60)  # 60 second timeout
        if model_response.status_code != 200:
            return {"error": f"Model download failed with status code {model_response.status_code}"}
        # Save the archive to a scratch directory.
        temp_dir = tempfile.mkdtemp()
        zip_file_path = os.path.join(temp_dir, f"{uid}.zip")
        with open(zip_file_path, "wb") as f:
            f.write(model_response.content)
        abs_temp_dir = os.path.abspath(temp_dir)
        with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
            # Zip-slip prevention: every entry must resolve inside temp_dir.
            for file_info in zip_ref.infolist():
                file_path = file_info.filename
                target_path = os.path.join(temp_dir, os.path.normpath(file_path))
                abs_target_path = os.path.abspath(target_path)
                # Bug fix: a plain startswith() also accepted sibling dirs such as
                # "/tmp/abc-evil" for temp dir "/tmp/abc"; commonpath does not.
                if os.path.commonpath([abs_temp_dir, abs_target_path]) != abs_temp_dir:
                    with suppress(Exception):
                        shutil.rmtree(temp_dir)
                    return {"error": "Security issue: Zip contains files with path traversal attempt"}
                # Additional explicit check for directory traversal sequences.
                if ".." in file_path:
                    with suppress(Exception):
                        shutil.rmtree(temp_dir)
                    return {"error": "Security issue: Zip contains files with directory traversal sequence"}
            # All entries passed the security checks; extract them.
            zip_ref.extractall(temp_dir)
        # Find the main glTF file.
        gltf_files = [f for f in os.listdir(temp_dir) if f.endswith('.gltf') or f.endswith('.glb')]
        if not gltf_files:
            with suppress(Exception):
                shutil.rmtree(temp_dir)
            return {"error": "No glTF file found in the downloaded model"}
        main_file = os.path.join(temp_dir, gltf_files[0])
        bpy.ops.import_scene.gltf(filepath=main_file)
        # The importer leaves the new objects selected.
        imported_objects = [obj.name for obj in bpy.context.selected_objects]
        # Clean up temporary files.
        with suppress(Exception):
            shutil.rmtree(temp_dir)
        return {
            "success": True,
            "message": "Model imported successfully",
            "imported_objects": imported_objects
        }
    except requests.exceptions.Timeout:
        return {"error": "Request timed out. Check your internet connection and try again with a simpler model."}
    except json.JSONDecodeError as e:
        return {"error": f"Invalid JSON response from Sketchfab API: {str(e)}"}
    except Exception as e:
        # Bug fix: use the module-level traceback import instead of re-importing it here.
        traceback.print_exc()
        return {"error": f"Failed to download model: {str(e)}"}
#endregion
#region Hunyuan3D
def get_hunyuan3d_status(self):
    """Report whether Hunyuan3D integration is enabled and fully configured."""
    scene = bpy.context.scene
    enabled = scene.blendermcp_use_hunyuan3d
    hunyuan3d_mode = scene.blendermcp_hunyuan3d_mode
    if not enabled:
        return {
            "enabled": False,
            "message": """Hunyuan3D integration is currently disabled. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Check the 'Use Tencent Hunyuan 3D model generation' checkbox
3. Restart the connection to Claude"""
        }
    if hunyuan3d_mode == "OFFICIAL_API":
        # The official Tencent Cloud API needs both halves of the credential pair.
        if not scene.blendermcp_hunyuan3d_secret_id or not scene.blendermcp_hunyuan3d_secret_key:
            return {
                "enabled": False,
                "mode": hunyuan3d_mode,
                "message": """Hunyuan3D integration is currently enabled, but SecretId or SecretKey is not given. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Keep the 'Use Tencent Hunyuan 3D model generation' checkbox checked
3. Choose the right platform and fill in the SecretId and SecretKey
4. Restart the connection to Claude"""
            }
    elif hunyuan3d_mode == "LOCAL_API":
        # A locally hosted server only needs its base URL.
        if not scene.blendermcp_hunyuan3d_api_url:
            return {
                "enabled": False,
                "mode": hunyuan3d_mode,
                "message": """Hunyuan3D integration is currently enabled, but API URL is not given. To enable it:
1. In the 3D Viewport, find the BlenderMCP panel in the sidebar (press N if hidden)
2. Keep the 'Use Tencent Hunyuan 3D model generation' checkbox checked
3. Choose the right platform and fill in the API URL
4. Restart the connection to Claude"""
            }
    else:
        return {
            "enabled": False,
            "message": "Hunyuan3D integration is enabled and mode is not supported."
        }
    return {
        "enabled": True,
        "mode": hunyuan3d_mode,
        "message": "Hunyuan3D integration is enabled and ready to use."
    }
@staticmethod
def get_tencent_cloud_sign_headers(
method: str,
path: str,
headParams: dict,
data: dict,
service: str,
region: str,
secret_id: str,
secret_key: str,
host: str = None
):
"""Generate the signature header required for Tencent Cloud API requests headers"""
# Generate timestamp
timestamp = int(time.time())
date = datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%d")
# If host is not provided, it is generated based on service and region.
if not host:
host = f"{service}.tencentcloudapi.com"
endpoint = f"https://{host}"
# Constructing the request body
payload_str = json.dumps(data)
# ************* Step 1: Concatenate the canonical request string *************
canonical_uri = path
canonical_querystring = ""
ct = "application/json; charset=utf-8"
canonical_headers = f"content-type:{ct}\nhost:{host}\nx-tc-action:{headParams.get('Action', '').lower()}\n"
signed_headers = "content-type;host;x-tc-action"
hashed_request_payload = hashlib.sha256(payload_str.encode("utf-8")).hexdigest()
canonical_request = (method + "\n" +
canonical_uri + "\n" +
canonical_querystring + "\n" +
canonical_headers + "\n" +
signed_headers + "\n" +
hashed_request_payload)
# ************* Step 2: Construct the reception signature string *************
credential_scope = f"{date}/{service}/tc3_request"
hashed_canonical_request = hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()
string_to_sign = ("TC3-HMAC-SHA256" + "\n" +
str(timestamp) + "\n" +
credential_scope + "\n" +
hashed_canonical_request)
# ************* Step 3: Calculate the signature *************
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
secret_date = sign(("TC3" + secret_key).encode("utf-8"), date)
secret_service = sign(secret_date, service)
secret_signing = sign(secret_service, "tc3_request")
signature = hmac.new(
secret_signing,
string_to_sign.encode("utf-8"),
hashlib.sha256
).hexdigest()
# ************* Step 4: Connect Authorization *************
authorization = ("TC3-HMAC-SHA256" + " " +
"Credential=" + secret_id + "/" + credential_scope + ", " +
"SignedHeaders=" + signed_headers + ", " +
"Signature=" + signature)
# Constructing request headers
headers = {
"Authorization": authorization,
"Content-Type": "application/json; charset=utf-8",
"Host": host,
"X-TC-Action": headParams.get("Action", ""),
"X-TC-Timestamp": str(timestamp),
"X-TC-Version": headParams.get("Version", ""),
"X-TC-Region": region
}
return headers, endpoint
def create_hunyuan_job(self, *args, **kwargs):
    """Route job creation to the official-API or local-API implementation."""
    mode = bpy.context.scene.blendermcp_hunyuan3d_mode
    if mode == "OFFICIAL_API":
        return self.create_hunyuan_job_main_site(*args, **kwargs)
    if mode == "LOCAL_API":
        return self.create_hunyuan_job_local_site(*args, **kwargs)
    return "Error: Unknown Hunyuan3D mode!"
def create_hunyuan_job_main_site(
    self,
    text_prompt: str = None,
    image: str = None
):
    """Submit a Hunyuan text/image-to-3D job to the official Tencent Cloud API.

    Exactly one of *text_prompt* (max 200 chars) or *image* (URL or local
    file path) must be provided.  Returns the parsed API response dict, or
    ``{"error": ...}`` on failure.
    """
    try:
        secret_id = bpy.context.scene.blendermcp_hunyuan3d_secret_id
        secret_key = bpy.context.scene.blendermcp_hunyuan3d_secret_key
        if not secret_id or not secret_key:
            return {"error": "SecretId or SecretKey is not given"}
        # Parameter verification: exactly one input modality.
        if not text_prompt and not image:
            return {"error": "Prompt or Image is required"}
        if text_prompt and image:
            return {"error": "Prompt and Image cannot be provided simultaneously"}
        # Fixed parameters of the SubmitHunyuanTo3DJob endpoint.
        service = "hunyuan"
        action = "SubmitHunyuanTo3DJob"
        version = "2023-09-01"
        region = "ap-guangzhou"
        headParams = {
            "Action": action,
            "Version": version,
            "Region": region,
        }
        data = {
            "Num": 1  # The current API limit is only 1
        }
        # Handling text prompts.
        if text_prompt:
            if len(text_prompt) > 200:
                return {"error": "Prompt exceeds 200 characters limit"}
            data["Prompt"] = text_prompt
        # Handling image: pass URLs through, Base64-encode local files.
        if image:
            if re.match(r'^https?://', image, re.IGNORECASE) is not None:
                data["ImageUrl"] = image
            else:
                try:
                    with open(image, "rb") as f:
                        image_base64 = base64.b64encode(f.read()).decode("ascii")
                    data["ImageBase64"] = image_base64
                except Exception as e:
                    return {"error": f"Image encoding failed: {str(e)}"}
        # Get signed headers; the signed payload must match the POSTed body exactly.
        headers, endpoint = self.get_tencent_cloud_sign_headers("POST", "/", headParams, data, service, region, secret_id, secret_key)
        response = requests.post(
            endpoint,
            headers=headers,
            data=json.dumps(data),
            timeout=30,  # bug fix: previously the request could hang indefinitely
        )
        if response.status_code == 200:
            return response.json()
        # Bug fix: interpolate the response body, not the Response object's repr.
        return {
            "error": f"API request failed with status {response.status_code}: {response.text}"
        }
    except Exception as e:
        return {"error": str(e)}
def create_hunyuan_job_local_site(
    self,
    text_prompt: str = None,
    image: str = None):
    """Generate a model via a locally hosted Hunyuan3D server and import it.

    Sends the text and/or Base64-encoded image plus the scene's generation
    settings to ``<api_url>/generate``; the response body is the raw GLB,
    which is written to a temp file and imported on Blender's main thread.
    """
    try:
        scene = bpy.context.scene
        base_url = scene.blendermcp_hunyuan3d_api_url.rstrip('/')
        octree_resolution = scene.blendermcp_hunyuan3d_octree_resolution
        num_inference_steps = scene.blendermcp_hunyuan3d_num_inference_steps
        guidance_scale = scene.blendermcp_hunyuan3d_guidance_scale
        texture = scene.blendermcp_hunyuan3d_texture
        if not base_url:
            return {"error": "API URL is not given"}
        # Parameter verification.
        if not text_prompt and not image:
            return {"error": "Prompt or Image is required"}
        # Generation settings from the addon panel.
        data = {
            "octree_resolution": octree_resolution,
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale,
            "texture": texture,
        }
        if text_prompt:
            data["text"] = text_prompt
        # Image input: fetch URLs ourselves, read local files; both sent as Base64.
        if image:
            if re.match(r'^https?://', image, re.IGNORECASE) is not None:
                try:
                    resImg = requests.get(image, timeout=60)  # bug fix: no timeout before
                    resImg.raise_for_status()
                    image_base64 = base64.b64encode(resImg.content).decode("ascii")
                    data["image"] = image_base64
                except Exception as e:
                    return {"error": f"Failed to download or encode image: {str(e)}"}
            else:
                try:
                    with open(image, "rb") as f:
                        image_base64 = base64.b64encode(f.read()).decode("ascii")
                    data["image"] = image_base64
                except Exception as e:
                    return {"error": f"Image encoding failed: {str(e)}"}
        # Generation is slow; allow a generous but finite timeout (bug fix: was unbounded).
        response = requests.post(
            f"{base_url}/generate",
            json=data,
            timeout=600,
        )
        if response.status_code != 200:
            return {
                "error": f"Generation failed: {response.text}"
            }
        # The response body is the raw GLB; save it to a temporary file.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".glb") as temp_file:
            temp_file.write(response.content)
            temp_file_name = temp_file.name
        # Import must run on Blender's main thread; a timer handler does that.
        def import_handler():
            try:
                bpy.ops.import_scene.gltf(filepath=temp_file_name)
            finally:
                # Bug fix: remove the temp file even if the import raises.
                os.unlink(temp_file_name)
            return None  # one-shot timer
        bpy.app.timers.register(import_handler)
        return {
            "status": "DONE",
            "message": "Generation and Import glb succeeded"
        }
    except Exception as e:
        print(f"An error occurred: {e}")
        return {"error": str(e)}
def poll_hunyuan_job_status(self, *args, **kwargs):
    """Thin delegate: poll job status via the official-API implementation."""
    return self.poll_hunyuan_job_status_ai(*args, **kwargs)
def poll_hunyuan_job_status_ai(self, job_id: str):
    """Query the status of a submitted Hunyuan3D job via the official API.

    Returns the parsed API response dict, or ``{"error": ...}`` on failure.
    """
    # Bug fix: removed a leftover debug print(job_id).
    try:
        secret_id = bpy.context.scene.blendermcp_hunyuan3d_secret_id
        secret_key = bpy.context.scene.blendermcp_hunyuan3d_secret_key
        if not secret_id or not secret_key:
            return {"error": "SecretId or SecretKey is not given"}
        if not job_id:
            return {"error": "JobId is required"}
        # Fixed parameters of the QueryHunyuanTo3DJob endpoint.
        service = "hunyuan"
        action = "QueryHunyuanTo3DJob"
        version = "2023-09-01"
        region = "ap-guangzhou"
        headParams = {
            "Action": action,
            "Version": version,
            "Region": region,
        }
        # The MCP side prefixes ids with "job_"; the API expects the bare id.
        clean_job_id = job_id.removeprefix("job_")
        data = {
            "JobId": clean_job_id
        }
        headers, endpoint = self.get_tencent_cloud_sign_headers("POST", "/", headParams, data, service, region, secret_id, secret_key)
        response = requests.post(
            endpoint,
            headers=headers,
            data=json.dumps(data),
            timeout=30,  # bug fix: previously the request could hang indefinitely
        )
        if response.status_code == 200:
            return response.json()
        # Bug fix: interpolate the response body, not the Response object's repr.
        return {
            "error": f"API request failed with status {response.status_code}: {response.text}"
        }
    except Exception as e:
        return {"error": str(e)}
def import_generated_asset_hunyuan(self, *args, **kwargs):
    """Thin delegate: import a generated Hunyuan asset via the official-API implementation."""
    return self.import_generated_asset_hunyuan_ai(*args, **kwargs)
def import_generated_asset_hunyuan_ai(self, name: str, zip_file_url: str):
    """Download the Hunyuan result ZIP, extract it, and import the OBJ model.

    The extracted texture/.mtl files are deliberately kept on disk (the
    imported materials reference them); only the ZIP and OBJ are removed.
    Returns ``{"succeed": True, ...object info...}`` or ``{"succeed"/"error": ...}``.
    """
    if not zip_file_url:
        return {"error": "Zip file not found"}
    # Validate URL.
    if not re.match(r'^https?://', zip_file_url, re.IGNORECASE):
        return {"error": "Invalid URL format. Must start with http:// or https://"}
    # Create a temporary directory for the archive and its contents.
    temp_dir = tempfile.mkdtemp(prefix="tencent_obj_")
    zip_file_path = osp.join(temp_dir, "model.zip")
    obj_file_path = osp.join(temp_dir, "model.obj")
    try:
        # Download the ZIP file (bug fix: previously no timeout).
        zip_response = requests.get(zip_file_url, stream=True, timeout=60)
        zip_response.raise_for_status()
        with open(zip_file_path, "wb") as f:
            for chunk in zip_response.iter_content(chunk_size=8192):
                f.write(chunk)
        abs_temp_dir = osp.abspath(temp_dir)
        with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
            # Bug fix: zip-slip guard, consistent with download_sketchfab_model;
            # extraction previously had no path-traversal protection.
            for file_info in zip_ref.infolist():
                target = osp.abspath(osp.join(temp_dir, os.path.normpath(file_info.filename)))
                if osp.commonpath([abs_temp_dir, target]) != abs_temp_dir:
                    return {"succeed": False, "error": "Security issue: Zip contains files with path traversal attempt"}
            zip_ref.extractall(temp_dir)
        # Find the .obj file (there may be multiple; the last one found wins).
        for file in os.listdir(temp_dir):
            if file.endswith(".obj"):
                obj_file_path = osp.join(temp_dir, file)
        if not osp.exists(obj_file_path):
            return {"succeed": False, "error": "OBJ file not found after extraction"}
        # Blender 4.x replaced the legacy OBJ importer operator.
        if bpy.app.version >= (4, 0, 0):
            bpy.ops.wm.obj_import(filepath=obj_file_path)
        else:
            bpy.ops.import_scene.obj(filepath=obj_file_path)
        imported_objs = [obj for obj in bpy.context.selected_objects if obj.type == 'MESH']
        if not imported_objs:
            return {"succeed": False, "error": "No mesh objects imported"}
        obj = imported_objs[0]
        if name:
            obj.name = name
        result = {
            "name": obj.name,
            "type": obj.type,
            "location": [obj.location.x, obj.location.y, obj.location.z],
            "rotation": [obj.rotation_euler.x, obj.rotation_euler.y, obj.rotation_euler.z],
            "scale": [obj.scale.x, obj.scale.y, obj.scale.z],
        }
        if obj.type == "MESH":
            result["world_bounding_box"] = self._get_aabb(obj)
        return {"succeed": True, **result}
    except Exception as e:
        return {"succeed": False, "error": str(e)}
    finally:
        # Clean up the zip and obj; textures and .mtl stay for the materials.
        try:
            if os.path.exists(zip_file_path):
                os.remove(zip_file_path)
            if os.path.exists(obj_file_path):
                os.remove(obj_file_path)
        except Exception as e:
            print(f"Failed to clean up temporary directory {temp_dir}: {e}")
#endregion
# Blender UI Panel
class BLENDERMCP_PT_Panel(bpy.types.Panel):
    # Sidebar panel (View3D > N-panel > "BlenderMCP") exposing all addon settings.
    # NOTE: a '#' comment is used instead of a class docstring because bpy surfaces
    # docstrings as UI descriptions.
    bl_label = "Blender MCP"
    bl_idname = "BLENDERMCP_PT_Panel"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'BlenderMCP'

    def draw(self, context):
        """Draw integration toggles, per-service credentials, and the server connect/disconnect controls."""
        layout = self.layout
        scene = context.scene
        layout.prop(scene, "blendermcp_port")
        layout.prop(scene, "blendermcp_use_polyhaven", text="Use assets from Poly Haven")
        # Hyper3D Rodin: mode, API key, and a one-click free-trial key button.
        layout.prop(scene, "blendermcp_use_hyper3d", text="Use Hyper3D Rodin 3D model generation")
        if scene.blendermcp_use_hyper3d:
            layout.prop(scene, "blendermcp_hyper3d_mode", text="Rodin Mode")
            layout.prop(scene, "blendermcp_hyper3d_api_key", text="API Key")
            layout.operator("blendermcp.set_hyper3d_free_trial_api_key", text="Set Free Trial API Key")
        layout.prop(scene, "blendermcp_use_sketchfab", text="Use assets from Sketchfab")
        if scene.blendermcp_use_sketchfab:
            layout.prop(scene, "blendermcp_sketchfab_api_key", text="API Key")
        # Hunyuan3D: credentials for the official API; URL + tuning for a local server.
        layout.prop(scene, "blendermcp_use_hunyuan3d", text="Use Tencent Hunyuan 3D model generation")
        if scene.blendermcp_use_hunyuan3d:
            layout.prop(scene, "blendermcp_hunyuan3d_mode", text="Hunyuan3D Mode")
            if scene.blendermcp_hunyuan3d_mode == 'OFFICIAL_API':
                layout.prop(scene, "blendermcp_hunyuan3d_secret_id", text="SecretId")
                layout.prop(scene, "blendermcp_hunyuan3d_secret_key", text="SecretKey")
            if scene.blendermcp_hunyuan3d_mode == 'LOCAL_API':
                layout.prop(scene, "blendermcp_hunyuan3d_api_url", text="API URL")
                layout.prop(scene, "blendermcp_hunyuan3d_octree_resolution", text="Octree Resolution")
                layout.prop(scene, "blendermcp_hunyuan3d_num_inference_steps", text="Number of Inference Steps")
                layout.prop(scene, "blendermcp_hunyuan3d_guidance_scale", text="Guidance Scale")
                layout.prop(scene, "blendermcp_hunyuan3d_texture", text="Generate Texture")
        # Connect/disconnect toggle; when running, also show the active port.
        if not scene.blendermcp_server_running:
            layout.operator("blendermcp.start_server", text="Connect to MCP server")
        else:
            layout.operator("blendermcp.stop_server", text="Disconnect from MCP server")
            layout.label(text=f"Running on port {scene.blendermcp_port}")
# Operator to set Hyper3D API Key
# Operator: fill in the shared free-trial Hyper3D key and switch to hyper3d.ai mode.
class BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey(bpy.types.Operator):
    bl_idname = "blendermcp.set_hyper3d_free_trial_api_key"
    bl_label = "Set Free Trial API Key"

    def execute(self, context):
        scene = context.scene
        # The trial key is only valid against the main site, so force that mode.
        scene.blendermcp_hyper3d_mode = 'MAIN_SITE'
        scene.blendermcp_hyper3d_api_key = RODIN_FREE_TRIAL_KEY
        self.report({'INFO'}, "API Key set successfully!")
        return {'FINISHED'}
# Operator to start the server
# Operator: create (if needed) and start the BlenderMCP socket server.
class BLENDERMCP_OT_StartServer(bpy.types.Operator):
    bl_idname = "blendermcp.start_server"
    bl_label = "Connect to Claude"
    bl_description = "Start the BlenderMCP server to connect with Claude"

    def execute(self, context):
        scene = context.scene
        # Reuse a server instance already stashed on bpy.types, else build one
        # bound to the port currently configured in the scene.
        if not getattr(bpy.types, "blendermcp_server", None):
            bpy.types.blendermcp_server = BlenderMCPServer(port=scene.blendermcp_port)
        bpy.types.blendermcp_server.start()
        scene.blendermcp_server_running = True
        return {'FINISHED'}
# Operator to stop the server
# Operator: stop and discard the BlenderMCP socket server.
class BLENDERMCP_OT_StopServer(bpy.types.Operator):
    bl_idname = "blendermcp.stop_server"
    bl_label = "Stop the connection to Claude"
    bl_description = "Stop the connection to Claude"

    def execute(self, context):
        scene = context.scene
        server = getattr(bpy.types, "blendermcp_server", None)
        if server:
            server.stop()
            del bpy.types.blendermcp_server
        scene.blendermcp_server_running = False
        return {'FINISHED'}
# Registration functions
def register():
    """Register the addon's scene properties, UI panel, and operators."""
    # Server connection settings.
    # Consistency fix: use bpy.props.IntProperty like every other property here
    # (the file previously mixed the bare IntProperty import with bpy.props.*).
    bpy.types.Scene.blendermcp_port = bpy.props.IntProperty(
        name="Port",
        description="Port for the BlenderMCP server",
        default=9876,
        min=1024,
        max=65535
    )
    bpy.types.Scene.blendermcp_server_running = bpy.props.BoolProperty(
        name="Server Running",
        default=False
    )
    # Poly Haven integration.
    bpy.types.Scene.blendermcp_use_polyhaven = bpy.props.BoolProperty(
        name="Use Poly Haven",
        description="Enable Poly Haven asset integration",
        default=False
    )
    # Hyper3D Rodin integration.
    bpy.types.Scene.blendermcp_use_hyper3d = bpy.props.BoolProperty(
        name="Use Hyper3D Rodin",
        description="Enable Hyper3D Rodin generation integration",  # typo fix: was "generatino"
        default=False
    )
    bpy.types.Scene.blendermcp_hyper3d_mode = bpy.props.EnumProperty(
        name="Rodin Mode",
        description="Choose the platform used to call Rodin APIs",
        items=[
            ("MAIN_SITE", "hyper3d.ai", "hyper3d.ai"),
            ("FAL_AI", "fal.ai", "fal.ai"),
        ],
        default="MAIN_SITE"
    )
    bpy.types.Scene.blendermcp_hyper3d_api_key = bpy.props.StringProperty(
        name="Hyper3D API Key",
        subtype="PASSWORD",
        description="API Key provided by Hyper3D",
        default=""
    )
    # Tencent Hunyuan3D integration (official cloud API or a locally hosted server).
    bpy.types.Scene.blendermcp_use_hunyuan3d = bpy.props.BoolProperty(
        name="Use Hunyuan 3D",
        description="Enable Hunyuan asset integration",
        default=False
    )
    bpy.types.Scene.blendermcp_hunyuan3d_mode = bpy.props.EnumProperty(
        name="Hunyuan3D Mode",
        description="Choose a local or official APIs",
        items=[
            ("LOCAL_API", "local api", "local api"),
            ("OFFICIAL_API", "official api", "official api"),
        ],
        default="LOCAL_API"
    )
    bpy.types.Scene.blendermcp_hunyuan3d_secret_id = bpy.props.StringProperty(
        name="Hunyuan 3D SecretId",
        description="SecretId provided by Hunyuan 3D",
        default=""
    )
    bpy.types.Scene.blendermcp_hunyuan3d_secret_key = bpy.props.StringProperty(
        name="Hunyuan 3D SecretKey",
        subtype="PASSWORD",
        description="SecretKey provided by Hunyuan 3D",
        default=""
    )
    bpy.types.Scene.blendermcp_hunyuan3d_api_url = bpy.props.StringProperty(
        name="API URL",
        description="URL of the Hunyuan 3D API service",
        default="http://localhost:8081"
    )
    # Local-server generation tuning parameters.
    bpy.types.Scene.blendermcp_hunyuan3d_octree_resolution = bpy.props.IntProperty(
        name="Octree Resolution",
        description="Octree resolution for the 3D generation",
        default=256,
        min=128,
        max=512,
    )
    bpy.types.Scene.blendermcp_hunyuan3d_num_inference_steps = bpy.props.IntProperty(
        name="Number of Inference Steps",
        description="Number of inference steps for the 3D generation",
        default=20,
        min=20,
        max=50,
    )
    bpy.types.Scene.blendermcp_hunyuan3d_guidance_scale = bpy.props.FloatProperty(
        name="Guidance Scale",
        description="Guidance scale for the 3D generation",
        default=5.5,
        min=1.0,
        max=10.0,
    )
    bpy.types.Scene.blendermcp_hunyuan3d_texture = bpy.props.BoolProperty(
        name="Generate Texture",
        description="Whether to generate texture for the 3D model",
        default=False,
    )
    # Sketchfab integration.
    bpy.types.Scene.blendermcp_use_sketchfab = bpy.props.BoolProperty(
        name="Use Sketchfab",
        description="Enable Sketchfab asset integration",
        default=False
    )
    bpy.types.Scene.blendermcp_sketchfab_api_key = bpy.props.StringProperty(
        name="Sketchfab API Key",
        subtype="PASSWORD",
        description="API Key provided by Sketchfab",
        default=""
    )
    bpy.utils.register_class(BLENDERMCP_PT_Panel)
    bpy.utils.register_class(BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey)
    bpy.utils.register_class(BLENDERMCP_OT_StartServer)
    bpy.utils.register_class(BLENDERMCP_OT_StopServer)
    print("BlenderMCP addon registered")
def unregister():
    """Tear down the addon: stop the server, unregister classes and scene properties."""
    # Shut the socket server down before its classes disappear.
    server = getattr(bpy.types, "blendermcp_server", None)
    if server:
        server.stop()
        del bpy.types.blendermcp_server
    for cls in (
        BLENDERMCP_PT_Panel,
        BLENDERMCP_OT_SetFreeTrialHyper3DAPIKey,
        BLENDERMCP_OT_StartServer,
        BLENDERMCP_OT_StopServer,
    ):
        bpy.utils.unregister_class(cls)
    # Remove every scene property that register() installed.
    for prop_name in (
        "blendermcp_port",
        "blendermcp_server_running",
        "blendermcp_use_polyhaven",
        "blendermcp_use_hyper3d",
        "blendermcp_hyper3d_mode",
        "blendermcp_hyper3d_api_key",
        "blendermcp_use_sketchfab",
        "blendermcp_sketchfab_api_key",
        "blendermcp_use_hunyuan3d",
        "blendermcp_hunyuan3d_mode",
        "blendermcp_hunyuan3d_secret_id",
        "blendermcp_hunyuan3d_secret_key",
        "blendermcp_hunyuan3d_api_url",
        "blendermcp_hunyuan3d_octree_resolution",
        "blendermcp_hunyuan3d_num_inference_steps",
        "blendermcp_hunyuan3d_guidance_scale",
        "blendermcp_hunyuan3d_texture",
    ):
        delattr(bpy.types.Scene, prop_name)
    print("BlenderMCP addon unregistered")
# Allow registering the addon by running this file directly from Blender's text editor.
if __name__ == "__main__":
    register()