save_to_mind
Store content in a neural memory system that automatically activates related nodes and strengthens associations through use.
Instructions
Save content to Hebbian Mind with automatic node activation and Hebbian edge strengthening.
Input Schema
Input parameters (JSON Schema), summarized in the table below:
| Name | Required | Description | Default |
|---|---|---|---|
| content | Yes | Content to save | |
| summary | No | Optional summary | |
| source | No | Source identifier (default: HEBBIAN_MIND) | |
| importance | No | Importance 0-1 (default: 0.5) | |
| emotional_intensity | No | Emotional intensity 0-1 (default: 0.5) | |
Implementation Reference
- src/hebbian_mind/server.py:1147-1240 (handler): The 'save_to_mind' tool handler in 'call_tool' processes input, calls 'db.analyze_content', and then 'db.save_memory' to persist the data to the neural graph database.
# Handler branch for the "save_to_mind" tool (inside call_tool).
# Flow: validate inputs -> analyze content into node activations ->
# persist via db.save_memory -> report activations and edge count.
if name == "save_to_mind":
    # Validate and normalize all caller-supplied arguments up front.
    content = _validate_string(arguments.get("content", ""), "content")
    summary = arguments.get("summary", "")
    if summary:
        # Only validate when provided; empty summary is auto-generated below.
        summary = _validate_string(summary, "summary", max_length=10000)
    source = arguments.get("source", "HEBBIAN_MIND")
    if source:
        source = _validate_string(source, "source", max_length=200)
    # Both scores are clamped/checked to the [0.0, 1.0] range.
    importance = _validate_number(arguments.get("importance", 0.5), "importance", 0.0, 1.0)
    emotional_intensity = _validate_number(
        arguments.get("emotional_intensity", 0.5), "emotional_intensity", 0.0, 1.0
    )
    # Map the content onto concept-node activations; an empty result means
    # nothing scored above the configured threshold, so there is nothing to save.
    activations = db.analyze_content(content)
    if not activations:
        return [
            types.TextContent(
                type="text",
                text=json.dumps(
                    {
                        "success": False,
                        "message": "No concept nodes activated above threshold",
                        "threshold": Config.ACTIVATION_THRESHOLD,
                    },
                    indent=2,
                ),
            )
        ]
    # Random 16-hex-char suffix makes collisions effectively impossible.
    memory_id = f"hebbian_mind_{uuid.uuid4().hex[:16]}"
    if not summary:
        # Auto-summary from the top activated concepts (assumes activations
        # are pre-sorted by score -- inherited from analyze_content's output).
        top_nodes = [a["name"] for a in activations[:5]]
        summary = f"Activated {len(activations)} concepts: {', '.join(top_nodes)}"
    try:
        db.save_memory(
            memory_id,
            content,
            summary,
            source,
            activations,
            importance,
            emotional_intensity,
        )
        success = True
    except RuntimeError as save_err:
        # save_memory raises RuntimeError on any persistence failure;
        # return a sanitized error payload rather than leaking internals.
        logger.error(f"save_to_mind: {save_err}")
        return [
            types.TextContent(
                type="text",
                text=json.dumps(
                    {
                        "success": False,
                        "error": sanitize_error_message(save_err),
                    },
                    indent=2,
                ),
            )
        ]
    # Extract PRECOG concepts from first activation if present
    precog_concepts = activations[0].get("precog_concepts", []) if activations else []
    precog_boosted_count = sum(1 for a in activations if a.get("precog_boosted", False))
    # Success payload: memory id, PRECOG metadata, per-node activation detail,
    # and the number of Hebbian edges strengthened (all unordered node pairs:
    # n*(n-1)/2 for n co-activated nodes).
    return [
        types.TextContent(
            type="text",
            text=json.dumps(
                {
                    "success": success,
                    "memory_id": memory_id,
                    # True when a secondary on-disk connection is configured.
                    "dual_write": db.disk_conn is not None,
                    "precog_available": PRECOG_AVAILABLE,
                    "precog_concepts": precog_concepts,
                    "precog_boosted_nodes": precog_boosted_count,
                    "activations": [
                        {
                            "node": a["node_name"],
                            "name": a["name"],
                            "category": a["category"],
                            "score": round(a["score"], 3),
                            "precog_boosted": a.get("precog_boosted", False),
                        }
                        for a in activations
                    ],
                    "edges_strengthened": (len(activations) * (len(activations) - 1)) // 2,
                    "summary": summary,
                },
                indent=2,
            ),
        )
    ]
- src/hebbian_mind/server.py:604-686 (handler): 'HebbianMindDatabase.save_memory' executes the actual logic for saving the memory, managing transactions, node activation updates, and Hebbian edge strengthening.
def save_memory(
    self,
    memory_id: str,
    content: str,
    summary: str,
    source: str,
    activations: List[Dict],
    importance: float = 0.5,
    emotional_intensity: float = 0.5,
) -> bool:
    """Save a memory with all activations and edges in single transaction.

    Thread-safe: entire operation runs under _lock.

    Args:
        memory_id: Unique identifier for the new memory row.
        content: Full text content to persist.
        summary: Human-readable summary of the content.
        source: Origin identifier for the memory.
        activations: Dicts with at least 'node_id' and 'score' keys.
        importance: Importance weight in [0, 1]; also seeds
            effective_importance in the insert below.
        emotional_intensity: Emotional weight in [0, 1].

    Returns:
        True on success (failure always raises instead).

    Raises:
        RuntimeError: On failure, with details about what went wrong.
    """
    with self._lock:
        try:
            # All writes below are one atomic unit: committed together
            # or rolled back together in the except branch.
            self._begin_transaction()
            now = time.time()
            # Insert memory (no OR REPLACE -- UUID prevents collisions,
            # and we must never silently overwrite existing memories)
            self._dual_write(
                """
                INSERT INTO memories
                    (memory_id, content, summary, source, importance,
                     emotional_intensity, last_accessed,
                     effective_importance, access_count)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, 0)
                """,
                (
                    memory_id,
                    content,
                    summary,
                    source,
                    importance,
                    emotional_intensity,
                    now,
                    importance,
                ),
            )
            # Record activations and update node counts
            for activation in activations:
                self._dual_write(
                    """
                    INSERT INTO memory_activations
                        (memory_id, node_id, activation_score)
                    VALUES (?, ?, ?)
                    """,
                    (memory_id, activation["node_id"], activation["score"]),
                )
                self._dual_write(
                    """
                    UPDATE nodes
                    SET activation_count = activation_count + 1,
                        last_activated = CURRENT_TIMESTAMP
                    WHERE id = ?
                    """,
                    (activation["node_id"],),
                )
            # Hebbian learning: strengthen edges between co-activated nodes
            # (every unordered pair -- O(n^2) in the activation count).
            node_ids = [a["node_id"] for a in activations]
            for i, source_id in enumerate(node_ids):
                for target_id in node_ids[i + 1:]:
                    self._strengthen_edge(source_id, target_id)
            # Homeostatic maintenance every N co-activations
            # (counter increments once per saved memory).
            self._coactivation_count += 1
            if self._coactivation_count % HOMEOSTATIC_INTERVAL == 0:
                self._apply_time_decay()
                self._apply_homeostatic_scaling()
            self._commit_transaction()
            return True
        except Exception as e:
            # Roll back the partial transaction, then re-raise as a
            # RuntimeError so callers get a single failure type.
            logger.error(f"save_memory failed: {e}")
            self._rollback_transaction()
            raise RuntimeError(f"save_memory failed for memory_id={memory_id}: {e}") from e
- src/hebbian_mind/server.py:1008-1031 (registration): Registration of the 'save_to_mind' tool in the list_tools function, including its schema definition.
# Registration of the "save_to_mind" tool. The schema now declares the
# bounds and defaults the handler actually enforces (_validate_number
# clamps importance/emotional_intensity to [0.0, 1.0]; source defaults
# to "HEBBIAN_MIND"), so MCP clients can validate arguments up front
# instead of discovering the constraints at call time.
types.Tool(
    name="save_to_mind",
    description="Save content to Hebbian Mind with automatic node activation and Hebbian edge strengthening.",
    inputSchema={
        "type": "object",
        "properties": {
            "content": {"type": "string", "description": "Content to save"},
            "summary": {"type": "string", "description": "Optional summary"},
            "source": {
                "type": "string",
                "description": "Source identifier (default: HEBBIAN_MIND)",
                "default": "HEBBIAN_MIND",
            },
            "importance": {
                "type": "number",
                "description": "Importance 0-1 (default: 0.5)",
                "minimum": 0.0,
                "maximum": 1.0,
                "default": 0.5,
            },
            "emotional_intensity": {
                "type": "number",
                "description": "Emotional intensity 0-1 (default: 0.5)",
                "minimum": 0.0,
                "maximum": 1.0,
                "default": 0.5,
            },
        },
        "required": ["content"],
    },
),