Skip to main content
Glama

MemOS-MCP

by qinshu1109
Apache 2.0
3
  • Linux
  • Apple
test_ollama.py (2.01 kB)
import unittest
from unittest.mock import patch

from memos.configs.embedder import EmbedderConfigFactory
from memos.embedders.factory import EmbedderFactory, OllamaEmbedder


def _make_ollama_embedder():
    """Build an embedder for the Ollama backend via the config factory.

    Shared by both tests so the config literal is defined exactly once.
    """
    config = EmbedderConfigFactory.model_validate(
        {
            "backend": "ollama",
            "config": {
                "model_name_or_path": "nomic-embed-text:latest",
                "embedding_dims": 768,
            },
        }
    )
    return EmbedderFactory.from_config(config)


class TestEmbedderFactory(unittest.TestCase):
    """Tests for EmbedderFactory wiring with the Ollama backend.

    NOTE(review): ``OllamaEmbedder.embed`` is patched in every test, so these
    assertions verify that the factory builds an embedder and forwards the
    input list to ``embed`` — they do not exercise real embedding output.
    """

    @patch.object(OllamaEmbedder, "embed")
    def test_embed_single_text(self, mock_embed):
        """Embedding a single text forwards the one-element list unchanged."""
        mock_embed.return_value = [[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]]

        embedder = _make_ollama_embedder()
        text = "This is a sample text for embedding generation."
        result = embedder.embed([text])

        # The embedder must pass the caller's list through verbatim.
        mock_embed.assert_called_once_with([text])
        # Result shape matches the mocked vector length.
        self.assertEqual(len(result[0]), 6)

    @patch.object(OllamaEmbedder, "embed")
    def test_embed_batch_text(self, mock_embed):
        """Embedding several texts at once forwards the full batch in order."""
        mock_embed.return_value = [
            [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
            [0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
            [0.3, 0.4, 0.5, 0.6, 0.1, 0.2],
        ]

        embedder = _make_ollama_embedder()
        texts = [
            "First sample text for batch embedding.",
            "Second sample text for batch embedding.",
            "Third sample text for batch embedding.",
        ]
        result = embedder.embed(texts)

        mock_embed.assert_called_once_with(texts)
        # One vector per input text, each of the mocked length.
        self.assertEqual(len(result), 3)
        self.assertEqual(len(result[0]), 6)


if __name__ == "__main__":
    # Allow running this file directly (python test_ollama.py), not only
    # through a test runner's discovery.
    unittest.main()

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.