
MemOS-MCP

by qinshu1109
Apache 2.0
Platforms: Linux, Apple
config.json (2.01 kB)
{ "model_schema": "memos.configs.mem_cube.GeneralMemCubeConfig", "user_id": "Ki-Seki", "cube_id": "Ki-Seki/mem_cube_2", "config_filename": "config.json", "text_mem": { "backend": "general_text", "config": { "cube_id": "Ki-Seki/mem_cube_2", "memory_filename": "textual_memory.json", "extractor_llm": { "backend": "huggingface", "config": { "model_name_or_path": "Qwen/Qwen3-1.7B", "temperature": 0.8, "max_tokens": 1024, "top_p": 0.9, "top_k": 50, "add_generation_prompt": true, "remove_think_prefix": false } }, "vector_db": { "backend": "qdrant", "config": { "collection_name": "Ki-Seki/mem_cube_2", "vector_dimension": 768, "distance_metric": "cosine" } }, "embedder": { "backend": "ollama", "config": { "model_name_or_path": "nomic-embed-text:latest", "embedding_dims": null, "api_base": "http://localhost:11434" } } } }, "act_mem": { "backend": "kv_cache", "config": { "memory_filename": "activation_memory.pickle", "extractor_llm": { "backend": "huggingface", "config": { "model_name_or_path": "Qwen/Qwen3-1.7B", "temperature": 0.8, "max_tokens": 1024, "top_p": 0.9, "top_k": 50, "add_generation_prompt": true, "remove_think_prefix": false } } } }, "para_mem": { "backend": "lora", "config": { "memory_filename": "parametric_memory.adapter", "extractor_llm": { "backend": "huggingface", "config": { "model_name_or_path": "Qwen/Qwen3-1.7B", "temperature": 0.8, "max_tokens": 1024, "top_p": 0.9, "top_k": 50, "add_generation_prompt": true, "remove_think_prefix": false } } } } }

MCP directory API

We publish the full metadata for every server in the MCP directory through our MCP API:

curl -X GET 'https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP'
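The endpoint returns JSON. Here is a small Python equivalent of the request above, using only the standard library; the response schema is not documented on this page, so it is printed verbatim:

# Sketch: fetching this server's directory entry from the Glama MCP API.
# The endpoint is the one shown in the curl example; the response schema
# is not specified here, so the JSON is printed as-is.
import json
import urllib.request

url = "https://glama.ai/api/mcp/v1/servers/qinshu1109/memos-MCP"
with urllib.request.urlopen(url) as resp:
    data = json.load(resp)

print(json.dumps(data, indent=2))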

If you have feedback or need assistance with the MCP directory API, please join our Discord server.