import os
import pytest
import yaml
from pathlib import Path
from pydantic import ValidationError
import copy
import logging
from knowledge_mcp.config import Config, KnowledgeBaseConfig, LightRAGEmbeddingConfig, LightRAGLLMConfig, EmbeddingCacheConfig, LoggingConfig
# Fixture to reset Config singleton state before each test
@pytest.fixture(autouse=True)
def reset_config_singleton():
Config._instance = None
Config._loaded = False
# Also clear any potentially loaded environment variables from .env files
# by removing them if they were set by a previous test's .env file.
env_vars_to_clear = [
"MY_LLM_API_KEY", "MY_EMBEDDING_API_KEY", "MY_API_BASE", "NOT_SET_API_KEY",
"DOTENV_LLM_KEY", "DOTENV_EMBEDDING_KEY", "MY_VAR", "TEST_VAR_IN_DOTENV",
"SPECIFIC_ENV_KEY", "DATABASE_URL", "LOG_LEVEL" # Common examples
]
for var in env_vars_to_clear:
if var in os.environ:
del os.environ[var]
yield # Test runs here
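# The fixture above resets class-level state that Config is assumed to keep for its
# singleton behaviour. This is a sketch inferred from the assertions in this suite,
# not from knowledge_mcp/config.py itself:
#
#     class Config(BaseModel):
#         _instance = None   # set by Config.load(), returned by Config.get_instance()
#         _loaded = False    # get_instance() raises RuntimeError while this is False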
@pytest.fixture
def minimal_config_dict():
"""Provides a dictionary for a minimal valid configuration."""
return {
"knowledge_base": {
"base_dir": "kb_data/"
},
"lightrag": {
"llm": {
"provider": "openai",
"model_name": "gpt-3.5-turbo",
"max_token_size": 4096,
"api_key": "test_llm_api_key"
},
"embedding": {
"provider": "openai",
"model_name": "text-embedding-ada-002",
"api_key": "test_embedding_api_key",
"embedding_dim": 1536,
"max_token_size": 8192
},
"embedding_cache": {
"enabled": True,
"similarity_threshold": 0.95
}
},
"logging": { # Keep some defaults to test them as well
"level": "DEBUG",
},
"env_file": ".env.test" # Relative to the config file
}
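# For reference, dumping the dictionary above to YAML yields a config file equivalent to
# the following (yaml.dump may order the keys differently):
#
#     knowledge_base:
#       base_dir: kb_data/
#     lightrag:
#       llm:
#         provider: openai
#         model_name: gpt-3.5-turbo
#         max_token_size: 4096
#         api_key: test_llm_api_key
#       embedding:
#         provider: openai
#         model_name: text-embedding-ada-002
#         api_key: test_embedding_api_key
#         embedding_dim: 1536
#         max_token_size: 8192
#       embedding_cache:
#         enabled: true
#         similarity_threshold: 0.95
#     logging:
#       level: DEBUG
#     env_file: .env.test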
@pytest.fixture
def create_config_file(tmp_path, minimal_config_dict):
"""
Helper fixture to create a temporary config file.
Returns a function that takes content_override (dict) and filename.
The config file is created in tmp_path.
"""
def _create(content_override: dict = None, filename: str = "config.yaml"):
        config_content = copy.deepcopy(minimal_config_dict)  # deep copy so merges cannot mutate the shared fixture dict
if content_override:
# Deep merge override logic
for key, value in content_override.items():
if isinstance(value, dict) and key in config_content and isinstance(config_content[key], dict):
current_level = config_content[key]
for sub_key, sub_value in value.items():
if isinstance(sub_value, dict) and sub_key in current_level and isinstance(current_level[sub_key], dict):
current_level[sub_key].update(sub_value)
else:
current_level[sub_key] = sub_value
else:
config_content[key] = value
config_file_path = tmp_path / filename
with open(config_file_path, 'w') as f:
yaml.dump(config_content, f)
return config_file_path
return _create
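# Typical usage inside a test (see the test cases below):
#
#     config_file = create_config_file(
#         content_override={"lightrag": {"llm": {"api_key": "${MY_LLM_API_KEY}"}}},
#         filename="my_config.yaml",
#     )
#     Config.load(config_file)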
# --- Test Cases ---
def test_successful_config_loading(create_config_file, tmp_path):
"""1. Test loading a valid configuration file."""
# Create a .env file that will be referenced by the config
dot_env_file = tmp_path / ".env.test"
dot_env_file.write_text("TEST_VAR_IN_DOTENV=some_value_from_dotenv")
config_file = create_config_file() # Uses .env.test
Config.load(config_file)
instance = Config.get_instance()
assert instance is not None
# 1. Verify Pydantic models are correctly populated
assert isinstance(instance.knowledge_base, KnowledgeBaseConfig)
assert instance.knowledge_base.base_dir == (tmp_path / "kb_data").resolve()
assert isinstance(instance.lightrag.llm, LightRAGLLMConfig)
assert instance.lightrag.llm.provider == "openai"
assert instance.lightrag.llm.model_name == "gpt-3.5-turbo"
assert instance.lightrag.llm.max_token_size == 4096
assert instance.lightrag.llm.api_key == "test_llm_api_key"
assert instance.lightrag.llm.api_base is None # Optional, not in minimal_config
assert instance.lightrag.llm.kwargs == {} # Default
assert isinstance(instance.lightrag.embedding, LightRAGEmbeddingConfig)
assert instance.lightrag.embedding.provider == "openai"
assert instance.lightrag.embedding.model_name == "text-embedding-ada-002"
assert instance.lightrag.embedding.api_key == "test_embedding_api_key"
assert instance.lightrag.embedding.api_base is None # Optional
assert instance.lightrag.embedding.embedding_dim == 1536
assert instance.lightrag.embedding.max_token_size == 8192
assert isinstance(instance.lightrag.embedding_cache, EmbeddingCacheConfig)
assert instance.lightrag.embedding_cache.enabled is True
assert instance.lightrag.embedding_cache.similarity_threshold == 0.95
assert isinstance(instance.logging, LoggingConfig)
assert instance.logging.level == "DEBUG" # From minimal_config_dict
# Test defaults for fields not in minimal_config_dict's logging section
assert instance.logging.max_bytes == 10485760 # Default
assert instance.logging.backup_count == 5 # Default
assert instance.logging.detailed_format == "%(asctime)s - %(name)s - %(levelname)s - %(message)s" # Default
assert instance.logging.default_format == "%(levelname)s: %(message)s" # Default
assert instance.env_file == dot_env_file.resolve() # Path resolved correctly
def test_env_variable_substitution_set(create_config_file, monkeypatch, tmp_path):
"""2. Test substitution of environment variables (when set)."""
monkeypatch.setenv("MY_LLM_API_KEY", "llm_key_from_actual_env")
monkeypatch.setenv("MY_EMBEDDING_API_KEY", "embedding_key_from_actual_env")
monkeypatch.setenv("MY_API_BASE", "https://api.example.com/v1")
config_override = {
"lightrag": {
"llm": {"api_key": "${MY_LLM_API_KEY}", "api_base": "${MY_API_BASE}"},
"embedding": {"api_key": "${MY_EMBEDDING_API_KEY}"}
},
"env_file": ".env.substitution_test" # Ensure a specific .env for this test
}
# Create the dummy .env file specified in the config
(tmp_path / ".env.substitution_test").write_text("SOME_OTHER_VAR=some_value")
config_file = create_config_file(content_override=config_override)
Config.load(config_file)
instance = Config.get_instance()
assert instance.lightrag.llm.api_key == "llm_key_from_actual_env"
assert instance.lightrag.llm.api_base == "https://api.example.com/v1"
assert instance.lightrag.embedding.api_key == "embedding_key_from_actual_env"
def test_env_variable_substitution_not_set(create_config_file, tmp_path, caplog):
"""2. Test substitution when an environment variable is not set (should log warning and keep placeholder)."""
config_override = {
"lightrag": {
"llm": {"api_key": "${NOT_SET_API_KEY}"}
},
"env_file": ".env.not_set_test"
}
(tmp_path / ".env.not_set_test").write_text("") # Create empty .env
config_file = create_config_file(content_override=config_override)
Config.load(config_file)
instance = Config.get_instance()
assert instance.lightrag.llm.api_key == "${NOT_SET_API_KEY}" # Should remain unsubstituted
assert "Env var 'NOT_SET_API_KEY' not found for substitution." in caplog.text
def test_path_resolution(create_config_file, tmp_path):
"""3. Test correct resolution of paths relative to the config file."""
# config.yaml will be in tmp_path. my_kb/ and my_env.env are relative to it.
override = {
"knowledge_base": {"base_dir": "my_kb/"}, # Relative path
"env_file": "my_env.env" # Relative path
}
# Create the .env file that will be referenced by this config
(tmp_path / "my_env.env").write_text("TEST_VAR_IN_MY_ENV=resolved_path_test")
config_file = create_config_file(content_override=override, filename="path_test_config.yaml")
Config.load(config_file)
instance = Config.get_instance()
# 3. Verify knowledge_base.base_dir resolution
expected_kb_path = (tmp_path / "my_kb").resolve()
assert instance.knowledge_base.base_dir == expected_kb_path
# 3. Verify env_file path resolution
expected_env_file_path = (tmp_path / "my_env.env").resolve()
assert instance.env_file == expected_env_file_path
def test_dot_env_file_loading_and_precedence(create_config_file, tmp_path, monkeypatch):
"""4. Test loading from .env and precedence (env > .env)."""
    # API_KEY_MAIN is set both in the actual environment and in the .env file
    monkeypatch.setenv("API_KEY_MAIN", "key_from_ACTUAL_ENV")
    # API_KEY_ONLY_ENV is set only in the actual environment;
    # API_KEY_ONLY_DOTENV (written to the .env file below) exists only there
    monkeypatch.setenv("API_KEY_ONLY_ENV", "key_only_from_ACTUAL_ENV")
dot_env_content = (
"API_KEY_MAIN=key_from_DOTENV\n"
"API_KEY_ONLY_DOTENV=key_only_from_DOTENV\n"
)
# .env.test is the default in minimal_config_dict
(tmp_path / ".env.test").write_text(dot_env_content)
config_override = {
"lightrag": {
"llm": {"api_key": "${API_KEY_MAIN}"},
"embedding": {"api_key": "${API_KEY_ONLY_DOTENV}"},
# Add a new field to test a var only in os.environ
"embedding_cache": {"similarity_threshold": "${API_KEY_ONLY_ENV}"} # Re-using a field for test simplicity
}
}
config_file = create_config_file(content_override=config_override)
Config.load(config_file)
instance = Config.get_instance()
# 4. Test .env loading: API_KEY_ONLY_DOTENV should be loaded from .env
assert instance.lightrag.embedding.api_key == "key_only_from_DOTENV"
    # 4. Test precedence: API_KEY_MAIN from the actual environment should win over the
    # .env value (see the load-order sketch after this test)
    assert instance.lightrag.llm.api_key == "key_from_ACTUAL_ENV"
    # Test a variable that exists only in the actual environment (not in the .env file)
    assert instance.lightrag.llm.api_base == "key_only_from_ACTUAL_ENV"
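# Sketch of the load order the precedence assertions above assume (illustrative only):
# Config.load() resolves env_file relative to the config file, calls python-dotenv's
# load_dotenv() on it, and only then substitutes ${VAR} placeholders from os.environ.
# With load_dotenv's default override=False, variables already present in the process
# environment win over values from the .env file, which is the behaviour asserted above.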
def test_dot_env_file_specified_but_not_found(create_config_file, tmp_path, caplog):
"""4. & 5. Test RuntimeError (wrapping FileNotFoundError) if .env file is specified but not found."""
# config will specify ".env.missing" which we will not create
config_override = {"env_file": ".env.missing"}
config_file = create_config_file(content_override=config_override, filename="config_with_missing_env.yaml")
# Ensure the .env file does NOT exist
missing_env_file = tmp_path / ".env.missing"
if missing_env_file.exists(): # Should not exist with tmp_path, but good practice
missing_env_file.unlink()
with pytest.raises(RuntimeError) as excinfo:
Config.load(config_file)
assert isinstance(excinfo.value.__cause__, FileNotFoundError)
assert f"Specified env_file not found: {missing_env_file.resolve()}" in str(excinfo.value)
# Check log message as well
assert f"Specified env_file not found: {missing_env_file.resolve()}" in caplog.text
def test_error_main_config_file_not_found(caplog):
"""5. Test FileNotFoundError (via RuntimeError) if the main config file is not found."""
with pytest.raises(RuntimeError) as excinfo:
Config.load("non_existent_config.yaml")
assert isinstance(excinfo.value.__cause__, FileNotFoundError)
assert "Config file not found: non_existent_config.yaml" in str(excinfo.value)
assert "Config file not found" in caplog.text # Check log
def test_error_invalid_yaml_format(tmp_path, caplog):
"""5. Test ValueError/RuntimeError if the config file contains invalid YAML."""
invalid_yaml_file = tmp_path / "invalid.yaml"
    # The '{' flow mapping is never closed, which makes this invalid YAML
    invalid_yaml_file.write_text("knowledge_base: {base_dir: kb\nlightrag: \n llm: BAD_YAML_INDENT")
    # No env_file is specified in this content, so the failure should come from YAML parsing
    # itself. It may surface either in the preliminary parse (used to locate env_file) or in
    # the final full parse; the assertions below accept either message.
with pytest.raises(RuntimeError) as excinfo:
Config.load(invalid_yaml_file)
# The error could be from preliminary parse or final parse
assert ("Invalid YAML format" in str(excinfo.value) or \
"Could not preliminary parse YAML" in str(excinfo.value) or \
"Error parsing final YAML" in str(excinfo.value.__cause__)) # Check cause for more specific error
assert "Error parsing final YAML" in caplog.text or "Could not preliminary parse YAML" in caplog.text
def test_error_pydantic_validation_missing_field(create_config_file, tmp_path, caplog):
"""5. Test RuntimeError (wrapping ValidationError) if required fields are missing."""
# Missing 'api_key', 'model_name', etc. from lightrag.llm
config_override = {
"lightrag": {"llm": {"provider": "openai_missing_fields"}},
"env_file": ".env.validation_test"
}
(tmp_path / ".env.validation_test").write_text("")
config_file = create_config_file(content_override=config_override)
with pytest.raises(RuntimeError) as excinfo:
Config.load(config_file)
assert isinstance(excinfo.value.__cause__, ValidationError)
assert "validation error for Config" in str(excinfo.value) # Pydantic v2 style
assert "Failed to load or validate configuration" in caplog.text
def test_error_pydantic_validation_incorrect_type(create_config_file, tmp_path, caplog):
"""5. Test RuntimeError (wrapping ValidationError) if data has incorrect type."""
config_override = {
"lightrag": {"embedding_cache": {"enabled": "this_is_not_a_boolean"}}, # Incorrect type
"env_file": ".env.validation_type_test"
}
(tmp_path / ".env.validation_type_test").write_text("")
config_file = create_config_file(content_override=config_override)
with pytest.raises(RuntimeError) as excinfo:
Config.load(config_file)
assert isinstance(excinfo.value.__cause__, ValidationError)
assert "validation error for Config" in str(excinfo.value)
assert "Failed to load or validate configuration" in caplog.text
def test_singleton_behavior_get_instance(create_config_file, tmp_path):
"""6. Test Config.get_instance() returns same instance and works after load."""
(tmp_path / ".env.test").write_text("") # From minimal_config_dict
config_file = create_config_file()
Config.load(config_file)
instance1 = Config.get_instance()
instance2 = Config.get_instance()
assert instance1 is instance2 # Should be the exact same object
assert Config._instance is instance1 # Internal state check
def test_singleton_get_instance_before_load(caplog):
"""6. Test RuntimeError if get_instance() is called before load()."""
with pytest.raises(RuntimeError) as excinfo:
Config.get_instance()
assert "Configuration has not been loaded. Call load(path) first." in str(excinfo.value)
# Config._loaded should be False
assert Config._loaded is False
def test_singleton_reload_configuration_updates_instance(create_config_file, tmp_path, caplog):
"""6. Test that a second call to load() with a different config updates the instance."""
# First load
(tmp_path / ".env.test").write_text("KEY=val1")
config_file1 = create_config_file(filename="config1.yaml") # Uses .env.test
Config.load(config_file1)
instance1 = Config.get_instance()
assert instance1.knowledge_base.base_dir == (tmp_path / "kb_data").resolve()
assert instance1.env_file == (tmp_path / ".env.test").resolve()
# Second load with a different configuration
config_override2 = {
"knowledge_base": {"base_dir": "kb_data_alt/"}, # Different base_dir
"env_file": ".env.alt" # Different .env file
}
(tmp_path / ".env.alt").write_text("KEY_ALT=val2_alt")
config_file2 = create_config_file(content_override=config_override2, filename="config2.yaml")
Config.load(config_file2) # This should replace the old instance
instance2 = Config.get_instance()
assert instance1 is not instance2 # A new instance should be created
assert Config._instance is instance2 # Internal state check
assert instance2.knowledge_base.base_dir == (tmp_path / "kb_data_alt").resolve()
assert instance2.env_file == (tmp_path / ".env.alt").resolve()
assert "Configuration loaded and validated successfully." in caplog.text # Should log success for both loads
def test_default_values_applied(create_config_file, tmp_path):
"""7. Test that default values in Pydantic models are applied correctly."""
# Create a config that omits fields with defaults
# logging.level has default "INFO"
# logging.max_bytes, backup_count, formats also have defaults
# lightrag.llm.kwargs has default_factory=dict
config_content_missing_defaults = {
"knowledge_base": {"base_dir": "kb_for_defaults"},
"lightrag": {
"llm": {"provider": "a", "model_name": "b", "max_token_size": 1, "api_key": "c"}, # No kwargs
"embedding": {"provider": "d", "model_name": "e", "api_key": "f", "embedding_dim": 10, "max_token_size": 100},
"embedding_cache": {"enabled": False, "similarity_threshold": 0.8}
},
"logging": { # Empty logging section, should get all defaults
},
"env_file": ".env.defaults_test"
}
(tmp_path / ".env.defaults_test").write_text("")
config_file = create_config_file(content_override=config_content_missing_defaults, filename="config_defaults.yaml")
Config.load(config_file)
instance = Config.get_instance()
# 7. Check LoggingConfig defaults
assert instance.logging.level == "INFO"
assert instance.logging.max_bytes == 10485760
assert instance.logging.backup_count == 5
assert instance.logging.detailed_format == "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
assert instance.logging.default_format == "%(levelname)s: %(message)s"
# 7. Check LightRAGLLMConfig default for kwargs
assert instance.lightrag.llm.kwargs == {}
# 7. Check optional fields not provided (already tested in successful_config_loading)
assert instance.lightrag.llm.api_base is None
assert instance.lightrag.embedding.api_base is None
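# The defaults asserted above imply a LoggingConfig model roughly like the following
# (inferred from the assertions in this suite, not copied from config.py):
#
#     class LoggingConfig(BaseModel):
#         level: str = "INFO"
#         max_bytes: int = 10485760          # 10 MiB
#         backup_count: int = 5
#         detailed_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
#         default_format: str = "%(levelname)s: %(message)s"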
def test_empty_env_file_specified_and_exists(create_config_file, tmp_path, caplog):
"""Test behavior when an empty .env file is specified and exists (should be fine)."""
# minimal_config_dict specifies .env.test. We create it as empty.
(tmp_path / ".env.test").write_text("")
config_file = create_config_file()
Config.load(config_file)
instance = Config.get_instance()
assert instance is not None # Should load successfully
assert f"Loading environment variables from: {(tmp_path / '.env.test').resolve()}" in caplog.text
# Ensure no errors about .env file processing itself, other than it being loaded.
assert "Error loading .env file" not in caplog.text
def test_env_file_in_different_directory_relative(tmp_path, minimal_config_dict):
"""Test when env_file path is relative and points to a different directory."""
# Setup:
# tmp_path/
# config_files/
# settings.yaml (env_file: ../env_storage/.my_app_env)
# env_storage/
# .my_app_env
config_dir = tmp_path / "config_files"
config_dir.mkdir()
env_storage_dir = tmp_path / "env_storage"
env_storage_dir.mkdir()
    config_content = copy.deepcopy(minimal_config_dict)  # deep copy before mutating nested values
# env_file path is relative to the location of 'settings.yaml'
config_content["env_file"] = "../env_storage/.my_app_env"
# Adjust a field to be substituted by this specific .env file
config_content["lightrag"]["llm"]["api_key"] = "${SPECIFIC_LLM_KEY}"
config_file_path = config_dir / "settings.yaml"
with open(config_file_path, 'w') as f:
yaml.dump(config_content, f)
specific_env_file = env_storage_dir / ".my_app_env"
specific_env_file.write_text("SPECIFIC_LLM_KEY=key_from_specific_env_in_another_dir")
Config.load(config_file_path)
instance = Config.get_instance()
assert instance.lightrag.llm.api_key == "key_from_specific_env_in_another_dir"
assert instance.env_file == specific_env_file.resolve() # Check resolved path
def test_config_file_root_not_dictionary(tmp_path, caplog):
"""Test error if YAML root is a list, not a dictionary."""
config_file = tmp_path / "list_config.yaml"
config_file.write_text("- item1\n- item2") # YAML is a list, not a map
with pytest.raises(RuntimeError) as excinfo:
Config.load(config_file)
# This error can be caught either at preliminary parse (if env_file is sought)
# or at the final full parse.
assert ("YAML root is not a dictionary" in str(excinfo.value) or \
"Could not preliminary parse YAML" in str(excinfo.value) or \
(excinfo.value.__cause__ is not None and "YAML root is not a dictionary" in str(excinfo.value.__cause__)))
assert ("YAML root is not a dictionary." in caplog.text or \
"Could not preliminary parse YAML" in caplog.text)
def test_direct_resolve_path_static_method(tmp_path):
"""Test the _resolve_path static method directly for various cases."""
# Base path setup: /tmp/pytest-of-user/pytest-current/base_config_dir/config.yaml
base_config_file_dir = tmp_path / "base_config_dir"
base_config_file_dir.mkdir()
base_config_file_path = base_config_file_dir / "config.yaml" # This file doesn't need to exist for this test
# Case 1: Simple relative path
rel_path = "data/my_files"
expected = (base_config_file_dir / rel_path).resolve()
assert Config._resolve_path(base_config_file_path, rel_path) == expected
# Case 2: Relative path with ".."
rel_path_parent = "../other_data"
# Expected: tmp_path / other_data
expected_parent = (base_config_file_dir.parent / "other_data").resolve()
assert Config._resolve_path(base_config_file_path, rel_path_parent) == expected_parent
# Case 3: Absolute path string input
# Path.resolve() on an absolute path returns itself.
# (Path("/foo/bar") / "/abs/path").resolve() results in Path("/abs/path")
abs_path_str = "/absolute/path/to/data"
expected_abs = Path(abs_path_str).resolve()
assert Config._resolve_path(base_config_file_path, abs_path_str) == expected_abs
# Case 4: Absolute Path object input
abs_path_obj = Path("/another/abs/path.txt")
expected_abs_obj = abs_path_obj.resolve()
assert Config._resolve_path(base_config_file_path, abs_path_obj) == expected_abs_obj
# Case 5: Empty path (should resolve to the parent dir of base_config_file_path)
empty_path = ""
expected_empty = base_config_file_dir.resolve() # Parent of base_config_file_path
assert Config._resolve_path(base_config_file_path, empty_path) == expected_empty
# Case 6: Current directory path "."
current_dir_path = "."
expected_current_dir = base_config_file_dir.resolve() # Parent of base_config_file_path
assert Config._resolve_path(base_config_file_path, current_dir_path) == expected_current_dir
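# All six cases above are consistent with an implementation along these lines
# (illustrative sketch only; the real static method lives in config.py):
#
#     @staticmethod
#     def _resolve_path(config_file_path, value):
#         return (Path(config_file_path).parent / value).resolve()
#
# Joining with an absolute path discards the left-hand side, and joining with "" or "."
# leaves the parent directory unchanged, which is what cases 3 to 6 assert.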
# Summary of the coverage provided by this test suite:
"""
This commit delivers a comprehensive test suite for the `knowledge_mcp.config.Config` class.
The tests cover the following key functionalities and scenarios:
1. **Successful Configuration Loading:**
* Verified loading of a valid YAML configuration file (`test_successful_config_loading`).
* Ensured correct population of all Pydantic models (`KnowledgeBaseConfig`, `LightRAGEmbeddingConfig`, `LightRAGLLMConfig`, `EmbeddingCacheConfig`, `LoggingConfig`, `Config`) with expected values.
2. **Environment Variable Substitution:**
* Tested correct substitution of `${VAR_NAME}` syntax from environment variables (`test_env_variable_substitution_set`).
* Handled cases where environment variables are not set, ensuring the placeholder remains and a warning is logged (`test_env_variable_substitution_not_set`).
3. **Path Resolution:**
* Validated that `knowledge_base.base_dir` is resolved correctly relative to the config file path (`test_path_resolution`, `test_successful_config_loading`).
* Validated that `env_file` path is resolved correctly relative to the config file path (`test_path_resolution`, `test_env_file_in_different_directory_relative`).
* Directly tested the `_resolve_path` static method for various relative and absolute path inputs (`test_direct_resolve_path_static_method`).
4. **`.env` File Loading:**
* Confirmed loading of environment variables from a specified `.env` file (`test_dot_env_file_loading_and_precedence`, `test_successful_config_loading`).
* Verified correct behavior when an empty `.env` file is provided (`test_empty_env_file_specified_and_exists`).
    * Tested that environment variables set directly in the environment take precedence over those in the `.env` file, which assumes `load_dotenv` is called without `override=True` (`test_dot_env_file_loading_and_precedence`).
* Ensured `RuntimeError` (wrapping `FileNotFoundError`) is raised if a specified `env_file` is not found (`test_dot_env_file_specified_but_not_found`).
5. **Error Handling in `Config.load()`:**
* Tested `RuntimeError` (wrapping `FileNotFoundError`) for a missing main config file (`test_error_main_config_file_not_found`).
* Tested `RuntimeError` (wrapping `yaml.YAMLError` or `ValueError`) for invalid YAML structure (`test_error_invalid_yaml_format`, `test_config_file_root_not_dictionary`).
* Tested `RuntimeError` (wrapping `ValidationError`) for missing required fields (`test_error_pydantic_validation_missing_field`) and incorrect data types (`test_error_pydantic_validation_incorrect_type`).
6. **Singleton Behavior (`Config.get_instance()`):**
* Verified that `Config.get_instance()` returns the same instance after `Config.load()` (`test_singleton_behavior_get_instance`).
* Confirmed `RuntimeError` if `Config.get_instance()` is called before `Config.load()` (`test_singleton_get_instance_before_load`).
* Tested that subsequent calls to `Config.load()` (e.g., with a different config file) update the singleton instance, effectively allowing a reload (`test_singleton_reload_configuration_updates_instance`).
7. **Default Values:**
* Ensured that default values specified in Pydantic models (e.g., `LoggingConfig.level`, `LightRAGLLMConfig.kwargs`) are correctly applied when not provided in the configuration file (`test_default_values_applied`, `test_successful_config_loading`).
* Confirmed optional fields (like `api_base`) are `None` if not provided.
8. **Fixture Usage and Test Isolation:**
* Extensively used `pytest` fixtures (`tmp_path`, `monkeypatch`, `caplog`) for managing temporary files, environment variables, and capturing logs.
* Implemented an `autouse` fixture (`reset_config_singleton`) to clear `Config._instance`, `Config._loaded`, and relevant environment variables before each test, ensuring robust test isolation.
The previous tests related to `ConfigService` are implicitly removed as this file, `tests/test_config.py`, is overwritten with this new comprehensive suite. The tests are designed to be thorough, covering various valid inputs, error conditions, and edge cases.
"""