Testing code that calls the Synap SDK falls into four patterns. For the first three, pick by how realistic you need the test to be; the fourth, snapshot testing, guards your prompt output against drift.

1. Unit tests with mocks

Use this to test the business logic that happens around Synap calls — prompt assembly, decision logic, response handling. Mock the SDK so the test is fast, deterministic, and needs no network.
# tests/test_chat.py
from datetime import datetime, timezone
from unittest.mock import AsyncMock, MagicMock

import pytest
from maximem_synap.models.context import ContextResponse, Fact, ResponseMetadata

from myapp.chat import handle_turn       # your code under test

pytestmark = pytest.mark.asyncio  # the async tests below need pytest-asyncio (or an equivalent plugin)


def make_fake_context(facts: list[str]) -> ContextResponse:
    """Build a realistic ContextResponse without hitting the network."""
    return ContextResponse(
        facts=[
            Fact(
                id=f"fact_{i}",
                content=content,
                confidence=0.9,
                source="test",
                extracted_at=datetime.now(timezone.utc),
            )
            for i, content in enumerate(facts)
        ],
        metadata=ResponseMetadata(
            correlation_id="test-corr-id",
            ttl_seconds=300,
            source="cloud",
        ),
    )


@pytest.fixture
def fake_sdk():
    sdk = MagicMock()
    sdk.conversation.context.fetch = AsyncMock(return_value=make_fake_context([]))
    sdk.memories.create = AsyncMock(return_value=MagicMock(ingestion_id="ing_test"))
    # If you inject the OpenAI client through the SDK object, stub it too so it is awaitable:
    sdk.openai_client.chat.completions.create = AsyncMock()
    return sdk


async def test_handle_turn_uses_facts_in_prompt(fake_sdk):
    fake_sdk.conversation.context.fetch.return_value = make_fake_context(
        ["User prefers dark mode", "User is on the Pro plan"]
    )

    reply = await handle_turn(
        sdk=fake_sdk,
        user_id="user_test",
        customer_id="cust_test",
        conversation_id="3f6b1a2c-4d5e-6f7a-8b9c-0d1e2f3a4b5c",
        message="What plan am I on?",
    )

    # Assert the system prompt mentioned both retrieved facts
    call_args = fake_sdk.openai_client.chat.completions.create.call_args  # stubbed in the fixture above
    system = call_args.kwargs["messages"][0]["content"]
    assert "dark mode" in system
    assert "Pro plan" in system


async def test_handle_turn_ingests_the_turn(fake_sdk):
    await handle_turn(sdk=fake_sdk, user_id="u", customer_id="c",
                      conversation_id="3f6b1a2c-4d5e-6f7a-8b9c-0d1e2f3a4b5c",
                      message="Hi")

    fake_sdk.memories.create.assert_called_once()
    kwargs = fake_sdk.memories.create.call_args.kwargs
    assert kwargs["user_id"] == "u"
    assert kwargs["customer_id"] == "c"
    assert kwargs["document_type"] == "ai-chat-conversation"
Why use the real Pydantic models when mocking

Constructing a real ContextResponse instead of a MagicMock catches field-name typos at test-write time. If you later upgrade the SDK and a field is removed, the test fails loudly instead of silently passing on a mock that “accepts everything.”
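A quick illustration (a sketch assuming Fact is a Pydantic model with content required, as the constructor above suggests): a typo’d field name fails at construction time, where a MagicMock would absorb it silently.
from pydantic import ValidationError

def test_fact_typo_fails_at_construction():
    # "contnet" is a typo: the real model raises because `content` is missing,
    # while MagicMock(contnet="oops") would happily accept the bad field name.
    with pytest.raises(ValidationError):
        Fact(
            id="fact_0",
            contnet="oops",   # typo for `content`
            confidence=0.9,
            source="test",
            extracted_at=datetime.now(timezone.utc),
        )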

2. FastAPI integration tests with dependency overrides

If you wired the SDK via Depends(get_sdk) (the recommended pattern in Setup → Integration), FastAPI’s app.dependency_overrides swaps it out per test.
from unittest.mock import AsyncMock, MagicMock

from fastapi.testclient import TestClient

from myapp.main import app, get_sdk
from tests.test_chat import make_fake_context   # helper from the unit-test example above

def test_chat_endpoint_with_mock_sdk():
    fake_sdk = MagicMock()
    fake_sdk.conversation.context.fetch = AsyncMock(return_value=make_fake_context([]))
    fake_sdk.memories.create = AsyncMock()

    app.dependency_overrides[get_sdk] = lambda: fake_sdk
    try:
        client = TestClient(app)
        response = client.post("/chat", json={
            "message": "hello",
            "user_id": "u",
            "customer_id": "c",
            "conversation_id": "3f6b1a2c-4d5e-6f7a-8b9c-0d1e2f3a4b5c",
        })
        assert response.status_code == 200
    finally:
        app.dependency_overrides.clear()
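If many tests need this, a small fixture keeps the override and its cleanup in one place (a sketch reusing the fake_sdk fixture from above; client_with_fake_sdk is a name of our choosing):
import pytest

@pytest.fixture
def client_with_fake_sdk(fake_sdk):
    app.dependency_overrides[get_sdk] = lambda: fake_sdk
    with TestClient(app) as client:   # context manager runs startup/shutdown events
        yield client
    app.dependency_overrides.clear()  # always restore the real dependency

Tests then just take client_with_fake_sdk as a parameter and call client.post(...) without any try/finally bookkeeping.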

3. End-to-end against a real test Instance

For the highest-fidelity tests — pre-release smoke tests, contract tests against new SDK versions — run against a dedicated test Instance in Synap Cloud.
import asyncio
import os
import uuid

import pytest
from maximem_synap import MaximemSynapSDK

pytestmark = [pytest.mark.integration, pytest.mark.asyncio]   # opt-in marker keeps the unit suite fast; asyncio runs the async tests


@pytest.fixture(scope="session")
async def real_sdk():
    """One real SDK per test session, using a dedicated test instance."""
    # Note: with pytest-asyncio, a session-scoped async fixture also needs a
    # session-scoped event loop; see the pytest-asyncio docs.
    sdk = MaximemSynapSDK(api_key=os.environ["SYNAP_TEST_API_KEY"])
    await sdk.initialize()
    yield sdk
    await sdk.shutdown()


@pytest.fixture
def test_ids():
    """Per-test isolated user/customer/conversation IDs so tests don't interfere."""
    return {
        "user_id": f"test_user_{uuid.uuid4()}",
        "customer_id": f"test_cust_{uuid.uuid4()}",
        "conversation_id": str(uuid.uuid4()),
    }


async def test_ingest_then_retrieve_roundtrip(real_sdk, test_ids):
    """Smoke test: ingestion → retrieval works end-to-end."""
    await real_sdk.memories.create(
        document="User: I prefer dark mode.\nAssistant: Noted!",
        document_type="ai-chat-conversation",
        **test_ids,
    )

    # Ingestion is asynchronous; give it time to land (see the polling sketch
    # below for a less flaky alternative to a fixed sleep)
    await asyncio.sleep(5)

    ctx = await real_sdk.conversation.context.fetch(
        conversation_id=test_ids["conversation_id"],
        search_query=["dark mode"],
    )

    assert any("dark mode" in p.content.lower() for p in ctx.preferences)
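A fixed sleep either wastes time or flakes when ingestion runs slow. A polling sketch (wait_for_preference is a hypothetical helper of ours; it assumes the fetch call shown above) waits only as long as needed and fails with a clear message:
import asyncio
import time

async def wait_for_preference(sdk, conversation_id: str, needle: str, timeout: float = 30.0):
    """Poll until `needle` shows up in retrieved preferences, or time out."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        ctx = await sdk.conversation.context.fetch(
            conversation_id=conversation_id,
            search_query=[needle],
        )
        if any(needle in p.content.lower() for p in ctx.preferences):
            return ctx
        await asyncio.sleep(1)   # back off briefly between polls
    raise AssertionError(f"{needle!r} never appeared in preferences within {timeout}s")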
Use _force_new=True in tests to bypass the SDK singleton cache

The SDK caches one instance per process by default. In tests where you want a fresh instance per test:
sdk = MaximemSynapSDK(api_key="...", _force_new=True)
Don’t use this in production — it creates multiple instances competing for the same credential files and local cache.

Use unique IDs per test

Synap memories persist. Two tests that both ingest user_id="alice" will see each other’s data, and your assertions will be flaky. Always derive user_id, customer_id, and conversation_id from uuid.uuid4() inside each test.
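One way to keep the integration suite opt-in is a collection hook (a conftest.py sketch; the marker name matches pytestmark above, and the env-var check is an assumption about how you supply credentials):
# conftest.py
import os
import pytest

def pytest_collection_modifyitems(config, items):
    if os.environ.get("SYNAP_TEST_API_KEY"):
        return   # credentials present: run the integration tests too
    skip = pytest.mark.skip(reason="SYNAP_TEST_API_KEY not set")
    for item in items:
        if "integration" in item.keywords:
            item.add_marker(skip)

Register the integration marker in your pytest config as well, so pytest doesn’t warn about an unknown marker.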

4. Snapshot testing of prompts

If your application generates LLM system prompts that incorporate Synap context, snapshot-test the rendered prompt to catch unintended drift.
# syrupy is a pytest plugin: once installed it provides the `snapshot` fixture
# automatically, so no import is needed.
from myapp.chat import build_prompt   # your prompt builder (module path assumed)

async def test_prompt_renders_consistently(fake_sdk, snapshot):
    fake_sdk.conversation.context.fetch.return_value = make_fake_context([
        "User prefers dark mode",
        "User is on Pro plan",
    ])
    prompt = await build_prompt(fake_sdk, user_id="u", customer_id="c",
                                conversation_id="3f6b1a2c-4d5e-6f7a-8b9c-0d1e2f3a4b5c",
                                message="What plan am I on?")
    assert prompt == snapshot
Re-snapshot intentionally when you change prompt format; fail loudly when you change it by accident.
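With syrupy, an intentional re-snapshot is a rerun with its update flag, pytest --snapshot-update; commit the regenerated snapshot files and review them like any other diff.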

What to skip in tests

  • Don’t mock at the HTTP transport layer. Mock the SDK methods (memories.create, context.fetch) — those are your seams. Mocking the transport couples your tests to internal SDK structure that will change.
  • Don’t snapshot ContextResponse objects directly. They include timestamps and correlation IDs that change every run. Snapshot the prompt string you assemble from them.
  • Don’t share user_ids across tests. Synap memories are real and persist; cross-test pollution will bite.