Components:
- Frontend: Vite + React + TypeScript chat interface
- Backend: FastAPI gateway with LangGraph routing
- Knowledge Service: ChromaDB RAG with Gitea scraper
- LangGraph Service: Multi-agent orchestration
- Airflow: Scheduled Gitea ingestion DAG
- Documentation: Complete plan and implementation guides

Architecture:
- Modular Docker Compose per service
- External ai-mesh network for communication
- Fast rebuilds with /app/packages pattern
- Intelligent agent routing (no hardcoded keywords)

Services:
- Frontend (5173): React chat UI
- Chat Gateway (8000): FastAPI entry point
- LangGraph (8090): Agent orchestration
- Knowledge (8080): ChromaDB RAG
- Airflow (8081): Scheduled ingestion
- PostgreSQL (5432): Chat history

Excludes: node_modules, .venv, chroma_db, logs, .env files
Includes: All source code, configs, docs, docker files
80 lines
3.3 KiB
Python
from unittest.mock import AsyncMock, MagicMock, patch

import httpx
import pytest
from fastapi.testclient import TestClient

from main import app
# Shared synchronous test client wrapping the FastAPI gateway app.
client = TestClient(app)
|
|
|
|
@pytest.mark.asyncio
async def test_chat_general_query():
    """A general query (no personal keywords) must skip the Librarian.

    The gateway should only talk to the Brain: one POST to create a session
    and one POST to deliver the message. No call may target the knowledge
    service (http://knowledge-service:8080/query).
    """
    with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock_post:
        # httpx.Response.json() is synchronous, so the mocked responses use
        # MagicMock with a plain .json return value. (An AsyncMock attribute
        # would hand the app an un-awaited coroutine instead of the dict,
        # and would also be inconsistent with the sync session mock below.)
        brain_session = MagicMock(status_code=200)
        brain_session.json.return_value = {"id": "ses_123"}

        brain_msg = MagicMock(status_code=200)
        brain_msg.json.return_value = {
            "info": {"id": "msg_123"},
            "parts": [{"type": "text", "text": "I am a general AI."}],
        }

        # First call is session creation, second is the message itself.
        mock_post.side_effect = [brain_session, brain_msg]

        response = client.post("/chat", json={"message": "What is 2+2?"})

        assert response.status_code == 200
        assert response.json()["response"] == "I am a general AI."

        # Verify the Librarian (knowledge-service) was NOT called.
        # The URL may be passed positionally or as a keyword argument.
        urls = [
            call.args[0] if call.args else str(call.kwargs.get("url", ""))
            for call in mock_post.call_args_list
        ]
        assert not any("knowledge-service" in url for url in urls)
|
|
|
|
@pytest.mark.asyncio
async def test_chat_personal_query_success():
    """A personal query must call the Librarian and inject its context.

    Expected call order in the gateway: Librarian query, Brain session
    creation, Brain message. The Brain's reply should reflect the context
    the Librarian returned.
    """
    with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock_post:
        # httpx.Response.json() is synchronous, so every mocked response is a
        # MagicMock with a plain .json return value — an AsyncMock .json would
        # hand the app a coroutine instead of the parsed dict.
        # 1. Librarian response carrying the retrieved context.
        librarian_res = MagicMock(status_code=200)
        librarian_res.json.return_value = {"context": "Sam likes red guitars."}

        # 2. Brain session-creation response.
        session_res = MagicMock(status_code=200)
        session_res.json.return_value = {"id": "ses_123"}

        # 3. Brain message response (echoes the injected context).
        brain_res = MagicMock(status_code=200)
        brain_res.json.return_value = {
            "parts": [{"type": "text", "text": "I see Sam likes red guitars."}]
        }

        mock_post.side_effect = [librarian_res, session_res, brain_res]

        response = client.post("/chat", json={"message": "Tell me about Sam's music"})

        assert response.status_code == 200
        assert "red guitars" in response.json()["response"]

        # Verify the Librarian (knowledge-service) WAS called.
        # The URL may be passed positionally or as a keyword argument.
        urls = [
            call.args[0] if call.args else str(call.kwargs.get("url", ""))
            for call in mock_post.call_args_list
        ]
        assert any("knowledge-service" in url for url in urls)
|
|
|
|
@pytest.mark.asyncio
async def test_chat_librarian_timeout_failover():
    """A slow Librarian must not block the chat: the gateway fails over.

    The first (Librarian) call raises httpx.TimeoutException; the gateway
    should proceed straight to the Brain and return its direct answer.
    """
    with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock_post:
        # httpx.Response.json() is synchronous, hence MagicMock responses
        # with plain .json return values (consistent with the other tests).
        session_res = MagicMock(status_code=200)
        session_res.json.return_value = {"id": "ses_123"}

        brain_res = MagicMock(status_code=200)
        brain_res.json.return_value = {
            "parts": [{"type": "text", "text": "Direct Brain Response"}]
        }

        mock_post.side_effect = [
            httpx.TimeoutException("Timeout"),  # Librarian call times out
            session_res,                        # Brain session creation
            brain_res,                          # Brain message
        ]

        response = client.post("/chat", json={"message": "Sam's hobbies?"})

        assert response.status_code == 200
        assert response.json()["response"] == "Direct Brain Response"
|