Restructure: Move services from root to unified repo
Moved the updated services from the /home/sam/development/ root into aboutme_chat_demo/: knowledge_service/ (ChromaDB, gitea_scraper, FastAPI), langgraph_service/ (LangGraph agent orchestration), and airflow/ (DAGs for scheduled ingestion). All services now live in a single repository location, and the modular per-service docker-compose files are maintained. Duplicate nested directories were removed; the updated files reflect the latest working versions.
This commit is contained in:
@@ -5,6 +5,7 @@ import httpx
|
||||
import logging
|
||||
import sys
|
||||
import traceback
|
||||
import os
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s", handlers=[logging.StreamHandler(sys.stdout)])
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -15,44 +16,53 @@ app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True,
|
||||
class MessageRequest(BaseModel):
    """Inbound payload for the /chat endpoint: one free-text user message."""

    # Raw user message; forwarded verbatim to the orchestration layer.
    message: str
|
||||
# Downstream service endpoints. All are overridable via environment variables
# (matching the existing LANGGRAPH_URL pattern) so the gateway can be
# re-pointed without a code change; defaults match the docker-compose
# service names, so behavior is unchanged when no env vars are set.
BRAIN_URL = os.getenv("BRAIN_URL", "http://opencode-brain:5000")
KNOWLEDGE_URL = os.getenv("KNOWLEDGE_URL", "http://knowledge-service:8080/query")
LANGGRAPH_URL = os.getenv("LANGGRAPH_URL", "http://langgraph-service:8090")

# SECURITY(review): credentials were previously hardcoded in source. They now
# default to the same values for backward compatibility, but should be
# supplied via BRAIN_USER / BRAIN_PASSWORD in deployment.
AUTH = httpx.BasicAuth(
    os.getenv("BRAIN_USER", "opencode"),
    os.getenv("BRAIN_PASSWORD", "sam4jo"),
)
||||
@app.post("/chat")
async def chat(request: MessageRequest):
    """Route a chat message through the LangGraph supervisor.

    Flow:
      1. If the lowercased message contains a known knowledge-base keyword,
         ask the Librarian (knowledge service) for supporting context
         (best-effort: failures are logged and ignored).
      2. Forward the raw message to LangGraph, which dispatches to the
         Librarian (RAG), Opencode (coding), or Brain (general LLM) agent.

    Returns:
        ``{"response": str}`` in every case; downstream failures are reported
        as error strings rather than raised, so the HTTP status stays 200.
    """
    # NOTE: the original placed its docstring *after* executable statements,
    # so it was a no-op string expression; it is now a real docstring.
    user_msg = request.message.lower()
    timeout_short = httpx.Timeout(5.0, connect=2.0)

    logger.info(f"Gateway: Routing query to LangGraph: {request.message}")

    # Best-effort context lookup via hardcoded keywords.
    # NOTE(review): `context` is fetched but never forwarded to LangGraph
    # below -- confirm whether the supervisor does its own retrieval before
    # deleting this lookup; the call is preserved to keep behavior identical.
    context = ""
    if any(kw in user_msg for kw in ["sam", "hobby", "music", "guitar", "skiing", "experience"]):
        logger.info("Gateway: Consulting Librarian (DB)...")
        async with httpx.AsyncClient(timeout=timeout_short) as client:
            try:
                k_res = await client.post(KNOWLEDGE_URL, json={"question": request.message})
                if k_res.status_code == 200:
                    context = k_res.json().get("context", "")
            except Exception as e:
                # The Librarian is optional; degrade gracefully if it is down.
                logger.warning(f"Gateway: Librarian offline/slow: {str(e)}")

    try:
        async with httpx.AsyncClient(timeout=httpx.Timeout(60.0, connect=10.0)) as client:
            response = await client.post(
                f"{LANGGRAPH_URL}/query",
                json={"query": request.message}
            )

            if response.status_code == 200:
                result = response.json()
                agent_used = result.get("agent_used", "unknown")
                logger.info(f"Gateway: Response from {agent_used} agent")
                return {"response": result["response"]}
            else:
                logger.error(f"Gateway: LangGraph error {response.status_code}")
                return {"response": "Error: Orchestration service unavailable"}

    except Exception:
        # Log the full traceback; never leak internals to the client.
        logger.error(f"Gateway: Error routing through LangGraph: {traceback.format_exc()}")
        return {"response": "Error: Unable to process your request at this time."}
@app.get("/health")
async def health():
    """Liveness probe: report that the gateway process itself is up."""
    payload = {"status": "healthy", "service": "chat-gateway"}
    return payload
||||
@app.get("/agents")
async def list_agents():
    """List available agents from LangGraph."""
    # Fallback payload returned on any upstream failure or non-200 status.
    fallback = {"agents": [], "error": "Could not retrieve agent list"}
    try:
        async with httpx.AsyncClient(timeout=httpx.Timeout(10.0)) as client:
            upstream = await client.get(f"{LANGGRAPH_URL}/agents")
            if upstream.status_code == 200:
                return upstream.json()
    except Exception as e:
        logger.error(f"Error fetching agents: {e}")

    return fallback
||||
async def _forward_to_brain(message: str, context: str = "") -> dict:
    """Legacy direct path: forward a prompt to the Brain (LLM) service.

    NOTE(review): this logic was left dangling at module level after the
    LangGraph refactor -- ``await``/``async with`` outside a coroutine is a
    SyntaxError, and it referenced undefined names (``request``, ``context``,
    ``timeout_long``). It is preserved here as a callable helper; /chat no
    longer invokes it. Confirm whether it should be deleted outright.

    Args:
        message: Raw user message to send to the Brain.
        context: Optional knowledge-base context prepended to the prompt.

    Returns:
        ``{"response": str}`` -- the Brain's text reply, or an error string.
    """
    # Long timeout: LLM generation can take minutes.
    timeout_long = httpx.Timeout(180.0, connect=10.0)
    async with httpx.AsyncClient(auth=AUTH, timeout=timeout_long) as brain_client:
        try:
            # Create a fresh session, then post the message into it.
            session_res = await brain_client.post(f"{BRAIN_URL}/session", json={"title": "Demo"})
            session_id = session_res.json()["id"]
            final_prompt = f"CONTEXT:\n{context}\n\nUSER: {message}" if context else message
            response = await brain_client.post(
                f"{BRAIN_URL}/session/{session_id}/message",
                json={"parts": [{"type": "text", "text": final_prompt}]},
            )

            # The Brain replies with a parts array; return the first text part.
            data = response.json()
            if "parts" in data:
                for part in data["parts"]:
                    if part.get("type") == "text" and "text" in part:
                        return {"response": part["text"]}

            return {"response": "AI responded but no text found in expected format."}
        except Exception:
            logger.error(f"Gateway: Brain failure: {traceback.format_exc()}")
            return {"response": "Error: The Brain is taking too long or is disconnected."}
||||
Reference in New Issue
Block a user