Initial commit: Multi-service AI agent system

- Frontend: Vite + React + TypeScript chat interface
- Backend: FastAPI gateway with LangGraph routing
- Knowledge Service: ChromaDB RAG with Gitea scraper
- LangGraph Service: Multi-agent orchestration
- Airflow: Scheduled Gitea ingestion DAG
- Documentation: Complete plan and implementation guides

Architecture:
- Modular Docker Compose per service
- External ai-mesh network for communication
- Fast rebuilds with /app/packages pattern
- Supervisor-based agent routing (currently keyword-driven in the supervisor node)

Services:
- Frontend (5173): React chat UI
- Chat Gateway (8000): FastAPI entry point
- LangGraph (8090): Agent orchestration
- Knowledge (8080): ChromaDB RAG
- Airflow (8081): Scheduled ingestion
- PostgreSQL (5432): Chat history

Excludes: node_modules, .venv, chroma_db, logs, .env files
Includes: All source code, configs, docs, docker files
This commit is contained in:
2026-02-27 19:51:06 +11:00
commit 628ba96998
44 changed files with 7177 additions and 0 deletions

View File

@@ -0,0 +1,22 @@
FROM python:3.11-slim
# Install system build dependencies (gcc/g++ let pip compile wheels that
# ship no prebuilt binary); remove apt lists to keep the layer small.
RUN apt-get update && apt-get install -y \
gcc \
g++ \
&& rm -rf /var/lib/apt/lists/*
# Create app directory
WORKDIR /app
# Copy requirements first so the pip-install layer is cached across code-only changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy code
COPY . .
# Port must match the uvicorn --port in CMD (and any compose port mapping)
EXPOSE 8090
CMD ["python3", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8090"]

80
langgraph_service/main.py Normal file
View File

@@ -0,0 +1,80 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from supervisor_agent import process_query
import logging
import sys
# Log to stdout so container runtimes (e.g. `docker logs`) capture output.
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
app = FastAPI(title="LangGraph Supervisor Service")
# NOTE(review): wildcard origins combined with allow_credentials is very
# permissive — acceptable on an internal mesh, tighten before public exposure.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
class QueryRequest(BaseModel):
    """Request body for POST /query."""
    # Free-text user query to route through the supervisor graph.
    query: str
class QueryResponse(BaseModel):
    """Response body for POST /query."""
    # Final answer text produced by the selected agent.
    response: str
    # Which specialist handled the query ("librarian"/"opencode"/"brain"/"error").
    agent_used: str
    # Agent-provided metadata (source, retrieved context, error details).
    context: dict
@app.get("/health")
async def health():
    """Liveness probe: report that the supervisor service is up."""
    payload = {"status": "healthy", "service": "langgraph-supervisor"}
    return payload
@app.post("/query", response_model=QueryResponse)
async def query_supervisor(request: QueryRequest):
    """Main entry point for agent orchestration.

    Runs the query through the supervisor graph and wraps either the
    result or any failure in a QueryResponse (never raises to the client).
    """
    logger.info(f"Received query: {request.query}")
    try:
        result = await process_query(request.query)
        return QueryResponse(
            response=result["response"],
            agent_used=result["context"].get("source", "unknown"),
            context=result["context"]
        )
    except Exception as e:
        # logger.exception keeps the traceback; plain logger.error dropped it.
        logger.exception(f"Error processing query: {e}")
        return QueryResponse(
            response="Error processing your request",
            agent_used="error",
            context={"error": str(e)}
        )
@app.get("/agents")
async def list_agents():
    """List available specialist agents."""
    catalog = [
        ("librarian",
         "Queries the knowledge base for semantic information",
         ["repo", "code", "git", "hobby", "about", "skill"]),
        ("opencode",
         "Handles coding tasks and file modifications",
         ["write", "edit", "create", "fix", "implement"]),
        ("brain",
         "General LLM for reasoning and generation",
         ["default", "general questions"]),
    ]
    return {
        "agents": [
            {"name": name, "description": desc, "triggers": triggers}
            for name, desc, triggers in catalog
        ]
    }
if __name__ == "__main__":
    import uvicorn
    # Dev entry point; in Docker the image starts uvicorn via CMD instead.
    uvicorn.run(app, host="0.0.0.0", port=8090)

View File

@@ -0,0 +1,9 @@
# NOTE(review): versions are unpinned — consider pinning for reproducible builds.
# Web framework + ASGI server
fastapi
uvicorn
# Agent orchestration stack
langgraph
langchain
langchain-community
langchain-openai
# HTTP client for service-to-service calls
httpx
pydantic

View File

@@ -0,0 +1,153 @@
from typing import TypedDict, Annotated, Sequence
from langgraph.graph import StateGraph, END
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
import operator
import httpx
import os
import logging
# Module-level logging; container runtimes surface this on stdout/stderr.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# State definition shared by every node in the supervisor graph.
class AgentState(TypedDict):
    """Graph state passed between supervisor and specialist nodes."""
    # operator.add tells LangGraph to APPEND each node's returned messages
    # to the running list instead of overwriting it.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Name of the specialist node the supervisor routed to.
    next_agent: str
    # Metadata accumulated by whichever agent handled the query.
    context: dict
# Agent routing logic
def supervisor_node(state: AgentState):
    """Pick the specialist agent for the latest message (keyword routing).

    Rules are checked in order; the first matching keyword group wins,
    and anything unmatched falls back to the general "brain" agent.
    """
    text = state["messages"][-1].content.lower()
    routing_rules = (
        (("repo", "code", "git", "github", "gitea", "project", "development"), "librarian"),
        (("write", "edit", "create", "fix", "bug", "implement", "code change"), "opencode"),
        (("sam", "hobby", "music", "experience", "skill", "about"), "librarian"),
    )
    for keywords, agent in routing_rules:
        if any(kw in text for kw in keywords):
            return {"next_agent": agent}
    return {"next_agent": "brain"}  # Default to general LLM
def librarian_agent(state: AgentState):
    """Librarian agent - queries the knowledge base (ChromaDB) service.

    Returns a state update containing an AIMessage built from retrieved
    context, or a fallback message (with error detail) when retrieval fails.
    """
    last_message = state["messages"][-1].content
    error_detail = None
    try:
        # Call knowledge service
        response = httpx.post(
            "http://knowledge-service:8080/query",
            json={"question": last_message},
            timeout=10.0
        )
        if response.status_code == 200:
            context = response.json().get("context", "")
            return {
                "messages": [AIMessage(content=f"Based on my knowledge base:\n\n{context}")],
                "context": {"source": "librarian", "context": context}
            }
        # BUG FIX: a non-200 status previously fell through past the try and
        # the function returned None (no state update). Record it as an error
        # so the shared fallback below still produces a valid update.
        error_detail = f"knowledge service returned HTTP {response.status_code}"
        logger.error(f"Librarian error: {error_detail}")
    except Exception as e:
        logger.error(f"Librarian error: {e}")
        error_detail = str(e)
    return {
        "messages": [AIMessage(content="I couldn't find relevant information in the knowledge base.")],
        "context": {"source": "librarian", "error": error_detail}
    }
def opencode_agent(state: AgentState):
    """Opencode agent - handles coding tasks via MCP.

    Placeholder implementation; a real version would integrate with the
    opencode-brain service to perform file edits.
    """
    task = state["messages"][-1].content
    reply = AIMessage(content=f"I'm the coding agent. I would help you with: {task}")
    return {
        "messages": [reply],
        "context": {"source": "opencode", "action": "coding_task"},
    }
def brain_agent(state: AgentState):
    """Brain agent - general LLM fallback via the opencode-brain service.

    Creates a session, sends the user's message, and returns the first
    text part of the reply. Any failure (or a reply with no text part)
    yields the fallback message instead of None.
    """
    last_message = state["messages"][-1].content
    try:
        # NOTE(review): hardcoded password fallback — move to secrets/env only.
        auth = httpx.BasicAuth("opencode", os.getenv("OPENCODE_PASSWORD", "sam4jo"))
        timeout_long = httpx.Timeout(180.0, connect=10.0)
        # BUG FIX: the original used httpx.AsyncClient inside a plain `with`
        # in this synchronous function and never awaited client.post(), so the
        # calls returned coroutines (and the context manager itself fails).
        # httpx.Client is the correct synchronous client here.
        with httpx.Client(auth=auth, timeout=timeout_long) as client:
            # Create session
            session_res = client.post("http://opencode-brain:5000/session", json={"title": "Supervisor Query"})
            session_id = session_res.json()["id"]
            # Send message
            response = client.post(
                f"http://opencode-brain:5000/session/{session_id}/message",
                json={"parts": [{"type": "text", "text": last_message}]}
            )
            data = response.json()
            for part in data.get("parts", []):
                if part.get("type") == "text":
                    return {
                        "messages": [AIMessage(content=part["text"])],
                        "context": {"source": "brain"}
                    }
    except Exception as e:
        logger.error(f"Brain error: {e}")
    # Fallback for both errors and replies with no text part (the original
    # returned None in the latter case, which broke the graph contract).
    return {
        "messages": [AIMessage(content="I'm thinking about this...")],
        "context": {"source": "brain"}
    }
def route_decision(state: AgentState):
    """Return the agent name chosen by the supervisor (used as edge key)."""
    chosen = state["next_agent"]
    return chosen
# Build the graph: the supervisor fans out to exactly one specialist per query.
workflow = StateGraph(AgentState)
# Add nodes (one per agent role)
workflow.add_node("supervisor", supervisor_node)
workflow.add_node("librarian", librarian_agent)
workflow.add_node("opencode", opencode_agent)
workflow.add_node("brain", brain_agent)
# Every query enters through the supervisor
workflow.set_entry_point("supervisor")
# Conditional routing from supervisor: route_decision returns a key that
# maps to the node of the same name below.
workflow.add_conditional_edges(
    "supervisor",
    route_decision,
    {
        "librarian": "librarian",
        "opencode": "opencode",
        "brain": "brain"
    }
)
# All specialist agents end the run (single-hop orchestration, no loops)
workflow.add_edge("librarian", END)
workflow.add_edge("opencode", END)
workflow.add_edge("brain", END)
# Compile the graph into a runnable
supervisor_graph = workflow.compile()
# Main entry point for queries
async def process_query(query: str) -> dict:
    """Run *query* through the supervisor graph.

    Returns a dict with the final agent reply ("response") and any
    metadata the handling agent attached ("context").
    """
    initial_state = {
        "messages": [HumanMessage(content=query)],
        "next_agent": "",
        "context": {},
    }
    final_state = await supervisor_graph.ainvoke(initial_state)
    last_reply = final_state["messages"][-1]
    return {
        "response": last_reply.content,
        "context": final_state.get("context", {}),
    }