Add plannotator extension v0.19.10
This commit is contained in:
133
extensions/plannotator/generated/agent-jobs.ts
Normal file
133
extensions/plannotator/generated/agent-jobs.ts
Normal file
@@ -0,0 +1,133 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/agent-jobs.ts
|
||||
/**
|
||||
* Agent Jobs — shared types, state machine, and SSE helpers.
|
||||
*
|
||||
* Runtime-agnostic: no node:fs, no node:http, no Bun APIs.
|
||||
* Both the Bun server handler and (future) Node handler import
|
||||
* this module and wrap it with their respective HTTP transport layers.
|
||||
*
|
||||
* Mirrors packages/shared/external-annotation.ts in structure.
|
||||
*/
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export type AgentJobStatus = "starting" | "running" | "done" | "failed" | "killed";
|
||||
|
||||
/**
|
||||
* Snapshot of the diff the reviewer was looking at when this job was launched.
|
||||
* Carried on the job so downstream UIs (agent-result panel "Copy All") export
|
||||
* the same `**Diff:** ...` header the job was actually run against — if the
|
||||
* reviewer switches the UI to a different diff afterwards, the job's snapshot
|
||||
* still reflects truth. Structurally compatible with the UI-side
|
||||
* `FeedbackDiffContext` in `packages/review-editor/utils/exportFeedback.ts`.
|
||||
*/
|
||||
export interface AgentJobDiffContext {
|
||||
mode: string;
|
||||
base?: string;
|
||||
worktreePath?: string | null;
|
||||
}
|
||||
|
||||
export interface AgentJobInfo {
|
||||
/** Unique job identifier (UUID). */
|
||||
id: string;
|
||||
/** Source identifier for external annotations — "agent-{id prefix}". */
|
||||
source: string;
|
||||
/** Provider that spawned this job — "claude", "codex", "tour", "shell", etc. */
|
||||
provider: string;
|
||||
/** Underlying engine used (e.g., "claude" or "codex"). Set when provider is "tour". */
|
||||
engine?: string;
|
||||
/** Model used (e.g., "sonnet", "opus"). Set when provider is "tour" with Claude engine. */
|
||||
model?: string;
|
||||
/** Claude --effort level (e.g., "low", "medium", "high", "xhigh", "max"). */
|
||||
effort?: string;
|
||||
/** Codex reasoning effort level (e.g., "high", "medium"). */
|
||||
reasoningEffort?: string;
|
||||
/** Whether Codex fast mode (service_tier=fast) was enabled. */
|
||||
fastMode?: boolean;
|
||||
/** Human-readable label for the job. */
|
||||
label: string;
|
||||
/** Current lifecycle status. */
|
||||
status: AgentJobStatus;
|
||||
/** Timestamp when the job was created. */
|
||||
startedAt: number;
|
||||
/** Timestamp when the job reached a terminal state. */
|
||||
endedAt?: number;
|
||||
/** Process exit code (set on done/failed). */
|
||||
exitCode?: number;
|
||||
/** Last ~500 chars of stderr on failure. */
|
||||
error?: string;
|
||||
/** The actual command that was spawned (for display/debug). */
|
||||
command: string[];
|
||||
/** Working directory where the process was spawned. */
|
||||
cwd?: string;
|
||||
/** The review prompt text (system + user message). Stored separately from command for providers that use stdin. */
|
||||
prompt?: string;
|
||||
/** Review summary set by the agent on completion. */
|
||||
summary?: {
|
||||
correctness: string;
|
||||
explanation: string;
|
||||
confidence: number;
|
||||
};
|
||||
/** PR URL at launch time — used to attribute findings to the correct PR. */
|
||||
prUrl?: string;
|
||||
/** PR diff scope at launch time — "layer" or "full-stack". */
|
||||
diffScope?: string;
|
||||
/** Diff context at launch time (see AgentJobDiffContext). */
|
||||
diffContext?: AgentJobDiffContext;
|
||||
}
|
||||
|
||||
export interface AgentCapability {
|
||||
id: string;
|
||||
name: string;
|
||||
available: boolean;
|
||||
}
|
||||
|
||||
export interface AgentCapabilities {
|
||||
mode: "plan" | "review" | "annotate";
|
||||
providers: AgentCapability[];
|
||||
/** True if at least one provider is available. */
|
||||
available: boolean;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SSE event types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export type AgentJobEvent =
|
||||
| { type: "snapshot"; jobs: AgentJobInfo[] }
|
||||
| { type: "job:started"; job: AgentJobInfo }
|
||||
| { type: "job:updated"; job: AgentJobInfo }
|
||||
| { type: "job:completed"; job: AgentJobInfo }
|
||||
| { type: "job:log"; jobId: string; delta: string }
|
||||
| { type: "jobs:cleared" };
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SSE helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Heartbeat comment to keep SSE connections alive (sent every 30s). */
|
||||
export const AGENT_HEARTBEAT_COMMENT = ":\n\n";
|
||||
|
||||
/** Interval in ms between heartbeat comments. */
|
||||
export const AGENT_HEARTBEAT_INTERVAL_MS = 30_000;
|
||||
|
||||
/** Encode an event as an SSE `data:` line. */
|
||||
export function serializeAgentSSEEvent(event: AgentJobEvent): string {
|
||||
return `data: ${JSON.stringify(event)}\n\n`;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Check if a status is terminal (no further transitions). */
|
||||
export function isTerminalStatus(status: AgentJobStatus): boolean {
|
||||
return status === "done" || status === "failed" || status === "killed";
|
||||
}
|
||||
|
||||
/** Generate the source identifier for a job from its ID. */
|
||||
export function jobSource(id: string): string {
|
||||
return "agent-" + id.slice(0, 8);
|
||||
}
|
||||
95
extensions/plannotator/generated/ai/base-session.ts
Normal file
95
extensions/plannotator/generated/ai/base-session.ts
Normal file
@@ -0,0 +1,95 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/base-session.ts
|
||||
/**
|
||||
* Shared session base class — extracts the common lifecycle, abort, and
|
||||
* ID-resolution logic that every AIProvider session needs.
|
||||
*
|
||||
* Concrete providers extend this and implement query().
|
||||
*/
|
||||
|
||||
import type { AIMessage, AISession } from "./types.ts";
|
||||
|
||||
export abstract class BaseSession implements AISession {
|
||||
readonly parentSessionId: string | null;
|
||||
onIdResolved?: (oldId: string, newId: string) => void;
|
||||
|
||||
protected _placeholderId: string;
|
||||
protected _resolvedId: string | null = null;
|
||||
protected _isActive = false;
|
||||
protected _currentAbort: AbortController | null = null;
|
||||
protected _queryGen = 0;
|
||||
protected _firstQuerySent = false;
|
||||
|
||||
constructor(opts: { parentSessionId: string | null; initialId?: string }) {
|
||||
this.parentSessionId = opts.parentSessionId;
|
||||
this._placeholderId = opts.initialId ?? crypto.randomUUID();
|
||||
}
|
||||
|
||||
get id(): string {
|
||||
return this._resolvedId ?? this._placeholderId;
|
||||
}
|
||||
|
||||
get isActive(): boolean {
|
||||
return this._isActive;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Query lifecycle helpers — call from concrete query() implementations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Error message returned when a query is already active. */
|
||||
static readonly BUSY_ERROR: AIMessage = {
|
||||
type: "error",
|
||||
error:
|
||||
"A query is already in progress. Abort the current query before sending a new one.",
|
||||
code: "session_busy",
|
||||
};
|
||||
|
||||
/**
|
||||
* Call at the start of query(). Returns the generation number and abort
|
||||
* signal, or null if the session is busy.
|
||||
*/
|
||||
protected startQuery(): { gen: number; signal: AbortSignal } | null {
|
||||
if (this._isActive) return null;
|
||||
|
||||
const gen = ++this._queryGen;
|
||||
this._isActive = true;
|
||||
this._currentAbort = new AbortController();
|
||||
return { gen, signal: this._currentAbort.signal };
|
||||
}
|
||||
|
||||
/**
|
||||
* Call in the finally block of query(). Only clears state if the
|
||||
* generation matches (prevents a stale finally from clobbering a newer query).
|
||||
*/
|
||||
protected endQuery(gen: number): void {
|
||||
if (this._queryGen === gen) {
|
||||
this._isActive = false;
|
||||
this._currentAbort = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Call when the provider resolves the real session ID from the backend.
|
||||
* Fires the onIdResolved callback so the SessionManager can remap its key.
|
||||
*/
|
||||
protected resolveId(newId: string): void {
|
||||
if (this._resolvedId) return; // Already resolved
|
||||
const oldId = this._placeholderId;
|
||||
this._resolvedId = newId;
|
||||
this.onIdResolved?.(oldId, newId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Abort the current in-flight query. Subclasses should call super.abort()
|
||||
* after any provider-specific cleanup.
|
||||
*/
|
||||
abort(): void {
|
||||
if (this._currentAbort) {
|
||||
this._currentAbort.abort();
|
||||
this._isActive = false;
|
||||
this._currentAbort = null;
|
||||
}
|
||||
}
|
||||
|
||||
abstract query(prompt: string): AsyncIterable<AIMessage>;
|
||||
}
|
||||
212
extensions/plannotator/generated/ai/context.ts
Normal file
212
extensions/plannotator/generated/ai/context.ts
Normal file
@@ -0,0 +1,212 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/context.ts
|
||||
/**
|
||||
* Context builders — translate Plannotator review state into system prompts
|
||||
* that give the AI session the right background for answering questions.
|
||||
*
|
||||
* These are provider-agnostic: any AIProvider implementation can use them
|
||||
* to build the system prompt it needs.
|
||||
*/
|
||||
|
||||
import type { AIContext } from "./types.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public API
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Build a system prompt from the given context.
|
||||
*
|
||||
* The prompt tells the AI:
|
||||
* - What role it plays (plan reviewer, code reviewer, etc.)
|
||||
* - The content it should reference (plan markdown, diff patch, file)
|
||||
* - Any annotations the user has already made
|
||||
* - That it's operating inside Plannotator (not a general coding session)
|
||||
*/
|
||||
export function buildSystemPrompt(ctx: AIContext): string {
|
||||
switch (ctx.mode) {
|
||||
case "plan-review":
|
||||
return buildPlanReviewPrompt(ctx);
|
||||
case "code-review":
|
||||
return buildCodeReviewPrompt(ctx);
|
||||
case "annotate":
|
||||
return buildAnnotatePrompt(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a compact context summary suitable for injecting into a fork prompt.
|
||||
*
|
||||
* When forking from a parent session, we don't need a full system prompt
|
||||
* (the parent's history already provides context). Instead, we inject a
|
||||
* short "you are now in Plannotator" preamble with the relevant content.
|
||||
*/
|
||||
export function buildForkPreamble(ctx: AIContext): string {
|
||||
const lines: string[] = [
|
||||
"The user is now reviewing your work in Plannotator and has a question.",
|
||||
"Answer concisely based on the conversation history and the context below.",
|
||||
"",
|
||||
];
|
||||
|
||||
switch (ctx.mode) {
|
||||
case "plan-review": {
|
||||
lines.push("## Current Plan Under Review");
|
||||
lines.push("");
|
||||
lines.push(truncate(ctx.plan.plan, MAX_PLAN_CHARS));
|
||||
if (ctx.plan.annotations) {
|
||||
lines.push("");
|
||||
lines.push("## User Annotations So Far");
|
||||
lines.push(ctx.plan.annotations);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "code-review": {
|
||||
if (ctx.review.filePath) {
|
||||
lines.push(`## Reviewing: ${ctx.review.filePath}`);
|
||||
}
|
||||
if (ctx.review.selectedCode) {
|
||||
lines.push("");
|
||||
lines.push("### Selected Code");
|
||||
lines.push("```");
|
||||
lines.push(ctx.review.selectedCode);
|
||||
lines.push("```");
|
||||
}
|
||||
if (ctx.review.lineRange) {
|
||||
const { start, end, side } = ctx.review.lineRange;
|
||||
lines.push(`Lines ${start}-${end} (${side} side)`);
|
||||
}
|
||||
lines.push("");
|
||||
lines.push("## Diff Patch");
|
||||
lines.push("```diff");
|
||||
lines.push(truncate(ctx.review.patch, MAX_DIFF_CHARS));
|
||||
lines.push("```");
|
||||
if (ctx.review.annotations) {
|
||||
lines.push("");
|
||||
lines.push("## User Annotations So Far");
|
||||
lines.push(ctx.review.annotations);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "annotate": {
|
||||
lines.push(`## Annotating: ${ctx.annotate.filePath}`);
|
||||
lines.push("");
|
||||
lines.push(truncate(ctx.annotate.content, MAX_PLAN_CHARS));
|
||||
if (ctx.annotate.annotations) {
|
||||
lines.push("");
|
||||
lines.push("## User Annotations So Far");
|
||||
lines.push(ctx.annotate.annotations);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return lines.join("\n");
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the effective prompt for a query, prepending a preamble on the first
|
||||
* message. Used by providers that inject context via the prompt itself (Codex,
|
||||
* Pi) rather than a separate system-prompt channel (Claude).
|
||||
*/
|
||||
export function buildEffectivePrompt(
|
||||
userPrompt: string,
|
||||
preamble: string | null,
|
||||
firstQuerySent: boolean,
|
||||
): string {
|
||||
if (!firstQuerySent && preamble) {
|
||||
return `${preamble}\n\n---\n\nUser question: ${userPrompt}`;
|
||||
}
|
||||
return userPrompt;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Internals
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const MAX_PLAN_CHARS = 60_000;
|
||||
const MAX_DIFF_CHARS = 40_000;
|
||||
|
||||
function truncate(text: string, max: number): string {
|
||||
if (text.length <= max) return text;
|
||||
return `${text.slice(0, max)}\n\n... [truncated for context window]`;
|
||||
}
|
||||
|
||||
function buildPlanReviewPrompt(
|
||||
ctx: Extract<AIContext, { mode: "plan-review" }>
|
||||
): string {
|
||||
const sections: string[] = [
|
||||
"The user is reviewing an implementation plan in Plannotator.",
|
||||
"",
|
||||
"## Plan Under Review",
|
||||
"",
|
||||
truncate(ctx.plan.plan, MAX_PLAN_CHARS),
|
||||
];
|
||||
|
||||
if (ctx.plan.previousPlan) {
|
||||
sections.push("");
|
||||
sections.push("## Previous Plan Version (for reference)");
|
||||
sections.push(truncate(ctx.plan.previousPlan, MAX_PLAN_CHARS / 2));
|
||||
}
|
||||
|
||||
if (ctx.plan.annotations) {
|
||||
sections.push("");
|
||||
sections.push("## User Annotations");
|
||||
sections.push(ctx.plan.annotations);
|
||||
}
|
||||
|
||||
return sections.join("\n");
|
||||
}
|
||||
|
||||
function buildCodeReviewPrompt(
|
||||
ctx: Extract<AIContext, { mode: "code-review" }>
|
||||
): string {
|
||||
const sections: string[] = [
|
||||
"The user is reviewing a code diff in Plannotator.",
|
||||
];
|
||||
|
||||
if (ctx.review.filePath) {
|
||||
sections.push("");
|
||||
sections.push(`## Currently Viewing: ${ctx.review.filePath}`);
|
||||
}
|
||||
|
||||
if (ctx.review.selectedCode) {
|
||||
sections.push("");
|
||||
sections.push("## Selected Code");
|
||||
sections.push("```");
|
||||
sections.push(ctx.review.selectedCode);
|
||||
sections.push("```");
|
||||
}
|
||||
|
||||
sections.push("");
|
||||
sections.push("## Diff");
|
||||
sections.push("```diff");
|
||||
sections.push(truncate(ctx.review.patch, MAX_DIFF_CHARS));
|
||||
sections.push("```");
|
||||
|
||||
if (ctx.review.annotations) {
|
||||
sections.push("");
|
||||
sections.push("## User Annotations");
|
||||
sections.push(ctx.review.annotations);
|
||||
}
|
||||
|
||||
return sections.join("\n");
|
||||
}
|
||||
|
||||
function buildAnnotatePrompt(
|
||||
ctx: Extract<AIContext, { mode: "annotate" }>
|
||||
): string {
|
||||
const sections: string[] = [
|
||||
"The user is annotating a markdown document in Plannotator.",
|
||||
"",
|
||||
`## Document: ${ctx.annotate.filePath}`,
|
||||
"",
|
||||
truncate(ctx.annotate.content, MAX_PLAN_CHARS),
|
||||
];
|
||||
|
||||
if (ctx.annotate.annotations) {
|
||||
sections.push("");
|
||||
sections.push("## User Annotations");
|
||||
sections.push(ctx.annotate.annotations);
|
||||
}
|
||||
|
||||
return sections.join("\n");
|
||||
}
|
||||
309
extensions/plannotator/generated/ai/endpoints.ts
Normal file
309
extensions/plannotator/generated/ai/endpoints.ts
Normal file
@@ -0,0 +1,309 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/endpoints.ts
|
||||
/**
|
||||
* HTTP endpoint handlers for AI features.
|
||||
*
|
||||
* These handlers are provider-agnostic — they work with whatever AIProvider
|
||||
* is registered in the provided ProviderRegistry. They're designed to be
|
||||
* mounted into any Plannotator server (plan review, code review, annotate).
|
||||
*
|
||||
* Endpoints:
|
||||
* POST /api/ai/session — Create or fork an AI session
|
||||
* POST /api/ai/query — Send a message and stream the response
|
||||
* POST /api/ai/abort — Abort the current query
|
||||
* GET /api/ai/sessions — List active sessions
|
||||
* GET /api/ai/capabilities — Check if AI features are available
|
||||
*/
|
||||
|
||||
import type { AIContext, AIMessage, CreateSessionOptions } from "./types.ts";
|
||||
import type { ProviderRegistry } from "./provider.ts";
|
||||
import type { SessionManager } from "./session-manager.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types for request/response
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export interface CreateSessionRequest {
|
||||
/** The context mode and content for the session. */
|
||||
context: AIContext;
|
||||
/** Instance ID of the provider to use (optional — uses default if omitted). */
|
||||
providerId?: string;
|
||||
/** Optional model override. */
|
||||
model?: string;
|
||||
/** Max agentic turns. */
|
||||
maxTurns?: number;
|
||||
/** Max budget in USD. */
|
||||
maxBudgetUsd?: number;
|
||||
/** Reasoning effort (Codex only). */
|
||||
reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
|
||||
}
|
||||
|
||||
export interface QueryRequest {
|
||||
/** The session ID to query. */
|
||||
sessionId: string;
|
||||
/** The user's prompt/question. */
|
||||
prompt: string;
|
||||
/** Optional context update (e.g., new annotations since session was created). */
|
||||
contextUpdate?: string;
|
||||
}
|
||||
|
||||
export interface AbortRequest {
|
||||
/** The session ID to abort. */
|
||||
sessionId: string;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Handler factory
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export interface AIEndpointDeps {
|
||||
/** Provider registry (one per server or shared). */
|
||||
registry: ProviderRegistry;
|
||||
/** Session manager instance (one per server). */
|
||||
sessionManager: SessionManager;
|
||||
/** Resolve the current working directory for new AI sessions. */
|
||||
getCwd?: () => string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create the route handler map for AI endpoints.
|
||||
*
|
||||
* Usage in a Bun server:
|
||||
* ```ts
|
||||
* const aiHandlers = createAIEndpoints({ registry, sessionManager });
|
||||
*
|
||||
* // In your request handler:
|
||||
* if (url.pathname.startsWith('/api/ai/')) {
|
||||
* const handler = aiHandlers[url.pathname];
|
||||
* if (handler) return handler(req);
|
||||
* }
|
||||
* ```
|
||||
*/
|
||||
export function createAIEndpoints(deps: AIEndpointDeps) {
  const { registry, sessionManager, getCwd } = deps;

  // NOTE(review): each handler calls req.json() without a try/catch, so a
  // malformed body rejects the handler promise — presumably the transport
  // layer maps that to a 500; confirm against the mounting server.
  return {
    // GET — report every registered provider (with its capabilities and
    // model list) plus which instance is the default.
    "/api/ai/capabilities": async (_req: Request) => {
      const defaultEntry = registry.getDefault();
      const providerDetails = registry.list().map(id => {
        const p = registry.get(id)!;
        return {
          id,
          name: p.name,
          capabilities: p.capabilities,
          models: p.models ?? [],
        };
      });
      return Response.json({
        available: !!defaultEntry,
        providers: providerDetails,
        defaultProvider: defaultEntry?.id ?? null,
      });
    },

    // POST — create (or fork) a session and register it with the manager.
    "/api/ai/session": async (req: Request) => {
      if (req.method !== "POST") {
        return new Response("Method not allowed", { status: 405 });
      }

      const body = (await req.json()) as CreateSessionRequest;
      const { context, providerId, model, maxTurns, maxBudgetUsd, reasoningEffort } = body;

      if (!context?.mode) {
        return Response.json(
          { error: "Missing context.mode" },
          { status: 400 }
        );
      }

      // Resolve provider: by ID, or default
      const provider = providerId
        ? registry.get(providerId)
        : registry.getDefault()?.provider;

      if (!provider) {
        // 503: the feature exists but no backing provider is available.
        return Response.json(
          { error: providerId ? `Provider "${providerId}" not found` : "No AI provider available" },
          { status: 503 }
        );
      }

      try {
        const options: CreateSessionOptions = {
          context,
          cwd: getCwd?.(),
          model,
          maxTurns,
          maxBudgetUsd,
          reasoningEffort,
        };

        // Fork if parent session is provided AND provider supports it.
        // Providers that can't fork (e.g. Codex) fall back to a fresh
        // session with the full system prompt — no fake history.
        const shouldFork = context.parent && provider.capabilities.fork;
        const session = shouldFork
          ? await provider.forkSession(options)
          : await provider.createSession(options);

        const entry = sessionManager.track(session, context.mode);

        return Response.json({
          sessionId: session.id,
          parentSessionId: session.parentSessionId,
          mode: context.mode,
          createdAt: entry.createdAt,
        });
      } catch (err) {
        return Response.json(
          {
            error:
              err instanceof Error ? err.message : "Failed to create session",
          },
          { status: 500 }
        );
      }
    },

    // POST — send a prompt to a tracked session and stream the provider's
    // messages back as SSE. Errors during streaming are sent as an in-band
    // "error" message; the stream always terminates with "[DONE]".
    "/api/ai/query": async (req: Request) => {
      if (req.method !== "POST") {
        return new Response("Method not allowed", { status: 405 });
      }

      const body = (await req.json()) as QueryRequest;
      const { sessionId, prompt, contextUpdate } = body;

      if (!sessionId || !prompt) {
        return Response.json(
          { error: "Missing sessionId or prompt" },
          { status: 400 }
        );
      }

      const entry = sessionManager.get(sessionId);
      if (!entry) {
        return Response.json(
          { error: "Session not found" },
          { status: 404 }
        );
      }

      sessionManager.touch(sessionId);

      // If context update provided, prepend it to the prompt
      const effectivePrompt = contextUpdate
        ? `[Context update: the user has made changes since this conversation started]\n${contextUpdate}\n\n${prompt}`
        : prompt;

      // Set label from first query if not already set
      if (!entry.label) {
        entry.label = prompt.slice(0, 80);
      }

      // Stream the response using Server-Sent Events (SSE)
      const encoder = new TextEncoder();
      const stream = new ReadableStream({
        async start(controller) {
          try {
            for await (const message of entry.session.query(effectivePrompt)) {
              const data = JSON.stringify(message);
              controller.enqueue(
                encoder.encode(`data: ${data}\n\n`)
              );
            }
            controller.enqueue(encoder.encode("data: [DONE]\n\n"));
          } catch (err) {
            // Surface stream failures to the client as a structured message
            // rather than tearing down the HTTP response mid-flight.
            const errorMsg: AIMessage = {
              type: "error",
              error: err instanceof Error ? err.message : String(err),
              code: "stream_error",
            };
            controller.enqueue(
              encoder.encode(`data: ${JSON.stringify(errorMsg)}\n\n`)
            );
          } finally {
            controller.close();
          }
        },
      });

      return new Response(stream, {
        headers: {
          "Content-Type": "text/event-stream",
          "Cache-Control": "no-cache",
          Connection: "keep-alive",
        },
      });
    },

    // POST — abort the in-flight query on a tracked session.
    "/api/ai/abort": async (req: Request) => {
      if (req.method !== "POST") {
        return new Response("Method not allowed", { status: 405 });
      }

      const body = (await req.json()) as AbortRequest;
      const entry = sessionManager.get(body.sessionId);
      if (!entry) {
        return Response.json(
          { error: "Session not found" },
          { status: 404 }
        );
      }

      entry.session.abort();
      return Response.json({ ok: true });
    },

    // POST — answer a pending permission request raised by a session's tool
    // use. respondToPermission is optional on the session; a provider that
    // never asks for permission simply has no-op behavior here.
    "/api/ai/permission": async (req: Request) => {
      if (req.method !== "POST") {
        return new Response("Method not allowed", { status: 405 });
      }

      const body = (await req.json()) as {
        sessionId: string;
        requestId: string;
        allow: boolean;
        message?: string;
      };

      if (!body.sessionId || !body.requestId) {
        return Response.json(
          { error: "Missing sessionId or requestId" },
          { status: 400 }
        );
      }

      const entry = sessionManager.get(body.sessionId);
      if (!entry) {
        return Response.json(
          { error: "Session not found" },
          { status: 404 }
        );
      }

      entry.session.respondToPermission?.(
        body.requestId,
        body.allow,
        body.message
      );

      return Response.json({ ok: true });
    },

    // GET — list every tracked session with its activity metadata.
    "/api/ai/sessions": async (_req: Request) => {
      const entries = sessionManager.list();
      return Response.json(
        entries.map((e) => ({
          sessionId: e.session.id,
          mode: e.mode,
          parentSessionId: e.parentSessionId,
          createdAt: e.createdAt,
          lastActiveAt: e.lastActiveAt,
          isActive: e.session.isActive,
          label: e.label,
        }))
      );
    },
  } as const;
}
|
||||
|
||||
export type AIEndpoints = ReturnType<typeof createAIEndpoints>;
|
||||
106
extensions/plannotator/generated/ai/index.ts
Normal file
106
extensions/plannotator/generated/ai/index.ts
Normal file
@@ -0,0 +1,106 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/index.ts
|
||||
/**
|
||||
* @plannotator/ai — AI provider layer for Plannotator.
|
||||
*
|
||||
* This package provides the backbone for AI-powered features (inline chat,
|
||||
* plan Q&A, code review assistance) across all Plannotator surfaces.
|
||||
*
|
||||
* Architecture:
|
||||
*
|
||||
* ┌─────────────────┐ ┌──────────────┐
|
||||
* │ Plan Review UI │────▶│ │
|
||||
* ├─────────────────┤ │ AI Endpoints │──▶ SSE stream
|
||||
* │ Code Review UI │────▶│ (HTTP) │
|
||||
* ├─────────────────┤ │ │
|
||||
* │ Annotate UI │────▶└──────┬───────┘
|
||||
* └─────────────────┘ │
|
||||
* ▼
|
||||
* ┌────────────────┐
|
||||
* │ Session Manager │
|
||||
* └────────┬───────┘
|
||||
* │
|
||||
* ┌────────▼───────┐
|
||||
* │ AIProvider │ (abstract)
|
||||
* └────────┬───────┘
|
||||
* │
|
||||
* ┌─────────────┼──────────────┐
|
||||
* ▼ ▼ ▼
|
||||
* ┌──────────────┐ ┌──────────┐ ┌───────────┐
|
||||
* │ Claude Agent │ │ OpenCode │ │ Future │
|
||||
* │ SDK Provider │ │ Provider │ │ Providers │
|
||||
* └──────────────┘ └──────────┘ └───────────┘
|
||||
*
|
||||
* Quick start:
|
||||
*
|
||||
* ```ts
|
||||
* import "@plannotator/ai/providers/claude-agent-sdk";
|
||||
* import { ProviderRegistry, createProvider, createAIEndpoints, SessionManager } from "@plannotator/ai";
|
||||
*
|
||||
* // 1. Create a registry and provider
|
||||
* const registry = new ProviderRegistry();
|
||||
* const provider = await createProvider({ type: "claude-agent-sdk", cwd: process.cwd() });
|
||||
* registry.register(provider);
|
||||
*
|
||||
* // 2. Create endpoints and session manager
|
||||
* const sessionManager = new SessionManager();
|
||||
* const aiEndpoints = createAIEndpoints({ registry, sessionManager });
|
||||
*
|
||||
* // 3. Mount endpoints in your Bun server
|
||||
* // aiEndpoints["/api/ai/query"](request) → SSE Response
|
||||
* ```
|
||||
*/
|
||||
|
||||
// Types
|
||||
export type {
|
||||
AIProvider,
|
||||
AIProviderCapabilities,
|
||||
AIProviderConfig,
|
||||
AISession,
|
||||
AIMessage,
|
||||
AITextMessage,
|
||||
AITextDeltaMessage,
|
||||
AIToolUseMessage,
|
||||
AIToolResultMessage,
|
||||
AIErrorMessage,
|
||||
AIResultMessage,
|
||||
AIPermissionRequestMessage,
|
||||
AIUnknownMessage,
|
||||
AIContext,
|
||||
AIContextMode,
|
||||
PlanContext,
|
||||
CodeReviewContext,
|
||||
AnnotateContext,
|
||||
ParentSession,
|
||||
CreateSessionOptions,
|
||||
ClaudeAgentSDKConfig,
|
||||
CodexSDKConfig,
|
||||
PiSDKConfig,
|
||||
OpenCodeConfig,
|
||||
} from "./types.ts";
|
||||
|
||||
// Provider registry
|
||||
export {
|
||||
ProviderRegistry,
|
||||
registerProviderFactory,
|
||||
createProvider,
|
||||
} from "./provider.ts";
|
||||
|
||||
// Context builders
|
||||
export { buildSystemPrompt, buildForkPreamble, buildEffectivePrompt } from "./context.ts";
|
||||
|
||||
// Base session
|
||||
export { BaseSession } from "./base-session.ts";
|
||||
|
||||
// Session manager
|
||||
export { SessionManager } from "./session-manager.ts";
|
||||
export type { SessionEntry, SessionManagerOptions } from "./session-manager.ts";
|
||||
|
||||
// HTTP endpoints
|
||||
export { createAIEndpoints } from "./endpoints.ts";
|
||||
export type {
|
||||
AIEndpoints,
|
||||
AIEndpointDeps,
|
||||
CreateSessionRequest,
|
||||
QueryRequest,
|
||||
AbortRequest,
|
||||
} from "./endpoints.ts";
|
||||
104
extensions/plannotator/generated/ai/provider.ts
Normal file
104
extensions/plannotator/generated/ai/provider.ts
Normal file
@@ -0,0 +1,104 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/provider.ts
|
||||
/**
|
||||
* Provider registry — manages AI provider instances.
|
||||
*
|
||||
* Supports multiple instances of the same provider type (e.g., two Claude
|
||||
* Agent SDK providers with different configs) keyed by instance ID.
|
||||
*
|
||||
* Each server (plan review, code review, annotate) should create its own
|
||||
* ProviderRegistry or share one — no module-level global state.
|
||||
*/
|
||||
|
||||
import type { AIProvider, AIProviderConfig } from "./types.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Factory registry (global — factories are stateless type→constructor maps)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type ProviderFactory = (config: AIProviderConfig) => Promise<AIProvider>;
|
||||
const factories = new Map<string, ProviderFactory>();
|
||||
|
||||
/** Register a factory function for a provider type. */
|
||||
export function registerProviderFactory(
|
||||
type: string,
|
||||
factory: ProviderFactory
|
||||
): void {
|
||||
factories.set(type, factory);
|
||||
}
|
||||
|
||||
/** Create a provider from config using a registered factory. Does NOT auto-register. */
|
||||
export async function createProvider(
|
||||
config: AIProviderConfig
|
||||
): Promise<AIProvider> {
|
||||
const factory = factories.get(config.type);
|
||||
if (!factory) {
|
||||
throw new Error(
|
||||
`No AI provider factory registered for type "${config.type}". ` +
|
||||
`Available: ${[...factories.keys()].join(", ") || "(none)"}`
|
||||
);
|
||||
}
|
||||
return factory(config);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Registry
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export class ProviderRegistry {
|
||||
private instances = new Map<string, AIProvider>();
|
||||
|
||||
/**
|
||||
* Register a provider instance under an ID.
|
||||
* If no instanceId is provided, uses `provider.name`.
|
||||
* Returns the instanceId used.
|
||||
*/
|
||||
register(provider: AIProvider, instanceId?: string): string {
|
||||
const id = instanceId ?? provider.name;
|
||||
this.instances.set(id, provider);
|
||||
return id;
|
||||
}
|
||||
|
||||
/** Get a provider by instance ID. */
|
||||
get(instanceId: string): AIProvider | undefined {
|
||||
return this.instances.get(instanceId);
|
||||
}
|
||||
|
||||
/** Get the first registered provider (convenience for single-provider setups). */
|
||||
getDefault(): { id: string; provider: AIProvider } | undefined {
|
||||
const first = this.instances.entries().next();
|
||||
if (first.done) return undefined;
|
||||
return { id: first.value[0], provider: first.value[1] };
|
||||
}
|
||||
|
||||
/** Get all instances of a given provider type (by provider.name). */
|
||||
getByType(typeName: string): AIProvider[] {
|
||||
return [...this.instances.values()].filter((p) => p.name === typeName);
|
||||
}
|
||||
|
||||
/** List all instance IDs. */
|
||||
list(): string[] {
|
||||
return [...this.instances.keys()];
|
||||
}
|
||||
|
||||
/** Dispose and remove a single instance. No-op if not found. */
|
||||
dispose(instanceId: string): void {
|
||||
const provider = this.instances.get(instanceId);
|
||||
if (provider) {
|
||||
provider.dispose();
|
||||
this.instances.delete(instanceId);
|
||||
}
|
||||
}
|
||||
|
||||
/** Dispose all providers and clear the registry. */
|
||||
disposeAll(): void {
|
||||
for (const provider of this.instances.values()) {
|
||||
provider.dispose();
|
||||
}
|
||||
this.instances.clear();
|
||||
}
|
||||
|
||||
/** Number of registered instances. */
|
||||
get size(): number {
|
||||
return this.instances.size;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,445 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/providers/claude-agent-sdk.ts
|
||||
/**
|
||||
* Claude Agent SDK provider — the first concrete AIProvider implementation.
|
||||
*
|
||||
* Uses @anthropic-ai/claude-agent-sdk to create sessions that can:
|
||||
* - Start fresh with Plannotator context as the system prompt
|
||||
* - Fork from a parent Claude Code session (preserving full history)
|
||||
* - Resume a previous Plannotator inline chat session
|
||||
* - Stream text deltas back to the UI in real time
|
||||
*
|
||||
* Sessions are read-only by default (tools limited to Read, Glob, Grep)
|
||||
* to keep inline chat safe and cost-bounded.
|
||||
*/
|
||||
|
||||
import { buildSystemPrompt, buildForkPreamble, buildEffectivePrompt } from "../context.ts";
|
||||
import { BaseSession } from "../base-session.ts";
|
||||
import type {
|
||||
AIProvider,
|
||||
AIProviderCapabilities,
|
||||
AISession,
|
||||
AIMessage,
|
||||
CreateSessionOptions,
|
||||
ClaudeAgentSDKConfig,
|
||||
} from "../types.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
// Constants
// ---------------------------------------------------------------------------

const PROVIDER_NAME = "claude-agent-sdk";

/** Default read-only tools for inline chat. */
const DEFAULT_ALLOWED_TOOLS = ["Read", "Glob", "Grep", "WebSearch"];

// Effectively "unlimited" for an interactive chat session.
const DEFAULT_MAX_TURNS = 99;
// Fallback model when neither CreateSessionOptions nor provider config set one.
const DEFAULT_MODEL = "claude-sonnet-4-6";
|
||||
|
||||
// ---------------------------------------------------------------------------
// SDK query options — typed to catch typos at compile time
// ---------------------------------------------------------------------------

/**
 * Subset of the Claude Agent SDK `query()` options this provider sets.
 * Declared locally (instead of importing SDK types) because the SDK itself is
 * loaded lazily at runtime — see getSDKQuery().
 */
interface ClaudeSDKQueryOptions {
  model: string;
  maxTurns: number;
  allowedTools: string[];
  cwd: string;
  /** Used by abort() to cancel an in-flight query. */
  abortController: AbortController;
  /** Enables stream_event messages so the UI can render text deltas live. */
  includePartialMessages: boolean;
  persistSession: boolean;
  maxBudgetUsd?: number;
  /** Raw prompt string, or the claude_code preset with extra context appended. */
  systemPrompt?: string | { type: "preset"; preset: string; append?: string };
  /** Session ID to continue; combined with forkSession to branch instead. */
  resume?: string;
  forkSession?: boolean;
  permissionMode?: ClaudeAgentSDKConfig['permissionMode'];
  allowDangerouslySkipPermissions?: boolean;
  pathToClaudeCodeExecutable?: string;
  settingSources?: string[];
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Provider
// ---------------------------------------------------------------------------

/**
 * AIProvider backed by @anthropic-ai/claude-agent-sdk.
 *
 * Responsible only for assembling per-session configuration; all streaming
 * work happens in ClaudeAgentSDKSession. Supports fresh sessions, forking
 * from a parent Claude Code session, and resuming by session ID.
 */
export class ClaudeAgentSDKProvider implements AIProvider {
  readonly name = PROVIDER_NAME;
  readonly capabilities: AIProviderCapabilities = {
    fork: true,
    resume: true,
    streaming: true,
    tools: true,
  };
  // Models surfaced to the UI picker; `default: true` marks the preselection.
  readonly models = [
    { id: 'claude-sonnet-4-6', label: 'Sonnet 4.6', default: true },
    { id: 'claude-sonnet-4-6[1m]', label: 'Sonnet 4.6 (1M)' },
    { id: 'claude-opus-4-7', label: 'Opus 4.7' },
    { id: 'claude-opus-4-7[1m]', label: 'Opus 4.7 (1M)' },
    { id: 'claude-opus-4-6', label: 'Opus 4.6' },
    { id: 'claude-opus-4-6[1m]', label: 'Opus 4.6 (1M)' },
    { id: 'claude-haiku-4-5', label: 'Haiku 4.5' },
  ] as const;

  private config: ClaudeAgentSDKConfig;

  constructor(config: ClaudeAgentSDKConfig) {
    this.config = config;
  }

  /** Start a fresh session with the Plannotator context as system prompt. */
  async createSession(options: CreateSessionOptions): Promise<AISession> {
    return new ClaudeAgentSDKSession({
      ...this.baseConfig(options),
      systemPrompt: buildSystemPrompt(options.context),
      // Precedence: explicit option > provider config > process cwd.
      cwd: options.cwd ?? this.config.cwd ?? process.cwd(),
      parentSessionId: null,
      forkFromSession: null,
    });
  }

  /**
   * Fork from a parent Claude Code session, preserving its full history.
   * No systemPrompt — the fork preamble carries the Plannotator context
   * instead, on top of the inherited conversation.
   *
   * @throws Error when the context has no parent session.
   */
  async forkSession(options: CreateSessionOptions): Promise<AISession> {
    const parent = options.context.parent;
    if (!parent) {
      throw new Error(
        "Cannot fork: no parent session provided in context. " +
        "Use createSession() for standalone sessions."
      );
    }

    return new ClaudeAgentSDKSession({
      ...this.baseConfig(options),
      systemPrompt: null,
      forkPreamble: buildForkPreamble(options.context),
      // Forks run in the parent's working directory.
      cwd: parent.cwd,
      parentSessionId: parent.sessionId,
      forkFromSession: parent.sessionId,
    });
  }

  /** Resume a previous session by ID; no new system prompt is injected. */
  async resumeSession(sessionId: string): Promise<AISession> {
    return new ClaudeAgentSDKSession({
      ...this.baseConfig(),
      systemPrompt: null,
      cwd: this.config.cwd ?? process.cwd(),
      parentSessionId: null,
      forkFromSession: null,
      resumeSessionId: sessionId,
    });
  }

  dispose(): void {
    // No persistent resources to clean up
  }

  /**
   * Shared SessionConfig fields. Per-call options win over provider config,
   * which wins over module defaults.
   */
  private baseConfig(options?: CreateSessionOptions) {
    return {
      model: options?.model ?? this.config.model ?? DEFAULT_MODEL,
      maxTurns: options?.maxTurns ?? DEFAULT_MAX_TURNS,
      maxBudgetUsd: options?.maxBudgetUsd,
      allowedTools: this.config.allowedTools ?? DEFAULT_ALLOWED_TOOLS,
      permissionMode: this.config.permissionMode ?? "default",
      claudeExecutablePath: this.config.claudeExecutablePath,
      settingSources: this.config.settingSources ?? ['user', 'project'],
    };
  }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SDK import cache — resolve once, reuse across all queries
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// biome-ignore lint/suspicious/noExplicitAny: SDK types resolved at runtime via dynamic import
|
||||
let sdkQueryFn: ((...args: any[]) => any) | null = null;
|
||||
|
||||
async function getSDKQuery() {
|
||||
if (!sdkQueryFn) {
|
||||
const sdk = await import("@anthropic-ai/claude-agent-sdk");
|
||||
sdkQueryFn = sdk.query;
|
||||
}
|
||||
return sdkQueryFn!;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Session
// ---------------------------------------------------------------------------

/** Fully-resolved per-session configuration assembled by the provider. */
interface SessionConfig {
  /** System prompt for fresh sessions; null when forking or resuming. */
  systemPrompt: string | null;
  /** Context preamble prepended to the first prompt of a forked session. */
  forkPreamble?: string;
  model: string;
  maxTurns: number;
  maxBudgetUsd?: number;
  allowedTools: string[];
  permissionMode: ClaudeAgentSDKConfig['permissionMode'];
  cwd: string;
  /** Parent Claude Code session this one descends from, if any. */
  parentSessionId: string | null;
  /** Session ID to fork from (sets resume + forkSession on the SDK call). */
  forkFromSession: string | null;
  /** Session ID to resume directly (no fork). */
  resumeSessionId?: string;
  claudeExecutablePath?: string;
  settingSources?: string[];
}
|
||||
|
||||
/**
 * One conversation with the Claude Agent SDK.
 *
 * Streams SDK messages through mapSDKMessage() and forwards permission
 * decisions back over the SDK's control channel. State transitions
 * (busy flag, abort controller, resolved session ID) live in BaseSession.
 */
class ClaudeAgentSDKSession extends BaseSession {
  private config: SessionConfig;
  /** Active Query object — needed to send control responses (permission decisions) */
  private _activeQuery: { streamInput: (iter: AsyncIterable<unknown>) => Promise<void> } | null = null;

  constructor(config: SessionConfig) {
    super({
      parentSessionId: config.parentSessionId,
      // When resuming, the session ID is known up front.
      initialId: config.resumeSessionId,
    });
    this.config = config;
  }

  /**
   * Run one prompt and stream mapped AIMessages back.
   * Yields BUSY_ERROR and returns immediately if a query is already running.
   * Provider errors are yielded as an "error" message, never thrown.
   */
  async *query(prompt: string): AsyncIterable<AIMessage> {
    const started = this.startQuery();
    if (!started) { yield BaseSession.BUSY_ERROR; return; }
    const { gen } = started;

    try {
      const queryFn = await getSDKQuery();

      // Prepends the fork preamble only on the session's first prompt.
      const queryPrompt = buildEffectivePrompt(
        prompt,
        this.config.forkPreamble ?? null,
        this._firstQuerySent,
      );
      const options = this.buildQueryOptions();

      const stream = queryFn({ prompt: queryPrompt, options }) as
        AsyncIterable<Record<string, unknown>> & { streamInput: (iter: AsyncIterable<unknown>) => Promise<void> };
      // Keep a handle so respondToPermission() can push control responses.
      this._activeQuery = stream;

      this._firstQuerySent = true;

      for await (const message of stream) {
        const mapped = mapSDKMessage(message);

        // Capture the real session ID from the init message
        if (
          !this._resolvedId &&
          "session_id" in message &&
          typeof message.session_id === "string" &&
          message.session_id
        ) {
          this.resolveId(message.session_id);
        }

        for (const msg of mapped) {
          yield msg;
        }
      }
    } catch (err) {
      yield {
        type: "error",
        error: err instanceof Error ? err.message : String(err),
        code: "provider_error",
      };
    } finally {
      this.endQuery(gen);
      this._activeQuery = null;
    }
  }

  /** Cancel the in-flight query (BaseSession fires the abort controller). */
  abort(): void {
    this._activeQuery = null;
    super.abort();
  }

  /**
   * Answer a pending can_use_tool permission request.
   * Silently ignored when no query is active; streamInput failures are
   * swallowed (best-effort — the turn may already have ended).
   */
  respondToPermission(requestId: string, allow: boolean, message?: string): void {
    if (!this._activeQuery || !this._activeQuery.streamInput) return;

    // Both outcomes use subtype 'success' (the control exchange worked);
    // allow vs deny is carried in response.behavior.
    const response = allow
      ? { type: 'control_response', response: { subtype: 'success', request_id: requestId, response: { behavior: 'allow' } } }
      : { type: 'control_response', response: { subtype: 'success', request_id: requestId, response: { behavior: 'deny', message: message ?? 'User denied this action' } } };

    this._activeQuery.streamInput(
      (async function* () { yield response; })()
    ).catch(() => {});
  }

  // -------------------------------------------------------------------------
  // Internal
  // -------------------------------------------------------------------------

  /**
   * Build SDK query options for the current turn.
   * Order matters: once a real session ID is resolved, every later turn
   * resumes it and skips the first-turn systemPrompt/fork/resume branches.
   */
  private buildQueryOptions(): ClaudeSDKQueryOptions {
    const opts: ClaudeSDKQueryOptions = {
      model: this.config.model,
      maxTurns: this.config.maxTurns,
      allowedTools: this.config.allowedTools,
      cwd: this.config.cwd,
      // startQuery() created this controller; non-null while a query runs.
      abortController: this._currentAbort!,
      includePartialMessages: true,
      persistSession: true,
      ...(this.config.claudeExecutablePath && {
        pathToClaudeCodeExecutable: this.config.claudeExecutablePath,
      }),
      ...(this.config.settingSources && {
        settingSources: this.config.settingSources,
      }),
    };

    // NOTE(review): truthiness check means a 0 budget is treated as unset —
    // presumably intentional ("no budget cap"); confirm.
    if (this.config.maxBudgetUsd) {
      opts.maxBudgetUsd = this.config.maxBudgetUsd;
    }

    // After the first query resolves a real session ID, all subsequent
    // queries must resume that session to continue the conversation.
    if (this._resolvedId) {
      opts.resume = this._resolvedId;
      return this.applyPermissionMode(opts);
    }

    // First query: use Claude Code's built-in prompt with our context appended
    if (this.config.systemPrompt) {
      opts.systemPrompt = {
        type: "preset",
        preset: "claude_code",
        append: this.config.systemPrompt,
      };
    }

    if (this.config.forkFromSession) {
      opts.resume = this.config.forkFromSession;
      opts.forkSession = true;
    }

    // Direct resume overrides any fork resume target set above.
    if (this.config.resumeSessionId) {
      opts.resume = this.config.resumeSessionId;
    }

    return this.applyPermissionMode(opts);
  }

  /** Translate our permissionMode into the SDK's flags; mutates and returns opts. */
  private applyPermissionMode(opts: ClaudeSDKQueryOptions): ClaudeSDKQueryOptions {
    if (this.config.permissionMode === "bypassPermissions") {
      opts.permissionMode = "bypassPermissions";
      opts.allowDangerouslySkipPermissions = true;
    } else if (this.config.permissionMode === "plan") {
      opts.permissionMode = "plan";
    }
    return opts;
  }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Message mapping
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Map an SDK message to one or more AIMessages.
|
||||
*
|
||||
* An SDK assistant message can contain both text and tool_use content blocks
|
||||
* in a single response. We emit each block as a separate AIMessage so no
|
||||
* content is dropped.
|
||||
*/
|
||||
function mapSDKMessage(msg: Record<string, unknown>): AIMessage[] {
|
||||
const type = msg.type as string;
|
||||
|
||||
switch (type) {
|
||||
case "assistant": {
|
||||
const message = msg.message as Record<string, unknown> | undefined;
|
||||
if (!message) return [{ type: "unknown", raw: msg }];
|
||||
const content = message.content as Array<Record<string, unknown>>;
|
||||
if (!content) return [{ type: "unknown", raw: msg }];
|
||||
|
||||
const messages: AIMessage[] = [];
|
||||
const textParts: string[] = [];
|
||||
|
||||
for (const block of content) {
|
||||
if (block.type === "text" && typeof block.text === "string") {
|
||||
textParts.push(block.text);
|
||||
} else if (block.type === "tool_use") {
|
||||
// Flush accumulated text before the tool_use block
|
||||
if (textParts.length > 0) {
|
||||
messages.push({ type: "text", text: textParts.join("") });
|
||||
textParts.length = 0;
|
||||
}
|
||||
messages.push({
|
||||
type: "tool_use",
|
||||
toolName: block.name as string,
|
||||
toolInput: block.input as Record<string, unknown>,
|
||||
toolUseId: block.id as string,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Flush any remaining text after the last block
|
||||
if (textParts.length > 0) {
|
||||
messages.push({ type: "text", text: textParts.join("") });
|
||||
}
|
||||
|
||||
return messages.length > 0 ? messages : [{ type: "unknown", raw: msg }];
|
||||
}
|
||||
|
||||
case "stream_event": {
|
||||
const event = msg.event as Record<string, unknown> | undefined;
|
||||
if (!event) return [{ type: "unknown", raw: msg }];
|
||||
const eventType = event.type as string;
|
||||
|
||||
if (eventType === "content_block_delta") {
|
||||
const delta = event.delta as Record<string, unknown>;
|
||||
if (delta?.type === "text_delta" && typeof delta.text === "string") {
|
||||
return [{ type: "text_delta", delta: delta.text }];
|
||||
}
|
||||
}
|
||||
return [{ type: "unknown", raw: msg }];
|
||||
}
|
||||
|
||||
case "user": {
|
||||
// SDK wraps tool results in SDKUserMessage (type: "user")
|
||||
if (msg.tool_use_result != null) {
|
||||
return [{
|
||||
type: "tool_result",
|
||||
result: typeof msg.tool_use_result === "string"
|
||||
? msg.tool_use_result
|
||||
: JSON.stringify(msg.tool_use_result),
|
||||
}];
|
||||
}
|
||||
return [{ type: "unknown", raw: msg }];
|
||||
}
|
||||
|
||||
case "control_request": {
|
||||
const request = msg.request as Record<string, unknown> | undefined;
|
||||
if (request?.subtype === "can_use_tool") {
|
||||
return [{
|
||||
type: "permission_request",
|
||||
requestId: msg.request_id as string,
|
||||
toolName: request.tool_name as string,
|
||||
toolInput: (request.input as Record<string, unknown>) ?? {},
|
||||
title: request.title as string | undefined,
|
||||
displayName: request.display_name as string | undefined,
|
||||
description: request.description as string | undefined,
|
||||
toolUseId: request.tool_use_id as string,
|
||||
}];
|
||||
}
|
||||
return [{ type: "unknown", raw: msg }];
|
||||
}
|
||||
|
||||
case "result": {
|
||||
const sessionId = (msg.session_id as string) ?? "";
|
||||
const subtype = msg.subtype as string;
|
||||
return [{
|
||||
type: "result",
|
||||
sessionId,
|
||||
success: subtype === "success",
|
||||
result: (msg.result as string) ?? undefined,
|
||||
costUsd: msg.total_cost_usd as number | undefined,
|
||||
turns: msg.num_turns as number | undefined,
|
||||
}];
|
||||
}
|
||||
|
||||
default:
|
||||
return [{ type: "unknown", raw: msg }];
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Factory registration
// ---------------------------------------------------------------------------

// ES imports are hoisted, so the position of this import (after the classes)
// is organizational only.
import { registerProviderFactory } from "../provider.ts";

// Module-load side effect: makes this provider constructible via
// createProvider({ type: PROVIDER_NAME, ... }).
registerProviderFactory(
  PROVIDER_NAME,
  async (config) => new ClaudeAgentSDKProvider(config as ClaudeAgentSDKConfig)
);
|
||||
431
extensions/plannotator/generated/ai/providers/codex-sdk.ts
Normal file
431
extensions/plannotator/generated/ai/providers/codex-sdk.ts
Normal file
@@ -0,0 +1,431 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/providers/codex-sdk.ts
|
||||
/**
|
||||
* Codex SDK provider — bridges Plannotator's AI layer with OpenAI's Codex agent.
|
||||
*
|
||||
* Uses @openai/codex-sdk to create sessions that can:
|
||||
* - Start fresh with Plannotator context as the system prompt
|
||||
* - Fake-fork from a parent session (fresh thread + preamble, no real history)
|
||||
* - Resume a previous thread by ID
|
||||
* - Stream text deltas back to the UI in real time
|
||||
*
|
||||
* Sessions default to read-only sandbox mode for safety in inline chat.
|
||||
*/
|
||||
|
||||
import { buildSystemPrompt, buildEffectivePrompt } from "../context.ts";
|
||||
import { BaseSession } from "../base-session.ts";
|
||||
import type {
|
||||
AIProvider,
|
||||
AIProviderCapabilities,
|
||||
AISession,
|
||||
AIMessage,
|
||||
CreateSessionOptions,
|
||||
CodexSDKConfig,
|
||||
} from "../types.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
// Constants
// ---------------------------------------------------------------------------

const PROVIDER_NAME = "codex-sdk";
// Fallback model when neither CreateSessionOptions nor provider config set one.
const DEFAULT_MODEL = "gpt-5.4";
|
||||
|
||||
// ---------------------------------------------------------------------------
// Provider
// ---------------------------------------------------------------------------

/**
 * AIProvider backed by @openai/codex-sdk.
 *
 * Assembles per-session configuration only; streaming happens in
 * CodexSDKSession. Forking is unsupported at this layer — callers fall
 * back to createSession().
 */
export class CodexSDKProvider implements AIProvider {
  readonly name = PROVIDER_NAME;
  readonly capabilities: AIProviderCapabilities = {
    fork: false, // No real fork — faked with fresh thread + preamble
    resume: true,
    streaming: true,
    tools: true,
  };
  // Models surfaced to the UI picker; `default: true` marks the preselection.
  readonly models = [
    { id: 'gpt-5.5', label: 'GPT-5.5' },
    { id: 'gpt-5.4', label: 'GPT-5.4', default: true },
    { id: 'gpt-5.4-mini', label: 'GPT-5.4 Mini' },
    { id: 'gpt-5.3-codex', label: 'GPT-5.3 Codex' },
    { id: 'gpt-5.3-codex-spark', label: 'GPT-5.3 Codex Spark' },
    { id: 'gpt-5.2-codex', label: 'GPT-5.2 Codex' },
    { id: 'gpt-5.2', label: 'GPT-5.2' },
  ] as const;

  private config: CodexSDKConfig;

  constructor(config: CodexSDKConfig) {
    this.config = config;
  }

  /** Start a fresh session with the Plannotator context as system prompt. */
  async createSession(options: CreateSessionOptions): Promise<AISession> {
    return new CodexSDKSession({
      ...this.baseConfig(options),
      systemPrompt: buildSystemPrompt(options.context),
      // Precedence: explicit option > provider config > process cwd.
      cwd: options.cwd ?? this.config.cwd ?? process.cwd(),
      parentSessionId: null,
    });
  }

  /**
   * Always throws: Codex has no fork primitive.
   * @throws Error directing the endpoint layer to createSession().
   */
  async forkSession(_options: CreateSessionOptions): Promise<AISession> {
    throw new Error(
      "Codex does not support session forking. " +
      "The endpoint layer should fall back to createSession()."
    );
  }

  /** Resume a previous Codex thread by ID; no new system prompt is injected. */
  async resumeSession(sessionId: string): Promise<AISession> {
    return new CodexSDKSession({
      ...this.baseConfig(),
      systemPrompt: null,
      cwd: this.config.cwd ?? process.cwd(),
      parentSessionId: null,
      resumeThreadId: sessionId,
    });
  }

  dispose(): void {
    // No persistent resources to clean up
  }

  /**
   * Shared SessionConfig fields. Per-call options win over provider config,
   * which wins over module defaults. Sandbox defaults to read-only for safety.
   */
  private baseConfig(options?: CreateSessionOptions) {
    return {
      model: options?.model ?? this.config.model ?? DEFAULT_MODEL,
      maxTurns: options?.maxTurns ?? 99,
      sandboxMode: this.config.sandboxMode ?? "read-only" as const,
      codexExecutablePath: this.config.codexExecutablePath,
      reasoningEffort: options?.reasoningEffort,
    };
  }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SDK import cache — resolve once, reuse across all sessions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// biome-ignore lint/suspicious/noExplicitAny: SDK type not available at compile time
|
||||
let CodexClass: any = null;
|
||||
|
||||
async function getCodexClass() {
|
||||
if (!CodexClass) {
|
||||
// biome-ignore lint/suspicious/noExplicitAny: SDK exports vary between versions
|
||||
const mod = await import("@openai/codex-sdk") as any;
|
||||
CodexClass = mod.default ?? mod.Codex;
|
||||
}
|
||||
return CodexClass;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Session
// ---------------------------------------------------------------------------

/** Fully-resolved per-session configuration assembled by the provider. */
interface SessionConfig {
  /** System prompt for fresh sessions; null when resuming. */
  systemPrompt: string | null;
  model: string;
  maxTurns: number;
  sandboxMode: "read-only" | "workspace-write" | "danger-full-access";
  cwd: string;
  parentSessionId: string | null;
  /** Existing Codex thread to resume instead of starting a new one. */
  resumeThreadId?: string;
  codexExecutablePath?: string;
  reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
}
|
||||
|
||||
/**
 * One conversation with a Codex thread.
 *
 * Lazily creates the Codex instance and thread on first query, streams
 * events through mapCodexEvent(), and emits a synthetic "result" message
 * when a turn ends without a turn.failed event (the Codex stream itself
 * has no terminal result message like Claude's).
 */
class CodexSDKSession extends BaseSession {
  private config: SessionConfig;
  // biome-ignore lint/suspicious/noExplicitAny: SDK types not available at compile time
  private _codexInstance: any = null;
  // biome-ignore lint/suspicious/noExplicitAny: SDK types not available at compile time
  private _thread: any = null;
  /** Tracks cumulative text length per item for delta extraction. */
  private _itemTextOffsets = new Map<string, number>();

  constructor(config: SessionConfig) {
    super({
      parentSessionId: config.parentSessionId,
      initialId: config.resumeThreadId,
    });
    this.config = config;
    // If resuming, treat the thread ID as already resolved
    if (config.resumeThreadId) {
      this._resolvedId = config.resumeThreadId;
    }
  }

  /**
   * Run one prompt and stream mapped AIMessages back.
   * Yields BUSY_ERROR and returns immediately if a query is already running.
   * Provider errors are yielded as an "error" message, never thrown.
   */
  async *query(prompt: string): AsyncIterable<AIMessage> {
    const started = this.startQuery();
    if (!started) { yield BaseSession.BUSY_ERROR; return; }
    const { gen, signal } = started;

    // Delta offsets are per-turn: cumulative item text restarts each turn.
    this._itemTextOffsets.clear();

    try {
      const Codex = await getCodexClass();

      // Lazy-create the Codex instance
      if (!this._codexInstance) {
        this._codexInstance = new Codex({
          ...(this.config.codexExecutablePath && { codexPathOverride: this.config.codexExecutablePath }),
        });
      }

      // Lazy-create or resume the thread
      if (!this._thread) {
        if (this.config.resumeThreadId) {
          this._thread = this._codexInstance.resumeThread(this.config.resumeThreadId, {
            model: this.config.model,
            workingDirectory: this.config.cwd,
            sandboxMode: this.config.sandboxMode,
            ...(this.config.reasoningEffort && { modelReasoningEffort: this.config.reasoningEffort }),
          });
        } else {
          this._thread = this._codexInstance.startThread({
            model: this.config.model,
            workingDirectory: this.config.cwd,
            sandboxMode: this.config.sandboxMode,
            ...(this.config.reasoningEffort && { modelReasoningEffort: this.config.reasoningEffort }),
          });
        }
      }

      // Prepends the system prompt only on the session's first prompt
      // (Codex has no separate system-prompt channel here).
      const effectivePrompt = buildEffectivePrompt(
        prompt,
        this.config.systemPrompt,
        this._firstQuerySent,
      );
      const streamed = await this._thread.runStreamed(effectivePrompt, {
        signal,
      });

      this._firstQuerySent = true;
      let turnFailed = false;

      for await (const event of streamed.events) {
        // ID resolution from thread.started
        if (
          !this._resolvedId &&
          event.type === "thread.started" &&
          typeof event.thread_id === "string"
        ) {
          this.resolveId(event.thread_id);
        }

        // Remember failure so we skip the synthetic success result below.
        if (event.type === "turn.failed") {
          turnFailed = true;
        }

        const mapped = mapCodexEvent(event, this._itemTextOffsets);
        for (const msg of mapped) {
          yield msg;
        }
      }

      // Emit synthetic result after stream ends
      if (!turnFailed) {
        yield {
          type: "result",
          sessionId: this.id,
          success: true,
        };
      }
    } catch (err) {
      yield {
        type: "error",
        error: err instanceof Error ? err.message : String(err),
        code: "provider_error",
      };
    } finally {
      this.endQuery(gen);
    }
  }

}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Event mapping
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Map a Codex SDK ThreadEvent to one or more AIMessages.
|
||||
*
|
||||
* The itemTextOffsets map tracks cumulative text length per item ID
|
||||
* so we can extract true deltas from the cumulative text in item.updated events.
|
||||
*/
|
||||
function mapCodexEvent(
|
||||
event: Record<string, unknown>,
|
||||
itemTextOffsets: Map<string, number>,
|
||||
): AIMessage[] {
|
||||
const eventType = event.type as string;
|
||||
|
||||
switch (eventType) {
|
||||
case "thread.started":
|
||||
case "turn.started":
|
||||
return [];
|
||||
|
||||
case "turn.completed":
|
||||
return [];
|
||||
|
||||
case "turn.failed": {
|
||||
const error = event.error as Record<string, unknown> | undefined;
|
||||
return [{
|
||||
type: "error",
|
||||
error: (error?.message as string) ?? "Turn failed",
|
||||
code: "turn_failed",
|
||||
}];
|
||||
}
|
||||
|
||||
case "error":
|
||||
return [{
|
||||
type: "error",
|
||||
error: (event.message as string) ?? "Unknown error",
|
||||
code: "codex_error",
|
||||
}];
|
||||
|
||||
case "item.started":
|
||||
case "item.updated":
|
||||
case "item.completed":
|
||||
return mapCodexItem(event, itemTextOffsets);
|
||||
|
||||
default:
|
||||
return [{ type: "unknown", raw: event }];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Map item-level events to AIMessages.
|
||||
*/
|
||||
function mapCodexItem(
|
||||
event: Record<string, unknown>,
|
||||
itemTextOffsets: Map<string, number>,
|
||||
): AIMessage[] {
|
||||
const item = event.item as Record<string, unknown>;
|
||||
if (!item) return [{ type: "unknown", raw: event }];
|
||||
|
||||
const eventType = event.type as string;
|
||||
const itemType = item.type as string;
|
||||
const itemId = (item.id as string) ?? "";
|
||||
const isStarted = eventType === "item.started";
|
||||
const isCompleted = eventType === "item.completed";
|
||||
|
||||
switch (itemType) {
|
||||
case "agent_message": {
|
||||
const text = (item.text as string) ?? "";
|
||||
|
||||
if (isStarted) {
|
||||
// Reset offset tracking for this item
|
||||
itemTextOffsets.set(itemId, 0);
|
||||
return [];
|
||||
}
|
||||
|
||||
if (isCompleted) {
|
||||
// Emit final complete text
|
||||
itemTextOffsets.delete(itemId);
|
||||
return text ? [{ type: "text", text }] : [];
|
||||
}
|
||||
|
||||
// item.updated — extract delta from cumulative text
|
||||
const prevOffset = itemTextOffsets.get(itemId) ?? 0;
|
||||
if (text.length > prevOffset) {
|
||||
const delta = text.slice(prevOffset);
|
||||
itemTextOffsets.set(itemId, text.length);
|
||||
return [{ type: "text_delta", delta }];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
case "command_execution": {
|
||||
const messages: AIMessage[] = [];
|
||||
if (isStarted) {
|
||||
messages.push({
|
||||
type: "tool_use",
|
||||
toolName: "Bash",
|
||||
toolInput: { command: item.command as string },
|
||||
toolUseId: itemId,
|
||||
});
|
||||
}
|
||||
if (isCompleted) {
|
||||
const output = (item.aggregated_output as string) ?? "";
|
||||
const exitCode = item.exit_code as number | undefined;
|
||||
messages.push({
|
||||
type: "tool_result",
|
||||
toolUseId: itemId,
|
||||
result: exitCode != null ? `${output}\n[exit code: ${exitCode}]` : output,
|
||||
});
|
||||
}
|
||||
return messages;
|
||||
}
|
||||
|
||||
case "file_change": {
|
||||
const changes = item.changes as Array<{ path: string; kind: string }> | undefined;
|
||||
if (isStarted || isCompleted) {
|
||||
return [{
|
||||
type: "tool_use",
|
||||
toolName: "FileChange",
|
||||
toolInput: { changes: changes ?? [] },
|
||||
toolUseId: itemId,
|
||||
}];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
case "mcp_tool_call": {
|
||||
const messages: AIMessage[] = [];
|
||||
if (isStarted) {
|
||||
messages.push({
|
||||
type: "tool_use",
|
||||
toolName: `${item.server as string}/${item.tool as string}`,
|
||||
toolInput: (item.arguments as Record<string, unknown>) ?? {},
|
||||
toolUseId: itemId,
|
||||
});
|
||||
}
|
||||
if (isCompleted) {
|
||||
if (item.result != null) {
|
||||
messages.push({
|
||||
type: "tool_result",
|
||||
toolUseId: itemId,
|
||||
result: typeof item.result === "string" ? item.result : JSON.stringify(item.result),
|
||||
});
|
||||
}
|
||||
if (item.error) {
|
||||
const err = item.error as Record<string, unknown>;
|
||||
messages.push({
|
||||
type: "error",
|
||||
error: (err.message as string) ?? "MCP tool call failed",
|
||||
code: "mcp_error",
|
||||
});
|
||||
}
|
||||
}
|
||||
return messages;
|
||||
}
|
||||
|
||||
case "error":
|
||||
return [{
|
||||
type: "error",
|
||||
error: (item.message as string) ?? "Unknown error",
|
||||
}];
|
||||
|
||||
case "reasoning":
|
||||
case "web_search":
|
||||
case "todo_list":
|
||||
return [{ type: "unknown", raw: { eventType, item } }];
|
||||
|
||||
default:
|
||||
return [{ type: "unknown", raw: { eventType, item } }];
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Exported for testing
// ---------------------------------------------------------------------------

// Re-exported so tests can exercise the pure event/item mappers directly.
export { mapCodexEvent, mapCodexItem };

// ---------------------------------------------------------------------------
// Factory registration
// ---------------------------------------------------------------------------

import { registerProviderFactory } from "../provider.ts";

// Self-registration: importing this module is enough to make the provider
// constructible by name through the shared factory registry.
registerProviderFactory(
  PROVIDER_NAME,
  async (config) => new CodexSDKProvider(config as CodexSDKConfig)
);
|
||||
547
extensions/plannotator/generated/ai/providers/opencode-sdk.ts
Normal file
547
extensions/plannotator/generated/ai/providers/opencode-sdk.ts
Normal file
@@ -0,0 +1,547 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/providers/opencode-sdk.ts
|
||||
/**
|
||||
* OpenCode provider — bridges Plannotator's AI layer with OpenCode's agent server.
|
||||
*
|
||||
* Uses @opencode-ai/sdk to connect to an existing `opencode serve` first and
|
||||
* only spawns a new server when nothing is reachable. One server is shared
|
||||
* across all sessions. The user must have the `opencode` CLI installed and
|
||||
* authenticated.
|
||||
*/
|
||||
|
||||
import type { OpencodeClient } from "@opencode-ai/sdk";
|
||||
import { BaseSession } from "../base-session.ts";
|
||||
import { buildSystemPrompt } from "../context.ts";
|
||||
import type {
|
||||
AIMessage,
|
||||
AIProvider,
|
||||
AIProviderCapabilities,
|
||||
AISession,
|
||||
CreateSessionOptions,
|
||||
OpenCodeConfig,
|
||||
} from "../types.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Constants
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Registry key for this provider (used by the factory registration at the bottom of this file). */
const PROVIDER_NAME = "opencode-sdk";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SDK import cache — resolve once, reuse across all sessions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// biome-ignore lint/suspicious/noExplicitAny: SDK types not available at compile time
|
||||
let sdk: any = null;
|
||||
|
||||
async function getSDK() {
|
||||
if (!sdk) {
|
||||
sdk = await import("@opencode-ai/sdk");
|
||||
}
|
||||
return sdk;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Provider
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * AIProvider implementation backed by an OpenCode server.
 *
 * Connection strategy (see doStart): try to attach to an already-running
 * server at the configured host/port first; only spawn a fresh server via
 * the SDK when nothing is reachable. One server/client pair is shared by
 * every session this provider creates.
 */
export class OpenCodeProvider implements AIProvider {
  readonly name = PROVIDER_NAME;
  readonly capabilities: AIProviderCapabilities = {
    fork: true,
    resume: true,
    streaming: true,
    tools: true,
  };
  /** Populated by fetchModels(); undefined until then. */
  models?: Array<{ id: string; label: string; default?: boolean }>;

  private config: OpenCodeConfig;
  /** Server handle when we spawned one ourselves; stays null when attached to an existing server. */
  // biome-ignore lint/suspicious/noExplicitAny: SDK types not available at compile time
  private server: { url: string; close: () => void } | null = null;
  private client: OpencodeClient | null = null;
  /** In-flight startup, memoized so concurrent ensureServer() calls share one attempt. */
  private startPromise: Promise<void> | null = null;
  /** Why attaching to an existing server failed — folded into the spawn error message. */
  private lastAttachError: string | null = null;

  constructor(config: OpenCodeConfig) {
    this.config = config;
  }

  /** Attach to an existing OpenCode server or spawn one if needed. */
  async ensureServer(): Promise<void> {
    if (this.client) return;
    // Memoize the startup; reset on failure so a later call can retry.
    this.startPromise ??= this.doStart().catch((err) => {
      this.startPromise = null;
      throw err;
    });
    return this.startPromise;
  }

  /** Startup sequence: attach to an existing server if reachable, else spawn one. */
  private async doStart(): Promise<void> {
    this.lastAttachError = null;
    const { createOpencodeServer, createOpencodeClient } = await getSDK();
    const attachedClient = await this.tryAttachExistingServer(createOpencodeClient);
    if (attachedClient) {
      this.client = attachedClient;
      return;
    }

    try {
      this.server = await createOpencodeServer({
        hostname: this.config.hostname ?? "127.0.0.1",
        // Only pass port when configured; otherwise let the SDK choose.
        ...(this.config.port != null && { port: this.config.port }),
        timeout: 15_000,
      });
    } catch (err) {
      const spawnMessage = err instanceof Error ? err.message : String(err);
      // Surface both failure modes together: could not attach AND could not spawn.
      if (this.lastAttachError) {
        throw new Error(`${this.lastAttachError}\nFallback startup also failed: ${spawnMessage}`);
      }
      throw err;
    }

    this.client = createOpencodeClient({
      baseUrl: this.server!.url,
      directory: this.config.cwd ?? process.cwd(),
    });
  }

  /**
   * Probe the configured host/port for an already-running server.
   * Returns a connected client on success, or null after recording the
   * failure reason in lastAttachError.
   */
  private async tryAttachExistingServer(
    createOpencodeClient: (config?: { baseUrl?: string; directory?: string }) => OpencodeClient,
  ): Promise<OpencodeClient | null> {
    const cwd = this.config.cwd ?? process.cwd();
    const baseUrl = `http://${this.config.hostname ?? "127.0.0.1"}:${this.config.port ?? 4096}`;
    const client = createOpencodeClient({
      baseUrl,
      directory: cwd,
    });

    try {
      // Cheap health probe with a short timeout so startup stays fast.
      await client.config.get({
        throwOnError: true,
        signal: AbortSignal.timeout(1_000),
      });
      return client;
    } catch (err) {
      const message = err instanceof Error ? err.message : String(err);
      this.lastAttachError = `Failed to attach to existing OpenCode server at ${baseUrl}: ${message}`;
      return null;
    }
  }

  /** Throws when called before ensureServer() has completed successfully. */
  private getClient(): OpencodeClient {
    if (!this.client) {
      throw new Error("OpenCode client is not initialized.");
    }
    return this.client;
  }

  /** Create a new OpenCode session and wrap it in an OpenCodeSession. */
  async createSession(options: CreateSessionOptions): Promise<AISession> {
    await this.ensureServer();
    const client = this.getClient();

    const result = await client.session.create({
      query: { directory: options.cwd ?? this.config.cwd ?? process.cwd() },
    });
    const sessionData = result.data;
    if (!sessionData) {
      throw new Error("OpenCode did not return session data.");
    }

    const session = new OpenCodeSession({
      sessionId: sessionData.id,
      systemPrompt: buildSystemPrompt(options.context),
      client,
      model: options.model,
      parentSessionId: null,
    });
    return session;
  }

  /** Fork an existing session (requires options.context.parent.sessionId). */
  async forkSession(options: CreateSessionOptions): Promise<AISession> {
    await this.ensureServer();
    const client = this.getClient();

    const parentId = options.context.parent?.sessionId;
    if (!parentId) {
      throw new Error("Fork requires a parent session ID.");
    }

    const result = await client.session.fork({
      path: { id: parentId },
    });
    const sessionData = result.data;
    if (!sessionData) {
      throw new Error("OpenCode did not return forked session data.");
    }

    return new OpenCodeSession({
      sessionId: sessionData.id,
      systemPrompt: buildSystemPrompt(options.context),
      client,
      model: options.model,
      parentSessionId: parentId,
    });
  }

  /** Re-attach to an existing session; no system prompt or model is carried over. */
  async resumeSession(sessionId: string): Promise<AISession> {
    await this.ensureServer();
    const client = this.getClient();

    // Verify session exists
    await client.session.get({ path: { id: sessionId } });

    return new OpenCodeSession({
      sessionId,
      systemPrompt: null,
      client,
      model: undefined,
      parentSessionId: null,
    });
  }

  /** Close a server we spawned (attached servers are left running) and drop state. */
  dispose(): void {
    if (this.server) {
      this.server.close();
      this.server = null;
    }
    this.client = null;
    this.startPromise = null;
  }

  /** Fetch available models from OpenCode. Call before registering the provider. */
  async fetchModels(): Promise<void> {
    try {
      await this.ensureServer();
      const client = this.getClient();

      const result = await client.provider.list({
        query: { directory: this.config.cwd ?? process.cwd() },
      });
      const data = result.data;
      if (!data) {
        return;
      }
      // Only offer models from providers the user is actually connected to.
      const connected = new Set(data.connected ?? []);
      const allProviders = data.all ?? [];

      const models: Array<{ id: string; label: string; default?: boolean }> = [];
      for (const provider of allProviders) {
        if (!connected.has(provider.id)) continue;
        for (const model of Object.values(provider.models)) {
          models.push({
            id: `${provider.id}/${model.id}`,
            label: model.name ?? model.id,
          });
        }
      }

      if (models.length > 0) {
        // Mark first model as default
        models[0].default = true;
        this.models = models;
      }
    } catch {
      // OpenCode not configured or no models available
    }
  }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Session
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Constructor arguments for OpenCodeSession. */
interface SessionConfig {
  /** OpenCode server-side session ID this session is bound to. */
  sessionId: string;
  /** Sent with the first query only; null when resuming (none available). */
  systemPrompt: string | null;
  // biome-ignore lint/suspicious/noExplicitAny: SDK types not available at compile time
  client: any;
  /** Model in "providerID/modelID" format. */
  model?: string;
  /** Parent session ID when forked; null otherwise. */
  parentSessionId: string | null;
}
|
||||
|
||||
/**
 * AISession bound to one OpenCode server-side session.
 * Streams responses by subscribing to the server's SSE event feed and
 * filtering events down to this session's ID.
 */
class OpenCodeSession extends BaseSession {
  private config: SessionConfig;

  constructor(config: SessionConfig) {
    super({
      parentSessionId: config.parentSessionId,
      initialId: config.sessionId,
    });
    this.config = config;
    // The server assigned the ID up front, so the session is resolved immediately.
    this._resolvedId = config.sessionId;
  }

  /**
   * Send one prompt and yield streamed AIMessages until the turn finishes.
   * Flow: subscribe to SSE → fire the prompt asynchronously → drain events,
   * mapping each through mapOpenCodeEvent, until a result (or an error from
   * a terminal event — see isTerminalEvent) arrives.
   */
  async *query(prompt: string): AsyncIterable<AIMessage> {
    const started = this.startQuery();
    if (!started) {
      // Another query is already in flight on this session.
      yield BaseSession.BUSY_ERROR;
      return;
    }
    const { gen } = started;

    try {
      // Build model param if specified ("providerID/modelID"; the model ID may itself contain "/").
      let modelParam: { providerID: string; modelID: string } | undefined;
      if (this.config.model) {
        const [providerID, ...rest] = this.config.model.split("/");
        const modelID = rest.join("/");
        if (providerID && modelID) {
          modelParam = { providerID, modelID };
        }
      }

      // Subscribe to SSE events (before prompting, so no events are missed)
      const { stream } = await this.config.client.event.subscribe();

      try {
        // Send prompt asynchronously
        try {
          await this.config.client.session.promptAsync({
            path: { id: this.config.sessionId },
            body: {
              // The system prompt accompanies only the very first query.
              ...(!this._firstQuerySent &&
                this.config.systemPrompt && {
                  system: this.config.systemPrompt,
                }),
              ...(modelParam && { model: modelParam }),
              parts: [{ type: "text", text: prompt }],
            },
          });
        } catch (err) {
          yield {
            type: "error",
            error: `OpenCode rejected prompt: ${err instanceof Error ? err.message : String(err)}`,
            code: "opencode_prompt_rejected",
          };
          return;
        }
        this._firstQuerySent = true;

        // Drain SSE events filtered by session ID
        for await (const event of stream) {
          const eventType = event.type as string;
          const props = event.properties as Record<string, unknown> | undefined;
          if (!props) continue;

          // Filter: only events for our session. The session ID may live at
          // any of three locations depending on the event shape.
          const eventSessionId =
            (props.sessionID as string) ??
            ((props.info as Record<string, unknown>)?.sessionID as string) ??
            ((props.part as Record<string, unknown>)?.sessionID as string);
          if (eventSessionId && eventSessionId !== this.config.sessionId) continue;

          const mapped = mapOpenCodeEvent(eventType, props, this.id);
          for (const msg of mapped) {
            yield msg;
            // Stop on the turn's result, or on an error from a terminal event.
            if (msg.type === "result" || (msg.type === "error" && isTerminalEvent(eventType))) {
              return;
            }
          }
        }
      } finally {
        // Close the SSE stream no matter how we exit the drain loop.
        stream.return?.();
      }
    } catch (err) {
      yield {
        type: "error",
        error: err instanceof Error ? err.message : String(err),
        code: "provider_error",
      };
    } finally {
      this.endQuery(gen);
    }
  }

  /** Ask the server to abort the in-flight turn (best-effort), then abort locally. */
  abort(): void {
    this.config.client.session
      .abort({ path: { id: this.config.sessionId } })
      .catch(() => {});
    super.abort();
  }

  /** Answer a pending permission request: "once" grants it, "reject" denies (best-effort). */
  respondToPermission(
    requestId: string,
    allow: boolean,
    _message?: string,
  ): void {
    this.config.client
      .postSessionIdPermissionsPermissionId({
        path: { id: this.config.sessionId, permissionID: requestId },
        body: { response: allow ? "once" : "reject" },
      })
      .catch(() => {});
  }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Event mapping
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Returns true for events that should terminate the query when mapped to an error. */
|
||||
function isTerminalEvent(eventType: string): boolean {
|
||||
return eventType === "session.error" || eventType === "session.status";
|
||||
}
|
||||
|
||||
/**
|
||||
* Map an OpenCode SSE event to AIMessage[].
|
||||
*
|
||||
* Key events:
|
||||
* message.part.delta → text_delta (streaming text)
|
||||
* message.part.updated → tool_use / tool_result (tool lifecycle)
|
||||
* permission.updated → permission_request
|
||||
* session.status → result (when idle)
|
||||
* message.updated → error (when message has error)
|
||||
*/
|
||||
export function mapOpenCodeEvent(
|
||||
eventType: string,
|
||||
props: Record<string, unknown>,
|
||||
sessionId: string,
|
||||
): AIMessage[] {
|
||||
switch (eventType) {
|
||||
case "message.part.delta": {
|
||||
const field = props.field as string;
|
||||
const delta = props.delta as string;
|
||||
if (field === "text" && delta) {
|
||||
return [{ type: "text_delta", delta }];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
case "message.part.updated": {
|
||||
const part = props.part as Record<string, unknown>;
|
||||
if (!part) return [];
|
||||
|
||||
const partType = part.type as string;
|
||||
|
||||
if (partType === "tool") {
|
||||
const state = part.state as Record<string, unknown>;
|
||||
if (!state) return [];
|
||||
|
||||
const status = state.status as string;
|
||||
const callID = (part.callID as string) ?? (part.id as string);
|
||||
const toolName = part.tool as string;
|
||||
|
||||
switch (status) {
|
||||
case "running":
|
||||
return [
|
||||
{
|
||||
type: "tool_use",
|
||||
toolName: toolName ?? "unknown",
|
||||
toolInput: (state.input as Record<string, unknown>) ?? {},
|
||||
toolUseId: callID,
|
||||
},
|
||||
];
|
||||
|
||||
case "completed": {
|
||||
const output = (state.output as string) ?? "";
|
||||
return [
|
||||
{
|
||||
type: "tool_result",
|
||||
toolUseId: callID,
|
||||
result: output,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
case "error": {
|
||||
const error = (state.error as string) ?? "Tool execution failed";
|
||||
return [
|
||||
{
|
||||
type: "tool_result",
|
||||
toolUseId: callID,
|
||||
result: `[Error] ${error}`,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
default:
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
return [];
|
||||
}
|
||||
|
||||
case "permission.updated": {
|
||||
const id = props.id as string;
|
||||
const permType = props.type as string;
|
||||
const title = props.title as string;
|
||||
const callID = props.callID as string;
|
||||
const metadata = (props.metadata as Record<string, unknown>) ?? {};
|
||||
|
||||
return [
|
||||
{
|
||||
type: "permission_request",
|
||||
requestId: id,
|
||||
toolName: permType ?? "unknown",
|
||||
toolInput: metadata,
|
||||
title: title ?? permType,
|
||||
toolUseId: callID ?? id,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
case "session.status": {
|
||||
const status = props.status as Record<string, unknown>;
|
||||
if (status?.type === "idle") {
|
||||
return [
|
||||
{
|
||||
type: "result",
|
||||
sessionId,
|
||||
success: true,
|
||||
},
|
||||
];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
case "session.error": {
|
||||
const error = props.error as Record<string, unknown>;
|
||||
const message =
|
||||
(error?.message as string) ?? (props.message as string) ?? "Session error";
|
||||
return [
|
||||
{
|
||||
type: "error",
|
||||
error: message,
|
||||
code: "opencode_session_error",
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
case "message.updated": {
|
||||
const info = props.info as Record<string, unknown>;
|
||||
if (!info) return [];
|
||||
|
||||
const msgError = info.error as Record<string, unknown>;
|
||||
if (msgError) {
|
||||
const errorData = msgError.data as Record<string, unknown>;
|
||||
const message =
|
||||
(errorData?.message as string) ??
|
||||
(msgError.name as string) ??
|
||||
"Message error";
|
||||
return [
|
||||
{
|
||||
type: "error",
|
||||
error: message,
|
||||
code: "opencode_message_error",
|
||||
},
|
||||
];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
default:
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Factory registration
// ---------------------------------------------------------------------------

import { registerProviderFactory } from "../provider.ts";

// Self-registration: importing this module is enough to make "opencode-sdk"
// constructible by name through the shared factory registry.
registerProviderFactory(
  PROVIDER_NAME,
  async (config) => new OpenCodeProvider(config as OpenCodeConfig),
);
|
||||
111
extensions/plannotator/generated/ai/providers/pi-events.ts
Normal file
111
extensions/plannotator/generated/ai/providers/pi-events.ts
Normal file
@@ -0,0 +1,111 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/providers/pi-events.ts
|
||||
/**
|
||||
* Pi event mapping — shared between Bun and Node.js Pi providers.
|
||||
*
|
||||
* Pure function, no runtime-specific dependencies.
|
||||
*/
|
||||
|
||||
import type { AIMessage } from "../types.ts";
|
||||
|
||||
/**
|
||||
* Map a Pi AgentEvent (received as JSONL) to AIMessage[].
|
||||
*
|
||||
* Pi event hierarchy:
|
||||
* agent_start > turn_start > message_start > message_update* > message_end
|
||||
* > tool_execution_start > tool_execution_end > turn_end > agent_end
|
||||
*
|
||||
* We extract:
|
||||
* - text_delta from message_update.assistantMessageEvent
|
||||
* - tool_use from toolcall_end
|
||||
* - tool_result from tool_execution_end
|
||||
* - result from agent_end
|
||||
*/
|
||||
export function mapPiEvent(
|
||||
event: Record<string, unknown>,
|
||||
sessionId: string,
|
||||
): AIMessage[] {
|
||||
const eventType = event.type as string;
|
||||
|
||||
switch (eventType) {
|
||||
case "message_update": {
|
||||
const ame = event.assistantMessageEvent as
|
||||
| Record<string, unknown>
|
||||
| undefined;
|
||||
if (!ame) return [];
|
||||
|
||||
const subType = ame.type as string;
|
||||
|
||||
switch (subType) {
|
||||
case "text_delta":
|
||||
return [{ type: "text_delta", delta: ame.delta as string }];
|
||||
|
||||
case "toolcall_end": {
|
||||
const tc = ame.toolCall as Record<string, unknown>;
|
||||
if (!tc) return [];
|
||||
return [
|
||||
{
|
||||
type: "tool_use",
|
||||
toolName: tc.name as string,
|
||||
toolInput: (tc.arguments as Record<string, unknown>) ?? {},
|
||||
toolUseId: tc.id as string,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
case "error": {
|
||||
const partial = ame.error as Record<string, unknown> | undefined;
|
||||
const errorMessage =
|
||||
(partial?.errorMessage as string) ?? "Stream error";
|
||||
return [
|
||||
{ type: "error", error: errorMessage, code: "pi_stream_error" },
|
||||
];
|
||||
}
|
||||
|
||||
default:
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
case "tool_execution_end": {
|
||||
const result = event.result;
|
||||
const isError = event.isError as boolean;
|
||||
const resultStr =
|
||||
result == null
|
||||
? ""
|
||||
: typeof result === "string"
|
||||
? result
|
||||
: JSON.stringify(result);
|
||||
|
||||
return [
|
||||
{
|
||||
type: "tool_result",
|
||||
toolUseId: event.toolCallId as string,
|
||||
result: isError
|
||||
? `[Error] ${resultStr || "Tool execution failed"}`
|
||||
: resultStr,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
case "agent_end":
|
||||
return [
|
||||
{
|
||||
type: "result",
|
||||
sessionId,
|
||||
success: true,
|
||||
},
|
||||
];
|
||||
|
||||
case "process_exited":
|
||||
return [
|
||||
{
|
||||
type: "error",
|
||||
error: "Pi process exited unexpectedly.",
|
||||
code: "pi_process_exit",
|
||||
},
|
||||
];
|
||||
|
||||
default:
|
||||
return [];
|
||||
}
|
||||
}
|
||||
377
extensions/plannotator/generated/ai/providers/pi-sdk-node.ts
Normal file
377
extensions/plannotator/generated/ai/providers/pi-sdk-node.ts
Normal file
@@ -0,0 +1,377 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/providers/pi-sdk-node.ts
|
||||
/**
|
||||
* Pi SDK provider — Node.js variant.
|
||||
*
|
||||
* Identical to pi-sdk.ts except PiProcess uses child_process.spawn()
|
||||
* instead of Bun.spawn(). Everything else (PiSDKProvider, PiSDKSession,
|
||||
* mapPiEvent) is re-exported from the Bun version unchanged.
|
||||
*
|
||||
* Used by the Pi extension which runs under jiti (Node.js).
|
||||
*/
|
||||
|
||||
import { spawn, type ChildProcess } from "node:child_process";
|
||||
import { BaseSession } from "../base-session.ts";
|
||||
import { buildEffectivePrompt, buildSystemPrompt } from "../context.ts";
|
||||
import type {
|
||||
AIMessage,
|
||||
AIProvider,
|
||||
AIProviderCapabilities,
|
||||
CreateSessionOptions,
|
||||
PiSDKConfig,
|
||||
} from "../types.ts";
|
||||
import { registerProviderFactory } from "../provider.ts";
|
||||
|
||||
// Re-export mapPiEvent from shared (runtime-agnostic)
export { mapPiEvent } from "./pi-events.ts";

// NOTE(review): same registry key as the Bun pi-sdk variant — presumably only
// one of the two modules is loaded per runtime; confirm before changing.
const PROVIDER_NAME = "pi-sdk";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// JSONL subprocess wrapper (Node.js)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
type EventListener = (event: Record<string, unknown>) => void;
|
||||
|
||||
class PiProcessNode {
|
||||
private proc: ChildProcess | null = null;
|
||||
private listeners: EventListener[] = [];
|
||||
private pendingRequests = new Map<
|
||||
string,
|
||||
{
|
||||
resolve: (data: Record<string, unknown>) => void;
|
||||
reject: (err: Error) => void;
|
||||
}
|
||||
>();
|
||||
private nextId = 0;
|
||||
private buffer = "";
|
||||
private _alive = false;
|
||||
|
||||
async spawn(piPath: string, cwd: string): Promise<void> {
|
||||
this.proc = spawn(piPath, ["--mode", "rpc"], {
|
||||
cwd,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
this._alive = true;
|
||||
|
||||
this.readStream();
|
||||
|
||||
this.proc.on("exit", () => {
|
||||
this._alive = false;
|
||||
for (const [, pending] of this.pendingRequests) {
|
||||
pending.reject(new Error("Pi process exited unexpectedly"));
|
||||
}
|
||||
this.pendingRequests.clear();
|
||||
for (const listener of this.listeners) {
|
||||
listener({ type: "process_exited" });
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private readStream(): void {
|
||||
if (!this.proc?.stdout) return;
|
||||
|
||||
this.proc.stdout.on("data", (chunk: Buffer) => {
|
||||
this.buffer += chunk.toString();
|
||||
const lines = this.buffer.split("\n");
|
||||
this.buffer = lines.pop() ?? "";
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmed = line.replace(/\r$/, "");
|
||||
if (!trimmed) continue;
|
||||
try {
|
||||
const parsed = JSON.parse(trimmed);
|
||||
this.routeMessage(parsed);
|
||||
} catch {
|
||||
// Ignore malformed lines
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private routeMessage(msg: Record<string, unknown>): void {
|
||||
if (msg.type === "response" && typeof msg.id === "string") {
|
||||
const pending = this.pendingRequests.get(msg.id);
|
||||
if (pending) {
|
||||
this.pendingRequests.delete(msg.id);
|
||||
if (msg.success === false) {
|
||||
pending.reject(new Error((msg.error as string) ?? "RPC error"));
|
||||
} else {
|
||||
pending.resolve((msg.data as Record<string, unknown>) ?? {});
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
for (const listener of this.listeners) {
|
||||
listener(msg);
|
||||
}
|
||||
}
|
||||
|
||||
send(command: Record<string, unknown>): void {
|
||||
if (!this.proc?.stdin || this.proc.stdin.destroyed) return;
|
||||
this.proc.stdin.write(`${JSON.stringify(command)}\n`);
|
||||
}
|
||||
|
||||
sendAndWait(
|
||||
command: Record<string, unknown>,
|
||||
): Promise<Record<string, unknown>> {
|
||||
const id = `req_${++this.nextId}`;
|
||||
return new Promise((resolve, reject) => {
|
||||
this.pendingRequests.set(id, { resolve, reject });
|
||||
this.send({ ...command, id });
|
||||
});
|
||||
}
|
||||
|
||||
onEvent(listener: EventListener): () => void {
|
||||
this.listeners.push(listener);
|
||||
return () => {
|
||||
const idx = this.listeners.indexOf(listener);
|
||||
if (idx >= 0) this.listeners.splice(idx, 1);
|
||||
};
|
||||
}
|
||||
|
||||
get alive(): boolean {
|
||||
return this._alive;
|
||||
}
|
||||
|
||||
kill(): void {
|
||||
this._alive = false;
|
||||
if (this.proc) {
|
||||
this.proc.kill();
|
||||
this.proc = null;
|
||||
}
|
||||
this.listeners.length = 0;
|
||||
for (const [, pending] of this.pendingRequests) {
|
||||
pending.reject(new Error("Process killed"));
|
||||
}
|
||||
this.pendingRequests.clear();
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Provider (identical to pi-sdk.ts, using PiProcessNode)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export class PiSDKNodeProvider implements AIProvider {
|
||||
readonly name = PROVIDER_NAME;
|
||||
readonly capabilities: AIProviderCapabilities = {
|
||||
fork: false,
|
||||
resume: false,
|
||||
streaming: true,
|
||||
tools: true,
|
||||
};
|
||||
models?: Array<{ id: string; label: string; default?: boolean }>;
|
||||
|
||||
private config: PiSDKConfig;
|
||||
private sessions = new Map<string, PiSDKNodeSession>();
|
||||
|
||||
constructor(config: PiSDKConfig) {
|
||||
this.config = config;
|
||||
}
|
||||
|
||||
async createSession(options: CreateSessionOptions): Promise<PiSDKNodeSession> {
|
||||
const session = new PiSDKNodeSession({
|
||||
systemPrompt: buildSystemPrompt(options.context),
|
||||
cwd: options.cwd ?? this.config.cwd ?? process.cwd(),
|
||||
parentSessionId: null,
|
||||
piExecutablePath: this.config.piExecutablePath ?? "pi",
|
||||
model: options.model ?? this.config.model,
|
||||
});
|
||||
this.sessions.set(session.id, session);
|
||||
return session;
|
||||
}
|
||||
|
||||
async forkSession(): Promise<never> {
|
||||
throw new Error(
|
||||
"Pi does not support session forking. " +
|
||||
"The endpoint layer should fall back to createSession().",
|
||||
);
|
||||
}
|
||||
|
||||
async resumeSession(): Promise<never> {
|
||||
throw new Error("Pi does not support session resuming.");
|
||||
}
|
||||
|
||||
dispose(): void {
|
||||
for (const session of this.sessions.values()) {
|
||||
session.killProcess();
|
||||
}
|
||||
this.sessions.clear();
|
||||
}
|
||||
|
||||
async fetchModels(): Promise<void> {
|
||||
const piPath = this.config.piExecutablePath ?? "pi";
|
||||
let proc: PiProcessNode | undefined;
|
||||
try {
|
||||
proc = new PiProcessNode();
|
||||
await proc.spawn(piPath, this.config.cwd ?? process.cwd());
|
||||
const data = await Promise.race([
|
||||
proc.sendAndWait({ type: "get_available_models" }),
|
||||
new Promise<never>((_, reject) =>
|
||||
setTimeout(() => reject(new Error("Timeout")), 10_000),
|
||||
),
|
||||
]);
|
||||
const rawModels = (
|
||||
data as { models?: Array<{ provider: string; id: string; name?: string }> }
|
||||
).models;
|
||||
if (rawModels && rawModels.length > 0) {
|
||||
this.models = rawModels.map((m, i) => ({
|
||||
id: `${m.provider}/${m.id}`,
|
||||
label: m.name ?? m.id,
|
||||
...(i === 0 && { default: true }),
|
||||
}));
|
||||
}
|
||||
} catch {
|
||||
// Pi not configured or no models available
|
||||
} finally {
|
||||
proc?.kill();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Session (identical to pi-sdk.ts, using PiProcessNode)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Constructor arguments for PiSDKNodeSession. */
interface SessionConfig {
  /** System prompt folded into the first effective prompt (see buildEffectivePrompt). */
  systemPrompt: string;
  /** Working directory the Pi subprocess is spawned in. */
  cwd: string;
  // Always null for Pi sessions — forkSession() throws, so no parent exists.
  parentSessionId: string | null;
  /** Path or command name of the `pi` executable. */
  piExecutablePath: string;
  /** Model in "provider/modelId" format, applied via a set_model RPC at startup. */
  model?: string;
}
|
||||
|
||||
class PiSDKNodeSession extends BaseSession {
|
||||
private config: SessionConfig;
|
||||
private process: PiProcessNode | null = null;
|
||||
|
||||
constructor(config: SessionConfig) {
|
||||
super({ parentSessionId: config.parentSessionId });
|
||||
this.config = config;
|
||||
}
|
||||
|
||||
  /**
   * Send a prompt and stream back messages.
   *
   * Lifecycle: reject if another query is active (busy guard), lazily spawn the
   * Pi subprocess, optionally set the model and resolve the real session ID,
   * then bridge subprocess events into this async generator via a queue.
   * All early returns run inside the outer try, so the `finally` always
   * releases the busy guard via endQuery().
   */
  async *query(prompt: string): AsyncIterable<AIMessage> {
    // Imported lazily so the event mapper is only loaded on first use.
    const { mapPiEvent } = await import("./pi-events.ts");

    // Busy guard — only one in-flight query per session.
    const started = this.startQuery();
    if (!started) {
      yield BaseSession.BUSY_ERROR;
      return;
    }
    const { gen } = started;

    try {
      // Lazy-spawn subprocess (also respawns after killProcess() or a crash)
      if (!this.process || !this.process.alive) {
        this.process = new PiProcessNode();
        await this.process.spawn(this.config.piExecutablePath, this.config.cwd);

        // Set model if specified (format: "provider/modelId")
        if (this.config.model) {
          const [provider, ...rest] = this.config.model.split("/");
          const modelId = rest.join("/");
          if (provider && modelId) {
            try {
              await this.process.sendAndWait({ type: "set_model", provider, modelId });
            } catch { /* Continue with Pi's default model */ }
          }
        }

        // Ask Pi for its real session ID and remap our placeholder to it.
        try {
          const state = await this.process.sendAndWait({ type: "get_state" });
          if (typeof state.sessionId === "string") {
            this.resolveId(state.sessionId);
          }
        } catch { /* Continue with placeholder ID */ }

        // If the subprocess died during startup, surface the error immediately.
        if (!this.process.alive) {
          yield {
            type: "error",
            error: "Pi process exited during startup. Check that Pi is configured correctly (API keys, models).",
            code: "pi_startup_error",
          };
          return;
        }
      }

      // Prepend the system prompt on the first query only.
      const effectivePrompt = buildEffectivePrompt(
        prompt,
        this.config.systemPrompt,
        this._firstQuerySent,
      );

      // Async queue bridging callback-style subprocess events → async iterable.
      // `resolve` is the wake-up signal for the drain loop below.
      const queue: AIMessage[] = [];
      let resolve: (() => void) | null = null;
      let done = false;

      const push = (msg: AIMessage) => { queue.push(msg); resolve?.(); };
      const finish = () => { done = true; resolve?.(); };

      // Subscribe BEFORE sending the prompt so no event can be missed.
      const unsubscribe = this.process.onEvent((event) => {
        const mapped = mapPiEvent(event, this.id);
        for (const msg of mapped) {
          push(msg);
          // Terminal conditions: a result, or an error caused by the agent
          // ending / the subprocess exiting.
          if (
            msg.type === "result" ||
            (msg.type === "error" && (event.type === "agent_end" || event.type === "process_exited"))
          ) {
            finish();
          }
        }
      });

      // Send prompt — sendAndWait catches RPC-level rejections
      // (e.g. expired credentials, invalid session).
      try {
        await this.process.sendAndWait({ type: "prompt", message: effectivePrompt });
      } catch (err) {
        unsubscribe();
        yield {
          type: "error",
          error: `Pi rejected prompt: ${err instanceof Error ? err.message : String(err)}`,
          code: "pi_prompt_rejected",
        };
        return;
      }
      this._firstQuerySent = true;

      // Drain the queue until the terminal event has arrived and the queue is empty.
      try {
        while (!done || queue.length > 0) {
          if (queue.length > 0) {
            yield queue.shift()!;
          } else {
            await new Promise<void>((r) => { resolve = r; });
            resolve = null;
          }
        }
      } finally {
        unsubscribe();
      }
    } catch (err) {
      yield {
        type: "error",
        error: err instanceof Error ? err.message : String(err),
        code: "provider_error",
      };
    } finally {
      // Always release the busy guard, including on the early-return paths.
      this.endQuery(gen);
    }
  }
|
||||
|
||||
  /**
   * Abort the in-flight query: ask the subprocess to stop the current turn,
   * then clear this session's active-query state via the base class.
   * Safe to call when no query is running.
   */
  abort(): void {
    if (this.process?.alive) {
      this.process.send({ type: "abort" });
    }
    super.abort();
  }
|
||||
|
||||
  /**
   * Kill the subprocess and drop the handle. Called by the provider on
   * dispose; the next query() lazily respawns a fresh process.
   */
  killProcess(): void {
    this.process?.kill();
    this.process = null;
  }
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Factory registration
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Register the Node-transport Pi provider under the shared registry name so the
// host can construct it on demand from a PiSDKConfig.
registerProviderFactory(
  PROVIDER_NAME,
  async (config) => new PiSDKNodeProvider(config as PiSDKConfig),
);
|
||||
442
extensions/plannotator/generated/ai/providers/pi-sdk.ts
Normal file
442
extensions/plannotator/generated/ai/providers/pi-sdk.ts
Normal file
@@ -0,0 +1,442 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/providers/pi-sdk.ts
|
||||
/**
|
||||
* Pi SDK provider — bridges Plannotator's AI layer with Pi's coding agent.
|
||||
*
|
||||
* Spawns `pi --mode rpc` as a subprocess and communicates via JSONL over
|
||||
* stdio. No Pi SDK is imported — this is a thin protocol adapter.
|
||||
*
|
||||
* One subprocess per session. The user must have the `pi` CLI installed.
|
||||
*/
|
||||
|
||||
import { BaseSession } from "../base-session.ts";
|
||||
import { buildEffectivePrompt, buildSystemPrompt } from "../context.ts";
|
||||
import type {
|
||||
AIMessage,
|
||||
AIProvider,
|
||||
AIProviderCapabilities,
|
||||
CreateSessionOptions,
|
||||
PiSDKConfig,
|
||||
} from "../types.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Constants
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Registry key for this provider — used both for registration and lookup.
const PROVIDER_NAME = "pi-sdk";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// JSONL subprocess wrapper
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Callback invoked with each non-response JSONL message from the subprocess.
type EventListener = (event: Record<string, unknown>) => void;
|
||||
|
||||
class PiProcess {
|
||||
private proc: ReturnType<typeof Bun.spawn> | null = null;
|
||||
private listeners: EventListener[] = [];
|
||||
private pendingRequests = new Map<
|
||||
string,
|
||||
{
|
||||
resolve: (data: Record<string, unknown>) => void;
|
||||
reject: (err: Error) => void;
|
||||
}
|
||||
>();
|
||||
private nextId = 0;
|
||||
private buffer = "";
|
||||
private _alive = false;
|
||||
|
||||
async spawn(piPath: string, cwd: string): Promise<void> {
|
||||
this.proc = Bun.spawn([piPath, "--mode", "rpc"], {
|
||||
cwd,
|
||||
stdin: "pipe",
|
||||
stdout: "pipe",
|
||||
stderr: "pipe",
|
||||
});
|
||||
this._alive = true;
|
||||
|
||||
this.readStream();
|
||||
|
||||
this.proc.exited.then(() => {
|
||||
this._alive = false;
|
||||
for (const [, pending] of this.pendingRequests) {
|
||||
pending.reject(new Error("Pi process exited unexpectedly"));
|
||||
}
|
||||
this.pendingRequests.clear();
|
||||
// Signal active query listeners so the drain loop exits with an error
|
||||
for (const listener of this.listeners) {
|
||||
listener({ type: "process_exited" });
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private async readStream(): Promise<void> {
|
||||
if (!this.proc?.stdout || typeof this.proc.stdout === "number") return;
|
||||
const reader = (this.proc.stdout as ReadableStream<Uint8Array>).getReader();
|
||||
const decoder = new TextDecoder();
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
this.buffer += decoder.decode(value, { stream: true });
|
||||
const lines = this.buffer.split("\n");
|
||||
this.buffer = lines.pop() ?? "";
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmed = line.replace(/\r$/, "");
|
||||
if (!trimmed) continue;
|
||||
try {
|
||||
const parsed = JSON.parse(trimmed);
|
||||
this.routeMessage(parsed);
|
||||
} catch {
|
||||
// Ignore malformed lines
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Stream closed
|
||||
}
|
||||
}
|
||||
|
||||
private routeMessage(msg: Record<string, unknown>): void {
|
||||
// Response to a command we sent
|
||||
if (msg.type === "response" && typeof msg.id === "string") {
|
||||
const pending = this.pendingRequests.get(msg.id);
|
||||
if (pending) {
|
||||
this.pendingRequests.delete(msg.id);
|
||||
if (msg.success === false) {
|
||||
pending.reject(new Error((msg.error as string) ?? "RPC error"));
|
||||
} else {
|
||||
pending.resolve((msg.data as Record<string, unknown>) ?? {});
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Agent event — forward to listeners
|
||||
for (const listener of this.listeners) {
|
||||
listener(msg);
|
||||
}
|
||||
}
|
||||
|
||||
/** Send a command without waiting for a response. */
|
||||
send(command: Record<string, unknown>): void {
|
||||
if (!this.proc?.stdin || typeof this.proc.stdin === "number") return;
|
||||
// Bun.spawn stdin is a FileSink with .write(), not a WritableStream
|
||||
const sink = this.proc.stdin as { write(data: string): void; flush(): void };
|
||||
sink.write(`${JSON.stringify(command)}\n`);
|
||||
sink.flush();
|
||||
}
|
||||
|
||||
/** Send a command and wait for the correlated response. */
|
||||
sendAndWait(
|
||||
command: Record<string, unknown>,
|
||||
): Promise<Record<string, unknown>> {
|
||||
const id = `req_${++this.nextId}`;
|
||||
return new Promise((resolve, reject) => {
|
||||
this.pendingRequests.set(id, { resolve, reject });
|
||||
this.send({ ...command, id });
|
||||
});
|
||||
}
|
||||
|
||||
/** Register a listener for agent events (non-response messages). */
|
||||
onEvent(listener: EventListener): () => void {
|
||||
this.listeners.push(listener);
|
||||
return () => {
|
||||
const idx = this.listeners.indexOf(listener);
|
||||
if (idx >= 0) this.listeners.splice(idx, 1);
|
||||
};
|
||||
}
|
||||
|
||||
get alive(): boolean {
|
||||
return this._alive;
|
||||
}
|
||||
|
||||
kill(): void {
|
||||
this._alive = false;
|
||||
if (this.proc) {
|
||||
this.proc.kill();
|
||||
this.proc = null;
|
||||
}
|
||||
this.listeners.length = 0;
|
||||
for (const [, pending] of this.pendingRequests) {
|
||||
pending.reject(new Error("Process killed"));
|
||||
}
|
||||
this.pendingRequests.clear();
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Provider
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export class PiSDKProvider implements AIProvider {
|
||||
readonly name = PROVIDER_NAME;
|
||||
readonly capabilities: AIProviderCapabilities = {
|
||||
fork: false,
|
||||
resume: false,
|
||||
streaming: true,
|
||||
tools: true,
|
||||
};
|
||||
models?: Array<{ id: string; label: string; default?: boolean }>;
|
||||
|
||||
private config: PiSDKConfig;
|
||||
private sessions = new Map<string, PiSDKSession>();
|
||||
|
||||
constructor(config: PiSDKConfig) {
|
||||
this.config = config;
|
||||
}
|
||||
|
||||
async createSession(options: CreateSessionOptions): Promise<PiSDKSession> {
|
||||
const session = new PiSDKSession({
|
||||
systemPrompt: buildSystemPrompt(options.context),
|
||||
cwd: options.cwd ?? this.config.cwd ?? process.cwd(),
|
||||
parentSessionId: null,
|
||||
piExecutablePath: this.config.piExecutablePath ?? "pi",
|
||||
model: options.model ?? this.config.model,
|
||||
});
|
||||
this.sessions.set(session.id, session);
|
||||
return session;
|
||||
}
|
||||
|
||||
async forkSession(): Promise<never> {
|
||||
throw new Error(
|
||||
"Pi does not support session forking. " +
|
||||
"The endpoint layer should fall back to createSession().",
|
||||
);
|
||||
}
|
||||
|
||||
async resumeSession(): Promise<never> {
|
||||
throw new Error("Pi does not support session resuming.");
|
||||
}
|
||||
|
||||
dispose(): void {
|
||||
for (const session of this.sessions.values()) {
|
||||
session.killProcess();
|
||||
}
|
||||
this.sessions.clear();
|
||||
}
|
||||
|
||||
/** Fetch available models from Pi. Call before registering the provider. */
|
||||
async fetchModels(): Promise<void> {
|
||||
const piPath = this.config.piExecutablePath ?? "pi";
|
||||
|
||||
let proc: PiProcess | undefined;
|
||||
|
||||
try {
|
||||
proc = new PiProcess();
|
||||
await proc.spawn(piPath, this.config.cwd ?? process.cwd());
|
||||
|
||||
const data = await Promise.race([
|
||||
proc.sendAndWait({ type: "get_available_models" }),
|
||||
new Promise<never>((_, reject) =>
|
||||
setTimeout(() => reject(new Error("Timeout")), 10_000),
|
||||
),
|
||||
]);
|
||||
|
||||
const rawModels = (
|
||||
data as {
|
||||
models?: Array<{ provider: string; id: string; name?: string }>;
|
||||
}
|
||||
).models;
|
||||
if (rawModels && rawModels.length > 0) {
|
||||
this.models = rawModels.map((m, i) => ({
|
||||
id: `${m.provider}/${m.id}`,
|
||||
label: m.name ?? m.id,
|
||||
...(i === 0 && { default: true }),
|
||||
}));
|
||||
}
|
||||
} catch {
|
||||
// Pi not configured or no models available
|
||||
} finally {
|
||||
proc?.kill();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Session
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Configuration captured at session creation; immutable for the session's lifetime. */
interface SessionConfig {
  /** System prompt merged into the first user prompt via buildEffectivePrompt(). */
  systemPrompt: string;
  /** Working directory for the spawned `pi` subprocess. */
  cwd: string;
  /** Parent session this was forked from, or null for a fresh session. */
  parentSessionId: string | null;
  /** Path to the `pi` executable (the provider defaults this to "pi" on PATH). */
  piExecutablePath: string;
  /** Model in "provider/modelId" format, e.g. "anthropic/claude-haiku-4-5". */
  model?: string;
}
|
||||
|
||||
/**
 * One live Pi conversation backed by a dedicated `pi --mode rpc` subprocess.
 *
 * The subprocess is spawned lazily on first query and reused across queries;
 * killProcess() tears it down and a later query() respawns it.
 */
class PiSDKSession extends BaseSession {
  private config: SessionConfig;
  /** Lazily-spawned Pi RPC subprocess; null until the first query (or after killProcess). */
  private process: PiProcess | null = null;

  constructor(config: SessionConfig) {
    super({ parentSessionId: config.parentSessionId });
    this.config = config;
  }

  /**
   * Send a prompt and stream back messages.
   *
   * All early returns are inside the outer try, so the busy guard taken by
   * startQuery() is always released in the final `finally` via endQuery().
   */
  async *query(prompt: string): AsyncIterable<AIMessage> {
    // Busy guard — only one in-flight query per session.
    const started = this.startQuery();
    if (!started) {
      yield BaseSession.BUSY_ERROR;
      return;
    }
    const { gen } = started;

    try {
      // Lazy-spawn subprocess
      if (!this.process || !this.process.alive) {
        this.process = new PiProcess();
        await this.process.spawn(this.config.piExecutablePath, this.config.cwd);

        // Set model if specified (format: "provider/modelId")
        if (this.config.model) {
          const [provider, ...rest] = this.config.model.split("/");
          const modelId = rest.join("/");
          if (provider && modelId) {
            try {
              await this.process.sendAndWait({
                type: "set_model",
                provider,
                modelId,
              });
            } catch {
              // Continue with Pi's default model
            }
          }
        }

        // Get session ID and remap our placeholder ID to Pi's real one
        try {
          const state = await this.process.sendAndWait({ type: "get_state" });
          if (typeof state.sessionId === "string") {
            this.resolveId(state.sessionId);
          }
        } catch {
          // Continue with placeholder ID
        }

        // If subprocess died during startup, surface the error immediately
        if (!this.process.alive) {
          yield {
            type: "error",
            error:
              "Pi process exited during startup. Check that Pi is configured correctly (API keys, models).",
            code: "pi_startup_error",
          };
          return;
        }
      }

      // Build effective prompt (prepend system prompt on first query)
      const effectivePrompt = buildEffectivePrompt(
        prompt,
        this.config.systemPrompt,
        this._firstQuerySent,
      );

      // Set up async queue to bridge callback events → async iterable.
      // `resolve` is the wake-up signal for the drain loop below.
      const queue: AIMessage[] = [];
      let resolve: (() => void) | null = null;
      let done = false;

      const push = (msg: AIMessage) => {
        queue.push(msg);
        resolve?.();
      };

      const finish = () => {
        done = true;
        resolve?.();
      };

      // Subscribe BEFORE sending the prompt so no event can be missed.
      const unsubscribe = this.process.onEvent((event) => {
        const mapped = mapPiEvent(event, this.id);
        for (const msg of mapped) {
          push(msg);
          // Terminal conditions: a result, or an error caused by the agent
          // ending / the subprocess exiting.
          if (
            msg.type === "result" ||
            (msg.type === "error" &&
              (event.type === "agent_end" || event.type === "process_exited"))
          ) {
            finish();
          }
        }
      });

      // Send prompt — use sendAndWait to catch RPC-level rejections
      // (e.g. expired credentials, invalid session)
      try {
        await this.process.sendAndWait({
          type: "prompt",
          message: effectivePrompt,
        });
      } catch (err) {
        unsubscribe();
        yield {
          type: "error",
          error: `Pi rejected prompt: ${err instanceof Error ? err.message : String(err)}`,
          code: "pi_prompt_rejected",
        };
        return;
      }
      this._firstQuerySent = true;

      // Drain queue until the terminal event arrived and the queue is empty
      try {
        while (!done || queue.length > 0) {
          if (queue.length > 0) {
            yield queue.shift()!;
          } else {
            await new Promise<void>((r) => {
              resolve = r;
            });
            resolve = null;
          }
        }
      } finally {
        unsubscribe();
      }
    } catch (err) {
      yield {
        type: "error",
        error: err instanceof Error ? err.message : String(err),
        code: "provider_error",
      };
    } finally {
      // Always release the busy guard, including on the early-return paths.
      this.endQuery(gen);
    }
  }

  /**
   * Abort the in-flight query: ask the subprocess to stop the current turn,
   * then clear this session's active-query state via the base class.
   */
  abort(): void {
    if (this.process?.alive) {
      this.process.send({ type: "abort" });
    }
    super.abort();
  }

  /** Kill the subprocess. Called by the provider on dispose. */
  killProcess(): void {
    this.process?.kill();
    this.process = null;
  }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Event mapping — shared with pi-sdk-node.ts
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
import { mapPiEvent } from "./pi-events.ts";
|
||||
export { mapPiEvent } from "./pi-events.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Factory registration
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
import { registerProviderFactory } from "../provider.ts";
|
||||
|
||||
// Register the Bun-transport Pi provider under the shared registry name so the
// host can construct it on demand from a PiSDKConfig.
registerProviderFactory(
  PROVIDER_NAME,
  async (config) => new PiSDKProvider(config as PiSDKConfig),
);
|
||||
196
extensions/plannotator/generated/ai/session-manager.ts
Normal file
196
extensions/plannotator/generated/ai/session-manager.ts
Normal file
@@ -0,0 +1,196 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/session-manager.ts
|
||||
/**
|
||||
* Session manager — tracks active and historical AI sessions.
|
||||
*
|
||||
* Each Plannotator server instance (plan review, code review, annotate)
|
||||
* gets its own SessionManager. It tracks:
|
||||
*
|
||||
* - Active sessions (currently streaming or idle but resumable)
|
||||
* - The lineage from forked sessions back to their parent
|
||||
* - Metadata for UI display (timestamps, mode, status)
|
||||
*
|
||||
* This is an in-memory store scoped to the server's lifetime. Sessions
|
||||
* are not persisted to disk by the manager (the underlying provider
|
||||
* handles its own persistence via the agent SDK).
|
||||
*/
|
||||
|
||||
import type { AISession, AIContextMode } from "./types.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Bookkeeping record for one tracked AI session. */
export interface SessionEntry {
  /** The live session handle (if still active). */
  session: AISession;
  /** What mode this session was created for. */
  mode: AIContextMode;
  /** The parent session ID this was forked from (null if standalone). */
  parentSessionId: string | null;
  /** When this session was created (epoch ms). */
  createdAt: number;
  /** When the last query was sent (epoch ms). Drives list() ordering and eviction. */
  lastActiveAt: number;
  /** Short description for UI display (e.g., the user's first question). */
  label?: string;
}
|
||||
|
||||
/** Construction options for SessionManager. */
export interface SessionManagerOptions {
  /**
   * Maximum number of sessions to keep in the manager.
   * Oldest idle sessions are evicted when the limit is reached;
   * active sessions are never evicted.
   * Default: 20.
   */
  maxSessions?: number;
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Implementation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export class SessionManager {
|
||||
private sessions = new Map<string, SessionEntry>();
|
||||
private aliases = new Map<string, string>();
|
||||
private maxSessions: number;
|
||||
|
||||
constructor(options: SessionManagerOptions = {}) {
|
||||
this.maxSessions = options.maxSessions ?? 20;
|
||||
}
|
||||
|
||||
/**
|
||||
* Track a newly created session.
|
||||
*
|
||||
* If the session supports ID resolution (e.g., the real SDK session ID
|
||||
* arrives after the first query), call `remapId()` to update the key.
|
||||
*/
|
||||
track(session: AISession, mode: AIContextMode, label?: string): SessionEntry {
|
||||
this.evictIfNeeded();
|
||||
|
||||
const entry: SessionEntry = {
|
||||
session,
|
||||
mode,
|
||||
parentSessionId: session.parentSessionId,
|
||||
createdAt: Date.now(),
|
||||
lastActiveAt: Date.now(),
|
||||
label,
|
||||
};
|
||||
this.sessions.set(session.id, entry);
|
||||
|
||||
// Wire up ID remapping so providers can resolve the real session ID later
|
||||
session.onIdResolved = (oldId, newId) => this.remapId(oldId, newId);
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remap a session from one ID to another.
|
||||
* Used when the real session ID is resolved after initial tracking.
|
||||
*/
|
||||
remapId(oldId: string, newId: string): void {
|
||||
const entry = this.sessions.get(oldId);
|
||||
if (entry) {
|
||||
this.sessions.delete(oldId);
|
||||
this.sessions.set(newId, entry);
|
||||
// Keep the old ID as an alias so clients using the original ID still work
|
||||
this.aliases.set(oldId, newId);
|
||||
}
|
||||
}
|
||||
|
||||
/** Resolve an alias to the canonical ID, or return the ID as-is. */
|
||||
private resolve(sessionId: string): string {
|
||||
return this.aliases.get(sessionId) ?? sessionId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a tracked session by ID (or alias).
|
||||
*/
|
||||
get(sessionId: string): SessionEntry | undefined {
|
||||
return this.sessions.get(this.resolve(sessionId));
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark a session as recently active (updates lastActiveAt).
|
||||
*/
|
||||
touch(sessionId: string): void {
|
||||
const entry = this.sessions.get(this.resolve(sessionId));
|
||||
if (entry) {
|
||||
entry.lastActiveAt = Date.now();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a session from tracking.
|
||||
* Does NOT abort the session — call session.abort() first if needed.
|
||||
*/
|
||||
remove(sessionId: string): void {
|
||||
const canonical = this.resolve(sessionId);
|
||||
this.sessions.delete(canonical);
|
||||
// Clean up any aliases pointing to this session
|
||||
for (const [alias, target] of this.aliases) {
|
||||
if (target === canonical) this.aliases.delete(alias);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List all tracked sessions, newest first.
|
||||
*/
|
||||
list(): SessionEntry[] {
|
||||
return [...this.sessions.values()].sort(
|
||||
(a, b) => b.lastActiveAt - a.lastActiveAt
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* List sessions forked from a specific parent.
|
||||
*/
|
||||
forksOf(parentSessionId: string): SessionEntry[] {
|
||||
return this.list().filter(
|
||||
(e) => e.parentSessionId === parentSessionId
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of tracked sessions.
|
||||
*/
|
||||
get size(): number {
|
||||
return this.sessions.size;
|
||||
}
|
||||
|
||||
/**
|
||||
* Abort all active sessions and clear tracking.
|
||||
*/
|
||||
disposeAll(): void {
|
||||
for (const entry of this.sessions.values()) {
|
||||
if (entry.session.isActive) {
|
||||
entry.session.abort();
|
||||
}
|
||||
}
|
||||
this.sessions.clear();
|
||||
this.aliases.clear();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Internal
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
private evictIfNeeded(): void {
|
||||
if (this.sessions.size < this.maxSessions) return;
|
||||
|
||||
// Find the oldest idle session to evict
|
||||
let oldest: { id: string; at: number } | null = null;
|
||||
for (const [id, entry] of this.sessions) {
|
||||
if (entry.session.isActive) continue; // don't evict active sessions
|
||||
if (!oldest || entry.lastActiveAt < oldest.at) {
|
||||
oldest = { id, at: entry.lastActiveAt };
|
||||
}
|
||||
}
|
||||
|
||||
if (oldest) {
|
||||
this.sessions.delete(oldest.id);
|
||||
// Clean up aliases pointing to the evicted session
|
||||
for (const [alias, target] of this.aliases) {
|
||||
if (target === oldest.id) this.aliases.delete(alias);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
370
extensions/plannotator/generated/ai/types.ts
Normal file
370
extensions/plannotator/generated/ai/types.ts
Normal file
@@ -0,0 +1,370 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/ai/types.ts
|
||||
/**
|
||||
* Core types for the Plannotator AI provider layer.
|
||||
*
|
||||
* This module defines the abstract interfaces that any agent runtime
|
||||
* (Claude Agent SDK, OpenCode, future providers) must implement to
|
||||
* power AI features inside Plannotator's plan review and code review UIs.
|
||||
*/
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Context — what the AI session knows about
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** The surface the user is interacting with when they invoke AI. */
export type AIContextMode = "plan-review" | "code-review" | "annotate";

/**
 * Describes the parent agent session that originally produced the plan or diff.
 * Used to fork conversations with full history.
 */
export interface ParentSession {
  /** Session ID from the host agent (e.g. Claude Code session UUID). */
  sessionId: string;
  /** Working directory the parent session was running in. */
  cwd: string;
}

/**
 * Snapshot of plan-review-specific context.
 * Passed when AIContextMode is "plan-review".
 */
export interface PlanContext {
  /** The full plan markdown as submitted by the agent. */
  plan: string;
  /** Previous plan version (if this is a resubmission). */
  previousPlan?: string;
  /** The version number in the plan's history. */
  version?: number;
  /** Annotations the user has made so far (serialised for the prompt). */
  annotations?: string;
}

/**
 * Snapshot of code-review-specific context.
 * Passed when AIContextMode is "code-review".
 */
export interface CodeReviewContext {
  /** The unified diff patch. */
  patch: string;
  /** The specific file being discussed (if scoped). */
  filePath?: string;
  /** The line range being discussed (if scoped); `side` selects the old or new side of the diff. */
  lineRange?: { start: number; end: number; side: "old" | "new" };
  /** The code snippet being discussed (if scoped). */
  selectedCode?: string;
  /** Summary of annotations the user has made. */
  annotations?: string;
}

/**
 * Snapshot of annotate-mode context.
 * Passed when AIContextMode is "annotate".
 */
export interface AnnotateContext {
  /** The markdown file content being annotated. */
  content: string;
  /** Path to the file on disk. */
  filePath: string;
  /** Summary of annotations the user has made. */
  annotations?: string;
}

/**
 * Union of mode-specific contexts, discriminated by `mode`.
 * Each arm optionally carries the parent agent session for fork-capable providers.
 */
export type AIContext =
  | { mode: "plan-review"; plan: PlanContext; parent?: ParentSession }
  | { mode: "code-review"; review: CodeReviewContext; parent?: ParentSession }
  | { mode: "annotate"; annotate: AnnotateContext; parent?: ParentSession };
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Messages — what streams back from the AI
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** A complete text block from the assistant. */
export interface AITextMessage {
  type: "text";
  text: string;
}

/** An incremental streaming chunk; concatenate deltas to rebuild the full text. */
export interface AITextDeltaMessage {
  type: "text_delta";
  delta: string;
}

/** The assistant invoked a tool. */
export interface AIToolUseMessage {
  type: "tool_use";
  toolName: string;
  toolInput: Record<string, unknown>;
  /** Correlates with the matching AIToolResultMessage. */
  toolUseId: string;
}

/** Output produced by a prior tool invocation. */
export interface AIToolResultMessage {
  type: "tool_result";
  toolUseId?: string;
  result: string;
}

/** A provider-level failure surfaced to the UI. */
export interface AIErrorMessage {
  type: "error";
  error: string;
  /** Machine-readable error code (e.g. "provider_error"), when available. */
  code?: string;
}

/** Terminal message for a query; emitted when the turn completes. */
export interface AIResultMessage {
  type: "result";
  sessionId: string;
  success: boolean;
  /** The final text result (if success). */
  result?: string;
  /** Total cost in USD (if available). */
  costUsd?: number;
  /** Number of agentic turns used. */
  turns?: number;
}

/** The provider is asking the user to approve or deny a tool use. */
export interface AIPermissionRequestMessage {
  type: "permission_request";
  requestId: string;
  toolName: string;
  toolInput: Record<string, unknown>;
  title?: string;
  displayName?: string;
  description?: string;
  toolUseId: string;
}

/** Catch-all for provider messages with no dedicated mapping. */
export interface AIUnknownMessage {
  type: "unknown";
  /** The raw message from the provider, for debugging/transparency. */
  raw: Record<string, unknown>;
}

/** Everything a session's query() stream can yield, discriminated by `type`. */
export type AIMessage =
  | AITextMessage
  | AITextDeltaMessage
  | AIToolUseMessage
  | AIToolResultMessage
  | AIErrorMessage
  | AIResultMessage
  | AIPermissionRequestMessage
  | AIUnknownMessage;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Session — a live conversation with the AI
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** A live conversation handle returned by an AIProvider. */
export interface AISession {
  /** Unique identifier for this session. */
  readonly id: string;

  /**
   * The parent session this was forked from, if any.
   * Null for fresh sessions.
   */
  readonly parentSessionId: string | null;

  /**
   * Send a prompt and stream back messages.
   * The returned async iterable yields messages as they arrive.
   */
  query(prompt: string): AsyncIterable<AIMessage>;

  /**
   * Abort the current in-flight query.
   * Safe to call if no query is running (no-op).
   */
  abort(): void;

  /** Whether a query is currently in progress. */
  readonly isActive: boolean;

  /**
   * Respond to a permission request from the provider.
   * Called when the user approves or denies a tool use in the UI.
   * Optional: providers that never emit permission requests may omit it.
   */
  respondToPermission?(requestId: string, allow: boolean, message?: string): void;

  /**
   * Callback invoked when the real session ID is resolved from the provider.
   * Set by the SessionManager to remap its internal tracking key.
   */
  onIdResolved?: (oldId: string, newId: string) => void;
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Provider — the pluggable backend
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Feature flags advertised by a provider; checked before calling optional APIs. */
export interface AIProviderCapabilities {
  /** Whether the provider supports forking from a parent session. */
  fork: boolean;
  /** Whether the provider supports resuming a prior session by ID. */
  resume: boolean;
  /** Whether the provider streams partial text deltas. */
  streaming: boolean;
  /** Whether the provider can execute tools (read files, search, etc.). */
  tools: boolean;
}

/** Options accepted by createSession()/forkSession(). */
export interface CreateSessionOptions {
  /** The context (plan, diff, file) to seed the session with. */
  context: AIContext;
  /**
   * Working directory override for the agent session.
   * Falls back to the provider's configured cwd if omitted.
   */
  cwd?: string;
  /**
   * Model override. Provider-specific string.
   * Falls back to provider default if omitted.
   */
  model?: string;
  /**
   * Maximum agentic turns for the session.
   * Keeps inline chat cost-bounded.
   */
  maxTurns?: number;
  /**
   * Maximum budget in USD for this session.
   */
  maxBudgetUsd?: number;
  /**
   * Reasoning effort level (Codex only).
   * Controls how much thinking the model does before responding.
   */
  reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
}
|
||||
|
||||
/**
 * An AI provider implements the bridge between Plannotator and a specific
 * agent runtime. The provider is responsible for:
 *
 * 1. Creating new AI sessions seeded with review context
 * 2. Forking from parent agent sessions to maintain conversation history
 * 3. Streaming responses back as AIMessage events
 *
 * Providers are registered by name and selected at runtime based on the
 * host environment (Claude Code → "claude-agent-sdk", OpenCode → "opencode-sdk").
 */
export interface AIProvider {
  /** Unique name for this provider (e.g. "claude-agent-sdk"). */
  readonly name: string;

  /** What this provider can do. See AIProviderCapabilities. */
  readonly capabilities: AIProviderCapabilities;

  /** Available models for this provider. Optional; omit when not enumerable. */
  readonly models?: ReadonlyArray<{ id: string; label: string; default?: boolean }>;

  /**
   * Create a fresh session (no parent history).
   * Context is injected via the system prompt.
   */
  createSession(options: CreateSessionOptions): Promise<AISession>;

  /**
   * Fork from a parent agent session.
   *
   * The new session inherits the parent's full conversation history
   * (files read, analysis performed, decisions made) and additionally
   * receives the Plannotator review context. This enables the user to
   * ask contextual questions like "why did you change this function?"
   * without the AI losing insight.
   *
   * Providers that don't support real forking MUST throw. The endpoint
   * layer checks `capabilities.fork` before calling this, so it should
   * only be reached by providers that genuinely support history inheritance.
   */
  forkSession(options: CreateSessionOptions): Promise<AISession>;

  /**
   * Resume a previously created Plannotator AI session by its ID.
   * Used when the user returns to a conversation they started earlier.
   *
   * If the provider doesn't support resuming (`capabilities.resume` is
   * false), this should throw.
   */
  resumeSession(sessionId: string): Promise<AISession>;

  /**
   * Clean up any resources held by the provider.
   * Called when the server shuts down.
   */
  dispose(): void;
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Provider configuration
// ---------------------------------------------------------------------------

/**
 * Configuration passed to a provider factory.
 * Each provider type may extend this with its own fields.
 */
export interface AIProviderConfig {
  /** Provider type identifier (matches AIProvider.name). */
  type: string;
  /** Working directory for the agent. */
  cwd?: string;
  /** Default model to use. */
  model?: string;
}
|
||||
|
||||
/** Configuration for the Claude Agent SDK provider. */
export interface ClaudeAgentSDKConfig extends AIProviderConfig {
  type: "claude-agent-sdk";
  /**
   * Tools the AI session is allowed to use.
   * Defaults to read-only tools for safety in inline chat.
   */
  allowedTools?: string[];
  /**
   * Permission mode for the session.
   * Defaults to "default" (inherits user's existing permission rules).
   */
  permissionMode?: "default" | "plan" | "bypassPermissions";
  /**
   * Explicit path to the claude CLI binary.
   * Required when running inside a compiled binary where PATH resolution
   * doesn't work the same way (e.g., bun build --compile).
   */
  claudeExecutablePath?: string;
  /**
   * Setting sources to load permission rules from.
   * Loads user's existing Claude Code permission rules so inline chat
   * inherits what they've already approved.
   */
  settingSources?: string[];
}
|
||||
|
||||
/** Configuration for the Codex SDK provider. */
export interface CodexSDKConfig extends AIProviderConfig {
  type: "codex-sdk";
  /**
   * Sandbox mode controls what the Codex agent can do.
   * Defaults to "read-only" for safety in inline chat.
   */
  sandboxMode?: "read-only" | "workspace-write" | "danger-full-access";
  /**
   * Explicit path to the codex CLI binary.
   * Required when running inside a compiled binary where PATH resolution
   * doesn't work the same way (e.g., bun build --compile).
   */
  codexExecutablePath?: string;
}
|
||||
|
||||
/** Configuration for the Pi SDK provider. */
export interface PiSDKConfig extends AIProviderConfig {
  type: "pi-sdk";
  /**
   * Explicit path to the pi CLI binary.
   * Required when running inside a compiled binary where PATH resolution
   * doesn't work the same way (e.g., bun build --compile).
   */
  piExecutablePath?: string;
}
|
||||
|
||||
/** Configuration for the OpenCode provider (connects over HTTP). */
export interface OpenCodeConfig extends AIProviderConfig {
  type: "opencode-sdk";
  /** Hostname for the OpenCode server. Default: "127.0.0.1". */
  hostname?: string;
  /** Port for the OpenCode server. Default: 4096. */
  port?: number;
}
|
||||
104
extensions/plannotator/generated/annotate-args.ts
Normal file
104
extensions/plannotator/generated/annotate-args.ts
Normal file
@@ -0,0 +1,104 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/annotate-args.ts
|
||||
/**
|
||||
* Parse CLI-style args arriving as a single whitespace-delimited string.
|
||||
*
|
||||
* Extracts the `--gate`, `--json`, and `--hook` flags (issue #570)
|
||||
* from the remainder, which is treated as the target path. Leading `@` is
|
||||
* stripped via the shared at-reference helper — reference-mode is primary.
|
||||
* Scoped-package-style literal `@` paths are handled by a fallback that the
|
||||
* downstream resolver opts into (see at-reference.ts).
|
||||
*
|
||||
* Used by the OpenCode plugin and Pi extension, where the whole args string
|
||||
* arrives pre-joined from the harness slash-command dispatcher. The Claude
|
||||
* Code binary parses argv directly with indexOf/splice and does not use
|
||||
* this helper.
|
||||
*
|
||||
* Implementation: walks the raw string once, preserving whitespace runs and
|
||||
* non-whitespace tokens as separate segments. Only known flag tokens
|
||||
* (whole-word match) plus one adjacent whitespace run are removed.
|
||||
* This keeps double-spaces and tabs inside file paths intact — which
|
||||
* matches the pre-PR behavior on `main`, where OpenCode and Pi passed
|
||||
* the raw args string straight through to the filesystem resolver.
|
||||
*
|
||||
* Remaining edge: if a path literally contains a known flag as a standalone
|
||||
* whitespace-separated token (e.g. `"Feature --gate spec.md"`), that token
|
||||
* is stripped. Supporting this would need shell-style quoting, which isn't
|
||||
* worth the complexity for a vanishingly rare naming pattern.
|
||||
*/
|
||||
|
||||
import { stripAtPrefix } from "./at-reference";
|
||||
import { stripWrappingQuotes } from "./resolve-file";
|
||||
|
||||
/** Result of parsing a raw `/annotate` args string. */
export interface ParsedAnnotateArgs {
  /**
   * Primary resolution path with any leading `@` stripped (reference-mode
   * convention). Most call sites should use this directly.
   */
  filePath: string;
  /**
   * Raw path with the `@` prefix preserved (if the user supplied one).
   * Callers that want the literal-`@` fallback for scoped-package-style
   * paths pair this with `resolveAtReference` from at-reference.ts.
   */
  rawFilePath: string;
  /** True when `--gate` was passed (also forced on when `--hook` is set). */
  gate: boolean;
  /** True when `--json` was passed. */
  json: boolean;
  /** True when `--hook` was passed. */
  hook: boolean;
}
|
||||
|
||||
/** One piece of the raw args string: a whitespace run or a non-whitespace token. */
type Segment = { type: "ws" | "tok"; text: string };

/**
 * Whole-word flag tokens mapped to their boolean field on ParsedAnnotateArgs.
 * `satisfies` keeps the literal keys while verifying each value is a real
 * flag field (everything except the two path fields).
 */
const FLAG_MAP = {
  "--gate": "gate",
  "--json": "json",
  "--hook": "hook",
} as const satisfies Record<string, keyof Omit<ParsedAnnotateArgs, "filePath" | "rawFilePath">>;
|
||||
|
||||
/**
 * Parse a raw whitespace-delimited args string into path + flags.
 *
 * Walks the string once, splitting it into alternating whitespace/token
 * segments so that whitespace runs inside the path (double spaces, tabs)
 * survive intact. Known flag tokens are removed along with exactly one
 * adjacent whitespace run; everything else is rejoined as the path.
 *
 * @param raw - The args string as received from the harness dispatcher.
 * @returns Parsed path (both `@`-stripped and raw forms) plus flag booleans.
 */
export function parseAnnotateArgs(raw: string): ParsedAnnotateArgs {
  const s = (raw ?? "").trim();
  const flags = { gate: false, json: false, hook: false };

  // Segment the string: alternating runs of whitespace and non-whitespace.
  const segments: Segment[] = [];
  for (let i = 0; i < s.length;) {
    const isWs = /\s/.test(s[i]);
    const start = i;
    while (i < s.length && /\s/.test(s[i]) === isWs) i++;
    segments.push({ type: isWs ? "ws" : "tok", text: s.slice(start, i) });
  }

  // Mark flag tokens (whole-word matches only) for removal.
  const keep = segments.map(() => true);
  for (let j = 0; j < segments.length; j++) {
    const seg = segments[j];
    if (seg.type !== "tok") continue;
    const key = FLAG_MAP[seg.text as keyof typeof FLAG_MAP];
    if (!key) continue;

    flags[key] = true;
    keep[j] = false;

    // Drop one adjacent whitespace run so removed flags don't leave dangling
    // spaces. Prefer trailing whitespace; fall back to leading if at the end.
    if (j + 1 < segments.length && segments[j + 1].type === "ws") {
      keep[j + 1] = false;
    } else if (j > 0 && segments[j - 1].type === "ws") {
      keep[j - 1] = false;
    }
  }

  // Trim covers the case where two adjacent flags (`... --gate --json`)
  // both claim the single whitespace between them, leaving a trailing space
  // after the kept token. Wrapping quotes come from OpenCode/Pi users who
  // quote paths with spaces (shell muscle memory); strip them here so
  // downstream callers never see tokenization artifacts.
  const rawFilePath = stripWrappingQuotes(
    segments
      .filter((_, j) => keep[j])
      .map((seg) => seg.text)
      .join("")
      .trim(),
  );

  // --hook implies --gate: hook mode always gates.
  if (flags.hook) flags.gate = true;

  return { filePath: stripAtPrefix(rawFilePath), rawFilePath, ...flags };
}
|
||||
53
extensions/plannotator/generated/at-reference.ts
Normal file
53
extensions/plannotator/generated/at-reference.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/at-reference.ts
|
||||
/**
|
||||
* `@`-reference handling for user-provided paths.
|
||||
*
|
||||
* Several agent harnesses (Claude Code, OpenCode, Pi) let users reference
|
||||
* files with an `@` prefix, e.g. `@README.md`. The `@` is the team's
|
||||
* reference marker, not part of the filename. Stripping it is the primary
|
||||
* resolution path — that's the common case and it's supported first-class.
|
||||
*
|
||||
* The secondary path handles scoped-package-style names like
|
||||
* `@scope/pkg/README.md`: if the stripped form doesn't resolve, fall back
|
||||
* to the literal form so those paths still open.
|
||||
*
|
||||
* Both functions are pure and take any filesystem-ish predicate via a
|
||||
* callback, so they're trivial to unit-test without stubbing anything.
|
||||
*/
|
||||
|
||||
import { stripWrappingQuotes } from "./resolve-file";
|
||||
|
||||
/**
|
||||
* Normalize a user-typed path reference by unwrapping matching `"..."` or
|
||||
* `'...'` quotes and removing a single leading `@`. Quotes come from
|
||||
* harnesses that tokenize on whitespace (OpenCode, Pi), where paths
|
||||
* containing spaces have to be quoted. The quote-stripping has to run
|
||||
* first so the `@` check sees the real first character.
|
||||
*
|
||||
* Non-`@` inputs are returned unchanged except for quote unwrapping.
|
||||
* Does not recurse: `@@foo` becomes `@foo`, not `foo`.
|
||||
*/
|
||||
export function stripAtPrefix(input: string): string {
|
||||
const unquoted = stripWrappingQuotes(input);
|
||||
return unquoted.startsWith("@") ? unquoted.slice(1) : unquoted;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve an `@`-prefixed user input by trying the stripped form first
|
||||
* (reference mode, primary) and falling back to the literal form if the
|
||||
* stripped form doesn't resolve. Returns the candidate that resolves, or
|
||||
* null if neither does.
|
||||
*
|
||||
* `exists` defines what "resolves" means — use `existsSync` for a bare
|
||||
* filesystem check, or wrap `resolveMarkdownFile` / `statSync` for richer
|
||||
* predicates. The helper itself is filesystem-agnostic.
|
||||
*/
|
||||
export function resolveAtReference(
|
||||
input: string,
|
||||
exists: (candidate: string) => boolean,
|
||||
): string | null {
|
||||
const stripped = stripAtPrefix(input);
|
||||
if (exists(stripped)) return stripped;
|
||||
if (stripped !== input && exists(input)) return input;
|
||||
return null;
|
||||
}
|
||||
53
extensions/plannotator/generated/checklist.ts
Normal file
53
extensions/plannotator/generated/checklist.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/checklist.ts
|
||||
/**
|
||||
* Checklist parsing and progress tracking utilities.
|
||||
*
|
||||
* Shared between Pi extension and OpenCode plugin for plan execution tracking.
|
||||
*/
|
||||
|
||||
/** A single markdown checkbox parsed out of a plan file. */
export interface ChecklistItem {
  /** 1-based step number, compatible with markCompletedSteps/extractDoneSteps. */
  step: number;
  /** The checkbox label, whitespace-trimmed. */
  text: string;
  /** True when the box was checked (`[x]` or `[X]`). */
  completed: boolean;
}
|
||||
|
||||
/**
|
||||
* Parse standard markdown checkboxes from file content.
|
||||
*
|
||||
* Matches lines like:
|
||||
* - [ ] Step description
|
||||
* - [x] Completed step
|
||||
* * [ ] Alternative bullet
|
||||
*/
|
||||
export function parseChecklist(content: string): ChecklistItem[] {
|
||||
const items: ChecklistItem[] = [];
|
||||
const pattern = /^[-*]\s*\[([ xX])\]\s+(.+)$/gm;
|
||||
|
||||
for (const match of content.matchAll(pattern)) {
|
||||
const completed = match[1] !== " ";
|
||||
const text = match[2].trim();
|
||||
if (text.length > 0) {
|
||||
items.push({ step: items.length + 1, text, completed });
|
||||
}
|
||||
}
|
||||
return items;
|
||||
}
|
||||
|
||||
export function extractDoneSteps(message: string): number[] {
|
||||
const steps: number[] = [];
|
||||
for (const match of message.matchAll(/\[DONE:(\d+)\]/gi)) {
|
||||
const step = Number(match[1]);
|
||||
if (Number.isFinite(step)) steps.push(step);
|
||||
}
|
||||
return steps;
|
||||
}
|
||||
|
||||
export function markCompletedSteps(text: string, items: ChecklistItem[]): number {
|
||||
const doneSteps = extractDoneSteps(text);
|
||||
for (const step of doneSteps) {
|
||||
const item = items.find((t) => t.step === step);
|
||||
if (item) item.completed = true;
|
||||
}
|
||||
return doneSteps.length;
|
||||
}
|
||||
346
extensions/plannotator/generated/claude-review.ts
Normal file
346
extensions/plannotator/generated/claude-review.ts
Normal file
@@ -0,0 +1,346 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/server/claude-review.ts
|
||||
import { toRelativePath } from "./path-utils.js";
|
||||
|
||||
/**
|
||||
* Claude Code Review Agent — prompt, command builder, and JSONL output parser.
|
||||
*
|
||||
* Claude has its own review model (severity-based findings with reasoning traces)
|
||||
* separate from Codex's priority-based model. The transform layer normalizes
|
||||
* both into the shared annotation format.
|
||||
*
|
||||
* Claude uses --json-schema (inline JSON + Ajv validation with retries) and
|
||||
* --output-format stream-json for live JSONL streaming. The final event is
|
||||
* type:"result" with structured_output containing validated findings.
|
||||
*/
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Severity bucket Claude assigns to each validated finding. */
export type ClaudeSeverity = "important" | "nit" | "pre_existing";

/** One validated review finding from Claude's structured output. */
export interface ClaudeFinding {
  severity: ClaudeSeverity;
  // File path as Claude reports it; normalized to a relative path by
  // transformClaudeFindings via toRelativePath.
  file: string;
  line: number;
  end_line: number;
  description: string;
  // Reasoning chain produced by the validation pass (Step 3 of the prompt).
  reasoning: string;
}

/** Top-level shape of Claude's structured review output (matches the schema). */
export interface ClaudeReviewOutput {
  findings: ClaudeFinding[];
  // Per-severity counts over `findings`.
  summary: {
    important: number;
    nit: number;
    pre_existing: number;
  };
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Schema — Claude's own severity-based model
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * JSON Schema passed inline to `claude --json-schema` for validated
 * structured output. Mirrors ClaudeReviewOutput — keep the two in sync.
 * The object is runtime data (serialized and handed to the CLI), not
 * documentation.
 */
export const CLAUDE_REVIEW_SCHEMA_JSON = JSON.stringify({
  type: "object",
  properties: {
    findings: {
      type: "array",
      items: {
        type: "object",
        properties: {
          severity: { type: "string", enum: ["important", "nit", "pre_existing"] },
          file: { type: "string" },
          line: { type: "integer" },
          end_line: { type: "integer" },
          description: { type: "string" },
          reasoning: { type: "string" },
        },
        required: ["severity", "file", "line", "end_line", "description", "reasoning"],
        additionalProperties: false,
      },
    },
    summary: {
      type: "object",
      properties: {
        important: { type: "integer" },
        nit: { type: "integer" },
        pre_existing: { type: "integer" },
      },
      required: ["important", "nit", "pre_existing"],
      additionalProperties: false,
    },
  },
  required: ["findings", "summary"],
  additionalProperties: false,
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Review prompt — converges open-source Claude Code review + remote service
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * System prompt sent verbatim to the Claude review CLI. This is runtime
 * data — any edit to the template text changes review behavior.
 */
export const CLAUDE_REVIEW_PROMPT = `# Claude Code Review System Prompt

## Identity
You are a code review system. Your job is to find bugs that would break
production. You are not a linter, formatter, or style checker unless
project guidance files explicitly expand your scope.

## Pipeline

Step 1: Gather context
- Retrieve the PR diff (gh pr diff or git diff)
- Read CLAUDE.md and REVIEW.md at the repo root and in every directory
containing modified files
- Build a map of which rules apply to which file paths
- Identify any skip rules (paths, patterns, or file types to ignore)

Step 2: Launch 4 parallel review agents

Agent 1 — Bug + Regression (Opus-level reasoning)
Scan for logic errors, regressions, broken edge cases, build failures,
and code that will produce wrong results. Focus on the diff but read
surrounding code to understand call sites and data flow. Flag only
issues where the code is demonstrably wrong — not stylistic concerns,
not missing tests, not "could be cleaner."

Agent 2 — Security + Deep Analysis (Opus-level reasoning)
Look for security vulnerabilities with concrete exploit paths, race
conditions, incorrect assumptions about trust boundaries, and subtle
issues in introduced code. Read surrounding code for context. Do not
flag theoretical risks without a plausible path to harm.

Agent 3 — Code Quality + Reusability (Sonnet-level reasoning)
Look for code smells, unnecessary duplication, missed opportunities to
reuse existing utilities or patterns in the codebase, overly complex
implementations that could be simpler, and elegance issues. Read the
surrounding codebase to understand existing patterns before flagging.
Only flag issues a senior engineer would care about.

Agent 4 — Guideline Compliance (Haiku-level reasoning)
Audit changes against rules from CLAUDE.md and REVIEW.md gathered in
Step 1. Only flag clear, unambiguous violations where you can cite the
exact rule broken. If a PR makes a CLAUDE.md statement outdated, flag
that the docs need updating. Respect all skip rules — never flag files
or patterns that guidance says to ignore.

All agents:
- Do not duplicate each other's findings
- Do not flag issues in paths excluded by guidance files
- Provide file, line number, and a concise description for each candidate

Step 3: Validate each candidate finding
For each candidate, launch a validation agent. The validator:
- Traces the actual code path to confirm the issue is real
- Checks whether the issue is handled elsewhere (try/catch, upstream
guard, fallback logic, type system guarantees)
- Confirms the finding is not a false positive with high confidence
- If validation fails, drop the finding silently
- If validation passes, write a clear reasoning chain explaining how
the issue was confirmed — this becomes the \`reasoning\` field

Step 4: Classify each validated finding
Assign exactly one severity:

important — A bug that should be fixed before merging. Build failures,
clear logic errors, security vulnerabilities with exploit paths, data
loss risks, race conditions with observable consequences.

nit — A minor issue worth fixing but non-blocking. Style deviations
from project guidelines, code quality concerns, edge cases that are
unlikely but worth noting, convention violations that don't affect
correctness.

pre_existing — A bug that exists in the surrounding codebase but was
NOT introduced by this PR. Only flag when directly relevant to the
changed code path.

Step 5: Deduplicate and rank
- Merge findings that describe the same underlying issue from different
agents — keep the most specific description and the highest severity
- Sort by severity: important → nit → pre_existing
- Within each severity, sort by file path and line number

Step 6: Return structured JSON output matching the schema.
If no issues are found, return an empty findings array with zeroed summary.

## Hard constraints
- Never approve or block the PR
- Never comment on formatting or code style unless guidance files say to
- Never flag missing test coverage unless guidance files say to
- Never invent rules — only enforce what CLAUDE.md or REVIEW.md state
- Never flag issues in skipped paths or generated files unless guidance
explicitly includes them
- Prefer silence over false positives — when in doubt, drop the finding
- Do NOT post any comments to GitHub or GitLab
- Do NOT use gh pr comment or any commenting tool
- Your only output is the structured JSON findings`;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Command builder
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** argv plus stdin payload for spawning the Claude review CLI. */
export interface ClaudeCommandResult {
  /** Full argv, starting with the `claude` binary name. */
  command: string[];
  /** Prompt text to write to stdin (Claude reads prompt from stdin, not argv). */
  stdinPrompt: string;
}
|
||||
|
||||
/**
|
||||
* Build the `claude -p` command. Prompt is passed via stdin, not as a
|
||||
* positional arg — avoids quoting issues, argv limits, and variadic flag conflicts.
|
||||
*/
|
||||
export function buildClaudeCommand(prompt: string, model: string = "claude-opus-4-7", effort?: string): ClaudeCommandResult {
|
||||
const allowedTools = [
|
||||
"Agent", "Read", "Glob", "Grep",
|
||||
// GitHub CLI
|
||||
"Bash(gh pr view:*)", "Bash(gh pr diff:*)", "Bash(gh pr list:*)",
|
||||
"Bash(gh issue view:*)", "Bash(gh issue list:*)",
|
||||
"Bash(gh api repos/*/*/pulls/*)", "Bash(gh api repos/*/*/pulls/*/files*)",
|
||||
"Bash(gh api repos/*/*/pulls/*/comments*)", "Bash(gh api repos/*/*/issues/*/comments*)",
|
||||
// GitLab CLI
|
||||
"Bash(glab mr view:*)", "Bash(glab mr diff:*)", "Bash(glab mr list:*)",
|
||||
"Bash(glab api:*)",
|
||||
// Git (read-only)
|
||||
"Bash(git status:*)", "Bash(git diff:*)", "Bash(git log:*)",
|
||||
"Bash(git show:*)", "Bash(git blame:*)", "Bash(git branch:*)",
|
||||
"Bash(git grep:*)", "Bash(git ls-remote:*)", "Bash(git ls-tree:*)",
|
||||
"Bash(git merge-base:*)", "Bash(git remote:*)", "Bash(git rev-parse:*)",
|
||||
"Bash(git show-ref:*)",
|
||||
"Bash(wc:*)",
|
||||
].join(",");
|
||||
|
||||
const disallowedTools = [
|
||||
"Edit", "Write", "NotebookEdit", "WebFetch", "WebSearch",
|
||||
"Bash(python:*)", "Bash(python3:*)", "Bash(node:*)", "Bash(npx:*)",
|
||||
"Bash(bun:*)", "Bash(bunx:*)", "Bash(sh:*)", "Bash(bash:*)", "Bash(zsh:*)",
|
||||
"Bash(curl:*)", "Bash(wget:*)",
|
||||
].join(",");
|
||||
|
||||
return {
|
||||
command: [
|
||||
"claude", "-p",
|
||||
"--permission-mode", "dontAsk",
|
||||
"--output-format", "stream-json",
|
||||
"--verbose",
|
||||
"--json-schema", CLAUDE_REVIEW_SCHEMA_JSON,
|
||||
"--no-session-persistence",
|
||||
"--model", model,
|
||||
...(effort ? ["--effort", effort] : []),
|
||||
"--tools", "Agent,Bash,Read,Glob,Grep",
|
||||
"--allowedTools", allowedTools,
|
||||
"--disallowedTools", disallowedTools,
|
||||
],
|
||||
stdinPrompt: prompt,
|
||||
};
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// JSONL stream output parser
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Parse Claude Code's stream-json output (JSONL).
|
||||
* Extracts structured_output from the final type:"result" event.
|
||||
*/
|
||||
export function parseClaudeStreamOutput(stdout: string): ClaudeReviewOutput | null {
|
||||
if (!stdout.trim()) return null;
|
||||
|
||||
const lines = stdout.trim().split('\n');
|
||||
for (let i = lines.length - 1; i >= 0; i--) {
|
||||
const line = lines[i].trim();
|
||||
if (!line) continue;
|
||||
|
||||
try {
|
||||
const event = JSON.parse(line);
|
||||
|
||||
if (event.type === 'result') {
|
||||
if (event.is_error) return null;
|
||||
|
||||
const output = event.structured_output;
|
||||
if (!output || !Array.isArray(output.findings)) return null;
|
||||
|
||||
return output as ClaudeReviewOutput;
|
||||
}
|
||||
} catch {
|
||||
// Not valid JSON — skip
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Finding transform — Claude findings → external annotations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Transform Claude findings into the external annotation format. */
|
||||
export function transformClaudeFindings(
|
||||
findings: ClaudeFinding[],
|
||||
source: string,
|
||||
cwd?: string,
|
||||
): Array<{
|
||||
source: string;
|
||||
filePath: string;
|
||||
lineStart: number;
|
||||
lineEnd: number;
|
||||
type: string;
|
||||
side: string;
|
||||
scope: string;
|
||||
text: string;
|
||||
severity: ClaudeSeverity;
|
||||
reasoning: string;
|
||||
author: string;
|
||||
}> {
|
||||
return findings
|
||||
.filter(f => f.file && typeof f.line === "number")
|
||||
.map(f => ({
|
||||
source,
|
||||
filePath: toRelativePath(f.file, cwd),
|
||||
lineStart: f.line,
|
||||
lineEnd: f.end_line ?? f.line,
|
||||
type: "comment",
|
||||
side: "new",
|
||||
scope: "line",
|
||||
text: `[${f.severity}] ${f.description}`,
|
||||
severity: f.severity,
|
||||
reasoning: f.reasoning,
|
||||
author: "Claude Code",
|
||||
}));
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Live log formatter
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Extract log-worthy content from a JSONL line for the LiveLogViewer.
|
||||
* Returns a human-readable string, or null if the line should be skipped.
|
||||
*/
|
||||
export function formatClaudeLogEvent(line: string): string | null {
|
||||
try {
|
||||
const event = JSON.parse(line);
|
||||
|
||||
// Skip the final result event — handled separately
|
||||
if (event.type === 'result') return null;
|
||||
|
||||
// Assistant messages (the agent's thinking/responses)
|
||||
if (event.type === 'assistant' && event.message?.content) {
|
||||
const parts = Array.isArray(event.message.content) ? event.message.content : [event.message.content];
|
||||
const texts = parts
|
||||
.filter((p: any) => p.type === 'text' && p.text)
|
||||
.map((p: any) => p.text);
|
||||
if (texts.length > 0) return texts.join('\n');
|
||||
|
||||
// Tool use events (only reached if no text parts found)
|
||||
const tools = parts.filter((p: any) => p.type === 'tool_use');
|
||||
if (tools.length > 0) {
|
||||
return tools.map((t: any) => `[${t.name}] ${typeof t.input === 'string' ? t.input.slice(0, 100) : JSON.stringify(t.input).slice(0, 100)}`).join('\n');
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
20
extensions/plannotator/generated/code-file.ts
Normal file
20
extensions/plannotator/generated/code-file.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/code-file.ts
|
||||
export const CODE_FILE_REGEX = /(?:\.(tsx?|jsx?|py|rb|go|rs|java|c|cpp|h|hpp|cs|swift|kt|scala|sh|bash|zsh|sql|graphql|json|ya?ml|toml|ini|css|scss|less|xml|tf|lua|r|dart|ex|exs|vue|svelte|astro|zig|proto)|(?:^|\/)(Dockerfile|Makefile|Rakefile|Gemfile|Procfile|Vagrantfile|Brewfile|Justfile))$/i;
|
||||
|
||||
export const CODE_PATH_BARE_REGEX = /(?:\.{0,2}\/)?(?:[a-zA-Z0-9_@.\-\[\]]+\/)+[a-zA-Z0-9_.\-\[\]]+\.[a-zA-Z0-9]+/g;
|
||||
|
||||
const IMPLAUSIBLE_CHARS = /[{},*?\s]/;
|
||||
|
||||
export function isPlausibleCodeFilePath(input: string): boolean {
|
||||
return !IMPLAUSIBLE_CHARS.test(input);
|
||||
}
|
||||
|
||||
export function isCodeFilePath(input: string): boolean {
|
||||
if (!isPlausibleCodeFilePath(input)) return false;
|
||||
return CODE_FILE_REGEX.test(input.replace(/#.*$/, ''))
|
||||
&& !input.startsWith('http://') && !input.startsWith('https://');
|
||||
}
|
||||
|
||||
export function isCodeFilePathStrict(input: string): boolean {
|
||||
return input.includes('/') && isCodeFilePath(input);
|
||||
}
|
||||
408
extensions/plannotator/generated/codex-review.ts
Normal file
408
extensions/plannotator/generated/codex-review.ts
Normal file
@@ -0,0 +1,408 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/server/codex-review.ts
|
||||
/**
|
||||
* Codex Review Agent — prompt, command builder, output parser, and finding transformer.
|
||||
*
|
||||
* Encapsulates all Codex-specific logic for the AI review agent integration.
|
||||
* The review server (review.ts) calls into this module via the agent-jobs callbacks.
|
||||
*/
|
||||
|
||||
import { join } from "node:path";
|
||||
import { homedir, tmpdir } from "node:os";
|
||||
import { appendFile, mkdir, unlink, writeFile, readFile } from "node:fs/promises";
|
||||
import { existsSync } from "node:fs";
|
||||
import type { DiffType } from "./review-core.js";
|
||||
import type { PRMetadata } from "./pr-provider.js";
|
||||
import { toRelativePath } from "./path-utils.js";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Debug log — only active when PLANNOTATOR_DEBUG is set
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const DEBUG_ENABLED = !!process.env.PLANNOTATOR_DEBUG;
|
||||
const DEBUG_LOG_PATH = join(homedir(), ".plannotator", "codex-review-debug.log");
|
||||
|
||||
async function debugLog(label: string, data?: unknown): Promise<void> {
|
||||
if (!DEBUG_ENABLED) return;
|
||||
try {
|
||||
await mkdir(join(homedir(), ".plannotator"), { recursive: true });
|
||||
const timestamp = new Date().toISOString();
|
||||
const line = data !== undefined
|
||||
? `[${timestamp}] ${label}: ${typeof data === "string" ? data : JSON.stringify(data, null, 2)}\n`
|
||||
: `[${timestamp}] ${label}\n`;
|
||||
await appendFile(DEBUG_LOG_PATH, line);
|
||||
} catch { /* never fail the main flow */ }
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Schema — embedded as a string, written to disk on first use.
|
||||
// Bun's compiled binary uses a virtual FS that external processes (codex)
|
||||
// can't read, so we materialize the schema to a real file.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// JSON Schema for Codex's structured review output, serialized once at module
// load. It is written to a real file by ensureSchemaFile() because external
// processes can't read Bun's virtual FS (see section comment above).
const CODEX_REVIEW_SCHEMA = JSON.stringify({
  type: "object",
  properties: {
    // One entry per review finding.
    findings: {
      type: "array",
      items: {
        type: "object",
        properties: {
          title: { type: "string" },
          body: { type: "string" },
          confidence_score: { type: "number" },
          // 0=P0 … 3=P3, or null when undetermined (see system prompt).
          priority: { type: ["integer", "null"] },
          code_location: {
            type: "object",
            properties: {
              absolute_file_path: { type: "string" },
              line_range: {
                type: "object",
                properties: {
                  start: { type: "integer" },
                  end: { type: "integer" },
                },
                required: ["start", "end"],
                additionalProperties: false,
              },
            },
            required: ["absolute_file_path", "line_range"],
            additionalProperties: false,
          },
        },
        required: ["title", "body", "confidence_score", "priority", "code_location"],
        additionalProperties: false,
      },
    },
    overall_correctness: { type: "string" },
    overall_explanation: { type: "string" },
    overall_confidence_score: { type: "number" },
  },
  required: ["findings", "overall_correctness", "overall_explanation", "overall_confidence_score"],
  additionalProperties: false,
});
|
||||
|
||||
const SCHEMA_DIR = join(homedir(), ".plannotator");
|
||||
const SCHEMA_FILE = join(SCHEMA_DIR, "codex-review-schema.json");
|
||||
let schemaMaterialized = false;
|
||||
|
||||
/** Ensure the schema file exists on disk and return its path. */
|
||||
async function ensureSchemaFile(): Promise<string> {
|
||||
if (!schemaMaterialized) {
|
||||
await mkdir(SCHEMA_DIR, { recursive: true });
|
||||
await writeFile(SCHEMA_FILE, CODEX_REVIEW_SCHEMA);
|
||||
schemaMaterialized = true;
|
||||
}
|
||||
return SCHEMA_FILE;
|
||||
}
|
||||
|
||||
export { SCHEMA_FILE as CODEX_REVIEW_SCHEMA_PATH };
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// System prompt — copied verbatim from codex-rs/core/review_prompt.md
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Static system prompt for `codex exec`; copied verbatim from
// codex-rs/core/review_prompt.md. NOTE: this is a runtime string — any wording
// change alters what the model sees, so edit only to track upstream.
export const CODEX_REVIEW_SYSTEM_PROMPT = `# Review guidelines:

You are acting as a reviewer for a proposed code change made by another engineer.

Below are some default guidelines for determining whether the original author would appreciate the issue being flagged.

These are not the final word in determining whether an issue is a bug. In many cases, you will encounter other, more specific guidelines. These may be present elsewhere in a developer message, a user message, a file, or even elsewhere in this system message.
Those guidelines should be considered to override these general instructions.

Here are the general guidelines for determining whether something is a bug and should be flagged.

1. It meaningfully impacts the accuracy, performance, security, or maintainability of the code.
2. The bug is discrete and actionable (i.e. not a general issue with the codebase or a combination of multiple issues).
3. Fixing the bug does not demand a level of rigor that is not present in the rest of the codebase (e.g. one doesn't need very detailed comments and input validation in a repository of one-off scripts in personal projects)
4. The bug was introduced in the commit (pre-existing bugs should not be flagged).
5. The author of the original PR would likely fix the issue if they were made aware of it.
6. The bug does not rely on unstated assumptions about the codebase or author's intent.
7. It is not enough to speculate that a change may disrupt another part of the codebase, to be considered a bug, one must identify the other parts of the code that are provably affected.
8. The bug is clearly not just an intentional change by the original author.

When flagging a bug, you will also provide an accompanying comment. Once again, these guidelines are not the final word on how to construct a comment -- defer to any subsequent guidelines that you encounter.

1. The comment should be clear about why the issue is a bug.
2. The comment should appropriately communicate the severity of the issue. It should not claim that an issue is more severe than it actually is.
3. The comment should be brief. The body should be at most 1 paragraph. It should not introduce line breaks within the natural language flow unless it is necessary for the code fragment.
4. The comment should not include any chunks of code longer than 3 lines. Any code chunks should be wrapped in markdown inline code tags or a code block.
5. The comment should clearly and explicitly communicate the scenarios, environments, or inputs that are necessary for the bug to arise. The comment should immediately indicate that the issue's severity depends on these factors.
6. The comment's tone should be matter-of-fact and not accusatory or overly positive. It should read as a helpful AI assistant suggestion without sounding too much like a human reviewer.
7. The comment should be written such that the original author can immediately grasp the idea without close reading.
8. The comment should avoid excessive flattery and comments that are not helpful to the original author. The comment should avoid phrasing like "Great job ...", "Thanks for ...".

Below are some more detailed guidelines that you should apply to this specific review.

HOW MANY FINDINGS TO RETURN:

Output all findings that the original author would fix if they knew about it. If there is no finding that a person would definitely love to see and fix, prefer outputting no findings. Do not stop at the first qualifying finding. Continue until you've listed every qualifying finding.

GUIDELINES:

- Ignore trivial style unless it obscures meaning or violates documented standards.
- Use one comment per distinct issue (or a multi-line range if necessary).
- Use \`\`\`suggestion blocks ONLY for concrete replacement code (minimal lines; no commentary inside the block).
- In every \`\`\`suggestion block, preserve the exact leading whitespace of the replaced lines (spaces vs tabs, number of spaces).
- Do NOT introduce or remove outer indentation levels unless that is the actual fix.

The comments will be presented in the code review as inline comments. You should avoid providing unnecessary location details in the comment body. Always keep the line range as short as possible for interpreting the issue. Avoid ranges longer than 5–10 lines; instead, choose the most suitable subrange that pinpoints the problem.

At the beginning of the finding title, tag the bug with priority level. For example "[P1] Un-padding slices along wrong tensor dimensions". [P0] – Drop everything to fix. Blocking release, operations, or major usage. Only use for universal issues that do not depend on any assumptions about the inputs. · [P1] – Urgent. Should be addressed in the next cycle · [P2] – Normal. To be fixed eventually · [P3] – Low. Nice to have.

Additionally, include a numeric priority field in the JSON output for each finding: set "priority" to 0 for P0, 1 for P1, 2 for P2, or 3 for P3. If a priority cannot be determined, omit the field or use null.

At the end of your findings, output an "overall correctness" verdict of whether or not the patch should be considered "correct".
Correct implies that existing code and tests will not break, and the patch is free of bugs and other blocking issues.
Ignore non-blocking issues such as style, formatting, typos, documentation, and other nits.

FORMATTING GUIDELINES:
The finding description should be one paragraph.`;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// User message builder
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Build the dynamic user message based on review context. */
|
||||
export function buildCodexReviewUserMessage(
|
||||
patch: string,
|
||||
diffType: DiffType,
|
||||
options?: { defaultBranch?: string; hasLocalAccess?: boolean; prDiffScope?: string },
|
||||
prMetadata?: PRMetadata,
|
||||
): string {
|
||||
// PR/MR mode — pass the link, with local context if --local
|
||||
if (prMetadata) {
|
||||
if (options?.prDiffScope === "full-stack") {
|
||||
return [
|
||||
`Full-stack review of ${prMetadata.url}`,
|
||||
"",
|
||||
"This is a stacked PR. The diff below shows ALL accumulated changes from the repository default branch through this PR's head (not just this PR's own layer).",
|
||||
"Review the complete diff for issues that span the stack.",
|
||||
"",
|
||||
"```diff",
|
||||
patch,
|
||||
"```",
|
||||
].join("\n");
|
||||
}
|
||||
if (options?.hasLocalAccess) {
|
||||
return [
|
||||
prMetadata.url,
|
||||
"",
|
||||
"You are in a local worktree checked out at the PR head. The code is available locally.",
|
||||
`To see the PR changes, diff against the remote base branch: git diff origin/${prMetadata.baseBranch}...HEAD`,
|
||||
"Do NOT diff against the local `main` branch — it may be stale. Always use origin/.",
|
||||
].join("\n");
|
||||
}
|
||||
return prMetadata.url;
|
||||
}
|
||||
|
||||
// Local mode — Codex has full file/git access
|
||||
const effectiveDiffType = diffType.startsWith("worktree:")
|
||||
? diffType.split(":").pop() || "uncommitted"
|
||||
: diffType;
|
||||
|
||||
switch (effectiveDiffType) {
|
||||
case "uncommitted":
|
||||
return "Review the current code changes (staged, unstaged, and untracked files) and provide prioritized findings.";
|
||||
|
||||
case "staged":
|
||||
return "Review the currently staged code changes (`git diff --staged`) and provide prioritized findings.";
|
||||
|
||||
case "unstaged":
|
||||
return "Review the unstaged code changes (tracked modifications and untracked files) and provide prioritized findings.";
|
||||
|
||||
case "last-commit":
|
||||
return "Review the code changes introduced in the last commit (`git diff HEAD~1..HEAD`) and provide prioritized findings.";
|
||||
|
||||
case "branch": {
|
||||
const base = options?.defaultBranch || "main";
|
||||
return `Review the code changes against the base branch '${base}'. Run \`git diff ${base}..HEAD\` to inspect the changes. Provide prioritized, actionable findings.`;
|
||||
}
|
||||
|
||||
case "merge-base": {
|
||||
const base = options?.defaultBranch || "main";
|
||||
return `Review the PR-style diff against base '${base}'. First find the common ancestor with \`git merge-base ${base} HEAD\`, then run \`git diff <merge-base>..HEAD\` using that commit to inspect only the changes introduced on this branch (matches GitHub's PR view). Provide prioritized, actionable findings.`;
|
||||
}
|
||||
|
||||
case "all":
|
||||
return "Review every file in the repository (all files shown as additions, diffed against an empty tree). Provide prioritized, actionable findings.";
|
||||
|
||||
default:
|
||||
// p4 or unknown — fall back to generic with inlined diff
|
||||
return [
|
||||
"Review the following code changes and provide prioritized findings.",
|
||||
"",
|
||||
"```diff",
|
||||
patch,
|
||||
"```",
|
||||
].join("\n");
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Command builder
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Inputs for building the `codex exec` argv (see buildCodexCommand). */
export interface CodexCommandOptions {
  /** Working directory Codex runs in (passed via -C). */
  cwd: string;
  /** File the structured JSON result is written to (passed via -o). */
  outputPath: string;
  /** Full prompt text, passed as the final positional argument. */
  prompt: string;
  /** Optional model override (passed via -m). */
  model?: string;
  /** Optional value for the model_reasoning_effort config key. */
  reasoningEffort?: string;
  /** When true, requests service_tier=fast. */
  fastMode?: boolean;
}
|
||||
|
||||
/** Build the `codex exec` argv array. Materializes the schema file on first call. */
|
||||
export async function buildCodexCommand(options: CodexCommandOptions): Promise<string[]> {
|
||||
const { cwd, outputPath, prompt, model, reasoningEffort, fastMode } = options;
|
||||
const schemaPath = await ensureSchemaFile();
|
||||
|
||||
const command = [
|
||||
"codex",
|
||||
...(model ? ["-m", model] : []),
|
||||
...(reasoningEffort ? ["-c", `model_reasoning_effort=${reasoningEffort}`] : []),
|
||||
...(fastMode ? ["-c", "service_tier=fast"] : []),
|
||||
"exec",
|
||||
"--output-schema", schemaPath,
|
||||
"-o", outputPath,
|
||||
"--full-auto",
|
||||
"--ephemeral",
|
||||
"-C", cwd,
|
||||
prompt,
|
||||
];
|
||||
|
||||
debugLog("BUILD_COMMAND", {
|
||||
cwd,
|
||||
outputPath,
|
||||
schemaPath,
|
||||
promptLength: prompt.length,
|
||||
command: command.map((c, i) => i === command.length - 1 ? `<prompt: ${c.length} chars>` : c),
|
||||
});
|
||||
|
||||
return command;
|
||||
}
|
||||
|
||||
/** Generate a unique temp file path for Codex output. */
|
||||
export function generateOutputPath(): string {
|
||||
return join(tmpdir(), `plannotator-codex-${crypto.randomUUID()}.json`);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Output parsing — matches Codex's native ReviewOutputEvent schema
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Location payload on a Codex finding: absolute path plus a line range. */
export interface CodexCodeLocation {
  absolute_file_path: string;
  // NOTE(review): presumably 1-based and inclusive — confirm against Codex output.
  line_range: { start: number; end: number };
}

/** One review finding as emitted by Codex (matches CODEX_REVIEW_SCHEMA). */
export interface CodexFinding {
  /** Finding title, "[P0]".."[P3]"-prefixed per the system prompt. */
  title: string;
  /** One-paragraph description. */
  body: string;
  confidence_score: number;
  /** 0=P0 … 3=P3, or null when undetermined (see system prompt). */
  priority: number | null;
  code_location: CodexCodeLocation;
}

/** Top-level shape of the Codex -o output file. */
export interface CodexReviewOutput {
  findings: CodexFinding[];
  /** Overall verdict on whether the patch is "correct" (see system prompt). */
  overall_correctness: string;
  overall_explanation: string;
  overall_confidence_score: number;
}
|
||||
|
||||
/** Read and parse the Codex -o output file. Returns null on any failure. */
|
||||
export async function parseCodexOutput(outputPath: string): Promise<CodexReviewOutput | null> {
|
||||
await debugLog("PARSE_OUTPUT_START", { outputPath });
|
||||
|
||||
try {
|
||||
if (!existsSync(outputPath)) {
|
||||
await debugLog("PARSE_OUTPUT_FILE_MISSING", outputPath);
|
||||
return null;
|
||||
}
|
||||
|
||||
const text = await readFile(outputPath, "utf-8");
|
||||
|
||||
// Clean up temp file
|
||||
try { await unlink(outputPath); } catch { /* ignore */ }
|
||||
|
||||
if (!text.trim()) {
|
||||
await debugLog("PARSE_OUTPUT_EMPTY");
|
||||
return null;
|
||||
}
|
||||
|
||||
const parsed = JSON.parse(text);
|
||||
if (!parsed || !Array.isArray(parsed.findings)) {
|
||||
await debugLog("PARSE_OUTPUT_INVALID_SHAPE", { hasFindings: !!parsed?.findings });
|
||||
return null;
|
||||
}
|
||||
|
||||
await debugLog("PARSE_OUTPUT_SUCCESS", {
|
||||
findingsCount: parsed.findings.length,
|
||||
overall_correctness: parsed.overall_correctness,
|
||||
overall_confidence_score: parsed.overall_confidence_score,
|
||||
});
|
||||
|
||||
return parsed as CodexReviewOutput;
|
||||
} catch (err) {
|
||||
await debugLog("PARSE_OUTPUT_ERROR", err instanceof Error ? err.message : String(err));
|
||||
// Clean up on error too
|
||||
try { await unlink(outputPath); } catch { /* ignore */ }
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Finding → external annotation transform
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** External-annotation payload produced from review findings. */
export interface ReviewAnnotationInput {
  /** Identifier of the producing agent/job. */
  source: string;
  /** Repo-relative file path (converted via toRelativePath). */
  filePath: string;
  lineStart: number;
  lineEnd: number;
  /** Annotation type; transformReviewFindings always emits "comment". */
  type: string;
  /** Diff side; transformReviewFindings always emits "new". */
  side: string;
  /** Anchoring scope; transformReviewFindings always emits "line". */
  scope: string;
  /** Annotation body: finding title, blank line, finding body. */
  text: string;
  /** Display author; defaults to "Review Agent". */
  author: string;
}
|
||||
|
||||
/** Transform review findings (provider-agnostic) into the external annotation format. */
|
||||
export function transformReviewFindings(
|
||||
findings: CodexFinding[],
|
||||
source: string,
|
||||
cwd?: string,
|
||||
author?: string,
|
||||
): ReviewAnnotationInput[] {
|
||||
const annotations = findings
|
||||
.filter((f) =>
|
||||
f.code_location?.absolute_file_path &&
|
||||
typeof f.code_location?.line_range?.start === "number" &&
|
||||
typeof f.code_location?.line_range?.end === "number"
|
||||
)
|
||||
.map((f) => ({
|
||||
source,
|
||||
filePath: toRelativePath(f.code_location.absolute_file_path, cwd),
|
||||
lineStart: f.code_location.line_range.start,
|
||||
lineEnd: f.code_location.line_range.end,
|
||||
type: "comment",
|
||||
side: "new",
|
||||
scope: "line",
|
||||
text: `${f.title}\n\n${f.body}`.trim(),
|
||||
author: author ?? "Review Agent",
|
||||
}));
|
||||
|
||||
debugLog("TRANSFORM_FINDINGS", {
|
||||
inputCount: findings.length,
|
||||
outputCount: annotations.length,
|
||||
annotations: annotations.map((a) => ({
|
||||
filePath: a.filePath,
|
||||
lineStart: a.lineStart,
|
||||
lineEnd: a.lineEnd,
|
||||
textPreview: a.text.slice(0, 80),
|
||||
})),
|
||||
});
|
||||
|
||||
return annotations;
|
||||
}
|
||||
227
extensions/plannotator/generated/config.ts
Normal file
227
extensions/plannotator/generated/config.ts
Normal file
@@ -0,0 +1,227 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/config.ts
|
||||
/**
|
||||
* Plannotator Config
|
||||
*
|
||||
* Reads/writes ~/.plannotator/config.json for persistent user settings.
|
||||
* Runtime-agnostic: uses only node:fs, node:os, node:child_process.
|
||||
*/
|
||||
|
||||
import { homedir } from "os";
|
||||
import { join } from "path";
|
||||
import { readFileSync, writeFileSync, mkdirSync, existsSync } from "fs";
|
||||
import { execSync } from "child_process";
|
||||
|
||||
/**
 * Diff types a user may persist as their default. The legacy stored value
 * 'branch' is accepted on read and mapped to 'merge-base' by
 * resolveDefaultDiffType.
 */
export type DefaultDiffType = 'uncommitted' | 'unstaged' | 'staged' | 'merge-base' | 'all';

/** Persisted diff-viewer display preferences (all optional). */
export interface DiffOptions {
  diffStyle?: 'split' | 'unified';
  overflow?: 'scroll' | 'wrap';
  diffIndicators?: 'bars' | 'classic' | 'none';
  lineDiffType?: 'word-alt' | 'word' | 'char' | 'none';
  showLineNumbers?: boolean;
  showDiffBackground?: boolean;
  fontFamily?: string;
  fontSize?: string;
  hideWhitespace?: boolean;
  /** Preferred default diff type; see resolveDefaultDiffType for the fallback. */
  defaultDiffType?: DefaultDiffType;
}
|
||||
|
||||
/** Single conventional comment label entry stored in config.json */
export interface CCLabelConfig {
  label: string;
  display: string;
  blocking: boolean;
}

/** Per-section string overrides keyed by prompt slot name. */
export type PromptSectionOverrides = Record<string, string | undefined>;

/** Agent runtimes that may carry their own prompt overrides. */
export type PromptRuntime =
  | "claude-code"
  | "opencode"
  | "copilot-cli"
  | "pi"
  | "codex"
  | "gemini-cli";

/**
 * Common shape of a prompt section: arbitrary string slots plus an optional
 * per-runtime overrides map. Sections are shallow-merged (with `runtimes`
 * merged one level deeper) by mergePromptConfig.
 */
interface PromptSectionConfig {
  [key: string]: string | Partial<Record<PromptRuntime, PromptSectionOverrides>> | undefined;
  runtimes?: Partial<Record<PromptRuntime, PromptSectionOverrides>>;
}

/** User-customizable prompt text, one optional block per feature area. */
export interface PromptConfig {
  review?: PromptSectionConfig & {
    approved?: string;
    denied?: string;
  };
  plan?: PromptSectionConfig & {
    approved?: string;
    approvedWithNotes?: string;
    autoApproved?: string;
    denied?: string;
  };
  annotate?: PromptSectionConfig & {
    fileFeedback?: string;
    messageFeedback?: string;
    approved?: string;
  };
}
|
||||
|
||||
const PROMPT_SECTIONS = ["review", "plan", "annotate"] as const;
|
||||
|
||||
export function mergePromptConfig(
|
||||
current?: PromptConfig,
|
||||
partial?: PromptConfig,
|
||||
): PromptConfig | undefined {
|
||||
if (!current && !partial) return undefined;
|
||||
|
||||
const result: Record<string, any> = { ...current, ...partial };
|
||||
|
||||
for (const section of PROMPT_SECTIONS) {
|
||||
const cur = current?.[section];
|
||||
const par = partial?.[section];
|
||||
if (cur || par) {
|
||||
result[section] = {
|
||||
...cur,
|
||||
...par,
|
||||
runtimes: (cur?.runtimes || par?.runtimes)
|
||||
? { ...cur?.runtimes, ...par?.runtimes }
|
||||
: undefined,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return result as PromptConfig;
|
||||
}
|
||||
|
||||
/** Shape of ~/.plannotator/config.json (read/written by loadConfig/saveConfig). */
export interface PlannotatorConfig {
  /** User-chosen display name. */
  displayName?: string;
  /** Diff-viewer display preferences. */
  diffOptions?: DiffOptions;
  /** Prompt-text overrides, merged via mergePromptConfig on save. */
  prompts?: PromptConfig;
  /** Toggle for conventional-comment support. */
  conventionalComments?: boolean;
  /** null = explicitly cleared (use defaults), undefined = not set */
  conventionalLabels?: CCLabelConfig[] | null;
  /**
   * Enable `gh attestation verify` during CLI installation/upgrade.
   * Read by scripts/install.sh|ps1|cmd on every run (not by any runtime code).
   * When true, the installer runs build-provenance verification after the
   * SHA256 checksum check; requires `gh` CLI installed and authenticated
   * (`gh auth login`). OS-level opt-in only — no UI surface. Default: false.
   */
  verifyAttestation?: boolean;
  /**
   * Enable Jina Reader for URL-to-markdown conversion during annotation.
   * When true (default), `plannotator annotate <url>` routes through
   * r.jina.ai for better JS-rendered page support and reader-mode extraction.
   * Set to false to always use plain fetch + Turndown.
   */
  jina?: boolean;
}
|
||||
|
||||
// Config lives at ~/.plannotator/config.json.
const CONFIG_DIR = join(homedir(), ".plannotator");
const CONFIG_PATH = join(CONFIG_DIR, "config.json");
|
||||
|
||||
/**
|
||||
* Load config from ~/.plannotator/config.json.
|
||||
* Returns {} on missing file or malformed JSON.
|
||||
*/
|
||||
export function loadConfig(): PlannotatorConfig {
|
||||
try {
|
||||
if (!existsSync(CONFIG_PATH)) return {};
|
||||
const raw = readFileSync(CONFIG_PATH, "utf-8");
|
||||
const parsed = JSON.parse(raw);
|
||||
return typeof parsed === "object" && parsed !== null ? parsed : {};
|
||||
} catch (e) {
|
||||
process.stderr.write(`[plannotator] Warning: failed to read config.json: ${e}\n`);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save config by merging partial values into the existing file.
|
||||
* Creates ~/.plannotator/ directory if needed.
|
||||
*/
|
||||
export function saveConfig(partial: Partial<PlannotatorConfig>): void {
|
||||
try {
|
||||
const current = loadConfig();
|
||||
const mergedDiffOptions = (current.diffOptions || partial.diffOptions)
|
||||
? { ...current.diffOptions, ...partial.diffOptions }
|
||||
: undefined;
|
||||
const mergedPrompts = mergePromptConfig(current.prompts, partial.prompts);
|
||||
const merged = {
|
||||
...current,
|
||||
...partial,
|
||||
diffOptions: mergedDiffOptions,
|
||||
prompts: mergedPrompts,
|
||||
};
|
||||
mkdirSync(CONFIG_DIR, { recursive: true });
|
||||
writeFileSync(CONFIG_PATH, JSON.stringify(merged, null, 2) + "\n", "utf-8");
|
||||
} catch (e) {
|
||||
process.stderr.write(`[plannotator] Warning: failed to write config.json: ${e}\n`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect the git user name from `git config user.name`.
|
||||
* Returns null if git is unavailable, not in a repo, or user.name is not set.
|
||||
*/
|
||||
export function detectGitUser(): string | null {
|
||||
try {
|
||||
const name = execSync("git config user.name", { encoding: "utf-8", timeout: 3000 }).trim();
|
||||
return name || null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the serverConfig payload for API responses.
|
||||
* Reads config.json fresh each call so the response reflects the latest file on disk.
|
||||
*/
|
||||
export function getServerConfig(gitUser: string | null): {
|
||||
displayName?: string;
|
||||
diffOptions?: DiffOptions;
|
||||
gitUser?: string;
|
||||
conventionalComments?: boolean;
|
||||
conventionalLabels?: CCLabelConfig[] | null;
|
||||
} {
|
||||
const cfg = loadConfig();
|
||||
return {
|
||||
displayName: cfg.displayName,
|
||||
diffOptions: cfg.diffOptions,
|
||||
gitUser: gitUser ?? undefined,
|
||||
...(cfg.conventionalComments !== undefined && { conventionalComments: cfg.conventionalComments }),
|
||||
...(cfg.conventionalLabels !== undefined && { conventionalLabels: cfg.conventionalLabels }),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the user's preferred default diff type from config, falling back to 'unstaged'.
|
||||
*/
|
||||
export function resolveDefaultDiffType(cfg?: PlannotatorConfig): DefaultDiffType {
|
||||
const v = cfg?.diffOptions?.defaultDiffType as string | undefined;
|
||||
if (v === 'branch') return 'merge-base';
|
||||
return v === 'uncommitted' || v === 'unstaged' || v === 'staged' || v === 'merge-base' || v === 'all' ? v : 'unstaged';
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve whether to use Jina Reader for URL annotation.
|
||||
*
|
||||
* Priority (highest wins):
|
||||
* --no-jina CLI flag → PLANNOTATOR_JINA env var → config.jina → default true
|
||||
*/
|
||||
export function resolveUseJina(cliNoJina: boolean, config: PlannotatorConfig): boolean {
|
||||
// CLI flag has highest priority
|
||||
if (cliNoJina) return false;
|
||||
|
||||
// Environment variable
|
||||
const envVal = process.env.PLANNOTATOR_JINA;
|
||||
if (envVal !== undefined) {
|
||||
return envVal === "1" || envVal.toLowerCase() === "true";
|
||||
}
|
||||
|
||||
// Config file
|
||||
if (config.jina !== undefined) return config.jina;
|
||||
|
||||
// Default: enabled
|
||||
return true;
|
||||
}
|
||||
65
extensions/plannotator/generated/draft.ts
Normal file
65
extensions/plannotator/generated/draft.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/draft.ts
|
||||
/**
|
||||
* Draft Storage
|
||||
*
|
||||
* Persists annotation drafts to ~/.plannotator/drafts/ so they survive
|
||||
* server crashes. Each draft is keyed by a content hash of the plan/diff
|
||||
* it was created against.
|
||||
*
|
||||
* Runtime-agnostic: uses only node:fs, node:path, node:os, node:crypto.
|
||||
*/
|
||||
|
||||
import { homedir } from "os";
|
||||
import { join } from "path";
|
||||
import { mkdirSync, writeFileSync, readFileSync, unlinkSync, existsSync } from "fs";
|
||||
import { createHash } from "crypto";
|
||||
|
||||
/**
|
||||
* Get the drafts directory, creating it if needed.
|
||||
*/
|
||||
export function getDraftDir(): string {
|
||||
const dir = join(homedir(), ".plannotator", "drafts");
|
||||
mkdirSync(dir, { recursive: true });
|
||||
return dir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a stable key from content using truncated SHA-256.
|
||||
* Same content always produces the same key across server restarts.
|
||||
*/
|
||||
export function contentHash(content: string): string {
|
||||
return createHash("sha256").update(content).digest("hex").slice(0, 16);
|
||||
}
|
||||
|
||||
/**
|
||||
* Save a draft to disk.
|
||||
*/
|
||||
export function saveDraft(key: string, data: object): void {
|
||||
const dir = getDraftDir();
|
||||
writeFileSync(join(dir, `${key}.json`), JSON.stringify(data), "utf-8");
|
||||
}
|
||||
|
||||
/**
|
||||
* Load a draft from disk. Returns null if not found.
|
||||
*/
|
||||
export function loadDraft(key: string): object | null {
|
||||
const filePath = join(getDraftDir(), `${key}.json`);
|
||||
try {
|
||||
if (!existsSync(filePath)) return null;
|
||||
return JSON.parse(readFileSync(filePath, "utf-8"));
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete a draft from disk. No-op if not found.
|
||||
*/
|
||||
export function deleteDraft(key: string): void {
|
||||
const filePath = join(getDraftDir(), `${key}.json`);
|
||||
try {
|
||||
if (existsSync(filePath)) unlinkSync(filePath);
|
||||
} catch {
|
||||
// Ignore delete failures
|
||||
}
|
||||
}
|
||||
398
extensions/plannotator/generated/external-annotation.ts
Normal file
398
extensions/plannotator/generated/external-annotation.ts
Normal file
@@ -0,0 +1,398 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/external-annotation.ts
|
||||
/**
|
||||
* External Annotations — shared types, store logic, and SSE helpers.
|
||||
*
|
||||
* Runtime-agnostic: no node:fs, no node:http, no Bun APIs.
|
||||
* Both the Bun server handler and Pi server handler import this module
|
||||
* and wrap it with their respective HTTP transport layers.
|
||||
*
|
||||
* The store is generic — plan servers store Annotation objects,
|
||||
* review servers store CodeAnnotation objects. The mode-specific
|
||||
* input transformers handle validation and field assignment.
|
||||
*/
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Constraint for any annotation type the store can hold. */
export type StorableAnnotation = { id: string; source?: string };

/** Store-change events, serialized to clients via serializeSSEEvent. */
export type ExternalAnnotationEvent<T = unknown> =
  | { type: "snapshot"; annotations: T[] }
  | { type: "add"; annotations: T[] }
  | { type: "remove"; ids: string[] }
  | { type: "clear"; source?: string }
  | { type: "update"; id: string; annotation: T };
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SSE helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Heartbeat comment to keep SSE connections alive (sent every 30s). */
// A line starting with ":" is SSE comment syntax — clients ignore it.
export const HEARTBEAT_COMMENT = ":\n\n";

/** Interval in ms between heartbeat comments. */
export const HEARTBEAT_INTERVAL_MS = 30_000;
|
||||
|
||||
/** Encode an event as an SSE `data:` line. */
|
||||
export function serializeSSEEvent<T>(event: ExternalAnnotationEvent<T>): string {
|
||||
return `data: ${JSON.stringify(event)}\n\n`;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Input validation — shared helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Error result the input transformers return instead of throwing. */
export interface ParseError {
  /** Human-readable reason, suitable for a 400 response body. */
  error: string;
}
|
||||
|
||||
/**
|
||||
* Unwrap a POST body into an array of raw input objects.
|
||||
*
|
||||
* Accepts either:
|
||||
* - A single annotation object: `{ source: "...", ... }`
|
||||
* - A batch wrapper: `{ annotations: [{ source: "...", ... }, ...] }`
|
||||
*/
|
||||
function unwrapBody(body: unknown): Record<string, unknown>[] | ParseError {
|
||||
if (!body || typeof body !== "object") {
|
||||
return { error: "Request body must be a JSON object" };
|
||||
}
|
||||
|
||||
const obj = body as Record<string, unknown>;
|
||||
|
||||
// Batch format: { annotations: [...] }
|
||||
if (Array.isArray(obj.annotations)) {
|
||||
if (obj.annotations.length === 0) {
|
||||
return { error: "annotations array must not be empty" };
|
||||
}
|
||||
const items: Record<string, unknown>[] = [];
|
||||
for (let i = 0; i < obj.annotations.length; i++) {
|
||||
const item = obj.annotations[i];
|
||||
if (!item || typeof item !== "object") {
|
||||
return { error: `annotations[${i}] must be an object` };
|
||||
}
|
||||
items.push(item as Record<string, unknown>);
|
||||
}
|
||||
return items;
|
||||
}
|
||||
|
||||
// Single format: { source: "...", ... }
|
||||
if (typeof obj.source === "string") {
|
||||
return [obj as Record<string, unknown>];
|
||||
}
|
||||
|
||||
return { error: 'Missing required "source" field or "annotations" array' };
|
||||
}
|
||||
|
||||
function requireString(obj: Record<string, unknown>, field: string, index: number): string | ParseError {
|
||||
const val = obj[field];
|
||||
if (typeof val !== "string" || val.length === 0) {
|
||||
return { error: `annotations[${index}] missing required "${field}" field` };
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Plan mode transformer — produces Annotation objects
// ---------------------------------------------------------------------------

/** The Annotation type shape for plan mode (mirrors packages/ui/types.ts). */
interface PlanAnnotation {
  // Unique annotation ID (UUID).
  id: string;
  // Target block in the rendered plan; "external" for API-submitted annotations.
  blockId: string;
  // Character offsets within the block; externally submitted annotations use 0/0.
  startOffset: number;
  endOffset: number;
  type: string; // AnnotationType value
  // Free-form feedback text.
  text?: string;
  // Phrase the annotation is anchored to ("" when unanchored).
  originalText: string;
  // Creation timestamp (ms since epoch).
  // NOTE(review): spelled "createdA" here while ReviewAnnotation below uses
  // "createdAt" — looks like a typo carried from packages/ui/types.ts; confirm
  // against the UI before renaming, since consumers read this key.
  createdA: number;
  author?: string;
  // Originating integration, e.g. "agent-xxxx".
  source?: string;
}

/** Annotation types accepted from external plan-mode submissions. */
const VALID_PLAN_TYPES = ["DELETION", "COMMENT", "GLOBAL_COMMENT"];
|
||||
|
||||
export function transformPlanInput(
|
||||
body: unknown,
|
||||
): { annotations: PlanAnnotation[] } | ParseError {
|
||||
const items = unwrapBody(body);
|
||||
if ("error" in items) return items;
|
||||
|
||||
const annotations: PlanAnnotation[] = [];
|
||||
for (let i = 0; i < items.length; i++) {
|
||||
const obj = items[i];
|
||||
|
||||
const source = requireString(obj, "source", i);
|
||||
if (typeof source !== "string") return source;
|
||||
|
||||
// Must have text content
|
||||
if (typeof obj.text !== "string" || obj.text.length === 0) {
|
||||
return { error: `annotations[${i}] missing required "text" field` };
|
||||
}
|
||||
|
||||
// Validate type if provided, default to GLOBAL_COMMENT
|
||||
const type = typeof obj.type === "string" ? obj.type : "GLOBAL_COMMENT";
|
||||
if (!VALID_PLAN_TYPES.includes(type)) {
|
||||
return {
|
||||
error: `annotations[${i}] invalid type "${type}". Must be one of: ${VALID_PLAN_TYPES.join(", ")}`,
|
||||
};
|
||||
}
|
||||
|
||||
// DELETION requires originalText (the text to remove)
|
||||
if (type === "DELETION" && (typeof obj.originalText !== "string" || obj.originalText.length === 0)) {
|
||||
return { error: `annotations[${i}] DELETION type requires non-empty "originalText" field` };
|
||||
}
|
||||
|
||||
// COMMENT requires originalText so the renderer can pin it to a phrase.
|
||||
// External agents that want sidebar-only feedback should use GLOBAL_COMMENT
|
||||
// instead — without a phrase to anchor to, a COMMENT renders as an empty
|
||||
// quote bubble in the sidebar and exports as `Feedback on: ""`.
|
||||
if (type === "COMMENT" && (typeof obj.originalText !== "string" || obj.originalText.length === 0)) {
|
||||
return {
|
||||
error: `annotations[${i}] COMMENT requires non-empty "originalText" field. Use GLOBAL_COMMENT for sidebar-only feedback.`,
|
||||
};
|
||||
}
|
||||
|
||||
annotations.push({
|
||||
id: crypto.randomUUID(),
|
||||
blockId: "external",
|
||||
startOffset: 0,
|
||||
endOffset: 0,
|
||||
type,
|
||||
text: String(obj.text),
|
||||
originalText: typeof obj.originalText === "string" ? obj.originalText : "",
|
||||
createdA: Date.now(),
|
||||
author: typeof obj.author === "string" ? obj.author : undefined,
|
||||
source,
|
||||
});
|
||||
}
|
||||
|
||||
return { annotations };
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Review mode transformer — produces CodeAnnotation objects
// ---------------------------------------------------------------------------

/** The CodeAnnotation type shape for review mode (mirrors packages/ui/types.ts). */
interface ReviewAnnotation {
  // Unique annotation ID (UUID).
  id: string;
  type: string; // CodeAnnotationType value
  // "line" or "file" — see VALID_SCOPES; input defaults to "line".
  scope?: string;
  // Diff-relative file path the annotation targets.
  filePath: string;
  // Line range the annotation covers.
  lineStart: number;
  lineEnd: number;
  // Diff side: "old" or "new" — see VALID_SIDES; input defaults to "new".
  side: string;
  // Free-form comment text.
  text?: string;
  // Proposed replacement code.
  suggestedCode?: string;
  // The code being commented on / replaced.
  originalCode?: string;
  // Creation timestamp (ms since epoch).
  createdAt: number;
  author?: string;
  // Originating integration, e.g. "agent-xxxx".
  source?: string;
  // Agent review metadata (optional — only set by Claude review findings)
  severity?: string; // "important" | "nit" | "pre_existing"
  reasoning?: string; // Validation chain explaining how the issue was confirmed
}

/** Annotation types accepted from external review-mode submissions. */
const VALID_REVIEW_TYPES = ["comment", "suggestion", "concern"];
/** Diff sides an annotation can attach to. */
const VALID_SIDES = ["old", "new"];
/** Annotation scopes: a line range, or a whole file. */
const VALID_SCOPES = ["line", "file"];
|
||||
|
||||
export function transformReviewInput(
|
||||
body: unknown,
|
||||
): { annotations: ReviewAnnotation[] } | ParseError {
|
||||
const items = unwrapBody(body);
|
||||
if ("error" in items) return items;
|
||||
|
||||
const annotations: ReviewAnnotation[] = [];
|
||||
for (let i = 0; i < items.length; i++) {
|
||||
const obj = items[i];
|
||||
|
||||
const source = requireString(obj, "source", i);
|
||||
if (typeof source !== "string") return source;
|
||||
|
||||
const filePath = requireString(obj, "filePath", i);
|
||||
if (typeof filePath !== "string") return filePath;
|
||||
|
||||
if (typeof obj.lineStart !== "number") {
|
||||
return { error: `annotations[${i}] missing required "lineStart" field` };
|
||||
}
|
||||
if (typeof obj.lineEnd !== "number") {
|
||||
return { error: `annotations[${i}] missing required "lineEnd" field` };
|
||||
}
|
||||
|
||||
// side: optional, defaults to "new"
|
||||
const side = typeof obj.side === "string" ? obj.side : "new";
|
||||
if (!VALID_SIDES.includes(side)) {
|
||||
return {
|
||||
error: `annotations[${i}] invalid side "${side}". Must be one of: ${VALID_SIDES.join(", ")}`,
|
||||
};
|
||||
}
|
||||
|
||||
// type: optional, defaults to "comment"
|
||||
const type = typeof obj.type === "string" ? obj.type : "comment";
|
||||
if (!VALID_REVIEW_TYPES.includes(type)) {
|
||||
return {
|
||||
error: `annotations[${i}] invalid type "${type}". Must be one of: ${VALID_REVIEW_TYPES.join(", ")}`,
|
||||
};
|
||||
}
|
||||
|
||||
// scope: optional, defaults to "line"
|
||||
const scope = typeof obj.scope === "string" ? obj.scope : "line";
|
||||
if (!VALID_SCOPES.includes(scope)) {
|
||||
return {
|
||||
error: `annotations[${i}] invalid scope "${scope}". Must be one of: ${VALID_SCOPES.join(", ")}`,
|
||||
};
|
||||
}
|
||||
|
||||
// Must have at least text or suggestedCode
|
||||
if (typeof obj.text !== "string" && typeof obj.suggestedCode !== "string") {
|
||||
return {
|
||||
error: `annotations[${i}] must have at least one of: text, suggestedCode`,
|
||||
};
|
||||
}
|
||||
|
||||
annotations.push({
|
||||
id: crypto.randomUUID(),
|
||||
type,
|
||||
scope,
|
||||
filePath,
|
||||
lineStart: obj.lineStart,
|
||||
lineEnd: obj.lineEnd,
|
||||
side,
|
||||
text: typeof obj.text === "string" ? obj.text : undefined,
|
||||
suggestedCode: typeof obj.suggestedCode === "string" ? obj.suggestedCode : undefined,
|
||||
originalCode: typeof obj.originalCode === "string" ? obj.originalCode : undefined,
|
||||
createdAt: Date.now(),
|
||||
author: typeof obj.author === "string" ? obj.author : undefined,
|
||||
source,
|
||||
// Agent review metadata (optional — only set by Claude review findings)
|
||||
...(typeof obj.severity === "string" && { severity: obj.severity }),
|
||||
...(typeof obj.reasoning === "string" && { reasoning: obj.reasoning }),
|
||||
});
|
||||
}
|
||||
|
||||
return { annotations };
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Annotation Store (generic)
// ---------------------------------------------------------------------------

/** Callback invoked with every store mutation event. */
type MutationListener<T> = (event: ExternalAnnotationEvent<T>) => void;

/** Mutable annotation collection with mutation events and a version counter. */
export interface AnnotationStore<T extends StorableAnnotation> {
  /** Add fully-formed annotations. Returns the added annotations. */
  add(items: T[]): T[];
  /** Remove an annotation by ID. Returns true if found. */
  remove(id: string): boolean;
  /** Remove all annotations from a specific source. Returns count removed. */
  clearBySource(source: string): number;
  /** Update an annotation by ID. Returns the updated annotation, or null if not found. */
  update(id: string, fields: Partial<T>): T | null;
  /** Remove all annotations. Returns count removed. */
  clearAll(): number;
  /** Get all annotations (snapshot). */
  getAll(): T[];
  /** Monotonic version counter — incremented on every mutation. */
  readonly version: number;
  /** Register a listener for mutation events. Returns unsubscribe function. */
  onMutation(listener: MutationListener<T>): () => void;
}
|
||||
|
||||
/**
|
||||
* Create an in-memory annotation store.
|
||||
*
|
||||
* The store is runtime-agnostic — it holds data and emits events.
|
||||
* HTTP transport (SSE broadcasting, request parsing) is handled by
|
||||
* the server-specific adapter (Bun or Pi).
|
||||
*/
|
||||
export function createAnnotationStore<T extends StorableAnnotation>(): AnnotationStore<T> {
|
||||
const annotations: T[] = [];
|
||||
const listeners = new Set<MutationListener<T>>();
|
||||
let version = 0;
|
||||
|
||||
function emit(event: ExternalAnnotationEvent<T>): void {
|
||||
for (const listener of listeners) {
|
||||
try {
|
||||
listener(event);
|
||||
} catch {
|
||||
// Don't let a failing listener break the store
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
add(items) {
|
||||
if (items.length > 0) {
|
||||
for (const item of items) {
|
||||
annotations.push(item);
|
||||
}
|
||||
version++;
|
||||
emit({ type: "add", annotations: items });
|
||||
}
|
||||
return items;
|
||||
},
|
||||
|
||||
remove(id) {
|
||||
const idx = annotations.findIndex((a) => a.id === id);
|
||||
if (idx === -1) return false;
|
||||
annotations.splice(idx, 1);
|
||||
version++;
|
||||
emit({ type: "remove", ids: [id] });
|
||||
return true;
|
||||
},
|
||||
|
||||
update(id, fields) {
|
||||
const idx = annotations.findIndex((a) => a.id === id);
|
||||
if (idx === -1) return null;
|
||||
const merged = { ...annotations[idx], ...fields, id } as T;
|
||||
annotations[idx] = merged;
|
||||
version++;
|
||||
emit({ type: "update", id, annotation: merged });
|
||||
return merged;
|
||||
},
|
||||
|
||||
clearBySource(source) {
|
||||
const before = annotations.length;
|
||||
for (let i = annotations.length - 1; i >= 0; i--) {
|
||||
if (annotations[i].source === source) {
|
||||
annotations.splice(i, 1);
|
||||
}
|
||||
}
|
||||
const removed = before - annotations.length;
|
||||
if (removed > 0) {
|
||||
version++;
|
||||
emit({ type: "clear", source });
|
||||
}
|
||||
return removed;
|
||||
},
|
||||
|
||||
clearAll() {
|
||||
const count = annotations.length;
|
||||
if (count > 0) {
|
||||
annotations.length = 0;
|
||||
version++;
|
||||
emit({ type: "clear" });
|
||||
}
|
||||
return count;
|
||||
},
|
||||
|
||||
getAll() {
|
||||
return [...annotations];
|
||||
},
|
||||
|
||||
get version() {
|
||||
return version;
|
||||
},
|
||||
|
||||
onMutation(listener) {
|
||||
listeners.add(listener);
|
||||
return () => {
|
||||
listeners.delete(listener);
|
||||
};
|
||||
},
|
||||
};
|
||||
}
|
||||
6
extensions/plannotator/generated/favicon.ts
Normal file
6
extensions/plannotator/generated/favicon.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/favicon.ts
/** Inline SVG favicon markup (dark rounded tile with a white "P"). */
export const FAVICON_SVG = `<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64">
  <rect width="64" height="64" rx="14" fill="#070b14"/>
  <rect x="12" y="28" width="40" height="14" rx="3" fill="#E0BA55" opacity="0.35"/>
  <text x="32" y="46" text-anchor="middle" font-family="Inter,system-ui,sans-serif" font-weight="800" font-size="42" fill="white">P</text>
</svg>`;
|
||||
30
extensions/plannotator/generated/feedback-templates.ts
Normal file
30
extensions/plannotator/generated/feedback-templates.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/feedback-templates.ts
|
||||
/**
|
||||
* Shared feedback templates for all agent integrations.
|
||||
*
|
||||
* The plan deny template was tuned in #224 / commit 3dca977 to use strong
|
||||
* directive framing — Claude was ignoring softer phrasing.
|
||||
*
|
||||
* IMPORTANT: This module is imported by packages/ui/utils/parser.ts which is
|
||||
* bundled into the browser SPA. It must NOT import from ./prompts or ./config
|
||||
* (which depend on node:fs, node:os, node:child_process). Keep it self-contained.
|
||||
*
|
||||
* Server-side call sites use getPlanDeniedPrompt() from ./prompts directly.
|
||||
* This module is only kept for the browser's wrapFeedbackForAgent clipboard feature.
|
||||
*/
|
||||
|
||||
/** Optional context for planDenyFeedback. */
export interface PlanDenyFeedbackOptions {
  // Path of the saved plan file; when present, the deny prompt tells the
  // agent it may edit this file and resubmit its path.
  planFilePath?: string;
}
|
||||
|
||||
export const planDenyFeedback = (
|
||||
feedback: string,
|
||||
toolName: string = "ExitPlanMode",
|
||||
options?: PlanDenyFeedbackOptions,
|
||||
): string => {
|
||||
const planFileRule = options?.planFilePath
|
||||
? `- Your plan is saved at: ${options.planFilePath}\n You can edit this file to make targeted changes, then pass its path to ${toolName}.\n`
|
||||
: "";
|
||||
|
||||
return `YOUR PLAN WAS NOT APPROVED.\n\nYou MUST revise the plan to address ALL of the feedback below before calling ${toolName} again.\n\nRules:\n${planFileRule}- Do not resubmit the same plan unchanged.\n- Do NOT change the plan title (first # heading) unless the user explicitly asks you to.\n\n${feedback || "Plan changes requested"}`;
|
||||
};
|
||||
33
extensions/plannotator/generated/html-to-markdown.ts
Normal file
33
extensions/plannotator/generated/html-to-markdown.ts
Normal file
@@ -0,0 +1,33 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/html-to-markdown.ts
|
||||
/**
|
||||
* HTML-to-Markdown conversion via Turndown.
|
||||
*
|
||||
* Shared between the CLI (single HTML file / URL) and the server
|
||||
* (on-demand conversion for HTML files in folder mode).
|
||||
*/
|
||||
|
||||
import TurndownService from "turndown";
|
||||
// @ts-expect-error — @joplin/turndown-plugin-gfm ships JS only, no .d.ts (see declarations.d.ts for local types)
|
||||
import { gfm } from "@joplin/turndown-plugin-gfm";
|
||||
|
||||
// Module-level Turndown singleton — stateless, safe to share across calls.
const td = new TurndownService({
  headingStyle: "atx", // "# Heading" rather than underlined setext
  codeBlockStyle: "fenced", // ``` fences rather than 4-space indent
  bulletListMarker: "-",
});

// Enable GitHub-Flavored Markdown extensions (tables, strikethrough, task lists).
td.use(gfm);

// Strip <style> and <script> tags entirely (Turndown keeps unrecognised
// tags as blank by default, but their text content can leak through).
td.remove(["style", "script", "noscript"]);
|
||||
|
||||
/**
 * Convert an HTML string to Markdown.
 *
 * Uses a module-level TurndownService singleton (stateless, safe to reuse).
 * GFM tables, strikethrough, and task lists are supported via turndown-plugin-gfm.
 * <style>/<script>/<noscript> content is dropped (see td.remove above).
 *
 * @param html - Raw HTML markup (document or fragment).
 * @returns The Markdown rendering of `html`.
 */
export function htmlToMarkdown(html: string): string {
  return td.turndown(html);
}
|
||||
244
extensions/plannotator/generated/integrations-common.ts
Normal file
244
extensions/plannotator/generated/integrations-common.ts
Normal file
@@ -0,0 +1,244 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/integrations-common.ts
|
||||
import { existsSync, readFileSync } from "fs";
|
||||
import { join } from "path";
|
||||
|
||||
// --- Types ---

/** Settings for exporting a plan into an Obsidian vault. */
export interface ObsidianConfig {
  // Path to the vault root on disk.
  vaultPath: string;
  // Folder within the vault to write the note into.
  folder: string;
  // Markdown plan content to export.
  plan: string;
  filenameFormat?: string; // Custom format string, e.g. '{YYYY}-{MM}-{DD} - {title}'
  filenameSeparator?: "space" | "dash" | "underscore"; // Replace spaces in filename
}

/** Settings for exporting a plan to Bear. */
export interface BearConfig {
  // Markdown plan content to export.
  plan: string;
  // Comma-separated tag overrides; falls back to auto tags when blank.
  customTags?: string;
  // Where the hashtag line goes relative to the note body.
  tagPosition?: "prepend" | "append";
}

/** Settings for exporting a plan to Octarine. */
export interface OctarineConfig {
  // Markdown plan content to export.
  plan: string;
  // Target Octarine workspace.
  workspace: string;
  // Folder within the workspace.
  folder: string;
}

/** Outcome of an integration export attempt. */
export interface IntegrationResult {
  success: boolean;
  // Failure description when success is false.
  error?: string;
  // Path of the written note, when applicable.
  path?: string;
}
|
||||
|
||||
/**
|
||||
* Detect Obsidian vaults by reading Obsidian's config file
|
||||
* Returns array of vault paths found on the system
|
||||
*/
|
||||
export function detectObsidianVaults(): string[] {
|
||||
try {
|
||||
const home = process.env.HOME || process.env.USERPROFILE || "";
|
||||
let configPath: string;
|
||||
|
||||
// Platform-specific config locations
|
||||
if (process.platform === "darwin") {
|
||||
configPath = join(
|
||||
home,
|
||||
"Library/Application Support/obsidian/obsidian.json",
|
||||
);
|
||||
} else if (process.platform === "win32") {
|
||||
const appData = process.env.APPDATA || join(home, "AppData/Roaming");
|
||||
configPath = join(appData, "obsidian/obsidian.json");
|
||||
} else {
|
||||
// Linux
|
||||
configPath = join(home, ".config/obsidian/obsidian.json");
|
||||
}
|
||||
|
||||
if (!existsSync(configPath)) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const configContent = readFileSync(configPath, "utf-8");
|
||||
const config = JSON.parse(configContent);
|
||||
|
||||
if (!config.vaults || typeof config.vaults !== "object") {
|
||||
return [];
|
||||
}
|
||||
|
||||
// Extract vault paths, filter to ones that exist
|
||||
const vaults: string[] = [];
|
||||
for (const vaultId of Object.keys(config.vaults)) {
|
||||
const vault = config.vaults[vaultId];
|
||||
if (vault.path && existsSync(vault.path)) {
|
||||
vaults.push(vault.path);
|
||||
}
|
||||
}
|
||||
|
||||
return vaults;
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
// --- Frontmatter and Filename Generation ---
|
||||
|
||||
/**
|
||||
* Generate frontmatter for the note
|
||||
*/
|
||||
export function generateFrontmatter(tags: string[]): string {
|
||||
const now = new Date().toISOString();
|
||||
const tagList = tags.map((t) => t.toLowerCase()).join(", ");
|
||||
return `---
|
||||
created: ${now}
|
||||
source: plannotator
|
||||
tags: [${tagList}]
|
||||
---`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract title from markdown (first H1 heading)
|
||||
*/
|
||||
export function extractTitle(markdown: string): string {
|
||||
const h1Match = markdown.match(
|
||||
/^#\s+(?:Implementation\s+Plan:|Plan:)?\s*(.+)$/im,
|
||||
);
|
||||
if (h1Match) {
|
||||
// Clean up the title for use as filename
|
||||
return h1Match[1]
|
||||
.trim()
|
||||
.replace(/[<>:"/\\|?*(){}\[\]#~`]/g, "") // Remove invalid/problematic filename chars
|
||||
.replace(/\s+/g, " ") // Normalize whitespace
|
||||
.trim() // Re-trim after stripping
|
||||
.slice(0, 50); // Limit length
|
||||
}
|
||||
return "Plan";
|
||||
}
|
||||
|
||||
/** Default filename format matching original behavior */
// Renders like: "User Authentication - Jan 2, 2026 2-30pm"
export const DEFAULT_FILENAME_FORMAT =
  "{title} - {Mon} {D}, {YYYY} {h}-{mm}{ampm}";
|
||||
|
||||
/**
|
||||
* Generate filename from a format string with variable substitution.
|
||||
*
|
||||
* Supported variables:
|
||||
* {title} - Plan title from first H1 heading
|
||||
* {YYYY} - 4-digit year
|
||||
* {MM} - 2-digit month (01-12)
|
||||
* {DD} - 2-digit day (01-31)
|
||||
* {Mon} - Abbreviated month name (Jan, Feb, ...)
|
||||
* {D} - Day without leading zero
|
||||
* {HH} - 2-digit hour, 24h (00-23)
|
||||
* {h} - Hour without leading zero, 12h
|
||||
* {hh} - 2-digit hour, 12h (01-12)
|
||||
* {mm} - 2-digit minutes (00-59)
|
||||
* {ss} - 2-digit seconds (00-59)
|
||||
* {ampm} - am/pm
|
||||
*
|
||||
* Default format: '{title} - {Mon} {D}, {YYYY} {h}-{mm}{ampm}'
|
||||
* Example output: 'User Authentication - Jan 2, 2026 2-30pm.md'
|
||||
*/
|
||||
export function generateFilename(
|
||||
markdown: string,
|
||||
format?: string,
|
||||
separator?: "space" | "dash" | "underscore",
|
||||
): string {
|
||||
const title = extractTitle(markdown);
|
||||
const now = new Date();
|
||||
|
||||
const months = [
|
||||
"Jan",
|
||||
"Feb",
|
||||
"Mar",
|
||||
"Apr",
|
||||
"May",
|
||||
"Jun",
|
||||
"Jul",
|
||||
"Aug",
|
||||
"Sep",
|
||||
"Oct",
|
||||
"Nov",
|
||||
"Dec",
|
||||
];
|
||||
|
||||
const hour24 = now.getHours();
|
||||
const hour12 = hour24 % 12 || 12;
|
||||
const ampm = hour24 >= 12 ? "pm" : "am";
|
||||
|
||||
const vars: Record<string, string> = {
|
||||
title,
|
||||
YYYY: String(now.getFullYear()),
|
||||
MM: String(now.getMonth() + 1).padStart(2, "0"),
|
||||
DD: String(now.getDate()).padStart(2, "0"),
|
||||
Mon: months[now.getMonth()],
|
||||
D: String(now.getDate()),
|
||||
HH: String(hour24).padStart(2, "0"),
|
||||
h: String(hour12),
|
||||
hh: String(hour12).padStart(2, "0"),
|
||||
mm: String(now.getMinutes()).padStart(2, "0"),
|
||||
ss: String(now.getSeconds()).padStart(2, "0"),
|
||||
ampm,
|
||||
};
|
||||
|
||||
const template = format?.trim() || DEFAULT_FILENAME_FORMAT;
|
||||
const result = template.replace(
|
||||
/\{(\w+)\}/g,
|
||||
(match, key) => vars[key] ?? match,
|
||||
);
|
||||
|
||||
// Sanitize: remove characters invalid in filenames
|
||||
let sanitized = result
|
||||
.replace(/[<>:"/\\|?*]/g, "")
|
||||
.replace(/\s+/g, " ")
|
||||
.trim();
|
||||
|
||||
// Apply separator preference (replace spaces with dash or underscore)
|
||||
if (separator === "dash") {
|
||||
sanitized = sanitized.replace(/ /g, "-");
|
||||
} else if (separator === "underscore") {
|
||||
sanitized = sanitized.replace(/ /g, "_");
|
||||
}
|
||||
|
||||
return sanitized.endsWith(".md") ? sanitized : `${sanitized}.md`;
|
||||
}
|
||||
|
||||
// --- Bear Integration ---
|
||||
|
||||
export function stripH1(plan: string): string {
|
||||
return plan.replace(/^#\s+.+\n?/m, "").trimStart();
|
||||
}
|
||||
|
||||
export function buildHashtags(
|
||||
customTags: string | undefined,
|
||||
autoTags: string[],
|
||||
): string {
|
||||
if (customTags?.trim()) {
|
||||
return customTags
|
||||
.split(",")
|
||||
.map((t) => `#${t.trim()}`)
|
||||
.filter((t) => t !== "#")
|
||||
.join(" ");
|
||||
}
|
||||
return autoTags.map((t) => `#${t}`).join(" ");
|
||||
}
|
||||
|
||||
export function buildBearContent(
|
||||
body: string,
|
||||
hashtags: string,
|
||||
tagPosition: "prepend" | "append",
|
||||
): string {
|
||||
return tagPosition === "prepend"
|
||||
? `${hashtags}\n\n${body}`
|
||||
: `${body}\n\n${hashtags}`;
|
||||
}
|
||||
|
||||
// --- Octarine Integration ---
|
||||
|
||||
/**
|
||||
* Generate YAML frontmatter for an Octarine note.
|
||||
* Uses Octarine's property format (list-style tags, Status, Author, Last Edited).
|
||||
*/
|
||||
export function generateOctarineFrontmatter(tags: string[]): string {
|
||||
const now = new Date().toISOString().slice(0, 16); // YYYY-MM-DDTHH:MM
|
||||
const tagLines = tags.map((t) => ` - ${t.toLowerCase()}`).join("\n");
|
||||
return `---\ntags:\n${tagLines}\nStatus: Draft\nAuthor: plannotator\nLast Edited: ${now}\n---`;
|
||||
}
|
||||
19
extensions/plannotator/generated/path-utils.ts
Normal file
19
extensions/plannotator/generated/path-utils.ts
Normal file
@@ -0,0 +1,19 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/server/path-utils.ts
|
||||
/**
|
||||
* Strip a cwd prefix from an absolute path to get a repo-relative path.
|
||||
* Used by review agent transforms to convert absolute file paths from
|
||||
* agent output into diff-compatible relative paths.
|
||||
*
|
||||
* Uses path.relative for cross-platform support (Windows backslashes)
|
||||
* and normalizes to forward slashes for git diff path matching.
|
||||
*/
|
||||
import { relative } from "node:path";
|
||||
|
||||
export function toRelativePath(absolutePath: string, cwd?: string): string {
|
||||
if (!cwd) return absolutePath;
|
||||
const rel = relative(cwd, absolutePath);
|
||||
// Don't relativize if the result goes outside cwd (different drive, symlink escape)
|
||||
if (rel.startsWith("..")) return absolutePath;
|
||||
// Normalize to forward slashes for diff path matching
|
||||
return rel.replace(/\\/g, "/");
|
||||
}
|
||||
662
extensions/plannotator/generated/pr-github.ts
Normal file
662
extensions/plannotator/generated/pr-github.ts
Normal file
@@ -0,0 +1,662 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/pr-github.ts
|
||||
/**
|
||||
* GitHub-specific PR provider implementation.
|
||||
*
|
||||
* All functions use the `gh` CLI via the PRRuntime abstraction.
|
||||
*/
|
||||
|
||||
import type { PRRuntime, PRMetadata, PRContext, PRReviewThread, PRThreadComment, PRReviewFileComment, CommandResult, PRStackTree, PRStackNode, PRListItem } from "./pr-provider";
|
||||
import { encodeApiFilePath } from "./pr-provider";
|
||||
|
||||
// GitHub-specific PRRef shape (used internally)
interface GhPRRef {
  platform: "github";
  // Hostname — "github.com" or a GitHub Enterprise host.
  host: string;
  owner: string;
  repo: string;
  // Pull request number.
  number: number;
}
|
||||
|
||||
/** Build the --repo flag value: HOST/OWNER/REPO for GHE, OWNER/REPO for github.com */
|
||||
function repoFlag(ref: GhPRRef): string {
|
||||
if (ref.host !== "github.com") {
|
||||
return `${ref.host}/${ref.owner}/${ref.repo}`;
|
||||
}
|
||||
return `${ref.owner}/${ref.repo}`;
|
||||
}
|
||||
|
||||
/** Append --hostname to args for gh api / gh auth on GHE */
|
||||
function hostnameArgs(host: string, args: string[]): string[] {
|
||||
if (host !== "github.com") {
|
||||
return [...args, "--hostname", host];
|
||||
}
|
||||
return args;
|
||||
}
|
||||
|
||||
// --- Auth ---
|
||||
|
||||
export async function checkGhAuth(runtime: PRRuntime, host: string): Promise<void> {
|
||||
const result = await runtime.runCommand("gh", hostnameArgs(host, ["auth", "status"]));
|
||||
if (result.exitCode !== 0) {
|
||||
const stderr = result.stderr.trim();
|
||||
const hostHint = host !== "github.com" ? ` --hostname ${host}` : "";
|
||||
throw new Error(
|
||||
`GitHub CLI not authenticated. Run \`gh auth login${hostHint}\` first.\n${stderr}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export async function getGhUser(runtime: PRRuntime, host: string): Promise<string | null> {
|
||||
try {
|
||||
const result = await runtime.runCommand("gh", hostnameArgs(host, ["api", "user", "--jq", ".login"]));
|
||||
if (result.exitCode === 0 && result.stdout.trim()) {
|
||||
return result.stdout.trim();
|
||||
}
|
||||
return null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Fetch PR ---

/**
 * Fetch a PR's raw diff and metadata via the `gh` CLI.
 *
 * Runs `gh pr diff`, `gh pr view`, and `gh repo view` in parallel, then makes
 * a best-effort compare-API call to resolve the merge-base SHA.
 *
 * @throws Error when the diff or metadata command fails. The default-branch
 *   and merge-base lookups degrade silently instead of failing the fetch.
 */
export async function fetchGhPR(
  runtime: PRRuntime,
  ref: GhPRRef,
): Promise<{ metadata: PRMetadata; rawPatch: string }> {
  const repo = repoFlag(ref);

  // Fetch diff, metadata, and repository defaults in parallel.
  const [diffResult, viewResult, repoResult] = await Promise.all([
    runtime.runCommand("gh", [
      "pr", "diff", String(ref.number),
      "--repo", repo,
    ]),
    runtime.runCommand("gh", [
      "pr", "view", String(ref.number),
      "--repo", repo,
      "--json", "id,title,author,baseRefName,headRefName,baseRefOid,headRefOid,url",
    ]),
    runtime.runCommand("gh", [
      "repo", "view", repo,
      "--json", "defaultBranchRef",
      "--jq", ".defaultBranchRef.name",
    ]),
  ]);

  if (diffResult.exitCode !== 0) {
    throw new Error(
      `Failed to fetch PR diff: ${diffResult.stderr.trim() || `exit code ${diffResult.exitCode}`}`,
    );
  }

  if (viewResult.exitCode !== 0) {
    throw new Error(
      `Failed to fetch PR metadata: ${viewResult.stderr.trim() || `exit code ${viewResult.exitCode}`}`,
    );
  }

  // Shape of `gh pr view --json` output for the fields requested above.
  const raw = JSON.parse(viewResult.stdout) as {
    id: string;
    title: string;
    author: { login: string };
    baseRefName: string;
    headRefName: string;
    baseRefOid: string;
    headRefOid: string;
    url: string;
  };

  // Fetch the merge-base SHA — the common ancestor commit GitHub uses to compute the PR diff.
  // baseSha (baseRefOid) is the tip of the base branch, which may have moved since the branch point.
  // File contents must be fetched at the merge-base to match the diff hunks.
  let mergeBaseSha: string | undefined;
  try {
    const compareResult = await runtime.runCommand("gh", hostnameArgs(ref.host, [
      "api",
      `repos/${ref.owner}/${ref.repo}/compare/${raw.baseRefOid}...${raw.headRefOid}`,
      "--jq", ".merge_base_commit.sha",
    ]));
    if (compareResult.exitCode === 0 && compareResult.stdout.trim()) {
      mergeBaseSha = compareResult.stdout.trim();
    }
  } catch { /* fallback to baseSha if compare API fails */ }

  const metadata: PRMetadata = {
    platform: "github",
    host: ref.host,
    owner: ref.owner,
    repo: ref.repo,
    number: ref.number,
    prNodeId: raw.id,
    title: raw.title,
    author: raw.author.login,
    baseBranch: raw.baseRefName,
    headBranch: raw.headRefName,
    // `gh repo view` failure (or a literal "null" output) leaves this unset.
    defaultBranch: repoResult.exitCode === 0 && repoResult.stdout.trim() && repoResult.stdout.trim() !== "null"
      ? repoResult.stdout.trim()
      : undefined,
    baseSha: raw.baseRefOid,
    headSha: raw.headRefOid,
    mergeBaseSha,
    url: raw.url,
  };

  return { metadata, rawPatch: diffResult.stdout };
}
|
||||
|
||||
// --- PR Context ---

// Fields requested from `gh pr view --json` for the PR context payload.
const GH_CONTEXT_FIELDS = [
  "body", "state", "isDraft", "labels",
  "comments", "reviews", "reviewDecision",
  "mergeable", "mergeStateStatus",
  "statusCheckRollup", "closingIssuesReferences",
].join(",");
|
||||
|
||||
function parseGhPRContext(raw: Record<string, unknown>): PRContext {
|
||||
const arr = (v: unknown): unknown[] => (Array.isArray(v) ? v : []);
|
||||
const str = (v: unknown): string => (typeof v === "string" ? v : "");
|
||||
const login = (v: unknown): string =>
|
||||
typeof v === "object" && v !== null && "login" in v
|
||||
? String((v as { login: unknown }).login || "")
|
||||
: "";
|
||||
|
||||
return {
|
||||
body: str(raw.body),
|
||||
state: str(raw.state),
|
||||
isDraft: raw.isDraft === true,
|
||||
labels: arr(raw.labels).map((l: any) => ({
|
||||
name: str(l?.name),
|
||||
color: str(l?.color),
|
||||
})),
|
||||
reviewDecision: str(raw.reviewDecision),
|
||||
mergeable: str(raw.mergeable),
|
||||
mergeStateStatus: str(raw.mergeStateStatus),
|
||||
comments: arr(raw.comments).map((c: any) => ({
|
||||
id: str(c?.id),
|
||||
author: login(c?.author),
|
||||
body: str(c?.body),
|
||||
createdAt: str(c?.createdAt),
|
||||
url: str(c?.url),
|
||||
})),
|
||||
reviews: arr(raw.reviews).map((r: any) => ({
|
||||
id: str(r?.id),
|
||||
author: login(r?.author),
|
||||
state: str(r?.state),
|
||||
body: str(r?.body),
|
||||
submittedAt: str(r?.submittedAt),
|
||||
...(r?.url ? { url: str(r.url) } : {}),
|
||||
})),
|
||||
reviewThreads: [], // populated via GraphQL after initial fetch
|
||||
checks: arr(raw.statusCheckRollup).map((c: any) => ({
|
||||
name: str(c?.name),
|
||||
status: str(c?.status),
|
||||
conclusion: typeof c?.conclusion === "string" ? c.conclusion : null,
|
||||
workflowName: str(c?.workflowName),
|
||||
detailsUrl: str(c?.detailsUrl),
|
||||
})),
|
||||
linkedIssues: arr(raw.closingIssuesReferences).map((i: any) => ({
|
||||
number: typeof i?.number === "number" ? i.number : 0,
|
||||
url: str(i?.url),
|
||||
repo: i?.repository
|
||||
? `${login(i.repository.owner)}/${str(i.repository.name)}`
|
||||
: "",
|
||||
})),
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Fetch the contextual PR data (body, labels, comments, reviews, checks,
 * linked issues) via `gh pr view --json`.
 *
 * Inline review threads are fetched separately via GraphQL; if that call
 * fails, reviewThreads degrades to [] instead of failing the whole fetch.
 *
 * @throws Error when the primary `gh pr view` command fails.
 */
export async function fetchGhPRContext(
  runtime: PRRuntime,
  ref: GhPRRef,
): Promise<PRContext> {
  const repo = repoFlag(ref);

  const result = await runtime.runCommand("gh", [
    "pr", "view", String(ref.number),
    "--repo", repo,
    "--json", GH_CONTEXT_FIELDS,
  ]);

  if (result.exitCode !== 0) {
    throw new Error(
      `Failed to fetch PR context: ${result.stderr.trim() || `exit code ${result.exitCode}`}`,
    );
  }

  const raw = JSON.parse(result.stdout) as Record<string, unknown>;
  const context = parseGhPRContext(raw);

  // Fetch inline review threads via GraphQL (parallel-safe, non-blocking failure)
  try {
    context.reviewThreads = await fetchGhReviewThreads(runtime, ref);
  } catch {
    // GraphQL may not be available or may fail — degrade gracefully
    context.reviewThreads = [];
  }

  return context;
}
|
||||
|
||||
// --- Review Threads (GraphQL) ---
|
||||
|
||||
// GraphQL query for a PR's inline review threads. Fetches the first 100
// threads with up to 50 comments each; no deeper pagination is attempted
// here — callers treat the result as best-effort context.
const REVIEW_THREADS_QUERY = `
query($owner: String!, $repo: String!, $number: Int!) {
repository(owner: $owner, name: $repo) {
pullRequest(number: $number) {
reviewThreads(first: 100) {
nodes {
id
isResolved
isOutdated
line
startLine
path
diffSide
comments(first: 50) {
nodes {
id
body
author { login }
createdAt
url
diffHunk
}
}
}
}
}
}
}`;
|
||||
|
||||
async function fetchGhReviewThreads(
|
||||
runtime: PRRuntime,
|
||||
ref: GhPRRef,
|
||||
): Promise<PRReviewThread[]> {
|
||||
const result = await runtime.runCommand("gh", hostnameArgs(ref.host, [
|
||||
"api", "graphql",
|
||||
"-f", `query=${REVIEW_THREADS_QUERY}`,
|
||||
"-f", `owner=${ref.owner}`,
|
||||
"-f", `repo=${ref.repo}`,
|
||||
"-F", `number=${ref.number}`,
|
||||
]));
|
||||
|
||||
if (result.exitCode !== 0) return [];
|
||||
|
||||
const data = JSON.parse(result.stdout);
|
||||
const threads = data?.data?.repository?.pullRequest?.reviewThreads?.nodes;
|
||||
if (!Array.isArray(threads)) return [];
|
||||
|
||||
return threads.map((t: any): PRReviewThread => ({
|
||||
id: String(t.id ?? ''),
|
||||
isResolved: t.isResolved === true,
|
||||
isOutdated: t.isOutdated === true,
|
||||
path: String(t.path ?? ''),
|
||||
line: typeof t.line === 'number' ? t.line : null,
|
||||
startLine: typeof t.startLine === 'number' ? t.startLine : null,
|
||||
diffSide: t.diffSide === 'LEFT' || t.diffSide === 'RIGHT' ? t.diffSide : null,
|
||||
comments: Array.isArray(t.comments?.nodes)
|
||||
? t.comments.nodes.map((c: any): PRThreadComment => ({
|
||||
id: String(c.id ?? ''),
|
||||
author: c.author?.login ? String(c.author.login) : '',
|
||||
body: String(c.body ?? ''),
|
||||
createdAt: String(c.createdAt ?? ''),
|
||||
url: String(c.url ?? ''),
|
||||
...(c.diffHunk ? { diffHunk: String(c.diffHunk) } : {}),
|
||||
}))
|
||||
: [],
|
||||
}));
|
||||
}
|
||||
|
||||
// --- File Content ---
|
||||
|
||||
export async function fetchGhPRFileContent(
|
||||
runtime: PRRuntime,
|
||||
ref: GhPRRef,
|
||||
sha: string,
|
||||
filePath: string,
|
||||
): Promise<string | null> {
|
||||
const result = await runtime.runCommand("gh", hostnameArgs(ref.host, [
|
||||
"api",
|
||||
`repos/${ref.owner}/${ref.repo}/contents/${encodeApiFilePath(filePath)}?ref=${sha}`,
|
||||
"--jq", ".content",
|
||||
]));
|
||||
|
||||
if (result.exitCode !== 0) return null;
|
||||
|
||||
const base64Content = result.stdout.trim();
|
||||
if (!base64Content) return null;
|
||||
|
||||
// GitHub returns base64-encoded content with newlines
|
||||
const cleaned = base64Content.replace(/\n/g, "");
|
||||
try {
|
||||
return Buffer.from(cleaned, "base64").toString("utf-8");
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Viewed Files ---
|
||||
|
||||
/**
 * Fetch the per-file "viewed" state for a GitHub PR via GraphQL.
 * Returns a map of { filePath: isViewed } where isViewed is true for
 * VIEWED or DISMISSED states (i.e., the file was reviewed but may need
 * re-review after new commits).
 *
 * @param runtime Subprocess runner used to invoke `gh`.
 * @param ref     GitHub PR reference (owner/repo/number/host).
 * @throws when any page request fails or GraphQL reports errors.
 */
export async function fetchGhPRViewedFiles(
  runtime: PRRuntime,
  ref: GhPRRef,
): Promise<Record<string, boolean>> {
  // Paginated query over the PR's files; $cursor is omitted on the first page.
  const query = `
query($owner: String!, $repo: String!, $number: Int!, $cursor: String) {
repository(owner: $owner, name: $repo) {
pullRequest(number: $number) {
files(first: 100, after: $cursor) {
nodes {
path
viewerViewedState
}
pageInfo {
hasNextPage
endCursor
}
}
}
}
}
`;

  const result: Record<string, boolean> = {};
  let cursor: string | null = null;

  // Paginate through all files (GitHub returns max 100 per page)
  do {
    const args = hostnameArgs(ref.host, [
      "api", "graphql",
      "-f", `query=${query}`,
      "-F", `owner=${ref.owner}`,
      "-F", `repo=${ref.repo}`,
      "-F", `number=${ref.number}`,
    ]);
    // Only pass the cursor once we have one; GraphQL treats a missing
    // $cursor as "start from the first page".
    if (cursor) {
      args.push("-F", `cursor=${cursor}`);
    }

    const res = await runtime.runCommand("gh", args);
    if (res.exitCode !== 0) {
      throw new Error(
        `Failed to fetch PR viewed files: ${res.stderr.trim() || `exit code ${res.exitCode}`}`,
      );
    }

    const data = JSON.parse(res.stdout) as {
      data?: {
        repository?: {
          pullRequest?: {
            files?: {
              nodes: Array<{ path: string; viewerViewedState: string }>;
              pageInfo: { hasNextPage: boolean; endCursor: string | null };
            };
          };
        };
      };
      errors?: Array<{ message: string }>;
    };

    // GraphQL can return HTTP 200 with an errors array — surface the first.
    if (data.errors?.length) {
      throw new Error(`GraphQL error: ${data.errors[0].message}`);
    }

    const files = data.data?.repository?.pullRequest?.files;
    if (!files) break;

    for (const node of files.nodes) {
      // VIEWED = explicitly marked as viewed
      // DISMISSED = was viewed but new commits arrived (still "was reviewed")
      result[node.path] = node.viewerViewedState === "VIEWED" || node.viewerViewedState === "DISMISSED";
    }

    cursor = files.pageInfo.hasNextPage ? files.pageInfo.endCursor : null;
  } while (cursor !== null);

  return result;
}
|
||||
|
||||
/**
|
||||
* Mark or unmark a set of files as viewed in a GitHub PR via GraphQL mutations.
|
||||
* Uses Promise.allSettled so a single file failure doesn't block the rest.
|
||||
* Throws only if ALL mutations fail.
|
||||
*/
|
||||
export async function markGhFilesViewed(
|
||||
runtime: PRRuntime,
|
||||
ref: GhPRRef,
|
||||
prNodeId: string,
|
||||
filePaths: string[],
|
||||
viewed: boolean,
|
||||
): Promise<void> {
|
||||
if (filePaths.length === 0) return;
|
||||
|
||||
const mutationName = viewed ? "markFileAsViewed" : "unmarkFileAsViewed";
|
||||
const mutation = `
|
||||
mutation($id: ID!, $path: String!) {
|
||||
${mutationName}(input: { pullRequestId: $id, path: $path }) {
|
||||
clientMutationId
|
||||
}
|
||||
}
|
||||
`;
|
||||
|
||||
const results = await Promise.allSettled(
|
||||
filePaths.map((path) =>
|
||||
runtime.runCommandWithInput
|
||||
? runtime.runCommand("gh", hostnameArgs(ref.host, [
|
||||
"api", "graphql",
|
||||
"-f", `query=${mutation}`,
|
||||
"-F", `id=${prNodeId}`,
|
||||
"-F", `path=${path}`,
|
||||
]))
|
||||
: Promise.reject(new Error("Runtime does not support commands")),
|
||||
),
|
||||
);
|
||||
|
||||
const failures = results.filter((r): r is PromiseRejectedResult => r.status === "rejected");
|
||||
if (failures.length === filePaths.length) {
|
||||
throw new Error(
|
||||
`Failed to ${mutationName} all files: ${failures[0].reason}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// --- Submit PR Review ---
|
||||
|
||||
export async function submitGhPRReview(
|
||||
runtime: PRRuntime,
|
||||
ref: GhPRRef,
|
||||
headSha: string,
|
||||
action: "approve" | "comment",
|
||||
body: string,
|
||||
fileComments: PRReviewFileComment[],
|
||||
): Promise<void> {
|
||||
const payload = JSON.stringify({
|
||||
commit_id: headSha,
|
||||
body,
|
||||
event: action === "approve" ? "APPROVE" : "COMMENT",
|
||||
comments: fileComments,
|
||||
});
|
||||
|
||||
const endpoint = `repos/${ref.owner}/${ref.repo}/pulls/${ref.number}/reviews`;
|
||||
|
||||
let result: CommandResult;
|
||||
|
||||
if (runtime.runCommandWithInput) {
|
||||
result = await runtime.runCommandWithInput(
|
||||
"gh",
|
||||
hostnameArgs(ref.host, ["api", endpoint, "--method", "POST", "--input", "-"]),
|
||||
payload,
|
||||
);
|
||||
} else {
|
||||
throw new Error("Runtime does not support stdin input; cannot submit PR review");
|
||||
}
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
const message = result.stderr.trim() || result.stdout.trim() || `exit code ${result.exitCode}`;
|
||||
throw new Error(`Failed to submit PR review: ${message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// --- Stack Tree (GraphQL) ---
|
||||
|
||||
/** Raw PR node shape returned by the stack GraphQL queries below. */
type StackPRNode = { number: number; title: string; url: string; baseRefName: string; headRefName: string; state: string };
|
||||
|
||||
// Build the GraphQL query used to look up PRs by branch name.
// kind === "head": find the PR(s) whose head is a given branch (first: 5);
// kind === "base": find PRs stacked on top of a given branch (first: 10).
function stackPRQuery(kind: "head" | "base"): string {
  const varName = kind === "head" ? "headRefName" : "baseRefName";
  const first = kind === "head" ? 5 : 10;
  return `
query($owner: String!, $repo: String!, $${varName}: String!) {
repository(owner: $owner, name: $repo) {
pullRequests(first: ${first}, ${varName}: $${varName}, states: [OPEN, MERGED]) {
nodes { number title url baseRefName headRefName state }
}
}
}`;
}
|
||||
|
||||
async function queryPRsByRef(
|
||||
runtime: PRRuntime,
|
||||
ref: GhPRRef,
|
||||
kind: "head" | "base",
|
||||
refName: string,
|
||||
): Promise<StackPRNode[]> {
|
||||
const varName = kind === "head" ? "headRefName" : "baseRefName";
|
||||
const result = await runtime.runCommand("gh", hostnameArgs(ref.host, [
|
||||
"api", "graphql",
|
||||
"-f", `query=${stackPRQuery(kind)}`,
|
||||
"-f", `owner=${ref.owner}`,
|
||||
"-f", `repo=${ref.repo}`,
|
||||
"-f", `${varName}=${refName}`,
|
||||
]));
|
||||
if (result.exitCode !== 0) return [];
|
||||
const data = JSON.parse(result.stdout);
|
||||
const prs = data?.data?.repository?.pullRequests?.nodes;
|
||||
return Array.isArray(prs) ? prs : [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Walk up and down the PR stack from the current PR, resolving
|
||||
* PR numbers/titles for every node in the chain.
|
||||
*
|
||||
* Up: walk from currentPR.baseBranch → defaultBranch (ancestors)
|
||||
* Down: walk from currentPR.headBranch → leaf PRs (descendants)
|
||||
*/
|
||||
export async function fetchGhPRStack(
|
||||
runtime: PRRuntime,
|
||||
ref: GhPRRef,
|
||||
metadata: PRMetadata,
|
||||
): Promise<PRStackTree | null> {
|
||||
if (metadata.platform !== "github") return null;
|
||||
const defaultBranch = metadata.defaultBranch;
|
||||
if (!defaultBranch) return null;
|
||||
|
||||
const currentNode: PRStackNode = {
|
||||
branch: metadata.headBranch,
|
||||
number: metadata.number,
|
||||
title: metadata.title,
|
||||
url: metadata.url,
|
||||
isCurrent: true,
|
||||
isDefaultBranch: false,
|
||||
};
|
||||
|
||||
// Walk up: find the PR whose headRefName === baseBranch, repeat
|
||||
const ancestors: PRStackNode[] = [];
|
||||
let nextHead = metadata.baseBranch;
|
||||
const maxDepth = 10;
|
||||
|
||||
for (let i = 0; i < maxDepth; i++) {
|
||||
if (nextHead === defaultBranch) break;
|
||||
|
||||
const prs = await queryPRsByRef(runtime, ref, "head", nextHead);
|
||||
if (prs.length === 0) {
|
||||
ancestors.push({ branch: nextHead, isCurrent: false, isDefaultBranch: false });
|
||||
break;
|
||||
}
|
||||
|
||||
const pr = prs[0];
|
||||
ancestors.push({
|
||||
branch: pr.headRefName,
|
||||
number: pr.number,
|
||||
title: pr.title,
|
||||
url: pr.url,
|
||||
isCurrent: false,
|
||||
isDefaultBranch: false,
|
||||
state: (pr.state === 'MERGED' ? 'merged' : pr.state === 'CLOSED' ? 'closed' : 'open') as PRStackNode['state'],
|
||||
});
|
||||
nextHead = pr.baseRefName;
|
||||
}
|
||||
|
||||
// Walk down: find PRs whose baseRefName === current headBranch, repeat
|
||||
const descendants: PRStackNode[] = [];
|
||||
let nextBase = metadata.headBranch;
|
||||
|
||||
for (let i = 0; i < maxDepth; i++) {
|
||||
const prs = await queryPRsByRef(runtime, ref, "base", nextBase);
|
||||
if (prs.length === 0) break;
|
||||
|
||||
const pr = prs[0];
|
||||
descendants.push({
|
||||
branch: pr.headRefName,
|
||||
number: pr.number,
|
||||
title: pr.title,
|
||||
url: pr.url,
|
||||
isCurrent: false,
|
||||
isDefaultBranch: false,
|
||||
state: (pr.state === 'MERGED' ? 'merged' : pr.state === 'CLOSED' ? 'closed' : 'open') as PRStackNode['state'],
|
||||
});
|
||||
nextBase = pr.headRefName;
|
||||
}
|
||||
|
||||
// Build tree: defaultBranch → ancestors (reversed) → current → descendants
|
||||
const nodes: PRStackNode[] = [
|
||||
{ branch: defaultBranch, isCurrent: false, isDefaultBranch: true },
|
||||
...ancestors.reverse(),
|
||||
currentNode,
|
||||
...descendants,
|
||||
];
|
||||
|
||||
return { nodes };
|
||||
}
|
||||
|
||||
// --- PR List ---
|
||||
|
||||
export async function fetchGhPRList(
|
||||
runtime: PRRuntime,
|
||||
ref: GhPRRef,
|
||||
): Promise<PRListItem[]> {
|
||||
const result = await runtime.runCommand("gh", [
|
||||
"pr", "list",
|
||||
"--repo", repoFlag(ref),
|
||||
"--json", "number,title,author,url,baseRefName,state",
|
||||
"--limit", "30",
|
||||
"--state", "all",
|
||||
]);
|
||||
|
||||
if (result.exitCode !== 0) return [];
|
||||
|
||||
const raw = JSON.parse(result.stdout) as Array<{
|
||||
number: number;
|
||||
title: string;
|
||||
author: { login: string };
|
||||
url: string;
|
||||
baseRefName: string;
|
||||
state: string;
|
||||
}>;
|
||||
|
||||
return raw.map((pr) => ({
|
||||
id: String(pr.number),
|
||||
number: pr.number,
|
||||
title: pr.title,
|
||||
author: pr.author.login,
|
||||
url: pr.url,
|
||||
baseBranch: pr.baseRefName,
|
||||
state: (pr.state === "OPEN" ? "open" : pr.state === "MERGED" ? "merged" : "closed") as PRListItem["state"],
|
||||
}));
|
||||
}
|
||||
521
extensions/plannotator/generated/pr-gitlab.ts
Normal file
521
extensions/plannotator/generated/pr-gitlab.ts
Normal file
@@ -0,0 +1,521 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/pr-gitlab.ts
|
||||
/**
|
||||
* GitLab-specific MR provider implementation.
|
||||
*
|
||||
* All functions use the `glab` CLI via the PRRuntime abstraction.
|
||||
* Self-hosted instances are supported via the --hostname flag.
|
||||
*/
|
||||
|
||||
import type { PRRuntime, PRMetadata, PRContext, PRReviewFileComment, CommandResult } from "./pr-provider";
|
||||
import { encodeApiFilePath } from "./pr-provider";
|
||||
|
||||
// GitLab-specific MRRef shape (used internally)
|
||||
interface GlMRRef {
  // Discriminant: always "gitlab" for this ref shape.
  platform: "gitlab";
  // GitLab instance hostname — "gitlab.com" or a self-hosted domain.
  host: string;
  // Full namespace path, e.g. "group/subgroup/project" (unencoded).
  projectPath: string;
  // Project-scoped MR number (iid), not the instance-global MR id.
  iid: number;
}
|
||||
|
||||
/** URL-encode the project path for GitLab API (group/project → group%2Fproject) */
|
||||
function encodeProject(projectPath: string): string {
|
||||
return encodeURIComponent(projectPath);
|
||||
}
|
||||
|
||||
/** Build glab API args with optional --hostname for self-hosted */
|
||||
function apiArgs(host: string, endpoint: string, extra: string[] = []): string[] {
|
||||
const args = ["api", endpoint, ...extra];
|
||||
if (host !== "gitlab.com") {
|
||||
args.push("--hostname", host);
|
||||
}
|
||||
return args;
|
||||
}
|
||||
|
||||
/** Shape of each entry from the GitLab merge_request diffs API */
interface GitLabDiffEntry {
  // Raw hunk text ("@@ ..." sections) without a `diff --git` header.
  diff: string;
  // Path before the change; equals new_path for additions in practice —
  // TODO confirm against the GitLab API response.
  old_path: string;
  // Path after the change.
  new_path: string;
  // Change-kind flags; used to synthesize the unified-diff headers.
  new_file: boolean;
  deleted_file: boolean;
  renamed_file: boolean;
}
|
||||
|
||||
/** Parse JSON array from glab api --paginate output (already merged by glab) */
|
||||
function parsePaginatedArray<T>(stdout: string): T[] {
|
||||
return JSON.parse(stdout) as T[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Reconstruct a unified patch from GitLab's merge_request diffs API response.
|
||||
*
|
||||
* Each entry has: { diff, old_path, new_path, new_file, deleted_file, renamed_file }
|
||||
* We construct proper `diff --git` headers that the UI parser expects.
|
||||
*/
|
||||
function reconstructPatch(diffs: GitLabDiffEntry[]): string {
|
||||
const parts: string[] = [];
|
||||
|
||||
for (const d of diffs) {
|
||||
const aPath = d.new_file ? "/dev/null" : `a/${d.old_path}`;
|
||||
const bPath = d.deleted_file ? "/dev/null" : `b/${d.new_path}`;
|
||||
const displayOld = d.new_file ? d.new_path : d.old_path;
|
||||
const displayNew = d.deleted_file ? d.old_path : d.new_path;
|
||||
|
||||
let header = `diff --git a/${displayOld} b/${displayNew}`;
|
||||
if (d.renamed_file) {
|
||||
header += `\nrename from ${d.old_path}\nrename to ${d.new_path}`;
|
||||
}
|
||||
if (d.new_file) {
|
||||
header += "\nnew file mode 100644";
|
||||
}
|
||||
if (d.deleted_file) {
|
||||
header += "\ndeleted file mode 100644";
|
||||
}
|
||||
|
||||
parts.push(`${header}\n--- ${aPath}\n+++ ${bPath}\n${d.diff}`);
|
||||
}
|
||||
|
||||
return parts.join("");
|
||||
}
|
||||
|
||||
// --- Auth ---
|
||||
|
||||
export async function checkGlAuth(runtime: PRRuntime, host: string): Promise<void> {
|
||||
const args = ["auth", "status"];
|
||||
if (host !== "gitlab.com") {
|
||||
args.push("--hostname", host);
|
||||
}
|
||||
const result = await runtime.runCommand("glab", args);
|
||||
if (result.exitCode !== 0) {
|
||||
const stderr = result.stderr.trim();
|
||||
const hostHint = host !== "gitlab.com" ? ` --hostname ${host}` : "";
|
||||
throw new Error(
|
||||
`GitLab CLI not authenticated. Run \`glab auth login${hostHint}\` first.\n${stderr}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export async function getGlUser(runtime: PRRuntime, host: string): Promise<string | null> {
|
||||
try {
|
||||
const result = await runtime.runCommand("glab", apiArgs(host, "/user"));
|
||||
if (result.exitCode === 0 && result.stdout.trim()) {
|
||||
const user = JSON.parse(result.stdout) as { username?: string };
|
||||
return user.username ?? null;
|
||||
}
|
||||
return null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Fetch MR ---
|
||||
|
||||
/**
 * Fetch a GitLab MR's metadata and its diff as a unified patch.
 *
 * Runs the paginated diffs fetch and the MR view in parallel via `glab api`,
 * reconstructs a `diff --git`-style patch from the structured diff entries,
 * and best-effort resolves the target project's default branch.
 *
 * @throws when either API call fails, or the MR has no diff_refs (e.g. it
 *         was merged and the source branch deleted).
 */
export async function fetchGlMR(
  runtime: PRRuntime,
  ref: GlMRRef,
): Promise<{ metadata: PRMetadata; rawPatch: string }> {
  const encoded = encodeProject(ref.projectPath);

  // Fetch diff and metadata in parallel via glab api (supports --hostname for self-hosted)
  const [diffResult, viewResult] = await Promise.all([
    runtime.runCommand("glab", apiArgs(ref.host, `projects/${encoded}/merge_requests/${ref.iid}/diffs?per_page=100`, ["--paginate"])),
    runtime.runCommand("glab", apiArgs(ref.host, `projects/${encoded}/merge_requests/${ref.iid}`)),
  ]);

  if (diffResult.exitCode !== 0) {
    throw new Error(
      `Failed to fetch MR diff: ${diffResult.stderr.trim() || `exit code ${diffResult.exitCode}`}`,
    );
  }

  if (viewResult.exitCode !== 0) {
    throw new Error(
      `Failed to fetch MR metadata: ${viewResult.stderr.trim() || `exit code ${viewResult.exitCode}`}`,
    );
  }

  // Reconstruct unified patch from structured API response
  const diffs = parsePaginatedArray<GitLabDiffEntry>(diffResult.stdout);
  const rawPatch = reconstructPatch(diffs);

  const raw = JSON.parse(viewResult.stdout) as {
    title: string;
    author: { username: string };
    source_branch: string;
    target_branch: string;
    target_project_id?: number;
    diff_refs: { base_sha: string; head_sha: string; start_sha: string } | null;
    web_url: string;
  };

  // diff_refs is required downstream (position SHAs for inline comments).
  if (!raw.diff_refs) {
    throw new Error("MR has no diff refs — it may have been merged or the source branch deleted.");
  }

  // Resolve the default branch from the target project when the MR comes
  // from a fork (target_project_id differs from the ref's project).
  let defaultBranch: string | undefined;
  const projectEndpoint = typeof raw.target_project_id === "number"
    ? `projects/${raw.target_project_id}`
    : `projects/${encoded}`;
  try {
    const projectResult = await runtime.runCommand("glab", apiArgs(ref.host, projectEndpoint));
    if (projectResult.exitCode === 0 && projectResult.stdout.trim()) {
      const project = JSON.parse(projectResult.stdout) as { default_branch?: string };
      defaultBranch = project.default_branch;
    }
  } catch { /* default branch is best-effort metadata */ }

  const metadata: PRMetadata = {
    platform: "gitlab",
    host: ref.host,
    projectPath: ref.projectPath,
    iid: ref.iid,
    title: raw.title,
    author: raw.author.username,
    baseBranch: raw.target_branch,
    headBranch: raw.source_branch,
    defaultBranch,
    baseSha: raw.diff_refs.base_sha,
    headSha: raw.diff_refs.head_sha,
    url: raw.web_url,
  };

  return { metadata, rawPatch };
}
|
||||
|
||||
// --- MR Context ---
|
||||
|
||||
/**
 * Assemble the PRContext for a GitLab MR: description, state, draft flag,
 * labels, approvals, notes, pipeline jobs (mapped to GitHub-style checks),
 * and linked issues.
 *
 * Every sub-fetch is best-effort: a failed command or non-JSON response
 * degrades that section to empty rather than throwing, so this function
 * never rejects on partial data.
 */
export async function fetchGlMRContext(
  runtime: PRRuntime,
  ref: GlMRRef,
): Promise<PRContext> {
  const encoded = encodeProject(ref.projectPath);
  const mrEndpoint = `projects/${encoded}/merge_requests/${ref.iid}`;

  // Fetch all context in parallel
  const [mrResult, notesResult, approvalsResult, pipelinesResult, issuesResult] = await Promise.all([
    runtime.runCommand("glab", apiArgs(ref.host, mrEndpoint)),
    runtime.runCommand("glab", apiArgs(ref.host, `${mrEndpoint}/notes?sort=asc&per_page=100`)),
    runtime.runCommand("glab", apiArgs(ref.host, `${mrEndpoint}/approvals`)),
    runtime.runCommand("glab", apiArgs(ref.host, `${mrEndpoint}/pipelines?per_page=5`)),
    runtime.runCommand("glab", apiArgs(ref.host, `${mrEndpoint}/closes_issues`)),
  ]);

  // Defensive coercers for loosely-typed API JSON.
  const str = (v: unknown): string => (typeof v === "string" ? v : "");
  const arr = (v: unknown): unknown[] => (Array.isArray(v) ? v : []);

  // --- MR details ---
  let mr: Record<string, unknown> = {};
  if (mrResult.exitCode === 0) {
    try { mr = JSON.parse(mrResult.stdout); } catch { /* non-JSON response */ }
  }

  // Normalize state: GitLab uses "opened"/"closed"/"merged" → uppercase
  const glState = str(mr.state);
  const state = glState === "opened" ? "OPEN" : glState.toUpperCase();

  // Draft detection: explicit flag, or legacy "Draft:"/"WIP:" title prefix.
  const isDraft = mr.draft === true
    || (typeof mr.title === "string" && /^(Draft:|WIP:)/i.test(mr.title));

  // Labels may arrive as plain strings or as {name, color} objects.
  const labels = arr(mr.labels).map((l: any) => {
    if (typeof l === "string") return { name: l, color: "" };
    return { name: str(l?.name), color: str(l?.color) };
  });

  // GitLab merge_status values
  const mergeStatus = str(mr.merge_status);
  const detailedStatus = str(mr.detailed_merge_status);
  const mergeable = mergeStatus === "can_be_merged" ? "MERGEABLE"
    : mergeStatus === "cannot_be_merged" ? "CONFLICTING"
    : mergeStatus === "unchecked" ? "UNKNOWN"
    : mergeStatus.toUpperCase();

  // Map GitLab detailed_merge_status to GitHub-compatible merge state enums
  const mergeStateMap: Record<string, string> = {
    mergeable: "CLEAN",
    broken_status: "DIRTY",
    checking: "UNKNOWN",
    unchecked: "UNKNOWN",
    ci_must_pass: "BLOCKED",
    ci_still_running: "BLOCKED",
    discussions_not_resolved: "BLOCKED",
    draft_status: "BLOCKED",
    blocked_status: "BLOCKED",
    not_approved: "BLOCKED",
    not_open: "DIRTY",
    need_rebase: "BEHIND",
    conflict: "DIRTY",
    jira_association_missing: "BLOCKED",
  };
  // Prefer the detailed status when present; unknown values pass through
  // uppercased so new GitLab statuses remain visible rather than hidden.
  const mergeStateStatus = detailedStatus
    ? (mergeStateMap[detailedStatus] ?? detailedStatus.toUpperCase())
    : mergeable;

  // --- Notes (comments) ---
  const notes: PRContext["comments"] = [];
  if (notesResult.exitCode === 0) {
    try {
      const rawNotes = JSON.parse(notesResult.stdout) as any[];
      for (const n of rawNotes) {
        // Skip system notes (status changes, branch pushes, etc.).
        if (n.system) continue;
        notes.push({
          id: String(n.id ?? ""),
          author: str(n.author?.username),
          body: str(n.body),
          createdAt: str(n.created_at),
          url: str(n.web_url) || "",
        });
      }
    } catch { /* non-JSON response */ }
  }

  // --- Approvals ---
  let reviewDecision = "";
  const reviews: PRContext["reviews"] = [];
  if (approvalsResult.exitCode === 0) {
    try {
      const approvals = JSON.parse(approvalsResult.stdout) as Record<string, unknown>;
      const approvedBy = arr(approvals.approved_by);
      const approved = approvals.approved === true || approvedBy.length > 0;
      reviewDecision = approved ? "APPROVED" : "";

      // Each approver becomes a synthetic "APPROVED" review entry.
      for (const a of approvedBy) {
        const user = (a as any)?.user;
        if (!user) continue;
        reviews.push({
          id: String(user.id ?? ""),
          author: str(user.username),
          state: "APPROVED",
          body: "",
          submittedAt: "",
        });
      }
    } catch { /* non-JSON response */ }
  }

  // --- Pipelines → Checks ---
  // Only the most recent pipeline's jobs are reported as checks.
  const checks: PRContext["checks"] = [];
  if (pipelinesResult.exitCode === 0) {
    try {
      const pipelines = JSON.parse(pipelinesResult.stdout) as any[];
      if (pipelines.length > 0) {
        const latest = pipelines[0];
        const jobsResult = await runtime.runCommand(
          "glab",
          apiArgs(ref.host, `projects/${encoded}/pipelines/${latest.id}/jobs?per_page=100`),
        );
        if (jobsResult.exitCode === 0) {
          try {
            const jobs = JSON.parse(jobsResult.stdout) as any[];
            for (const job of jobs) {
              const jobStatus = str(job.status);
              const isComplete = ["success", "failed", "canceled", "skipped"].includes(jobStatus);
              // Map GitLab job statuses to GitHub-compatible conclusion enums
              const conclusionMap: Record<string, string> = {
                success: "SUCCESS",
                failed: "FAILURE",
                canceled: "NEUTRAL",
                skipped: "SKIPPED",
              };
              checks.push({
                name: str(job.name),
                status: isComplete ? "COMPLETED" : "IN_PROGRESS",
                conclusion: isComplete ? (conclusionMap[jobStatus] ?? jobStatus.toUpperCase()) : null,
                workflowName: str(latest.ref),
                detailsUrl: str(job.web_url),
              });
            }
          } catch { /* non-JSON jobs response */ }
        }
      }
    } catch { /* non-JSON pipelines response */ }
  }

  // --- Linked Issues ---
  const linkedIssues: PRContext["linkedIssues"] = [];
  if (issuesResult.exitCode === 0) {
    try {
      const issues = JSON.parse(issuesResult.stdout) as any[];
      for (const i of issues) {
        linkedIssues.push({
          number: typeof i.iid === "number" ? i.iid : 0,
          url: str(i.web_url),
          repo: ref.projectPath,
        });
      }
    } catch {
      // Non-critical — some GitLab versions may not support this endpoint
    }
  }

  return {
    body: str(mr.description),
    state,
    isDraft,
    labels,
    reviewDecision,
    mergeable,
    mergeStateStatus,
    comments: notes,
    reviews,
    reviewThreads: [], // TODO: parse DiffNote positions from notes for thread support
    checks,
    linkedIssues,
  };
}
|
||||
|
||||
// --- File Content ---
|
||||
|
||||
export async function fetchGlFileContent(
|
||||
runtime: PRRuntime,
|
||||
ref: GlMRRef,
|
||||
sha: string,
|
||||
filePath: string,
|
||||
): Promise<string | null> {
|
||||
const encoded = encodeProject(ref.projectPath);
|
||||
const encodedPath = encodeApiFilePath(filePath);
|
||||
|
||||
const result = await runtime.runCommand(
|
||||
"glab",
|
||||
apiArgs(ref.host, `projects/${encoded}/repository/files/${encodedPath}/raw?ref=${sha}`),
|
||||
);
|
||||
|
||||
if (result.exitCode !== 0) return null;
|
||||
|
||||
// GitLab returns raw file content (no base64 encoding)
|
||||
return result.stdout;
|
||||
}
|
||||
|
||||
// --- Submit MR Review ---
|
||||
|
||||
/**
 * Submit an MR review to GitLab in up to three steps:
 *  1. an optional summary note,
 *  2. inline discussion comments positioned via the MR's diff_refs,
 *  3. an optional approval.
 *
 * Inline comments are posted in parallel; only a TOTAL inline failure
 * throws (partial failures are logged to stderr and tolerated).
 *
 * @throws when the runtime lacks stdin support, the note post fails,
 *         every inline comment fails, or the approval call fails.
 */
export async function submitGlMRReview(
  runtime: PRRuntime,
  ref: GlMRRef,
  headSha: string,
  action: "approve" | "comment",
  body: string,
  fileComments: PRReviewFileComment[],
): Promise<void> {
  // All payloads are delivered over stdin, so stdin support is mandatory.
  if (!runtime.runCommandWithInput) {
    throw new Error("Runtime does not support stdin input; cannot submit MR review");
  }

  const encoded = encodeProject(ref.projectPath);
  const mrEndpoint = `projects/${encoded}/merge_requests/${ref.iid}`;

  // Fetch base SHA for position context (needed for line comments)
  // We use the headSha passed in and derive baseSha from MR metadata
  // The caller already has this info, but GitLab's discussion API needs start_sha too

  // 1. Post general body as a note (if non-empty)
  if (body && body.trim()) {
    const notePayload = JSON.stringify({ body: body.trim() });
    const noteResult = await runtime.runCommandWithInput(
      "glab",
      apiArgs(ref.host, `${mrEndpoint}/notes`, ["--method", "POST", "--input", "-", "-H", "Content-Type:application/json"]),
      notePayload,
    );
    if (noteResult.exitCode !== 0) {
      const msg = noteResult.stderr.trim() || noteResult.stdout.trim() || `exit code ${noteResult.exitCode}`;
      throw new Error(`Failed to post MR note: ${msg}`);
    }
  }

  // 2. Post inline file comments as discussions with position
  if (fileComments.length > 0) {
    // We need the MR's diff_refs for the position SHAs.
    const mrResult = await runtime.runCommand(
      "glab",
      apiArgs(ref.host, mrEndpoint),
    );
    // Fall back to headSha for both when diff_refs are unavailable; the
    // positions may then be rejected by GitLab, handled per-comment below.
    let baseSha = headSha; // fallback
    let startSha = headSha;
    if (mrResult.exitCode === 0 && mrResult.stdout.trim()) {
      try {
        const mrData = JSON.parse(mrResult.stdout) as { diff_refs?: { base_sha: string; start_sha: string; head_sha: string } };
        if (mrData.diff_refs) {
          baseSha = mrData.diff_refs.base_sha;
          startSha = mrData.diff_refs.start_sha;
        }
      } catch {
        // Use fallbacks
      }
    }

    const errors: string[] = [];

    // Submit comments in parallel
    const results = await Promise.allSettled(
      fileComments.map(async (comment) => {
        // side === "LEFT" targets the old file; otherwise the new file.
        const isOldSide = comment.side === "LEFT";
        const position: Record<string, unknown> = {
          position_type: "text",
          base_sha: baseSha,
          head_sha: headSha,
          start_sha: startSha,
          new_path: comment.path,
          old_path: comment.path,
        };

        if (isOldSide) {
          position.old_line = comment.line;
        } else {
          position.new_line = comment.line;
        }

        // Multi-line range support
        if (comment.start_line != null && comment.start_line !== comment.line) {
          const startIsOld = (comment.start_side ?? comment.side) === "LEFT";
          const startEntry: Record<string, unknown> = { type: startIsOld ? "old" : "new" };
          if (startIsOld) startEntry.old_line = comment.start_line;
          else startEntry.new_line = comment.start_line;

          const endEntry: Record<string, unknown> = { type: isOldSide ? "old" : "new" };
          if (isOldSide) endEntry.old_line = comment.line;
          else endEntry.new_line = comment.line;

          position.line_range = { start: startEntry, end: endEntry };
        }

        const payload = JSON.stringify({ body: comment.body, position });
        // Non-null assertion is safe: guarded at function entry.
        const res = await runtime.runCommandWithInput!(
          "glab",
          apiArgs(ref.host, `${mrEndpoint}/discussions`, ["--method", "POST", "--input", "-", "-H", "Content-Type:application/json"]),
          payload,
        );

        if (res.exitCode !== 0) {
          const msg = res.stderr.trim() || res.stdout.trim() || `exit code ${res.exitCode}`;
          throw new Error(`${comment.path}:${comment.line}: ${msg}`);
        }
      }),
    );

    for (const r of results) {
      if (r.status === "rejected") {
        errors.push(r.reason instanceof Error ? r.reason.message : String(r.reason));
      }
    }

    if (errors.length > 0 && errors.length === fileComments.length) {
      // All failed — throw
      throw new Error(`Failed to post inline comments:\n${errors.join("\n")}`);
    }
    // Partial failures: some comments posted, some didn't — log but don't throw
    if (errors.length > 0) {
      console.error(`Warning: ${errors.length}/${fileComments.length} inline comments failed:\n${errors.join("\n")}`);
    }
  }

  // 3. Approve if requested
  if (action === "approve") {
    const approveResult = await runtime.runCommandWithInput(
      "glab",
      apiArgs(ref.host, `${mrEndpoint}/approve`, ["--method", "POST", "--input", "-", "-H", "Content-Type:application/json"]),
      "{}",
    );
    if (approveResult.exitCode !== 0) {
      const msg = approveResult.stderr.trim() || approveResult.stdout.trim() || `exit code ${approveResult.exitCode}`;
      throw new Error(`Failed to approve MR: ${msg}`);
    }
  }
}
|
||||
432
extensions/plannotator/generated/pr-provider.ts
Normal file
432
extensions/plannotator/generated/pr-provider.ts
Normal file
@@ -0,0 +1,432 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/pr-provider.ts
|
||||
/**
|
||||
* Runtime-agnostic PR provider shared by Bun runtimes and Pi.
|
||||
*
|
||||
* Dispatches to platform-specific implementations (GitHub, GitLab)
|
||||
* based on the `platform` field in PRRef/PRMetadata.
|
||||
*
|
||||
* Same pattern as review-core.ts: a runtime interface abstracts subprocess
|
||||
* execution so the logic is reusable across Bun and Node/jiti.
|
||||
*/
|
||||
|
||||
import { checkGhAuth, getGhUser, fetchGhPR, fetchGhPRContext, fetchGhPRFileContent, submitGhPRReview, fetchGhPRViewedFiles, markGhFilesViewed, fetchGhPRStack, fetchGhPRList } from "./pr-github";
|
||||
import { checkGlAuth, getGlUser, fetchGlMR, fetchGlMRContext, fetchGlFileContent, submitGlMRReview } from "./pr-gitlab";
|
||||
|
||||
// --- Runtime Types ---

/** Result of a finished subprocess invocation (captured output + exit status). */
export interface CommandResult {
  stdout: string;
  stderr: string;
  exitCode: number;
}

/**
 * Minimal subprocess abstraction injected by the host runtime, so the
 * PR/MR logic stays runtime-agnostic (no Bun/Node APIs in this module).
 */
export interface PRRuntime {
  /** Run a command with arguments and capture its output. */
  runCommand: (
    cmd: string,
    args: string[],
  ) => Promise<CommandResult>;
  /** Optional: run a command while piping `input` to stdin (used for API POST bodies). */
  runCommandWithInput?: (
    cmd: string,
    args: string[],
    input: string,
  ) => Promise<CommandResult>;
}
|
||||
|
||||
// --- Platform Types ---

/** Supported code-hosting platforms. */
export type Platform = "github" | "gitlab";

/** GitHub PR reference */
export interface GithubPRRef {
  platform: "github";
  /** Host name — "github.com" or a GitHub Enterprise host. */
  host: string;
  owner: string;
  repo: string;
  /** Pull request number (the "#123" in URLs and labels). */
  number: number;
}

/** GitLab MR reference */
export interface GitlabMRRef {
  platform: "gitlab";
  /** Host name — "gitlab.com" or a self-hosted instance. */
  host: string;
  /** Full project path, possibly nested — e.g. "group/subgroup/project". */
  projectPath: string;
  /** MR iid (project-scoped number, the "!42" in labels). */
  iid: number;
}

/** Discriminated union — auto-detected from URL */
export type PRRef = GithubPRRef | GitlabMRRef;

/** GitHub PR metadata */
export interface GithubPRMetadata {
  platform: "github";
  host: string;
  owner: string;
  repo: string;
  number: number;
  /** GraphQL node ID for the PR — used for markFileAsViewed mutations */
  prNodeId?: string;
  title: string;
  author: string;
  baseBranch: string;
  headBranch: string;
  /** Repository default branch, used to infer whether this PR targets another PR branch. */
  defaultBranch?: string;
  baseSha: string;
  headSha: string;
  /** Merge-base SHA — the common ancestor commit used to compute the PR diff. Differs from baseSha when the base branch has moved. */
  mergeBaseSha?: string;
  url: string;
}

/** GitLab MR metadata */
export interface GitlabMRMetadata {
  platform: "gitlab";
  host: string;
  projectPath: string;
  iid: number;
  title: string;
  author: string;
  baseBranch: string;
  headBranch: string;
  /** Project default branch, used to infer whether this MR targets another MR branch. */
  defaultBranch?: string;
  baseSha: string;
  headSha: string;
  /** Merge-base SHA — the common ancestor commit used to compute the MR diff. */
  mergeBaseSha?: string;
  url: string;
}

/** Discriminated union — downstream gets type narrowing for free */
export type PRMetadata = GithubPRMetadata | GitlabMRMetadata;
|
||||
|
||||
// --- PR Context Types (platform-agnostic) ---

/** Top-level discussion comment on the PR/MR. */
export interface PRComment {
  id: string;
  author: string;
  body: string;
  createdAt: string;
  url: string;
}

/** A submitted review; `state` is the platform's review-state string. */
export interface PRReview {
  id: string;
  author: string;
  state: string;
  body: string;
  submittedAt: string;
  url?: string;
}

/** CI check result attached to the PR/MR. */
export interface PRCheck {
  name: string;
  status: string;
  /** Null while the check has not concluded. */
  conclusion: string | null;
  workflowName: string;
  detailsUrl: string;
}

/** Issue linked to the PR/MR. */
export interface PRLinkedIssue {
  number: number;
  url: string;
  repo: string;
}

/** Single comment within an inline review thread. */
export interface PRThreadComment {
  id: string;
  author: string;
  body: string;
  createdAt: string;
  url: string;
  /** Surrounding diff hunk, when the platform provides it. */
  diffHunk?: string;
}

/** Inline review thread anchored to a file/line in the diff. */
export interface PRReviewThread {
  id: string;
  isResolved: boolean;
  // NOTE(review): presumably set when the anchored diff has changed since the
  // thread was created — confirm against the provider implementations.
  isOutdated: boolean;
  path: string;
  line: number | null;
  startLine: number | null;
  diffSide: 'LEFT' | 'RIGHT' | null;
  comments: PRThreadComment[];
}

/** Aggregated discussion + CI context for a PR/MR. */
export interface PRContext {
  body: string;
  state: string;
  isDraft: boolean;
  labels: Array<{ name: string; color: string }>;
  reviewDecision: string;
  mergeable: string;
  mergeStateStatus: string;
  comments: PRComment[];
  reviews: PRReview[];
  reviewThreads: PRReviewThread[];
  checks: PRCheck[];
  linkedIssues: PRLinkedIssue[];
}

/** Inline comment to attach when submitting a review. */
export interface PRReviewFileComment {
  path: string;
  /** Anchor line; interpreted as old-file line when side is "LEFT", new-file line otherwise. */
  line: number;
  side: "LEFT" | "RIGHT";
  body: string;
  /** First line of a multi-line range; omitted for single-line comments. */
  start_line?: number;
  start_side?: "LEFT" | "RIGHT";
}

export type PRDiffScope = "layer" | "full-stack";

/** One selectable diff scope. */
export interface PRDiffScopeOption {
  id: PRDiffScope;
  label: string;
  description: string;
  /** False when the scope cannot be offered (e.g. full-stack without a local checkout). */
  enabled: boolean;
}

/** Summary of how/why a PR was detected as stacked. */
export interface PRStackInfo {
  isStacked: boolean;
  baseBranch: string;
  defaultBranch?: string;
  label: string;
  /** Detection mechanism that produced this info. */
  source: "branch-inferred" | "tree-discovered" | "github-native" | "gitlab-native" | "graphite" | "ghstack";
}

/** One branch in a stack tree. */
export interface PRStackNode {
  branch: string;
  number?: number;
  title?: string;
  url?: string;
  isCurrent: boolean;
  isDefaultBranch: boolean;
  state?: 'open' | 'merged' | 'closed';
}

/** Ordered list of stack branches (default branch first, current head last). */
export interface PRStackTree {
  nodes: PRStackNode[];
}

/** Lightweight PR list entry. */
export interface PRListItem {
  id: string;
  number: number;
  title: string;
  author: string;
  url: string;
  baseBranch: string;
  state: 'open' | 'closed' | 'merged';
}
|
||||
|
||||
// --- Label Helpers ---
|
||||
// Accept either PRRef or PRMetadata (both have `platform` discriminant)
|
||||
|
||||
type HasPlatform = PRRef | PRMetadata;
|
||||
|
||||
/** "GitHub" or "GitLab" */
|
||||
export function getPlatformLabel(m: HasPlatform): string {
|
||||
return m.platform === "github" ? "GitHub" : "GitLab";
|
||||
}
|
||||
|
||||
/** "PR" or "MR" */
|
||||
export function getMRLabel(m: HasPlatform): string {
|
||||
return m.platform === "github" ? "PR" : "MR";
|
||||
}
|
||||
|
||||
/** "#123" or "!42" */
|
||||
export function getMRNumberLabel(m: HasPlatform): string {
|
||||
if (m.platform === "github") return `#${m.number}`;
|
||||
return `!${m.iid}`;
|
||||
}
|
||||
|
||||
/** "owner/repo" or "group/project" */
|
||||
export function getDisplayRepo(m: HasPlatform): string {
|
||||
if (m.platform === "github") return `${m.owner}/${m.repo}`;
|
||||
return m.projectPath;
|
||||
}
|
||||
|
||||
/** Reconstruct a PRRef from metadata */
|
||||
export function prRefFromMetadata(m: PRMetadata): PRRef {
|
||||
if (m.platform === "github") {
|
||||
return { platform: "github", host: m.host, owner: m.owner, repo: m.repo, number: m.number };
|
||||
}
|
||||
return { platform: "gitlab", host: m.host, projectPath: m.projectPath, iid: m.iid };
|
||||
}
|
||||
|
||||
export function isSameProject(a: PRRef, b: PRRef): boolean {
|
||||
if (a.platform !== b.platform) return false;
|
||||
if (a.platform === "github" && b.platform === "github") {
|
||||
return a.host === b.host && a.owner === b.owner && a.repo === b.repo;
|
||||
}
|
||||
if (a.platform === "gitlab" && b.platform === "gitlab") {
|
||||
return a.host === b.host && a.projectPath === b.projectPath;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/** CLI tool name for the platform */
|
||||
export function getCliName(ref: PRRef): string {
|
||||
return ref.platform === "github" ? "gh" : "glab";
|
||||
}
|
||||
|
||||
/** Install URL for the platform CLI */
|
||||
export function getCliInstallUrl(ref: PRRef): string {
|
||||
return ref.platform === "github"
|
||||
? "https://cli.github.com"
|
||||
: "https://gitlab.com/gitlab-org/cli";
|
||||
}
|
||||
|
||||
/** Encode a file path for use in platform API URLs */
|
||||
export function encodeApiFilePath(filePath: string): string {
|
||||
return encodeURIComponent(filePath);
|
||||
}
|
||||
|
||||
// --- URL Parsing ---
|
||||
|
||||
/**
|
||||
* Parse a PR/MR URL into its components. Auto-detects platform.
|
||||
*
|
||||
* Handles:
|
||||
* - GitHub: https://github.com/owner/repo/pull/123[/files|/commits]
|
||||
* - GitHub Enterprise: https://ghe.company.com/owner/repo/pull/123
|
||||
* - GitLab: https://gitlab.com/group/subgroup/project/-/merge_requests/42[/diffs]
|
||||
* - Self-hosted GitLab: https://gitlab.mycompany.com/group/project/-/merge_requests/42
|
||||
*
|
||||
* GitLab is checked first because `/-/merge_requests/` is unambiguous,
|
||||
* while `/pull/` could theoretically appear on any host.
|
||||
*/
|
||||
export function parsePRUrl(url: string): PRRef | null {
|
||||
if (!url) return null;
|
||||
|
||||
// GitLab: https://{host}/{projectPath}/-/merge_requests/{iid}[/...]
|
||||
// Checked first — `/-/merge_requests/` is the most specific pattern.
|
||||
const glMatch = url.match(
|
||||
/^https?:\/\/([^/]+)\/(.+?)\/-\/merge_requests\/(\d+)/,
|
||||
);
|
||||
if (glMatch) {
|
||||
return {
|
||||
platform: "gitlab",
|
||||
host: glMatch[1],
|
||||
projectPath: glMatch[2],
|
||||
iid: parseInt(glMatch[3], 10),
|
||||
};
|
||||
}
|
||||
|
||||
// GitHub (including GHE): https://{host}/{owner}/{repo}/pull/{number}[/...]
|
||||
const ghMatch = url.match(
|
||||
/^https?:\/\/([^/]+)\/([^/]+)\/([^/]+)\/pull\/(\d+)/,
|
||||
);
|
||||
if (ghMatch) {
|
||||
return {
|
||||
platform: "github",
|
||||
host: ghMatch[1],
|
||||
owner: ghMatch[2],
|
||||
repo: ghMatch[3],
|
||||
number: parseInt(ghMatch[4], 10),
|
||||
};
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
// --- Dispatch Functions ---
|
||||
|
||||
export async function checkAuth(runtime: PRRuntime, ref: PRRef): Promise<void> {
|
||||
if (ref.platform === "github") return checkGhAuth(runtime, ref.host);
|
||||
return checkGlAuth(runtime, ref.host);
|
||||
}
|
||||
|
||||
export async function getUser(runtime: PRRuntime, ref: PRRef): Promise<string | null> {
|
||||
if (ref.platform === "github") return getGhUser(runtime, ref.host);
|
||||
return getGlUser(runtime, ref.host);
|
||||
}
|
||||
|
||||
export async function fetchPR(
|
||||
runtime: PRRuntime,
|
||||
ref: PRRef,
|
||||
): Promise<{ metadata: PRMetadata; rawPatch: string }> {
|
||||
if (ref.platform === "github") return fetchGhPR(runtime, ref);
|
||||
return fetchGlMR(runtime, ref);
|
||||
}
|
||||
|
||||
export async function fetchPRContext(
|
||||
runtime: PRRuntime,
|
||||
ref: PRRef,
|
||||
): Promise<PRContext> {
|
||||
if (ref.platform === "github") return fetchGhPRContext(runtime, ref);
|
||||
return fetchGlMRContext(runtime, ref);
|
||||
}
|
||||
|
||||
export async function fetchPRFileContent(
|
||||
runtime: PRRuntime,
|
||||
ref: PRRef,
|
||||
sha: string,
|
||||
filePath: string,
|
||||
): Promise<string | null> {
|
||||
if (ref.platform === "github") return fetchGhPRFileContent(runtime, ref, sha, filePath);
|
||||
return fetchGlFileContent(runtime, ref, sha, filePath);
|
||||
}
|
||||
|
||||
export async function submitPRReview(
|
||||
runtime: PRRuntime,
|
||||
ref: PRRef,
|
||||
headSha: string,
|
||||
action: "approve" | "comment",
|
||||
body: string,
|
||||
fileComments: PRReviewFileComment[],
|
||||
): Promise<void> {
|
||||
if (ref.platform === "github") return submitGhPRReview(runtime, ref, headSha, action, body, fileComments);
|
||||
return submitGlMRReview(runtime, ref, headSha, action, body, fileComments);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch per-file "viewed" state for a PR.
|
||||
* GitHub: returns { filePath: isViewed } map.
|
||||
* GitLab: always returns {} (no server-side viewed state API).
|
||||
*/
|
||||
export async function fetchPRViewedFiles(
|
||||
runtime: PRRuntime,
|
||||
ref: PRRef,
|
||||
): Promise<Record<string, boolean>> {
|
||||
if (ref.platform === "github") return fetchGhPRViewedFiles(runtime, ref);
|
||||
return {}; // GitLab has no server-side viewed state
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark or unmark files as viewed in a PR.
|
||||
* GitHub: fires markFileAsViewed / unmarkFileAsViewed GraphQL mutations.
|
||||
* GitLab: no-op (no server-side viewed state API).
|
||||
*/
|
||||
export async function markPRFilesViewed(
|
||||
runtime: PRRuntime,
|
||||
ref: PRRef,
|
||||
prNodeId: string,
|
||||
filePaths: string[],
|
||||
viewed: boolean,
|
||||
): Promise<void> {
|
||||
if (ref.platform === "github") return markGhFilesViewed(runtime, ref, prNodeId, filePaths, viewed);
|
||||
// GitLab: no-op
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch the full stack tree for a stacked PR.
|
||||
* Walks up from the current PR to the default branch, resolving
|
||||
* PR numbers and titles for each intermediate branch.
|
||||
* Returns null if the PR is not stacked or the API call fails.
|
||||
*/
|
||||
export async function fetchPRStack(
|
||||
runtime: PRRuntime,
|
||||
ref: PRRef,
|
||||
metadata: PRMetadata,
|
||||
): Promise<PRStackTree | null> {
|
||||
if (ref.platform === "github") return fetchGhPRStack(runtime, ref, metadata);
|
||||
return null; // GitLab: not yet implemented
|
||||
}
|
||||
|
||||
export async function fetchPRList(
|
||||
runtime: PRRuntime,
|
||||
ref: PRRef,
|
||||
): Promise<PRListItem[]> {
|
||||
if (ref.platform === "github") return fetchGhPRList(runtime, ref);
|
||||
return []; // GitLab: not yet implemented
|
||||
}
|
||||
195
extensions/plannotator/generated/pr-stack.ts
Normal file
195
extensions/plannotator/generated/pr-stack.ts
Normal file
@@ -0,0 +1,195 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/pr-stack.ts
|
||||
import type { DiffResult, ReviewGitRuntime } from "./review-core";
|
||||
import type {
|
||||
PRDiffScopeOption,
|
||||
PRMetadata,
|
||||
PRStackInfo,
|
||||
PRStackTree,
|
||||
PRStackNode,
|
||||
} from "./pr-provider";
|
||||
export type { PRDiffScope, PRDiffScopeOption, PRStackInfo, PRStackTree, PRStackNode } from "./pr-provider";
|
||||
|
||||
function branchNameIsSafe(branch: string): boolean {
|
||||
return branch.trim().length > 0 && !branch.startsWith("-") && !branch.includes("\0");
|
||||
}
|
||||
|
||||
export function getPRStackInfo(metadata: PRMetadata | undefined): PRStackInfo | null {
|
||||
if (!metadata?.defaultBranch) return null;
|
||||
if (metadata.baseBranch === metadata.defaultBranch) return null;
|
||||
|
||||
return {
|
||||
isStacked: true,
|
||||
baseBranch: metadata.baseBranch,
|
||||
defaultBranch: metadata.defaultBranch,
|
||||
label: `${metadata.headBranch} stacked on ${metadata.baseBranch}`,
|
||||
source: "branch-inferred",
|
||||
};
|
||||
}
|
||||
|
||||
export function resolveStackInfo(
|
||||
metadata: PRMetadata,
|
||||
stackTree: PRStackTree | null,
|
||||
existing?: PRStackInfo | null,
|
||||
): PRStackInfo | null {
|
||||
if (existing) return existing;
|
||||
if (!stackTree || stackTree.nodes.filter(n => !n.isDefaultBranch).length <= 1) return null;
|
||||
return getPRStackInfo(metadata) ?? {
|
||||
isStacked: true,
|
||||
baseBranch: metadata.baseBranch,
|
||||
defaultBranch: metadata.defaultBranch!,
|
||||
label: `Root of stack — ${metadata.headBranch}`,
|
||||
source: "tree-discovered",
|
||||
};
|
||||
}
|
||||
|
||||
export function getPRDiffScopeOptions(
|
||||
metadata: PRMetadata | undefined,
|
||||
hasLocalCheckout: boolean,
|
||||
): PRDiffScopeOption[] {
|
||||
const stackInfo = getPRStackInfo(metadata);
|
||||
|
||||
return [
|
||||
{
|
||||
id: "layer",
|
||||
label: "Layer",
|
||||
description: metadata?.baseBranch
|
||||
? `Only changes relative to ${metadata.baseBranch}.`
|
||||
: "Only changes from this review.",
|
||||
enabled: true,
|
||||
},
|
||||
{
|
||||
id: "full-stack",
|
||||
label: "Full stack",
|
||||
description: stackInfo?.defaultBranch
|
||||
? `All changes from ${stackInfo.defaultBranch} to HEAD in the local checkout.`
|
||||
: "All changes from the default branch to HEAD in the local checkout.",
|
||||
enabled: Boolean(stackInfo && hasLocalCheckout),
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
export async function resolvePRFullStackBaseRef(
|
||||
runtime: ReviewGitRuntime,
|
||||
defaultBranch: string,
|
||||
cwd?: string,
|
||||
): Promise<string | null> {
|
||||
const remoteRef = `origin/${defaultBranch}`;
|
||||
const remote = await runtime.runGit(
|
||||
["show-ref", "--verify", "--quiet", `refs/remotes/${remoteRef}`],
|
||||
{ cwd },
|
||||
);
|
||||
if (remote.exitCode === 0) return remoteRef;
|
||||
|
||||
const local = await runtime.runGit(
|
||||
["show-ref", "--verify", "--quiet", `refs/heads/${defaultBranch}`],
|
||||
{ cwd },
|
||||
);
|
||||
if (local.exitCode === 0) return defaultBranch;
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
export async function runPRFullStackDiff(
|
||||
runtime: ReviewGitRuntime,
|
||||
metadata: PRMetadata,
|
||||
cwd?: string,
|
||||
): Promise<DiffResult> {
|
||||
const defaultBranch = metadata.defaultBranch;
|
||||
if (!defaultBranch || !branchNameIsSafe(defaultBranch)) {
|
||||
return {
|
||||
patch: "",
|
||||
label: "Full stack diff unavailable",
|
||||
error: "Could not determine a safe default branch for this review.",
|
||||
};
|
||||
}
|
||||
|
||||
const baseRef = await resolvePRFullStackBaseRef(runtime, defaultBranch, cwd);
|
||||
if (!baseRef) {
|
||||
return {
|
||||
patch: "",
|
||||
label: "Full stack diff unavailable",
|
||||
error: `Could not find origin/${defaultBranch} or local ${defaultBranch} in this checkout.`,
|
||||
};
|
||||
}
|
||||
|
||||
const diffArgs = [
|
||||
"diff",
|
||||
"--no-ext-diff",
|
||||
"--src-prefix=a/",
|
||||
"--dst-prefix=b/",
|
||||
"--end-of-options",
|
||||
`${baseRef}...HEAD`,
|
||||
];
|
||||
const diff = await runtime.runGit(diffArgs, { cwd });
|
||||
if (diff.exitCode !== 0) {
|
||||
const message = diff.stderr.trim() || `git ${diffArgs.join(" ")} failed`;
|
||||
return {
|
||||
patch: "",
|
||||
label: "Full stack diff unavailable",
|
||||
error: message.split("\n").find((line) => line.trim().length > 0) ?? message,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
patch: diff.stdout,
|
||||
label: `Full stack diff vs ${baseRef}`,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch and checkout a PR/MR head in a local worktree.
|
||||
* Returns true if the checkout succeeded, false otherwise.
|
||||
*/
|
||||
export async function checkoutPRHead(
|
||||
runtime: ReviewGitRuntime,
|
||||
metadata: PRMetadata,
|
||||
cwd: string,
|
||||
): Promise<boolean> {
|
||||
const refSpec = metadata.platform === "github"
|
||||
? `refs/pull/${metadata.number}/head`
|
||||
: `refs/merge-requests/${metadata.iid}/head`;
|
||||
|
||||
const fetch = await runtime.runGit(["fetch", "origin", refSpec], { cwd });
|
||||
if (fetch.exitCode !== 0) return false;
|
||||
|
||||
const checkout = await runtime.runGit(["checkout", "FETCH_HEAD"], { cwd });
|
||||
return checkout.exitCode === 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a minimal stack tree from existing metadata (no API calls).
|
||||
* Used as a fallback when the full stack tree hasn't loaded yet.
|
||||
*/
|
||||
export function buildMinimalStackTree(
|
||||
metadata: PRMetadata,
|
||||
stackInfo: PRStackInfo,
|
||||
): PRStackTree {
|
||||
const nodes: PRStackNode[] = [];
|
||||
|
||||
if (stackInfo.defaultBranch) {
|
||||
nodes.push({
|
||||
branch: stackInfo.defaultBranch,
|
||||
isCurrent: false,
|
||||
isDefaultBranch: true,
|
||||
});
|
||||
}
|
||||
|
||||
if (stackInfo.baseBranch !== stackInfo.defaultBranch) {
|
||||
nodes.push({
|
||||
branch: stackInfo.baseBranch,
|
||||
isCurrent: false,
|
||||
isDefaultBranch: false,
|
||||
});
|
||||
}
|
||||
|
||||
nodes.push({
|
||||
branch: metadata.headBranch,
|
||||
number: metadata.platform === "github" ? metadata.number : metadata.iid,
|
||||
title: metadata.title,
|
||||
url: metadata.url,
|
||||
isCurrent: true,
|
||||
isDefaultBranch: false,
|
||||
});
|
||||
|
||||
return { nodes };
|
||||
}
|
||||
72
extensions/plannotator/generated/project.ts
Normal file
72
extensions/plannotator/generated/project.ts
Normal file
@@ -0,0 +1,72 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/project.ts
|
||||
/**
|
||||
* Project Utility — Pure Functions
|
||||
*
|
||||
* String sanitization and path extraction helpers.
|
||||
* Runtime-agnostic: no Bun or Node-specific APIs.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Sanitize a string for use as a tag
|
||||
* - lowercase
|
||||
* - replace spaces/underscores with hyphens
|
||||
* - remove special characters
|
||||
* - trim to reasonable length
|
||||
*/
|
||||
export function sanitizeTag(name: string): string | null {
|
||||
if (!name || typeof name !== "string") return null;
|
||||
|
||||
const sanitized = name
|
||||
.toLowerCase()
|
||||
.trim()
|
||||
.replace(/[\s_]+/g, "-") // spaces/underscores -> hyphens
|
||||
.replace(/[^a-z0-9-]/g, "") // remove special chars
|
||||
.replace(/-+/g, "-") // collapse multiple hyphens
|
||||
.replace(/^-|-$/g, "") // trim leading/trailing hyphens
|
||||
.slice(0, 30); // max 30 chars
|
||||
|
||||
return sanitized.length >= 2 ? sanitized : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract repo name from a git root path
|
||||
*/
|
||||
export function extractRepoName(gitRootPath: string): string | null {
|
||||
if (!gitRootPath || typeof gitRootPath !== "string") return null;
|
||||
|
||||
const trimmed = gitRootPath.trim().replace(/\/+$/, ""); // remove trailing slashes
|
||||
const parts = trimmed.split("/");
|
||||
const name = parts[parts.length - 1];
|
||||
|
||||
return sanitizeTag(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract directory name from a path
|
||||
*/
|
||||
export function extractDirName(path: string): string | null {
|
||||
if (!path || typeof path !== "string") return null;
|
||||
|
||||
const trimmed = path.trim().replace(/\/+$/, "");
|
||||
if (trimmed === "" || trimmed === "/") return null;
|
||||
|
||||
const parts = trimmed.split("/");
|
||||
const name = parts[parts.length - 1];
|
||||
|
||||
// Skip generic names
|
||||
const skipNames = new Set(["home", "users", "user", "root", "tmp", "var"]);
|
||||
if (skipNames.has(name.toLowerCase())) return null;
|
||||
|
||||
return sanitizeTag(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract hostname from a URL string, or return the original string on failure.
|
||||
*/
|
||||
export function hostnameOrFallback(url: string): string {
|
||||
try {
|
||||
return new URL(url).hostname;
|
||||
} catch {
|
||||
return url;
|
||||
}
|
||||
}
|
||||
245
extensions/plannotator/generated/prompts.ts
Normal file
245
extensions/plannotator/generated/prompts.ts
Normal file
@@ -0,0 +1,245 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/prompts.ts
|
||||
import { loadConfig, type PlannotatorConfig, type PromptRuntime } from "./config";
|
||||
|
||||
// ─── Template engine ─────────────────────────────────────────────────────────
|
||||
|
||||
export function resolveTemplate(
|
||||
template: string,
|
||||
vars: Record<string, string | undefined>,
|
||||
): string {
|
||||
return template.replace(/\{\{(\w+)\}\}/g, (match, key) => {
|
||||
const val = vars[key];
|
||||
return val !== undefined ? val : match;
|
||||
});
|
||||
}
|
||||
|
||||
// ─── Tool name map ───────────────────────────────────────────────────────────
|
||||
|
||||
export const PLAN_TOOL_NAMES: Record<PromptRuntime, string> = {
|
||||
"claude-code": "ExitPlanMode",
|
||||
opencode: "submit_plan",
|
||||
"copilot-cli": "exit_plan_mode",
|
||||
pi: "plannotator_submit_plan",
|
||||
codex: "ExitPlanMode",
|
||||
"gemini-cli": "exit_plan_mode",
|
||||
};
|
||||
|
||||
export function getPlanToolName(runtime?: PromptRuntime | null): string {
|
||||
return (runtime && PLAN_TOOL_NAMES[runtime]) || "ExitPlanMode";
|
||||
}
|
||||
|
||||
export function buildPlanFileRule(toolName: string, planFilePath?: string): string {
|
||||
if (!planFilePath) return "";
|
||||
return `- Your plan is saved at: ${planFilePath}\n You can edit this file to make targeted changes, then pass its path to ${toolName}.\n`;
|
||||
}
|
||||
|
||||
// ─── Default constants ───────────────────────────────────────────────────────
// Fallback prompt texts used when nothing is configured. Templates containing
// {{placeholder}} tokens are substituted via resolveTemplate().

// Sent back when a code review finishes with no requested changes.
export const DEFAULT_REVIEW_APPROVED_PROMPT = "# Code Review\n\nCode review completed — no changes requested.";

// Appended after the collected feedback when a review requests changes.
export const DEFAULT_REVIEW_DENIED_SUFFIX = "\nThe reviewer has identified issues above. You must address all of them.";

// Vars: {{toolName}}, {{planFileRule}}, {{feedback}}.
export const DEFAULT_PLAN_DENIED_PROMPT =
  "YOUR PLAN WAS NOT APPROVED.\n\nYou MUST revise the plan to address ALL of the feedback below before calling {{toolName}} again.\n\nRules:\n{{planFileRule}}- Do not resubmit the same plan unchanged.\n- Do NOT change the plan title (first # heading) unless the user explicitly asks you to.\n\n{{feedback}}";

// Vars: {{planFilePath}}, {{doneMsg}}.
export const DEFAULT_PLAN_APPROVED_PROMPT =
  "Plan approved. You now have full tool access (read, bash, edit, write). Execute the plan in {{planFilePath}}. {{doneMsg}}";

// Vars: {{planFilePath}}, {{doneMsg}}, {{feedback}}.
export const DEFAULT_PLAN_APPROVED_WITH_NOTES_PROMPT =
  "Plan approved with notes! You now have full tool access (read, bash, edit, write). Execute the plan in {{planFilePath}}. {{doneMsg}}\n\n## Implementation Notes\n\nThe user approved your plan but added the following notes to consider during implementation:\n\n{{feedback}}\n\nProceed with implementation, incorporating these notes where applicable.";

export const DEFAULT_PLAN_AUTO_APPROVED_PROMPT =
  "Plan auto-approved (non-interactive mode). Execute the plan now.";

// Vars: {{fileHeader}}, {{filePath}}, {{feedback}}.
export const DEFAULT_ANNOTATE_FILE_FEEDBACK_PROMPT =
  "# Markdown Annotations\n\n{{fileHeader}}: {{filePath}}\n\n{{feedback}}\n\nPlease address the annotation feedback above.";

// Vars: {{feedback}}.
export const DEFAULT_ANNOTATE_MESSAGE_FEEDBACK_PROMPT =
  "# Message Annotations\n\n{{feedback}}\n\nPlease address the annotation feedback above.";

export const DEFAULT_ANNOTATE_APPROVED_PROMPT = "The user approved.";
|
||||
|
||||
// ─── Core resolver ───────────────────────────────────────────────────────────

// Config sections that can carry prompt overrides.
type PromptSection = "review" | "plan" | "annotate";
// Individual prompt slots within a section.
type PromptKey = "approved" | "approvedWithNotes" | "autoApproved" | "denied"
  | "fileFeedback" | "messageFeedback";

/** Lookup request for getConfiguredPrompt(). */
interface PromptLookupOptions {
  section: PromptSection;
  key: PromptKey;
  /** Active runtime; runtime-specific overrides win over generic ones. */
  runtime?: PromptRuntime | null;
  /** Pre-loaded config; when omitted, loadConfig() is called. */
  config?: PlannotatorConfig;
  /** Last-resort default when nothing else resolves. */
  fallback: string;
  /** Per-runtime defaults consulted before `fallback`. */
  runtimeFallbacks?: Partial<Record<PromptRuntime, string>>;
}
|
||||
|
||||
function normalizePrompt(prompt: unknown): string | undefined {
|
||||
if (typeof prompt !== "string") return undefined;
|
||||
return prompt.trim() ? prompt : undefined;
|
||||
}
|
||||
|
||||
export function getConfiguredPrompt(options: PromptLookupOptions): string {
|
||||
const resolvedConfig = options.config ?? loadConfig();
|
||||
const section = resolvedConfig.prompts?.[options.section];
|
||||
const runtimePrompt = options.runtime
|
||||
? normalizePrompt(section?.runtimes?.[options.runtime]?.[options.key])
|
||||
: undefined;
|
||||
const genericPrompt = normalizePrompt(section?.[options.key]);
|
||||
const runtimeFallback = options.runtime
|
||||
? options.runtimeFallbacks?.[options.runtime]
|
||||
: undefined;
|
||||
|
||||
return runtimePrompt ?? genericPrompt ?? runtimeFallback ?? options.fallback;
|
||||
}
|
||||
|
||||
type FeedbackVars = Record<string, string | undefined>;
|
||||
|
||||
// ─── Review wrappers ─────────────────────────────────────────────────────────
|
||||
|
||||
export function getReviewApprovedPrompt(
|
||||
runtime?: PromptRuntime | null,
|
||||
config?: PlannotatorConfig,
|
||||
): string {
|
||||
return getConfiguredPrompt({
|
||||
section: "review",
|
||||
key: "approved",
|
||||
runtime,
|
||||
config,
|
||||
fallback: DEFAULT_REVIEW_APPROVED_PROMPT,
|
||||
});
|
||||
}
|
||||
|
||||
const REVIEW_DENIED_RUNTIME_DEFAULTS: Partial<Record<PromptRuntime, string>> = {
|
||||
opencode: "\n\nPlease address this feedback.",
|
||||
pi: "\n\nPlease address this feedback.",
|
||||
};
|
||||
|
||||
export function getReviewDeniedSuffix(
|
||||
runtime?: PromptRuntime | null,
|
||||
config?: PlannotatorConfig,
|
||||
): string {
|
||||
return getConfiguredPrompt({
|
||||
section: "review",
|
||||
key: "denied",
|
||||
runtime,
|
||||
config,
|
||||
fallback: DEFAULT_REVIEW_DENIED_SUFFIX,
|
||||
runtimeFallbacks: REVIEW_DENIED_RUNTIME_DEFAULTS,
|
||||
});
|
||||
}
|
||||
|
||||
// ─── Plan wrappers ───────────────────────────────────────────────────────────
|
||||
|
||||
export function getPlanDeniedPrompt(
|
||||
runtime?: PromptRuntime | null,
|
||||
config?: PlannotatorConfig,
|
||||
vars?: FeedbackVars,
|
||||
): string {
|
||||
const template = getConfiguredPrompt({
|
||||
section: "plan",
|
||||
key: "denied",
|
||||
runtime,
|
||||
config,
|
||||
fallback: DEFAULT_PLAN_DENIED_PROMPT,
|
||||
});
|
||||
return resolveTemplate(template, vars ?? {});
|
||||
}
|
||||
|
||||
const PLAN_APPROVED_RUNTIME_DEFAULTS: Partial<Record<PromptRuntime, string>> = {
|
||||
opencode: "Plan approved!{{doneMsg}}",
|
||||
};
|
||||
|
||||
export function getPlanApprovedPrompt(
|
||||
runtime?: PromptRuntime | null,
|
||||
config?: PlannotatorConfig,
|
||||
vars?: FeedbackVars,
|
||||
): string {
|
||||
const template = getConfiguredPrompt({
|
||||
section: "plan",
|
||||
key: "approved",
|
||||
runtime,
|
||||
config,
|
||||
fallback: DEFAULT_PLAN_APPROVED_PROMPT,
|
||||
runtimeFallbacks: PLAN_APPROVED_RUNTIME_DEFAULTS,
|
||||
});
|
||||
return resolveTemplate(template, vars ?? {});
|
||||
}
|
||||
|
||||
const PLAN_APPROVED_WITH_NOTES_RUNTIME_DEFAULTS: Partial<Record<PromptRuntime, string>> = {
|
||||
opencode: "Plan approved with notes!\n{{doneMsg}}\n\n## Implementation Notes\n\nThe user approved your plan but added the following notes to consider during implementation:\n\n{{feedback}}{{proceedSuffix}}",
|
||||
};
|
||||
|
||||
export function getPlanApprovedWithNotesPrompt(
|
||||
runtime?: PromptRuntime | null,
|
||||
config?: PlannotatorConfig,
|
||||
vars?: FeedbackVars,
|
||||
): string {
|
||||
const template = getConfiguredPrompt({
|
||||
section: "plan",
|
||||
key: "approvedWithNotes",
|
||||
runtime,
|
||||
config,
|
||||
fallback: DEFAULT_PLAN_APPROVED_WITH_NOTES_PROMPT,
|
||||
runtimeFallbacks: PLAN_APPROVED_WITH_NOTES_RUNTIME_DEFAULTS,
|
||||
});
|
||||
return resolveTemplate(template, { proceedSuffix: "", ...vars });
|
||||
}
|
||||
|
||||
export function getPlanAutoApprovedPrompt(
|
||||
runtime?: PromptRuntime | null,
|
||||
config?: PlannotatorConfig,
|
||||
): string {
|
||||
return getConfiguredPrompt({
|
||||
section: "plan",
|
||||
key: "autoApproved",
|
||||
runtime,
|
||||
config,
|
||||
fallback: DEFAULT_PLAN_AUTO_APPROVED_PROMPT,
|
||||
});
|
||||
}
|
||||
|
||||
// ─── Annotate wrappers ──────────────────────────────────────────────────────
|
||||
|
||||
export function getAnnotateFileFeedbackPrompt(
|
||||
runtime?: PromptRuntime | null,
|
||||
config?: PlannotatorConfig,
|
||||
vars?: FeedbackVars,
|
||||
): string {
|
||||
const template = getConfiguredPrompt({
|
||||
section: "annotate",
|
||||
key: "fileFeedback",
|
||||
runtime,
|
||||
config,
|
||||
fallback: DEFAULT_ANNOTATE_FILE_FEEDBACK_PROMPT,
|
||||
});
|
||||
return resolveTemplate(template, vars ?? {});
|
||||
}
|
||||
|
||||
export function getAnnotateMessageFeedbackPrompt(
|
||||
runtime?: PromptRuntime | null,
|
||||
config?: PlannotatorConfig,
|
||||
vars?: FeedbackVars,
|
||||
): string {
|
||||
const template = getConfiguredPrompt({
|
||||
section: "annotate",
|
||||
key: "messageFeedback",
|
||||
runtime,
|
||||
config,
|
||||
fallback: DEFAULT_ANNOTATE_MESSAGE_FEEDBACK_PROMPT,
|
||||
});
|
||||
return resolveTemplate(template, vars ?? {});
|
||||
}
|
||||
|
||||
export function getAnnotateApprovedPrompt(
|
||||
runtime?: PromptRuntime | null,
|
||||
config?: PlannotatorConfig,
|
||||
): string {
|
||||
return getConfiguredPrompt({
|
||||
section: "annotate",
|
||||
key: "approved",
|
||||
runtime,
|
||||
config,
|
||||
fallback: DEFAULT_ANNOTATE_APPROVED_PROMPT,
|
||||
});
|
||||
}
|
||||
88
extensions/plannotator/generated/reference-common.ts
Normal file
88
extensions/plannotator/generated/reference-common.ts
Normal file
@@ -0,0 +1,88 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/reference-common.ts
|
||||
// --- Vault file tree helpers ---
|
||||
|
||||
/**
 * Directory names excluded from vault file listings. Each entry carries a
 * trailing slash — this matches the `entry.name + "/"` comparison convention
 * used by the walk helpers in this package. Covers dependency caches, build
 * output, and editor/tool state directories.
 */
export const FILE_BROWSER_EXCLUDED = [
  "node_modules/",
  ".git/",
  "dist/",
  "build/",
  ".next/",
  "__pycache__/",
  ".obsidian/",
  ".trash/",
  ".venv/",
  "vendor/",
  "target/",
  ".cache/",
  "coverage/",
  ".turbo/",
  ".svelte-kit/",
  ".nuxt/",
  ".output/",
  ".parcel-cache/",
  ".webpack/",
  ".expo/",
  "_site/",
  "public/",
  ".jekyll-cache/",
  "out/",
  ".docusaurus/",
  "storybook-static/",
];

/** One node of the nested vault file tree produced by `buildFileTree`. */
export interface VaultNode {
  name: string;
  path: string; // relative path within vault
  type: "file" | "folder";
  children?: VaultNode[]; // present only on folders (see buildFileTree)
}
|
||||
|
||||
/**
|
||||
* Build a nested file tree from a sorted list of relative paths.
|
||||
* Folders are sorted before files at each level.
|
||||
*/
|
||||
export function buildFileTree(relativePaths: string[]): VaultNode[] {
|
||||
const root: VaultNode[] = [];
|
||||
|
||||
for (const filePath of relativePaths) {
|
||||
const parts = filePath.split("/");
|
||||
let current = root;
|
||||
let pathSoFar = "";
|
||||
|
||||
for (let i = 0; i < parts.length; i++) {
|
||||
const part = parts[i];
|
||||
pathSoFar = pathSoFar ? `${pathSoFar}/${part}` : part;
|
||||
const isFile = i === parts.length - 1;
|
||||
|
||||
let node = current.find(
|
||||
(n) => n.name === part && n.type === (isFile ? "file" : "folder"),
|
||||
);
|
||||
if (!node) {
|
||||
node = {
|
||||
name: part,
|
||||
path: pathSoFar,
|
||||
type: isFile ? "file" : "folder",
|
||||
};
|
||||
if (!isFile) node.children = [];
|
||||
current.push(node);
|
||||
}
|
||||
if (!isFile) {
|
||||
current = node.children!;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort: folders first (alphabetical), then files (alphabetical)
|
||||
const sortNodes = (nodes: VaultNode[]) => {
|
||||
nodes.sort((a, b) => {
|
||||
if (a.type !== b.type) return a.type === "folder" ? -1 : 1;
|
||||
return a.name.localeCompare(b.name);
|
||||
});
|
||||
for (const node of nodes) {
|
||||
if (node.children) sortNodes(node.children);
|
||||
}
|
||||
};
|
||||
sortNodes(root);
|
||||
|
||||
return root;
|
||||
}
|
||||
72
extensions/plannotator/generated/repo.ts
Normal file
72
extensions/plannotator/generated/repo.ts
Normal file
@@ -0,0 +1,72 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/repo.ts
|
||||
/** Identity of the repository a session runs against (display + git facts). */
export interface RepoInfo {
  /** Display string (e.g., "backnotprop/plannotator" or "my-project") */
  display: string;
  /** Current git branch (if in a git repo) */
  branch?: string;
  /**
   * Host of the git remote (e.g., "github.com", "gitlab.com"). Populated
   * only when the remote URL is parseable; absent for directory-only fallbacks.
   */
  host?: string;
}
|
||||
|
||||
/**
|
||||
* Parse org/repo from a git remote URL
|
||||
*
|
||||
* Handles:
|
||||
* - SSH: git@github.com:org/repo.git
|
||||
* - HTTPS: https://github.com/org/repo.git
|
||||
* - SSH with port: ssh://git@github.com:22/org/repo.git
|
||||
* - GitLab subgroups: git@gitlab.com:group/subgroup/project.git
|
||||
*/
|
||||
export function parseRemoteUrl(url: string): string | null {
|
||||
if (!url) return null;
|
||||
|
||||
// SSH with port: ssh://git@host:22/path.git — strip scheme+host+port
|
||||
const sshPortMatch = url.match(/^ssh:\/\/[^/]+(?::\d+)?\/(.+?)(?:\.git)?$/);
|
||||
if (sshPortMatch) return sshPortMatch[1];
|
||||
|
||||
// SSH format: git@host:path.git — capture full path after ':'
|
||||
// Reject URLs with :// scheme (HTTPS with non-standard ports like :8443)
|
||||
if (!url.includes("://")) {
|
||||
const sshMatch = url.match(/:([^/][^:]*?)(?:\.git)?$/);
|
||||
if (sshMatch) return sshMatch[1];
|
||||
}
|
||||
|
||||
// HTTPS format: https://host/path.git — capture full path after host
|
||||
const httpsMatch = url.match(/^https?:\/\/[^/]+\/(.+?)(?:\.git)?$/);
|
||||
if (httpsMatch) return httpsMatch[1];
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse the host from a git remote URL. Returns null when the shape
|
||||
* doesn't match a known remote form. Used to identify the forge
|
||||
* (github.com, gitlab.com, self-hosted) so inline mention / issue
|
||||
* refs can link to the correct destination instead of assuming GitHub.
|
||||
*/
|
||||
export function parseRemoteHost(url: string): string | null {
|
||||
if (!url) return null;
|
||||
// ssh://git@host:port/path
|
||||
const sshPort = url.match(/^ssh:\/\/(?:[^@]+@)?([^:/]+)/i);
|
||||
if (sshPort) return sshPort[1];
|
||||
// git@host:path
|
||||
if (!url.includes('://')) {
|
||||
const ssh = url.match(/^[^@\s]+@([^:\s]+):/);
|
||||
if (ssh) return ssh[1];
|
||||
}
|
||||
// https://host/path or http://host/path
|
||||
const https = url.match(/^https?:\/\/([^/:]+)/i);
|
||||
if (https) return https[1];
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get directory name from path
|
||||
*/
|
||||
export function getDirName(path: string): string | null {
|
||||
if (!path) return null;
|
||||
const trimmed = path.trim().replace(/\/+$/, "");
|
||||
const parts = trimmed.split("/");
|
||||
return parts[parts.length - 1] || null;
|
||||
}
|
||||
510
extensions/plannotator/generated/resolve-file.ts
Normal file
510
extensions/plannotator/generated/resolve-file.ts
Normal file
@@ -0,0 +1,510 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/resolve-file.ts
/**
 * Smart markdown file resolution.
 *
 * Resolves a user-provided path to an absolute file path using three strategies:
 * 1. Exact path (absolute or relative to cwd)
 * 2. Case-insensitive relative path search within project root
 * 3. Case-insensitive bare filename search within project root
 *
 * Used by both the CLI (`plannotator annotate`) and the `/api/doc` endpoint.
 */

import { homedir } from "os";
import { isAbsolute, join, resolve, win32 } from "path";
import { existsSync, readdirSync, type Dirent } from "fs";

// Matches .md / .mdx (case-insensitive) at the end of a path.
const MARKDOWN_PATH_REGEX = /\.mdx?$/i;

// Re-exported so consumers have one import surface for code-file checks.
import { CODE_FILE_REGEX as CODE_FILE_BASENAME_REGEX } from "./code-file";
export { CODE_FILE_REGEX, isCodeFilePath } from "./code-file";

// POSIX-style spellings of Windows drive paths, each capturing
// (driveLetter, rest): /cygdrive/c/... (Cygwin) and /c/... (MSYS-style).
const WINDOWS_DRIVE_PATH_PATTERNS = [
  /^\/cygdrive\/([a-zA-Z])\/(.+)$/,
  /^\/([a-zA-Z])\/(.+)$/,
];

// Directories skipped during markdown walks (trailing slash matches the
// `entry.name + "/"` comparison used by the walk helpers below).
const IGNORED_DIRS = [
  "node_modules/",
  ".git/",
  "dist/",
  "build/",
  ".next/",
  "__pycache__/",
  ".obsidian/",
  ".trash/",
];

// Code-file walks cover far more files, so additionally skip common
// build/cache/dependency directories.
const CODE_IGNORED_DIRS = [
  ...IGNORED_DIRS,
  ".turbo/",
  ".cache/",
  "target/",
  "vendor/",
  "coverage/",
  ".venv/",
  ".pytest_cache/",
];
|
||||
|
||||
export type ResolveResult =
|
||||
| { kind: "found"; path: string }
|
||||
| { kind: "not_found"; input: string }
|
||||
| { kind: "ambiguous"; input: string; matches: string[] }
|
||||
| { kind: "unavailable"; input: string };
|
||||
|
||||
function normalizeSeparators(input: string): string {
|
||||
return input.replace(/\\/g, "/");
|
||||
}
|
||||
|
||||
function stripTrailingSlashes(input: string): string {
|
||||
return input.replace(/\/+$/, "");
|
||||
}
|
||||
|
||||
export function expandHomePath(input: string, home = homedir()): string {
|
||||
if (input === "~") {
|
||||
return home;
|
||||
}
|
||||
|
||||
if (input.startsWith("~/") || input.startsWith("~\\")) {
|
||||
return join(home, input.slice(2));
|
||||
}
|
||||
|
||||
return input;
|
||||
}
|
||||
|
||||
export function stripWrappingQuotes(input: string): string {
|
||||
if (input.length < 2) {
|
||||
return input;
|
||||
}
|
||||
|
||||
const first = input[0];
|
||||
const last = input[input.length - 1];
|
||||
if ((first === '"' && last === '"') || (first === "'" && last === "'")) {
|
||||
return input.slice(1, -1);
|
||||
}
|
||||
|
||||
return input;
|
||||
}
|
||||
|
||||
export function normalizeUserPathInput(
|
||||
input: string,
|
||||
platform = process.platform,
|
||||
): string {
|
||||
const trimmedInput = input.trim();
|
||||
const unquotedInput = stripWrappingQuotes(trimmedInput);
|
||||
const expandedInput = expandHomePath(unquotedInput);
|
||||
|
||||
if (platform !== "win32") {
|
||||
return expandedInput;
|
||||
}
|
||||
|
||||
for (const pattern of WINDOWS_DRIVE_PATH_PATTERNS) {
|
||||
const match = expandedInput.match(pattern);
|
||||
if (!match) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const [, driveLetter, rest] = match;
|
||||
return `${driveLetter.toUpperCase()}:/${rest}`;
|
||||
}
|
||||
|
||||
return expandedInput;
|
||||
}
|
||||
|
||||
function isAbsoluteNormalizedUserPath(
|
||||
input: string,
|
||||
platform = process.platform,
|
||||
): boolean {
|
||||
if (hasWindowsDriveLetter(input)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return platform === "win32"
|
||||
? win32.isAbsolute(input)
|
||||
: isAbsolute(input);
|
||||
}
|
||||
|
||||
export function isAbsoluteUserPath(
|
||||
input: string,
|
||||
platform = process.platform,
|
||||
): boolean {
|
||||
return isAbsoluteNormalizedUserPath(normalizeUserPathInput(input, platform), platform);
|
||||
}
|
||||
|
||||
export function resolveUserPath(
|
||||
input: string,
|
||||
baseDir = process.cwd(),
|
||||
platform = process.platform,
|
||||
): string {
|
||||
const normalizedInput = normalizeUserPathInput(input, platform);
|
||||
if (!normalizedInput) {
|
||||
return "";
|
||||
}
|
||||
return isAbsoluteNormalizedUserPath(normalizedInput, platform)
|
||||
? resolveAbsolutePath(normalizedInput, platform)
|
||||
: resolve(baseDir, normalizedInput);
|
||||
}
|
||||
|
||||
function normalizeComparablePath(input: string): string {
|
||||
return stripTrailingSlashes(normalizeSeparators(resolveUserPath(input)));
|
||||
}
|
||||
|
||||
export function isWithinProjectRoot(candidate: string, projectRoot: string): boolean {
|
||||
const normalizedCandidate = normalizeComparablePath(candidate);
|
||||
const normalizedProjectRoot = normalizeComparablePath(projectRoot);
|
||||
return (
|
||||
normalizedCandidate === normalizedProjectRoot ||
|
||||
normalizedCandidate.startsWith(`${normalizedProjectRoot}/`)
|
||||
);
|
||||
}
|
||||
|
||||
function getLowercaseBasename(input: string): string {
|
||||
const normalizedInput = normalizeSeparators(input);
|
||||
return normalizedInput.split("/").pop()!.toLowerCase();
|
||||
}
|
||||
|
||||
function getLookupKey(input: string, isBareFilename: boolean): string {
|
||||
return isBareFilename ? getLowercaseBasename(input) : input.toLowerCase();
|
||||
}
|
||||
|
||||
function resolveAbsolutePath(
|
||||
input: string,
|
||||
platform = process.platform,
|
||||
): string {
|
||||
// Use win32.resolve for Windows paths regardless of reported platform
|
||||
return platform === "win32" || hasWindowsDriveLetter(input)
|
||||
? win32.resolve(input)
|
||||
: resolve(input);
|
||||
}
|
||||
|
||||
function isSearchableMarkdownPath(input: string): boolean {
|
||||
return MARKDOWN_PATH_REGEX.test(input.trim());
|
||||
}
|
||||
|
||||
/** Check if a path looks like a Windows absolute path (e.g. C:\ or C:/) */
|
||||
function hasWindowsDriveLetter(input: string): boolean {
|
||||
return /^[a-zA-Z]:[/\\]/.test(input);
|
||||
}
|
||||
|
||||
/** Cross-platform file existence check using Node fs (more reliable than Bun.file in compiled exes) */
|
||||
function fileExists(filePath: string): boolean {
|
||||
try {
|
||||
return existsSync(filePath);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/** Recursively walk a directory collecting files matching `fileMatcher`, skipping ignored dirs. */
|
||||
function walkFiles(
|
||||
dir: string,
|
||||
root: string,
|
||||
results: string[],
|
||||
ignoredDirs: string[],
|
||||
fileMatcher: (name: string) => boolean,
|
||||
): void {
|
||||
const entries = readdirSync(dir, { withFileTypes: true }) as Dirent[];
|
||||
for (const entry of entries) {
|
||||
if (entry.isDirectory()) {
|
||||
if (ignoredDirs.some((d) => d === entry.name + "/")) continue;
|
||||
try {
|
||||
walkFiles(join(dir, entry.name), root, results, ignoredDirs, fileMatcher);
|
||||
} catch {
|
||||
/* skip dirs we can't read */
|
||||
}
|
||||
} else if (entry.isFile() && fileMatcher(entry.name)) {
|
||||
const relative = join(dir, entry.name)
|
||||
.slice(root.length + 1)
|
||||
.replace(/\\/g, "/");
|
||||
results.push(relative);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function walkMarkdownFiles(dir: string, root: string, results: string[], ignoredDirs: string[]): void {
|
||||
try {
|
||||
walkFiles(dir, root, results, ignoredDirs, (name) => /\.mdx?$/i.test(name));
|
||||
} catch {
|
||||
/* fail soft for markdown — preserves existing behavior */
|
||||
}
|
||||
}
|
||||
|
||||
// --- Code-file resolution (async, cached) ---
|
||||
|
||||
const FILE_LIST_CACHE_TTL_MS = 30_000;
|
||||
const fileListCache = new Map<
|
||||
string,
|
||||
{ promise: Promise<string[] | null>; startedAt: number }
|
||||
>();
|
||||
|
||||
function fileListCacheKey(projectRoot: string, kind: string): string {
|
||||
return `${projectRoot}|${kind}`;
|
||||
}
|
||||
|
||||
function startCodeWalk(projectRoot: string): Promise<string[] | null> {
|
||||
return Promise.resolve().then(() => {
|
||||
try {
|
||||
const results: string[] = [];
|
||||
walkFiles(projectRoot, projectRoot, results, CODE_IGNORED_DIRS, (name) =>
|
||||
CODE_FILE_BASENAME_REGEX.test(name),
|
||||
);
|
||||
return results;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Trigger (or return the in-flight) walk of `projectRoot` for code files.
|
||||
* Cached for `FILE_LIST_CACHE_TTL_MS`. Storing a Promise (not a value) makes
|
||||
* concurrent callers piggyback on the same walk — first arrival wins.
|
||||
*
|
||||
* Returns `null` (wrapped in Promise) when the walk fails (perms, etc).
|
||||
*/
|
||||
export function warmFileListCache(
|
||||
projectRoot: string,
|
||||
kind: "code",
|
||||
): Promise<string[] | null> {
|
||||
const key = fileListCacheKey(projectRoot, kind);
|
||||
const entry = fileListCache.get(key);
|
||||
if (entry && Date.now() - entry.startedAt < FILE_LIST_CACHE_TTL_MS) {
|
||||
return entry.promise;
|
||||
}
|
||||
const promise = startCodeWalk(projectRoot);
|
||||
fileListCache.set(key, { promise, startedAt: Date.now() });
|
||||
return promise;
|
||||
}
|
||||
|
||||
/**
 * Resolve a code-file path within a project root.
 *
 * Strategies:
 * 1. Absolute path → use as-is.
 * 2. Exact relative from project root.
 * 3. If `baseDir` provided, literal `<baseDir>/<input>` existence check —
 *    lets out-of-tree linked docs resolve their own relative references
 *    (e.g. `../script.ts` in `~/notes/foo.md` finds `~/script.ts`).
 * 4. Case-insensitive suffix match against the cached file list:
 *    - bare basename input → match any file with that basename;
 *    - input with `/` → match files whose path equals or ends with `/<input>`
 *      on a segment boundary (so `editor/App.tsx` matches `packages/editor/App.tsx`
 *      but not `myeditor/App.tsx`).
 *
 * `..` segments in the input are honored: only `./` is stripped before suffix
 * matching. `../foo.ts` without a `baseDir` correctly falls through to
 * not_found rather than fabricating a match against `foo.ts` somewhere in cwd.
 *
 * @param input - User-provided path (absolute, relative, or bare filename)
 * @param projectRoot - Project root directory to search within
 * @param baseDir - Optional directory of the referencing document
 * @returns Discriminated ResolveResult: found / not_found / ambiguous / unavailable
 */
export async function resolveCodeFile(
  input: string,
  projectRoot: string,
  baseDir?: string,
): Promise<ResolveResult> {
  // Normalize once: trim, unquote, expand ~, fix Windows drive spellings,
  // then force forward slashes so every comparison below is slash-based.
  const originalInput = input.trim();
  const unquotedInput = stripWrappingQuotes(originalInput);
  const normalizedInput = normalizeUserPathInput(unquotedInput);
  const searchInput = normalizeSeparators(normalizedInput);

  if (!searchInput) {
    return { kind: "not_found", input: originalInput };
  }

  // Strategy 1: absolute path — used verbatim, no project-root restriction.
  if (isAbsoluteNormalizedUserPath(normalizedInput)) {
    const absolutePath = resolveAbsolutePath(normalizedInput);
    if (fileExists(absolutePath)) {
      return { kind: "found", path: absolutePath };
    }
    return { kind: "not_found", input: originalInput };
  }

  // Strategy 2: exact relative path from the project root (must stay inside).
  const fromRoot = resolve(projectRoot, searchInput);
  if (isWithinProjectRoot(fromRoot, projectRoot) && fileExists(fromRoot)) {
    return { kind: "found", path: fromRoot };
  }

  // Strategy 3: relative to the referencing document's own directory
  // (no project-root containment check — out-of-tree docs are the point).
  if (baseDir) {
    const fromBase = resolve(baseDir, searchInput);
    if (fileExists(fromBase)) {
      return { kind: "found", path: fromBase };
    }
  }

  // Strategy 4 needs the cached project file list; null = walk failed.
  const fileList = await warmFileListCache(projectRoot, "code");
  if (fileList === null) {
    return { kind: "unavailable", input: originalInput };
  }

  // Strip leading `./` so suffix matching works on inputs like
  // `./editor/App.tsx` — file list entries never carry that segment.
  // `../` is intentionally NOT stripped: `..` is meaningful (escape parent),
  // not noise. If we can't honor it via baseDir, the input has no
  // suffix-match equivalent in the in-tree file list.
  const cleanedInput = searchInput.replace(/^(?:\.\/)+/, "");
  if (!cleanedInput || cleanedInput.startsWith("../")) {
    return { kind: "not_found", input: originalInput };
  }
  const target = cleanedInput.toLowerCase();
  const isBareFilename = !cleanedInput.includes("/");
  const matches: string[] = [];

  // Case-insensitive scan: basename equality for bare names; exact or
  // segment-boundary suffix equality ("/"+target) for paths with slashes.
  for (const f of fileList) {
    const fl = f.toLowerCase();
    if (isBareFilename) {
      const base = fl.split("/").pop();
      if (base === target) matches.push(resolve(projectRoot, f));
    } else {
      if (fl === target || fl.endsWith("/" + target)) {
        matches.push(resolve(projectRoot, f));
      }
    }
  }

  // Exactly one hit → success; several → caller must disambiguate.
  if (matches.length === 1) {
    return { kind: "found", path: matches[0] };
  }
  if (matches.length > 1) {
    return { kind: "ambiguous", input: originalInput, matches };
  }
  return { kind: "not_found", input: originalInput };
}
|
||||
|
||||
/**
 * Resolve a markdown file path within a project root.
 *
 * Strategies, in order: exact absolute path; exact relative path from the
 * project root; case-insensitive scan over all markdown files under the root
 * (matching by basename for bare-filename inputs, by full relative path
 * otherwise). Non-markdown inputs short-circuit to not_found.
 *
 * @param input - User-provided path (absolute, relative, or bare filename)
 * @param projectRoot - Project root directory to search within
 * @returns found / not_found / ambiguous (ambiguous paths are root-relative)
 */
function resolveMarkdownFileCore(
  input: string,
  projectRoot: string,
): ResolveResult {
  const normalizedInput = normalizeUserPathInput(input);
  const searchInput = normalizeSeparators(normalizedInput);
  // Bare filenames compare by basename; anything with "/" compares whole.
  const isBareFilename = !searchInput.includes("/");
  const targetLookupKey = getLookupKey(searchInput, isBareFilename);

  // Restrict to markdown files (.md / .mdx, case-insensitive).
  if (!isSearchableMarkdownPath(normalizedInput)) {
    return { kind: "not_found", input };
  }

  // 1. Absolute path — use as-is (no project root restriction;
  //    the user explicitly typed the full path)
  if (isAbsoluteNormalizedUserPath(normalizedInput)) {
    const absolutePath = resolveAbsolutePath(normalizedInput);
    if (fileExists(absolutePath)) {
      return { kind: "found", path: absolutePath };
    }
    return { kind: "not_found", input };
  }

  // 2. Exact relative path from project root
  const fromRoot = resolve(projectRoot, searchInput);
  if (isWithinProjectRoot(fromRoot, projectRoot) && fileExists(fromRoot)) {
    return { kind: "found", path: fromRoot };
  }

  // 3. Case-insensitive search (only scan markdown files)
  const allFiles: string[] = [];
  walkMarkdownFiles(projectRoot, projectRoot, allFiles, IGNORED_DIRS);
  const matches: string[] = [];

  for (const match of allFiles) {
    const normalizedMatch = normalizeSeparators(match);
    const matchLookupKey = getLookupKey(normalizedMatch, isBareFilename);

    if (matchLookupKey === targetLookupKey) {
      const full = resolve(projectRoot, normalizedMatch);
      // Defensive containment re-check on the resolved candidate.
      if (isWithinProjectRoot(full, projectRoot)) {
        matches.push(full);
      }
    }
  }

  if (matches.length === 1) {
    return { kind: "found", path: matches[0] };
  }
  if (matches.length > 1) {
    // Report ambiguous candidates relative to the root for compact display.
    const projectRootPrefix = `${normalizeComparablePath(projectRoot)}/`;
    const relative = matches.map((match) =>
      normalizeComparablePath(match).replace(projectRootPrefix, ""),
    );
    return { kind: "ambiguous", input, matches: relative };
  }

  return { kind: "not_found", input };
}
|
||||
|
||||
/**
 * Resolve a markdown file path within a project root.
 *
 * Wraps resolveMarkdownFileCore with input cleanup plus an `@`-prefix
 * fallback: when the verbatim input fails and it begins with `@`, the
 * leading `@` characters are stripped and resolution is retried
 * (presumably to accept mention-style `@path` inputs — confirm with callers).
 *
 * @param input - User-provided path (absolute, relative, or bare filename)
 * @param projectRoot - Project root directory to search within
 */
export function resolveMarkdownFile(
  input: string,
  projectRoot: string,
): ResolveResult {
  const originalInput = input.trim();
  const unquotedInput = stripWrappingQuotes(originalInput);

  // First attempt: the input exactly as given (minus whitespace/quotes).
  const primary = resolveMarkdownFileCore(unquotedInput, projectRoot);
  if (primary.kind === "found") {
    return primary;
  }
  if (primary.kind === "ambiguous") {
    // Preserve the caller's original spelling in the result payload.
    return { ...primary, input: originalInput };
  }

  // The fallback only applies to @-prefixed inputs.
  if (!unquotedInput.startsWith("@")) {
    return { kind: "not_found", input: originalInput };
  }

  // Retry with all leading '@' characters removed.
  const normalizedInput = unquotedInput.replace(/^@+/, "");
  if (!normalizedInput) {
    return { kind: "not_found", input: originalInput };
  }

  const fallback = resolveMarkdownFileCore(normalizedInput, projectRoot);
  if (fallback.kind === "found") {
    return fallback;
  }
  if (fallback.kind === "ambiguous") {
    return { ...fallback, input: originalInput };
  }

  return { kind: "not_found", input: originalInput };
}
|
||||
|
||||
/**
|
||||
* Check if a directory contains at least one file matching the given extensions.
|
||||
* Used to validate folder annotation targets.
|
||||
*
|
||||
* @param dirPath - Directory to search
|
||||
* @param excludedDirs - Directory names to skip (with trailing slash, e.g. "node_modules/")
|
||||
* @param extensions - Regex to match file extensions (default: markdown only)
|
||||
*/
|
||||
export function hasMarkdownFiles(
|
||||
dirPath: string,
|
||||
excludedDirs: string[] = IGNORED_DIRS,
|
||||
extensions: RegExp = /\.mdx?$/i,
|
||||
): boolean {
|
||||
function walk(dir: string): boolean {
|
||||
let entries;
|
||||
try {
|
||||
entries = readdirSync(dir, { withFileTypes: true });
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
for (const entry of entries) {
|
||||
if (entry.isDirectory()) {
|
||||
if (excludedDirs.some((d) => d === entry.name + "/")) continue;
|
||||
if (walk(join(dir, entry.name))) return true;
|
||||
} else if (entry.isFile() && extensions.test(entry.name)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return walk(dirPath);
|
||||
}
|
||||
748
extensions/plannotator/generated/review-core.ts
Normal file
748
extensions/plannotator/generated/review-core.ts
Normal file
@@ -0,0 +1,748 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/review-core.ts
/**
 * Runtime-agnostic code-review core shared by Bun runtimes and Pi.
 *
 * Pi consumes a build-time copy of this file so its published package stays
 * self-contained while review diff logic remains sourced from one module.
 */

import { resolve as resolvePath } from "node:path";

/**
 * Identifier for a diff source: fixed ids plus the templated forms
 * `worktree:<path>` and `p4-changelist:<number>`.
 */
export type DiffType =
  | "uncommitted"
  | "staged"
  | "unstaged"
  | "last-commit"
  | "branch"
  | "merge-base"
  | "all"
  | `worktree:${string}`
  | "p4-default"
  | `p4-changelist:${string}`;

/** One diff choice offered to the reviewer: stable id + display label. */
export interface DiffOption {
  id: string;
  label: string;
}

/** One entry parsed from `git worktree list --porcelain` (see getWorktrees). */
export interface WorktreeInfo {
  path: string;
  branch: string | null; // null for detached-HEAD worktrees
  head: string; // HEAD line value; empty string when the line is absent
}

/** Branch short names grouped by where the ref lives. */
export interface AvailableBranches {
  local: string[]; // from refs/heads/*
  remote: string[]; // from refs/remotes/*, excluding */HEAD pointers
}

/** Snapshot of repository state assembled for the review UI. */
export interface GitContext {
  currentBranch: string;
  defaultBranch: string;
  diffOptions: DiffOption[];
  worktrees: WorktreeInfo[];
  availableBranches: AvailableBranches;
  cwd?: string;
  vcsType?: "git" | "p4"; // absent presumably means git — confirm with consumers
}

/** A computed diff: unified patch text plus its display label. */
export interface DiffResult {
  patch: string;
  label: string;
  error?: string; // NOTE(review): looks like it carries failure text — confirm with producers
}

/** Raw outcome of one git subprocess invocation. */
export interface GitCommandResult {
  stdout: string;
  stderr: string;
  exitCode: number;
}

/**
 * Host abstraction implemented by each runtime (Bun/Node/Pi) so this module
 * never calls process or filesystem APIs directly.
 */
export interface ReviewGitRuntime {
  runGit: (
    args: string[],
    options?: { cwd?: string; timeoutMs?: number },
  ) => Promise<GitCommandResult>;
  readTextFile: (path: string) => Promise<string | null>;
}

/** Options accepted by diff-producing helpers. */
export interface GitDiffOptions {
  hideWhitespace?: boolean;
}
|
||||
|
||||
export async function getCurrentBranch(
|
||||
runtime: ReviewGitRuntime,
|
||||
cwd?: string,
|
||||
): Promise<string> {
|
||||
const result = await runtime.runGit(
|
||||
["rev-parse", "--abbrev-ref", "HEAD"],
|
||||
{ cwd },
|
||||
);
|
||||
return result.exitCode === 0 ? result.stdout.trim() || "HEAD" : "HEAD";
|
||||
}
|
||||
|
||||
/**
 * Determine the default base branch for diffs.
 *
 * Preference order: verified `origin/HEAD` target (returned in short
 * `origin/<name>` form), then a local `main` if it exists, then the literal
 * `"master"` as the final fallback.
 *
 * @param runtime - Git command abstraction
 * @param cwd - Optional repository directory
 */
export async function getDefaultBranch(
  runtime: ReviewGitRuntime,
  cwd?: string,
): Promise<string> {
  // Prefer the remote tracking ref (e.g. `origin/main`) so diffs run against
  // the upstream tip, not a potentially stale local copy. Only fall back to
  // a local ref when there's no remote configured at all.
  const remoteHead = await runtime.runGit(
    ["symbolic-ref", "refs/remotes/origin/HEAD"],
    { cwd },
  );
  if (remoteHead.exitCode === 0) {
    const ref = remoteHead.stdout.trim();
    if (ref) {
      // `symbolic-ref` only tells us what origin/HEAD *points at* — it does
      // not guarantee that the target ref was actually fetched. In narrow
      // or partial clones the pointer can be set while the target is
      // missing, in which case a later `git diff origin/main..HEAD` would
      // error. Verify the target exists before trusting it.
      const verify = await runtime.runGit(
        ["show-ref", "--verify", "--quiet", ref],
        { cwd },
      );
      // Callers expect the short `origin/<name>` form, not the full refname.
      if (verify.exitCode === 0) return ref.replace("refs/remotes/", "");
    }
  }

  // No usable remote HEAD: prefer a local `main` when it exists…
  const mainBranch = await runtime.runGit(
    ["show-ref", "--verify", "refs/heads/main"],
    { cwd },
  );
  if (mainBranch.exitCode === 0) return "main";

  // …otherwise assume the historical default branch name.
  return "master";
}
|
||||
|
||||
/**
 * Query the remote for its default branch via `ls-remote --symref`. Returns
 * `origin/<name>` if the remote answers and the tracking ref exists locally,
 * otherwise `null`. Designed to run in the background at server startup — the
 * caller fires it with `.then()` and uses the result if/when it arrives.
 *
 * Timeout-guarded: if the network is slow or absent, the promise resolves
 * (with `null`) once the timeout fires. Never throws.
 */
export async function detectRemoteDefaultBranch(
  runtime: ReviewGitRuntime,
  cwd?: string,
): Promise<string | null> {
  try {
    // Network call — bounded at 5s so startup never hangs on a dead remote.
    const lsRemote = await runtime.runGit(
      ["ls-remote", "--symref", "origin", "HEAD"],
      { cwd, timeoutMs: 5000 },
    );
    if (lsRemote.exitCode !== 0) return null;
    // Symref answer looks like: "ref: refs/heads/main\tHEAD".
    const match = lsRemote.stdout.match(/^ref:\s+refs\/heads\/(\S+)\s+HEAD/m);
    if (!match) return null;
    const remoteBranch = `origin/${match[1]}`;
    // Only trust the answer when the tracking ref was actually fetched.
    const refExists = await runtime.runGit(
      ["show-ref", "--verify", "--quiet", `refs/remotes/${remoteBranch}`],
      { cwd },
    );
    return refExists.exitCode === 0 ? remoteBranch : null;
  } catch {
    // Best-effort by contract: swallow everything and report "unknown".
    return null;
  }
}
|
||||
|
||||
export async function listBranches(
|
||||
runtime: ReviewGitRuntime,
|
||||
cwd?: string,
|
||||
): Promise<AvailableBranches> {
|
||||
// Emit `<full-refname>\t<short-name>` so we can classify by ref prefix
|
||||
// without guessing from the short form — local branches can contain `/`
|
||||
// (e.g. `feature/foo`), so `name.includes("/")` would misclassify them.
|
||||
const result = await runtime.runGit(
|
||||
[
|
||||
"for-each-ref",
|
||||
"--format=%(refname)\t%(refname:short)",
|
||||
"refs/heads",
|
||||
"refs/remotes",
|
||||
],
|
||||
{ cwd },
|
||||
);
|
||||
if (result.exitCode !== 0) return { local: [], remote: [] };
|
||||
|
||||
const local: string[] = [];
|
||||
const remote: string[] = [];
|
||||
|
||||
for (const line of result.stdout.split("\n")) {
|
||||
const [fullRef, shortName] = line.split("\t");
|
||||
if (!fullRef || !shortName) continue;
|
||||
if (shortName.endsWith("/HEAD")) continue;
|
||||
if (fullRef.startsWith("refs/heads/")) {
|
||||
local.push(shortName);
|
||||
} else if (fullRef.startsWith("refs/remotes/")) {
|
||||
remote.push(shortName);
|
||||
}
|
||||
}
|
||||
|
||||
// Keep both local and remote refs — they can point to different commits
|
||||
// (stale local tracking branches are common) and users need to be able to
|
||||
// pick either explicitly. The picker groups them separately for clarity.
|
||||
local.sort();
|
||||
remote.sort();
|
||||
|
||||
return { local, remote };
|
||||
}
|
||||
|
||||
/**
|
||||
* Pick a safe base branch. Trusts the caller verbatim if they supplied one,
|
||||
* otherwise falls back to the detected default. Shared by Bun (`review.ts`)
|
||||
* and Pi (`serverReview.ts`) so both runtimes behave identically.
|
||||
*
|
||||
* Why trust the caller: the UI picker only ever sends refs from the known
|
||||
* list, and external/programmatic callers may pass tags, SHAs, or refs under
|
||||
* non-`origin` remotes that we must not silently rewrite (a tag `release` is
|
||||
* not the same commit as a branch `origin/release`). Invalid refs surface as
|
||||
* git errors on the next diff call, which is better than silently producing
|
||||
* a patch against the wrong commit.
|
||||
*/
|
||||
export function resolveBaseBranch(
|
||||
requested: string | undefined,
|
||||
detected: string,
|
||||
): string {
|
||||
return requested || detected;
|
||||
}
|
||||
|
||||
export async function getWorktrees(
|
||||
runtime: ReviewGitRuntime,
|
||||
cwd?: string,
|
||||
): Promise<WorktreeInfo[]> {
|
||||
const result = await runtime.runGit(["worktree", "list", "--porcelain"], { cwd });
|
||||
if (result.exitCode !== 0) return [];
|
||||
|
||||
const entries: WorktreeInfo[] = [];
|
||||
let current: Partial<WorktreeInfo> = {};
|
||||
|
||||
for (const line of result.stdout.split("\n")) {
|
||||
if (line.startsWith("worktree ")) {
|
||||
if (current.path) {
|
||||
entries.push({
|
||||
path: current.path,
|
||||
head: current.head || "",
|
||||
branch: current.branch ?? null,
|
||||
});
|
||||
}
|
||||
current = { path: line.slice("worktree ".length) };
|
||||
} else if (line.startsWith("HEAD ")) {
|
||||
current.head = line.slice("HEAD ".length);
|
||||
} else if (line.startsWith("branch ")) {
|
||||
current.branch = line
|
||||
.slice("branch ".length)
|
||||
.replace("refs/heads/", "");
|
||||
} else if (line === "detached") {
|
||||
current.branch = null;
|
||||
}
|
||||
}
|
||||
|
||||
if (current.path) {
|
||||
entries.push({
|
||||
path: current.path,
|
||||
head: current.head || "",
|
||||
branch: current.branch ?? null,
|
||||
});
|
||||
}
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
export async function getGitContext(
|
||||
runtime: ReviewGitRuntime,
|
||||
cwd?: string,
|
||||
): Promise<GitContext> {
|
||||
const [currentBranch, defaultBranch, availableBranches] = await Promise.all([
|
||||
getCurrentBranch(runtime, cwd),
|
||||
getDefaultBranch(runtime, cwd),
|
||||
listBranches(runtime, cwd),
|
||||
]);
|
||||
|
||||
const diffOptions: DiffOption[] = [
|
||||
{ id: "uncommitted", label: "Uncommitted changes" },
|
||||
{ id: "staged", label: "Staged changes" },
|
||||
{ id: "unstaged", label: "Unstaged changes" },
|
||||
{ id: "last-commit", label: "Last commit" },
|
||||
];
|
||||
|
||||
// Always offer Branch diff / PR Diff when a default branch exists. The
|
||||
// older guard hid them when the reviewer was on the default branch (the
|
||||
// `vs <default>` diff from the default branch itself is always empty), but
|
||||
// the base picker now lets reviewers compare against any branch from any
|
||||
// branch, so there's no meaningless-by-construction option. Also: preserving
|
||||
// diff mode across worktree switches and Pi's `initialBase` can land the
|
||||
// reviewer on the default branch with branch/merge-base already active — the
|
||||
// old guard hid the active mode's option, trapping them. Unconditional
|
||||
// emission keeps the active option reachable in every flow.
|
||||
if (defaultBranch) {
|
||||
diffOptions.push({ id: "merge-base", label: "Committed changes" });
|
||||
}
|
||||
|
||||
diffOptions.push({ id: "all", label: "All files (HEAD)" });
|
||||
|
||||
const [worktrees, currentTreePathResult] = await Promise.all([
|
||||
getWorktrees(runtime, cwd),
|
||||
runtime.runGit(["rev-parse", "--show-toplevel"], { cwd }),
|
||||
]);
|
||||
|
||||
const currentTreePath =
|
||||
currentTreePathResult.exitCode === 0
|
||||
? currentTreePathResult.stdout.trim()
|
||||
: null;
|
||||
|
||||
return {
|
||||
currentBranch,
|
||||
defaultBranch,
|
||||
diffOptions,
|
||||
worktrees: worktrees.filter((wt) => wt.path !== currentTreePath),
|
||||
availableBranches,
|
||||
cwd,
|
||||
};
|
||||
}
|
||||
|
||||
async function getUntrackedFileDiffs(
|
||||
runtime: ReviewGitRuntime,
|
||||
srcPrefix = "a/",
|
||||
dstPrefix = "b/",
|
||||
cwd?: string,
|
||||
options?: GitDiffOptions,
|
||||
): Promise<string> {
|
||||
// git ls-files scopes to the CWD subtree and returns CWD-relative paths,
|
||||
// unlike git diff HEAD which always covers the full repo with root-relative
|
||||
// paths. Resolve the repo root so untracked files from the entire repo are
|
||||
// included and their paths match the tracked-diff output.
|
||||
const toplevelResult = await runtime.runGit(
|
||||
["rev-parse", "--show-toplevel"],
|
||||
{ cwd },
|
||||
);
|
||||
const rootCwd =
|
||||
toplevelResult.exitCode === 0 ? toplevelResult.stdout.trim() : cwd;
|
||||
|
||||
const lsResult = await runtime.runGit(
|
||||
["ls-files", "--others", "--exclude-standard"],
|
||||
{ cwd: rootCwd },
|
||||
);
|
||||
if (lsResult.exitCode !== 0) return "";
|
||||
|
||||
const files = lsResult.stdout
|
||||
.trim()
|
||||
.split("\n")
|
||||
.filter((file) => file.length > 0);
|
||||
|
||||
if (files.length === 0) return "";
|
||||
|
||||
const diffs = await Promise.all(
|
||||
files.map(async (file) => {
|
||||
const diffResult = await runtime.runGit(
|
||||
[
|
||||
"diff",
|
||||
"--no-ext-diff",
|
||||
...(options?.hideWhitespace ? ["-w"] : []),
|
||||
"--no-index",
|
||||
`--src-prefix=${srcPrefix}`,
|
||||
`--dst-prefix=${dstPrefix}`,
|
||||
"/dev/null",
|
||||
file,
|
||||
],
|
||||
{ cwd: rootCwd },
|
||||
);
|
||||
return diffResult.stdout;
|
||||
}),
|
||||
);
|
||||
|
||||
return diffs.join("");
|
||||
}
|
||||
|
||||
function assertGitSuccess(
|
||||
result: GitCommandResult,
|
||||
args: string[],
|
||||
): GitCommandResult {
|
||||
if (result.exitCode === 0) return result;
|
||||
|
||||
const command = `git ${args.join(" ")}`;
|
||||
const stderr = result.stderr.trim();
|
||||
throw new Error(
|
||||
stderr
|
||||
? `${command} failed: ${stderr}`
|
||||
: `${command} failed with exit code ${result.exitCode}`,
|
||||
);
|
||||
}
|
||||
|
||||
const WORKTREE_SUB_TYPES = new Set([
|
||||
"uncommitted",
|
||||
"staged",
|
||||
"unstaged",
|
||||
"last-commit",
|
||||
"branch",
|
||||
"merge-base",
|
||||
"all",
|
||||
]);
|
||||
|
||||
export function parseWorktreeDiffType(
|
||||
diffType: string,
|
||||
): { path: string; subType: string } | null {
|
||||
if (!diffType.startsWith("worktree:")) return null;
|
||||
|
||||
const rest = diffType.slice("worktree:".length);
|
||||
const lastColon = rest.lastIndexOf(":");
|
||||
if (lastColon !== -1) {
|
||||
const maybeSub = rest.slice(lastColon + 1);
|
||||
if (WORKTREE_SUB_TYPES.has(maybeSub)) {
|
||||
return { path: rest.slice(0, lastColon), subType: maybeSub };
|
||||
}
|
||||
}
|
||||
|
||||
return { path: rest, subType: "uncommitted" };
|
||||
}
|
||||
|
||||
/**
 * Produce a unified diff for the requested diff mode.
 *
 * Supports worktree-scoped ids (`worktree:<path>[:<sub>]`) by re-running the
 * sub-mode inside that worktree's directory. Failures never throw: errors are
 * folded into the returned `DiffResult.error` with a short, UI-friendly
 * message.
 *
 * @param runtime       git/file-system abstraction for the host runtime
 * @param diffType      diff mode id (possibly worktree-prefixed)
 * @param defaultBranch base ref for "branch"/"merge-base" modes
 * @param externalCwd   directory to run git in (overridden by worktree ids)
 * @param options       e.g. hideWhitespace → pass -w to git diff
 */
export async function runGitDiff(
  runtime: ReviewGitRuntime,
  diffType: DiffType,
  defaultBranch: string = "main",
  externalCwd?: string,
  options?: GitDiffOptions,
): Promise<DiffResult> {
  let patch = "";
  let label = "";
  let cwd: string | undefined = externalCwd;
  let effectiveDiffType = diffType as string;

  // Worktree ids carry both the directory to run in and the real sub-mode.
  if (diffType.startsWith("worktree:")) {
    const parsed = parseWorktreeDiffType(diffType);
    if (!parsed) {
      return {
        patch: "",
        label: "Worktree error",
        error: "Could not parse worktree diff type",
      };
    }
    cwd = parsed.path;
    effectiveDiffType = parsed.subType;
  }

  const wFlag = options?.hideWhitespace ? ["-w"] : [];

  try {
    switch (effectiveDiffType) {
      // Tracked changes vs HEAD plus untracked files rendered as additions.
      case "uncommitted": {
        const trackedDiffArgs = [
          "diff",
          "--no-ext-diff",
          ...wFlag,
          "HEAD",
          "--src-prefix=a/",
          "--dst-prefix=b/",
        ];
        // A repo with no commits yet has no HEAD — skip the tracked diff then.
        const hasHead =
          (await runtime.runGit(["rev-parse", "--verify", "HEAD"], { cwd }))
            .exitCode === 0;
        const trackedPatch = hasHead
          ? assertGitSuccess(
              await runtime.runGit(trackedDiffArgs, { cwd }),
              trackedDiffArgs,
            ).stdout
          : "";
        const untrackedDiff = await getUntrackedFileDiffs(
          runtime,
          "a/",
          "b/",
          cwd,
          options,
        );
        patch = trackedPatch + untrackedDiff;
        label = "Uncommitted changes";
        break;
      }

      // Index vs HEAD only.
      case "staged": {
        const stagedDiffArgs = [
          "diff",
          "--no-ext-diff",
          ...wFlag,
          "--staged",
          "--src-prefix=a/",
          "--dst-prefix=b/",
        ];
        const stagedDiff = assertGitSuccess(
          await runtime.runGit(stagedDiffArgs, { cwd }),
          stagedDiffArgs,
        );
        patch = stagedDiff.stdout;
        label = "Staged changes";
        break;
      }

      // Working tree vs index, plus untracked files.
      case "unstaged": {
        const trackedDiffArgs = [
          "diff",
          "--no-ext-diff",
          ...wFlag,
          "--src-prefix=a/",
          "--dst-prefix=b/",
        ];
        const trackedDiff = assertGitSuccess(
          await runtime.runGit(trackedDiffArgs, { cwd }),
          trackedDiffArgs,
        );
        const untrackedDiff = await getUntrackedFileDiffs(
          runtime,
          "a/",
          "b/",
          cwd,
          options,
        );
        patch = trackedDiff.stdout + untrackedDiff;
        label = "Unstaged changes";
        break;
      }

      // HEAD~1..HEAD, or --root for the repo's very first commit.
      case "last-commit": {
        const hasParent = await runtime.runGit(
          ["rev-parse", "--verify", "HEAD~1"],
          { cwd },
        );
        const args =
          hasParent.exitCode === 0
            ? ["diff", "--no-ext-diff", ...wFlag, "HEAD~1..HEAD", "--src-prefix=a/", "--dst-prefix=b/"]
            : ["diff", "--no-ext-diff", ...wFlag, "--root", "HEAD", "--src-prefix=a/", "--dst-prefix=b/"];
        const lastCommitDiff = assertGitSuccess(
          await runtime.runGit(args, { cwd }),
          args,
        );
        patch = lastCommitDiff.stdout;
        label = "Last commit";
        break;
      }

      case "branch": {
        // `--end-of-options` hardens against a caller-supplied `defaultBranch`
        // that starts with `-` being parsed as a git flag (e.g. `--output=...`
        // would redirect diff output to an attacker-chosen path). Same pattern
        // applied wherever user-controlled refs flow into a git argv.
        const branchDiffArgs = [
          "diff",
          "--no-ext-diff",
          ...wFlag,
          "--src-prefix=a/",
          "--dst-prefix=b/",
          "--end-of-options",
          `${defaultBranch}..HEAD`,
        ];
        const branchDiff = assertGitSuccess(
          await runtime.runGit(branchDiffArgs, { cwd }),
          branchDiffArgs,
        );
        patch = branchDiff.stdout;
        label = `Changes vs ${defaultBranch}`;
        break;
      }

      // PR-style diff: merge-base(default, HEAD)..HEAD, so commits on the
      // base branch made after the fork point don't show up as reverts.
      case "merge-base": {
        const mergeBaseLookupArgs = ["merge-base", "--end-of-options", defaultBranch, "HEAD"];
        const mergeBaseResult = assertGitSuccess(
          await runtime.runGit(mergeBaseLookupArgs, { cwd }),
          mergeBaseLookupArgs,
        );
        const mergeBase = mergeBaseResult.stdout.trim();
        const mergeBaseDiffArgs = [
          "diff",
          "--no-ext-diff",
          ...wFlag,
          "--src-prefix=a/",
          "--dst-prefix=b/",
          "--end-of-options",
          `${mergeBase}..HEAD`,
        ];
        const mergeBaseDiff = assertGitSuccess(
          await runtime.runGit(mergeBaseDiffArgs, { cwd }),
          mergeBaseDiffArgs,
        );
        patch = mergeBaseDiff.stdout;
        label = `PR diff vs ${defaultBranch}`;
        break;
      }

      case "all": {
        // Diff from the empty tree to HEAD — shows every tracked file as an addition.
        // The hash-object fallback constant is git's well-known empty-tree SHA-1.
        const emptyTreeResult = await runtime.runGit(["hash-object", "-t", "tree", "/dev/null"], { cwd });
        const emptyTree = emptyTreeResult.exitCode === 0
          ? emptyTreeResult.stdout.trim()
          : "4b825dc642cb6eb9a060e54bf8d69288fbee4904";
        const allDiffArgs = [
          "diff",
          "--no-ext-diff",
          ...wFlag,
          "--src-prefix=a/",
          "--dst-prefix=b/",
          "--end-of-options",
          `${emptyTree}..HEAD`,
        ];
        const allDiff = assertGitSuccess(
          await runtime.runGit(allDiffArgs, { cwd }),
          allDiffArgs,
        );
        patch = allDiff.stdout;
        label = "All files";
        break;
      }

      default:
        return { patch: "", label: "Unknown diff type" };
    }
  } catch (error) {
    const raw = error instanceof Error ? error.message : String(error);
    // Git dumps its entire --help output on some failures; keep only the
    // first meaningful line so the UI doesn't vomit a wall of text.
    const firstLine = raw.split("\n").find((l) => l.trim().length > 0) ?? raw;
    const message = firstLine.length > 200 ? firstLine.slice(0, 200) + "…" : firstLine;
    return {
      patch: "",
      label: cwd ? "Worktree error" : `Error: ${diffType}`,
      error: message,
    };
  }

  // Worktree diffs get a location prefix: the branch name when on one,
  // otherwise the last path segment of the worktree directory.
  if (cwd) {
    const branch = await getCurrentBranch(runtime, cwd);
    label =
      branch && branch !== "HEAD"
        ? `${branch}: ${label}`
        : `${cwd.split("/").pop()}: ${label}`;
  }

  return { patch, label };
}
|
||||
|
||||
export async function runGitDiffWithContext(
|
||||
runtime: ReviewGitRuntime,
|
||||
diffType: DiffType,
|
||||
gitContext: GitContext,
|
||||
options?: GitDiffOptions,
|
||||
): Promise<DiffResult> {
|
||||
return runGitDiff(runtime, diffType, gitContext.defaultBranch, gitContext.cwd, options);
|
||||
}
|
||||
|
||||
/**
 * Fetch both sides of one file for the given diff mode, so the UI can render
 * a full-file view instead of just patch hunks.
 *
 * The old side honors renames via `oldPath`; either side is null when the
 * file doesn't exist at that revision (added/deleted files) or the lookup
 * fails. Worktree-prefixed diff types re-point `cwd` at the worktree first.
 *
 * @returns `{ oldContent, newContent }`, each `string | null`.
 */
export async function getFileContentsForDiff(
  runtime: ReviewGitRuntime,
  diffType: DiffType,
  defaultBranch: string,
  filePath: string,
  oldPath?: string,
  cwd?: string,
): Promise<{ oldContent: string | null; newContent: string | null }> {
  // Renamed files read the old side from their pre-rename path.
  const oldFilePath = oldPath || filePath;

  let effectiveDiffType = diffType as string;
  if (diffType.startsWith("worktree:")) {
    const parsed = parseWorktreeDiffType(diffType);
    if (!parsed) return { oldContent: null, newContent: null };
    cwd = parsed.path;
    effectiveDiffType = parsed.subType;
  }

  // Read a file as it exists at a ref (or index stage ":0"); null on failure.
  async function gitShow(ref: string, path: string): Promise<string | null> {
    // `--end-of-options` hardens against user-supplied refs starting with `-`.
    const result = await runtime.runGit(["show", "--end-of-options", `${ref}:${path}`], { cwd });
    return result.exitCode === 0 ? result.stdout : null;
  }

  // Read a file from the working tree, resolved against cwd when set.
  async function readWorkingTree(path: string): Promise<string | null> {
    const fullPath = cwd ? resolvePath(cwd, path) : path;
    return runtime.readTextFile(fullPath);
  }

  // Each mode pairs the revision the diff's old side came from with the
  // revision/source of its new side — mirroring runGitDiff's semantics.
  switch (effectiveDiffType) {
    case "uncommitted":
      // HEAD → working tree.
      return {
        oldContent: await gitShow("HEAD", oldFilePath),
        newContent: await readWorkingTree(filePath),
      };
    case "staged":
      // HEAD → index (":0" is the merged stage of the index).
      return {
        oldContent: await gitShow("HEAD", oldFilePath),
        newContent: await gitShow(":0", filePath),
      };
    case "unstaged":
      // Index → working tree.
      return {
        oldContent: await gitShow(":0", oldFilePath),
        newContent: await readWorkingTree(filePath),
      };
    case "last-commit":
      // Parent commit → HEAD.
      return {
        oldContent: await gitShow("HEAD~1", oldFilePath),
        newContent: await gitShow("HEAD", filePath),
      };
    case "branch":
      // Base branch tip → HEAD.
      return {
        oldContent: await gitShow(defaultBranch, oldFilePath),
        newContent: await gitShow("HEAD", filePath),
      };
    case "merge-base": {
      // Fork point of base and HEAD → HEAD; fall back to the branch tip
      // when merge-base can't be computed.
      const mbResult = await runtime.runGit(["merge-base", "--end-of-options", defaultBranch, "HEAD"], { cwd });
      const mb = mbResult.exitCode === 0 ? mbResult.stdout.trim() : defaultBranch;
      return {
        oldContent: await gitShow(mb, oldFilePath),
        newContent: await gitShow("HEAD", filePath),
      };
    }
    case "all":
      // Everything is an addition — there is no old side.
      return {
        oldContent: null,
        newContent: await gitShow("HEAD", filePath),
      };
    default:
      return { oldContent: null, newContent: null };
  }
}
|
||||
|
||||
export function validateFilePath(filePath: string): void {
|
||||
if (filePath.includes("..") || filePath.startsWith("/")) {
|
||||
throw new Error("Invalid file path");
|
||||
}
|
||||
}
|
||||
|
||||
async function ensureGitSuccess(
|
||||
runtime: ReviewGitRuntime,
|
||||
args: string[],
|
||||
cwd?: string,
|
||||
): Promise<void> {
|
||||
const result = await runtime.runGit(args, { cwd });
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(result.stderr.trim() || `git ${args.join(" ")} failed`);
|
||||
}
|
||||
}
|
||||
|
||||
export async function gitAddFile(
|
||||
runtime: ReviewGitRuntime,
|
||||
filePath: string,
|
||||
cwd?: string,
|
||||
): Promise<void> {
|
||||
validateFilePath(filePath);
|
||||
await ensureGitSuccess(runtime, ["add", "--", filePath], cwd);
|
||||
}
|
||||
|
||||
export async function gitResetFile(
|
||||
runtime: ReviewGitRuntime,
|
||||
filePath: string,
|
||||
cwd?: string,
|
||||
): Promise<void> {
|
||||
validateFilePath(filePath);
|
||||
await ensureGitSuccess(runtime, ["reset", "HEAD", "--", filePath], cwd);
|
||||
}
|
||||
|
||||
export function parseP4DiffType(
|
||||
diffType: string,
|
||||
): { changelist: string | "default" } | null {
|
||||
if (diffType === "p4-default") return { changelist: "default" };
|
||||
if (diffType.startsWith("p4-changelist:")) {
|
||||
return { changelist: diffType.slice("p4-changelist:".length) };
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
export function isP4DiffType(diffType: string): boolean {
|
||||
return parseP4DiffType(diffType) !== null;
|
||||
}
|
||||
377
extensions/plannotator/generated/storage.ts
Normal file
377
extensions/plannotator/generated/storage.ts
Normal file
@@ -0,0 +1,377 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/storage.ts
|
||||
/**
|
||||
* Plan Storage Utility
|
||||
*
|
||||
* Saves plans and annotations to ~/.plannotator/plans/
|
||||
* Cross-platform: works on Windows, macOS, and Linux.
|
||||
*
|
||||
* Runtime-agnostic: uses only node:fs, node:path, node:os.
|
||||
*/
|
||||
|
||||
import { homedir } from "os";
|
||||
import { join, resolve, sep } from "path";
|
||||
import { mkdirSync, writeFileSync, readFileSync, readdirSync, statSync, existsSync } from "fs";
|
||||
import { sanitizeTag } from "./project";
|
||||
import { resolveUserPath } from "./resolve-file";
|
||||
|
||||
/**
|
||||
* Get the plan storage directory, creating it if needed.
|
||||
* Cross-platform: uses os.homedir() for Windows/macOS/Linux compatibility.
|
||||
* @param customPath Optional custom path. Supports ~ for home directory.
|
||||
*/
|
||||
export function getPlanDir(customPath?: string | null): string {
|
||||
let planDir: string;
|
||||
|
||||
if (customPath?.trim()) {
|
||||
planDir = resolveUserPath(customPath);
|
||||
} else {
|
||||
planDir = join(homedir(), ".plannotator", "plans");
|
||||
}
|
||||
|
||||
mkdirSync(planDir, { recursive: true });
|
||||
return planDir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the first heading from markdown content.
|
||||
*/
|
||||
function extractFirstHeading(markdown: string): string | null {
|
||||
const match = markdown.match(/^#\s+(.+)$/m);
|
||||
if (!match) return null;
|
||||
return match[1].trim();
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a slug from plan content.
|
||||
* Format: {sanitized-heading}-YYYY-MM-DD
|
||||
*/
|
||||
export function generateSlug(plan: string): string {
|
||||
const date = new Date().toISOString().split("T")[0]; // YYYY-MM-DD
|
||||
|
||||
const heading = extractFirstHeading(plan);
|
||||
const slug = heading ? sanitizeTag(heading) : null;
|
||||
|
||||
return slug ? `${slug}-${date}` : `plan-${date}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Save the plan markdown to disk.
|
||||
* Returns the full path to the saved file.
|
||||
*/
|
||||
export function savePlan(slug: string, content: string, customPath?: string | null): string {
|
||||
const planDir = getPlanDir(customPath);
|
||||
const filePath = join(planDir, `${slug}.md`);
|
||||
writeFileSync(filePath, content, "utf-8");
|
||||
return filePath;
|
||||
}
|
||||
|
||||
/**
|
||||
* Save annotations to disk.
|
||||
* Returns the full path to the saved file.
|
||||
*/
|
||||
export function saveAnnotations(slug: string, annotationsContent: string, customPath?: string | null): string {
|
||||
const planDir = getPlanDir(customPath);
|
||||
const filePath = join(planDir, `${slug}.annotations.md`);
|
||||
writeFileSync(filePath, annotationsContent, "utf-8");
|
||||
return filePath;
|
||||
}
|
||||
|
||||
/**
|
||||
* Save the final snapshot on approve/deny.
|
||||
* Combines plan and annotations into a single file with status suffix.
|
||||
* Returns the full path to the saved file.
|
||||
*/
|
||||
export function saveFinalSnapshot(
|
||||
slug: string,
|
||||
status: "approved" | "denied",
|
||||
plan: string,
|
||||
annotations: string,
|
||||
customPath?: string | null
|
||||
): string {
|
||||
const planDir = getPlanDir(customPath);
|
||||
const filePath = join(planDir, `${slug}-${status}.md`);
|
||||
|
||||
// Combine plan with annotations appended
|
||||
let content = plan;
|
||||
if (annotations && annotations !== "No changes detected.") {
|
||||
content += "\n\n---\n\n" + annotations;
|
||||
}
|
||||
|
||||
writeFileSync(filePath, content, "utf-8");
|
||||
return filePath;
|
||||
}
|
||||
|
||||
// --- Plan Archive ---
|
||||
|
||||
/**
 * Metadata for one archived decision snapshot — a
 * `{slug}-approved.md` / `{slug}-denied.md` file in the plan directory.
 * Produced by parseArchiveFilename and enriched by listArchivedPlans.
 */
export interface ArchivedPlan {
  // File name within the plan directory, including the .md extension.
  filename: string;
  // Human-readable title derived from the slug ("Untitled Plan" fallback).
  title: string;
  // YYYY-MM-DD extracted from the filename; "" when no date is present.
  date: string;
  timestamp: string; // ISO string from file mtime
  status: "approved" | "denied" | "unknown";
  // File size in bytes; 0 when stat failed.
  size: number;
}
||||
|
||||
/**
|
||||
* Parse an archive filename into metadata.
|
||||
* Handles both old (DATE-heading-status.md) and new (heading-DATE-status.md) formats.
|
||||
*/
|
||||
export function parseArchiveFilename(filename: string): ArchivedPlan | null {
|
||||
// Skip non-decision files
|
||||
if (filename.endsWith(".annotations.md") || filename.endsWith(".diff.md")) return null;
|
||||
|
||||
const base = filename.replace(/\.md$/, "");
|
||||
|
||||
// Extract status suffix
|
||||
let status: ArchivedPlan["status"] = "unknown";
|
||||
let slug = base;
|
||||
if (base.endsWith("-approved")) {
|
||||
status = "approved";
|
||||
slug = base.slice(0, -"-approved".length);
|
||||
} else if (base.endsWith("-denied")) {
|
||||
status = "denied";
|
||||
slug = base.slice(0, -"-denied".length);
|
||||
} else {
|
||||
// Skip plain files (no decision status)
|
||||
return null;
|
||||
}
|
||||
|
||||
// Extract date (YYYY-MM-DD) — could be anywhere in the slug
|
||||
const dateMatch = slug.match(/(\d{4}-\d{2}-\d{2})/);
|
||||
const date = dateMatch ? dateMatch[1] : "";
|
||||
|
||||
// Title: remove date, convert hyphens to spaces, trim
|
||||
const title = slug
|
||||
.replace(/\d{4}-\d{2}-\d{2}/, "")
|
||||
.replace(/^-+|-+$/g, "")
|
||||
.replace(/-+/g, " ")
|
||||
.trim() || "Untitled Plan";
|
||||
|
||||
return { filename, title, date, timestamp: "", status, size: 0 };
|
||||
}
|
||||
|
||||
/**
|
||||
* List all archived plans (approved/denied decision snapshots).
|
||||
* Returns plans sorted by date descending.
|
||||
*/
|
||||
export function listArchivedPlans(customPath?: string | null): ArchivedPlan[] {
|
||||
const planDir = getPlanDir(customPath);
|
||||
try {
|
||||
const entries = readdirSync(planDir);
|
||||
const plans: ArchivedPlan[] = [];
|
||||
for (const entry of entries) {
|
||||
if (!entry.endsWith(".md")) continue;
|
||||
const parsed = parseArchiveFilename(entry);
|
||||
if (!parsed) continue;
|
||||
try {
|
||||
const stat = statSync(join(planDir, entry));
|
||||
parsed.size = stat.size;
|
||||
parsed.timestamp = stat.mtime.toISOString();
|
||||
} catch { /* keep defaults */ }
|
||||
plans.push(parsed);
|
||||
}
|
||||
return plans.sort((a, b) => b.date.localeCompare(a.date) || b.timestamp.localeCompare(a.timestamp));
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read an archived plan file by filename.
|
||||
* Returns null if the file doesn't exist or on read error.
|
||||
*/
|
||||
export function readArchivedPlan(filename: string, customPath?: string | null): string | null {
|
||||
const planDir = getPlanDir(customPath);
|
||||
const filePath = resolve(planDir, filename);
|
||||
// Guard against path traversal (resolve + trailing separator, matching reference-handlers.ts)
|
||||
if (!filePath.startsWith(planDir + sep)) return null;
|
||||
try {
|
||||
return readFileSync(filePath, "utf-8");
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Version History ---
|
||||
|
||||
/**
|
||||
* Get the history directory for a project/slug combination, creating it if needed.
|
||||
* History is always stored in ~/.plannotator/history/{project}/{slug}/.
|
||||
* Not affected by the customPath setting (that only affects decision saves).
|
||||
*/
|
||||
export function getHistoryDir(project: string, slug: string): string {
|
||||
const historyDir = join(homedir(), ".plannotator", "history", project, slug);
|
||||
mkdirSync(historyDir, { recursive: true });
|
||||
return historyDir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine the next version number by scanning existing files.
|
||||
* Returns 1 if no versions exist, otherwise max + 1.
|
||||
*/
|
||||
function getNextVersionNumber(historyDir: string): number {
|
||||
try {
|
||||
const entries = readdirSync(historyDir);
|
||||
let max = 0;
|
||||
for (const entry of entries) {
|
||||
const match = entry.match(/^(\d+)\.md$/);
|
||||
if (match) {
|
||||
const num = parseInt(match[1], 10);
|
||||
if (num > max) max = num;
|
||||
}
|
||||
}
|
||||
return max + 1;
|
||||
} catch {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save a plan version to the history directory.
|
||||
* Deduplication: if the latest version has identical content, skip saving.
|
||||
* Returns the version number, file path, and whether a new file was created.
|
||||
*/
|
||||
export function saveToHistory(
|
||||
project: string,
|
||||
slug: string,
|
||||
plan: string
|
||||
): { version: number; path: string; isNew: boolean } {
|
||||
const historyDir = getHistoryDir(project, slug);
|
||||
const nextVersion = getNextVersionNumber(historyDir);
|
||||
|
||||
// Deduplicate: check if latest version has identical content
|
||||
if (nextVersion > 1) {
|
||||
const latestPath = join(historyDir, `${String(nextVersion - 1).padStart(3, "0")}.md`);
|
||||
try {
|
||||
const existing = readFileSync(latestPath, "utf-8");
|
||||
if (existing === plan) {
|
||||
return { version: nextVersion - 1, path: latestPath, isNew: false };
|
||||
}
|
||||
} catch {
|
||||
// File read failed, proceed with saving
|
||||
}
|
||||
}
|
||||
|
||||
const fileName = `${String(nextVersion).padStart(3, "0")}.md`;
|
||||
const filePath = join(historyDir, fileName);
|
||||
writeFileSync(filePath, plan, "utf-8");
|
||||
return { version: nextVersion, path: filePath, isNew: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Read a specific version's content from history.
|
||||
* Returns null if the version doesn't exist or on read error.
|
||||
*/
|
||||
export function getPlanVersion(
|
||||
project: string,
|
||||
slug: string,
|
||||
version: number
|
||||
): string | null {
|
||||
const historyDir = join(homedir(), ".plannotator", "history", project, slug);
|
||||
const fileName = `${String(version).padStart(3, "0")}.md`;
|
||||
const filePath = join(historyDir, fileName);
|
||||
|
||||
try {
|
||||
return readFileSync(filePath, "utf-8");
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the file path for a specific version in history.
|
||||
* Returns null if the version file doesn't exist.
|
||||
*/
|
||||
export function getPlanVersionPath(
|
||||
project: string,
|
||||
slug: string,
|
||||
version: number
|
||||
): string | null {
|
||||
const historyDir = join(homedir(), ".plannotator", "history", project, slug);
|
||||
const fileName = `${String(version).padStart(3, "0")}.md`;
|
||||
const filePath = join(historyDir, fileName);
|
||||
return existsSync(filePath) ? filePath : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of versions stored for a project/slug.
|
||||
* Returns 0 if the directory doesn't exist.
|
||||
*/
|
||||
export function getVersionCount(project: string, slug: string): number {
|
||||
const historyDir = join(homedir(), ".plannotator", "history", project, slug);
|
||||
try {
|
||||
const entries = readdirSync(historyDir);
|
||||
return entries.filter((e) => /^\d+\.md$/.test(e)).length;
|
||||
} catch {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List all versions for a project/slug with metadata.
|
||||
* Returns versions sorted ascending by version number.
|
||||
*/
|
||||
export function listVersions(
|
||||
project: string,
|
||||
slug: string
|
||||
): Array<{ version: number; timestamp: string }> {
|
||||
const historyDir = join(homedir(), ".plannotator", "history", project, slug);
|
||||
try {
|
||||
const entries = readdirSync(historyDir);
|
||||
const versions: Array<{ version: number; timestamp: string }> = [];
|
||||
for (const entry of entries) {
|
||||
const match = entry.match(/^(\d+)\.md$/);
|
||||
if (match) {
|
||||
const version = parseInt(match[1], 10);
|
||||
const filePath = join(historyDir, entry);
|
||||
try {
|
||||
const stat = statSync(filePath);
|
||||
versions.push({ version, timestamp: stat.mtime.toISOString() });
|
||||
} catch {
|
||||
versions.push({ version, timestamp: "" });
|
||||
}
|
||||
}
|
||||
}
|
||||
return versions.sort((a, b) => a.version - b.version);
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List all plan slugs stored for a project.
|
||||
* Returns slugs sorted by most recently modified first.
|
||||
*/
|
||||
export function listProjectPlans(
|
||||
project: string
|
||||
): Array<{ slug: string; versions: number; lastModified: string }> {
|
||||
const projectDir = join(homedir(), ".plannotator", "history", project);
|
||||
try {
|
||||
const entries = readdirSync(projectDir, { withFileTypes: true });
|
||||
const plans: Array<{ slug: string; versions: number; lastModified: string }> = [];
|
||||
for (const entry of entries) {
|
||||
if (!entry.isDirectory()) continue;
|
||||
const slugDir = join(projectDir, entry.name);
|
||||
const files = readdirSync(slugDir).filter((f) => /^\d+\.md$/.test(f));
|
||||
if (files.length === 0) continue;
|
||||
|
||||
// Find most recent file modification time
|
||||
let latest = 0;
|
||||
for (const file of files) {
|
||||
try {
|
||||
const mtime = statSync(join(slugDir, file)).mtime.getTime();
|
||||
if (mtime > latest) latest = mtime;
|
||||
} catch { /* skip */ }
|
||||
}
|
||||
|
||||
plans.push({
|
||||
slug: entry.name,
|
||||
versions: files.length,
|
||||
lastModified: latest ? new Date(latest).toISOString() : "",
|
||||
});
|
||||
}
|
||||
return plans.sort((a, b) => b.lastModified.localeCompare(a.lastModified));
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
601
extensions/plannotator/generated/tour-review.ts
Normal file
601
extensions/plannotator/generated/tour-review.ts
Normal file
@@ -0,0 +1,601 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/server/tour/tour-review.ts
|
||||
import { join } from "node:path";
|
||||
import { homedir, tmpdir } from "node:os";
|
||||
import { mkdir, writeFile, readFile, unlink } from "node:fs/promises";
|
||||
import type { DiffType } from "./review-core.js";
|
||||
import type { PRMetadata } from "./pr-provider.js";
|
||||
import type {
|
||||
CodeTourOutput,
|
||||
TourDiffAnchor,
|
||||
TourKeyTakeaway,
|
||||
TourStop,
|
||||
TourQAItem,
|
||||
} from "./tour.js";
|
||||
|
||||
export type { CodeTourOutput, TourDiffAnchor, TourKeyTakeaway, TourStop, TourQAItem };
|
||||
|
||||
export const TOUR_EMPTY_OUTPUT_ERROR = "Tour generation returned empty or malformed output";
|
||||
|
||||
// JSON schema the agent's structured output is validated against. Claude
// receives it inline via --json-schema (buildTourClaudeCommand); Codex reads
// it from a file materialized by ensureTourSchemaFile. Field shapes mirror
// the interfaces in ./tour.js (CodeTourOutput and friends).
export const TOUR_SCHEMA_JSON = JSON.stringify({
  type: "object",
  properties: {
    title: { type: "string" },
    greeting: { type: "string" },
    intent: { type: "string" },
    before: { type: "string" },
    after: { type: "string" },
    // Mirrors TourKeyTakeaway[].
    key_takeaways: {
      type: "array",
      items: {
        type: "object",
        properties: {
          text: { type: "string" },
          severity: { type: "string", enum: ["info", "important", "warning"] },
        },
        required: ["text", "severity"],
        additionalProperties: false,
      },
    },
    // Mirrors TourStop[].
    stops: {
      type: "array",
      items: {
        type: "object",
        properties: {
          title: { type: "string" },
          gist: { type: "string" },
          detail: { type: "string" },
          transition: { type: "string" },
          // Mirrors TourDiffAnchor[].
          anchors: {
            type: "array",
            items: {
              type: "object",
              properties: {
                file: { type: "string" },
                line: { type: "integer" },
                end_line: { type: "integer" },
                hunk: { type: "string" },
                label: { type: "string" },
              },
              required: ["file", "line", "end_line", "hunk", "label"],
              additionalProperties: false,
            },
          },
        },
        required: ["title", "gist", "detail", "transition", "anchors"],
        additionalProperties: false,
      },
    },
    // Mirrors TourQAItem[].
    qa_checklist: {
      type: "array",
      items: {
        type: "object",
        properties: {
          question: { type: "string" },
          stop_indices: { type: "array", items: { type: "integer" } },
        },
        required: ["question", "stop_indices"],
        additionalProperties: false,
      },
    },
  },
  required: ["title", "greeting", "intent", "before", "after", "key_takeaways", "stops", "qa_checklist"],
  additionalProperties: false,
});
|
||||
|
||||
// System prompt for the tour-narrator agent.
// NOTE(review): everything inside the template literal below is sent to the
// model verbatim — line breaks and spacing are load-bearing prompt text, so
// do not re-wrap or re-indent it. (Verify exact whitespace against the
// generator source, packages/server/tour/tour-review.ts.)
export const TOUR_REVIEW_PROMPT = `# Code Tour Narrator

## Identity
You are a colleague giving a casual, warm tour of work you understand well.
Think of it like sitting down next to someone and saying: "Hey Mike, here's
the PR. Let me walk you through it." The whole voice is conversational, not
documentary. You're telling the story of what changed and why.

The arguments (like "here's why we did it this way" or "we picked X instead
of Y") live INSIDE the stop details, where they belong. The framing (the
greeting, intent, before/after, transitions between stops) stays warm and
human, the way a coworker actually talks over coffee.

You are NOT finding bugs. You are NOT writing a technical report.

## Tone
- Conversational throughout. You're talking to a coworker, not writing docs.
- Use "we" and "you". "Here's what we changed." "You'll notice that..."
- A couple of sentences of context is fine, even for small PRs. If a
colleague was describing a one-line change, they wouldn't just say "I
changed a line." They'd say "Oh yeah, I bumped the TTL from 7 days to 24
hours because the audit flagged it last month." A little color is good.
- Each stop should feel like a colleague pausing to point at something:
"Okay, look at this part. Here's why it's interesting."
- **Do NOT use em-dashes (—) anywhere.** They're a dead giveaway of
AI-generated prose. Use commas, colons, semicolons, or separate sentences
instead. If you want to add an aside, use parentheses or start a new
sentence. Never an em-dash.
- No emoji anywhere. The UI handles all visual labeling deterministically.

## Output structure

### greeting
2-4 sentences welcoming the reviewer and setting the scene. Not a headline,
more like how you'd actually open a conversation. "Hey, so this PR does X
and Y. Grab a coffee; I'll walk you through it." A bit of warmth and context,
even for small changes.
Example: "Hey, so this PR tightens the auth session lifetime from a week down
to 24 hours. It's small in line count but it's the fix the security team has
been asking for since Q1. Let me walk you through it."

### intent
1-3 sentences explaining WHY this changeset exists. What problem is being
solved? What motivated the work? Keep it conversational; you're giving
context, not writing a ticket.

To determine intent:
- If a PR/MR URL was provided, read the PR description (gh pr view or
equivalent). Look for motivation, linked issues, and context the author
provided.
- If the PR body references a GitHub issue (e.g. "Fixes #123", "Closes
owner/repo#456") or GitLab issue, read that specific issue for deeper
context.
- If no PR is provided, infer intent from commit messages, branch name, and
the nature of the changes themselves.
- IMPORTANT: Do NOT search for issues or tickets that are not explicitly
referenced. Do not browse all open issues. Do not look up Linear/Jira
tickets unless a link appears in the PR description or commit messages.
Only follow what is given.

Example: "Closes SEC-412, the overly-permissive session TTL flagged by the
security team during the Q1 audit. It also lays some groundwork for the
offline-first work shipping next sprint."

### before / after
One to two sentences each. Paint the picture of the world before and after
this change. Focus on user or system behavior, not code structure.
Example before: "Sessions lasted 7 days, with no refresh contract, so a
stolen token was dangerous for a full week."
Example after: "Sessions now expire in 24 hours with a clean refresh path,
and mobile clients poll every 15 minutes to stay fresh."

### key_takeaways
3 to 5 bullet points. These are the MOST IMPORTANT things someone needs to
know at a glance about what this changeset DOES. Focus on what changes in
behavior, functionality, or developer experience. Each is ONE sentence. No
emoji, no prefix, just the text.

Severity guide (drives visual styling automatically; pick honestly, don't inflate):
- "info": neutral context, good to know.
- "important": a meaningful change in behavior, capability, or system contract.
- "warning": a behavioral shift worth watching, something that changes how
the system works in a way someone could miss. NOT code smells or style
nits. A clean changeset with no warnings is perfectly normal.

### stops
Each stop is the colleague pausing at a specific change to explain it.

#### How to ORDER stops
Order by READING FLOW, the order the colleague would walk you through the
change to make it understandable. NOT by blast radius or criticality.

Lead with the entry point: the file or function that, if understood alone,
unlocks the rest. Then walk outward:
- Definitions before consumers (types/interfaces/schemas before usage).
- Cause before effect (the change that motivated downstream changes comes first).
- Verification last (tests and migrations after the code they exercise).

#### How to CHUNK stops
A stop is a logical change, NOT a file. If three files changed for one reason,
that's ONE stop with three anchors. If one file has two unrelated changes,
that's two stops. Never "one-stop-per-file" by default; let logic decide.

#### Stop fields
- **title**: Short, friendly. "Token refresh flow", not "Changes to auth/refresh.ts".
- **gist**: ONE sentence. The headline. A reviewer who reads nothing else should
understand this stop from the gist alone.
- **detail**: This is where the colleague pauses to explain. Supports basic markdown.
- Start with 1-2 sentences describing the situation or problem this stop addresses.
- Then make the argument: WHY did we change this? WHY does the new code look the
way it does? If a non-obvious choice was made (data structure, error strategy,
sync vs async, where the logic lives), surface it. "We did X instead of Y
because Z" is exactly what the reviewer wants.
- Use ### headings (e.g. "### Why this shape") to highlight critical sub-sections.
- Use > [!IMPORTANT], > [!WARNING], or > [!NOTE] callout blocks for context
that helps the reader understand non-obvious decisions or behavioral shifts
(e.g., a new default value, a changed error path, a contract that callers
now depend on). These are not for flagging code smells.
- Use - bullet points for multi-part changes or parallel considerations.
- Keep total length reasonable, around 3-6 sentences equivalent. Don't write
an essay.
- **transition**: A short connective phrase to the next stop, in the colleague's
voice. Examples: "Building on that...", "On a related note...", "To support
that change...". Empty string for the last stop.
- **anchors**: The specific diff hunks shown inline below the detail narrative.
Each anchor MUST have a non-empty "hunk" field containing the actual unified
diff text extracted from the changeset. The hunk must include the @@ line.

Valid hunk format (REQUIRED; every anchor needs this):

@@ -42,7 +42,9 @@
function processRequest(req) {
- const result = await fetch(url);
- return result.json();
+ const result = await fetch(url, { timeout: 5000 });
+ if (!result.ok) throw new Error("HTTP " + result.status);
+ return result.json();
}

The label should be a substantive 1-sentence explanation of what this code
section does or why it matters, not a filename paraphrase.
E.g. "Adds a 5-second timeout and explicit error check to prevent silent hangs",
not "Changes to request.ts".

### qa_checklist
4 to 8 verification questions a HUMAN can actually answer. Two valid channels:

1. By READING the code (e.g., "Did we update both call sites of \`legacyAuth()\`?",
"Are all uses of the old token format migrated?", "Does the error handler
cover the new throw paths?").
2. By manually USING the product (e.g., "Sign in, restart the browser, and
confirm the session persists.", "Trigger a 503 from the API and confirm the
retry banner appears.").

NOT machine-runnable test ideas. NOT generic "smoke test" framing. The reviewer
is a person; what would THEY do to gain confidence?

Reference which stops each question relates to via stop_indices. Every question
should reference at least one stop.

## Pipeline

1. Read the full diff (git diff or inlined patch).
2. Read CLAUDE.md and README.md for project context.
3. Read commit messages (git log --oneline) and PR title/body if available.
4. Identify logical groupings of change (cross-file when appropriate). These
become stops.
5. Determine reading flow order: entry point first, then outward. Definitions
before consumers, cause before effect.
6. Write the greeting, intent, before/after, takeaways, stops, and checklist
in the voice of a coworker walking you through the work.
7. Return structured JSON matching the schema.

## Hard constraints
- Every anchor MUST have a non-empty "hunk" field. An anchor with an empty hunk
is broken; it will show "diff not available" to the reviewer. Extract the
real unified diff text from the input patch. Do not leave hunk blank.
- Never fabricate line numbers. Extract them from the diff.
- Gist must be ONE sentence. Not two. Not a run-on. One.
- Detail supports markdown. Use it when it makes the explanation clearer, not
for decoration. Plain prose is fine when the change is simple.
- Anchor labels must explain the code's purpose or the change's impact, not
just describe the filename.
- key_takeaways: 3 to 5 items, each ONE sentence.
- Stops are LOGICAL units, not files. Cross-file grouping is expected.
- Stop ORDER is reading flow: entry point first, definitions before consumers,
cause before effect, verification last.
- Combine trivial changes (renames, imports, formatting) into one "Housekeeping"
stop at the end, or omit entirely.
- QA questions must be answerable by a human, either by reading code or by
using the product. Never frame them as automated tests.
- NEVER use em-dashes (—) anywhere in the output. Use commas, colons,
semicolons, parentheses, or separate sentences. This is a hard constraint.

## Calibration: tour, not review
Your job is to EXPLAIN the changeset, not to critique it. If you genuinely
spot a real bug or a meaningful behavioral concern while reading the code,
surface it naturally in the relevant stop detail or as a warning takeaway.
That's the colleague noticing something worth mentioning. But don't hunt for
problems. Most clean changesets should have zero warnings and zero [!WARNING]
callouts. The primary question is "what does this change do and why?" not
"what's wrong with this code?"`;
|
||||
|
||||
function buildTourUserMessage(
|
||||
patch: string,
|
||||
diffType: DiffType,
|
||||
options?: { defaultBranch?: string; hasLocalAccess?: boolean; prDiffScope?: string },
|
||||
prMetadata?: PRMetadata,
|
||||
): string {
|
||||
if (prMetadata) {
|
||||
if (options?.prDiffScope === "full-stack") {
|
||||
return [
|
||||
`Full-stack tour of ${prMetadata.url}`,
|
||||
"",
|
||||
"This is a stacked PR. The diff below shows ALL accumulated changes from the repository default branch through this PR's head (not just this PR's own layer).",
|
||||
"Walk the reviewer through the complete changeset as a guided tour.",
|
||||
"",
|
||||
"```diff",
|
||||
patch,
|
||||
"```",
|
||||
].join("\n");
|
||||
}
|
||||
if (options?.hasLocalAccess) {
|
||||
return [
|
||||
prMetadata.url,
|
||||
"",
|
||||
"You are in a local worktree checked out at the PR head. The code is available locally.",
|
||||
`To see the PR changes, diff against the remote base branch: git diff origin/${prMetadata.baseBranch}...HEAD`,
|
||||
"Do NOT diff against the local `main` branch; it may be stale. Always use origin/.",
|
||||
"",
|
||||
"Walk the reviewer through this changeset as a guided tour.",
|
||||
].join("\n");
|
||||
}
|
||||
return [prMetadata.url, "", "Walk the reviewer through this PR as a guided tour."].join("\n");
|
||||
}
|
||||
|
||||
const effectiveDiffType = diffType.startsWith("worktree:")
|
||||
? diffType.split(":").pop() || "uncommitted"
|
||||
: diffType;
|
||||
|
||||
switch (effectiveDiffType) {
|
||||
case "uncommitted":
|
||||
return "Walk the reviewer through the current code changes (staged, unstaged, and untracked files) as a guided tour.";
|
||||
case "staged":
|
||||
return "Walk the reviewer through the currently staged code changes (`git diff --staged`) as a guided tour.";
|
||||
case "unstaged":
|
||||
return "Walk the reviewer through the unstaged code changes (tracked modifications and untracked files) as a guided tour.";
|
||||
case "last-commit":
|
||||
return "Walk the reviewer through the code changes introduced in the last commit (`git diff HEAD~1..HEAD`) as a guided tour.";
|
||||
case "branch": {
|
||||
const base = options?.defaultBranch || "main";
|
||||
return `Walk the reviewer through the code changes against the base branch '${base}' as a guided tour. Run \`git diff ${base}..HEAD\` to inspect the changes.`;
|
||||
}
|
||||
case "merge-base": {
|
||||
const base = options?.defaultBranch || "main";
|
||||
return `Walk the reviewer through the PR-style diff against base '${base}' as a guided tour. First find the common ancestor with \`git merge-base ${base} HEAD\`, then run \`git diff <merge-base>..HEAD\` using that commit to inspect only the changes introduced on this branch (matches GitHub's PR view).`;
|
||||
}
|
||||
case "all":
|
||||
return "Walk the reviewer through every file in the repository as a guided tour. All files are shown as additions (diffed against an empty tree).";
|
||||
default:
|
||||
return [
|
||||
"Walk the reviewer through the following code changes as a guided tour.",
|
||||
"",
|
||||
"```diff",
|
||||
patch,
|
||||
"```",
|
||||
].join("\n");
|
||||
}
|
||||
}
|
||||
|
||||
/** Result of buildTourClaudeCommand: the argv to spawn plus the stdin payload. */
export interface TourClaudeCommandResult {
  /** Complete argv for spawning the Claude CLI. */
  command: string[];
  /** Prompt text to pipe to the spawned process's stdin. */
  stdinPrompt: string;
}
|
||||
|
||||
export function buildTourClaudeCommand(prompt: string, model: string = "sonnet", effort?: string): TourClaudeCommandResult {
|
||||
const allowedTools = [
|
||||
"Agent", "Read", "Glob", "Grep",
|
||||
"Bash(git status:*)", "Bash(git diff:*)", "Bash(git log:*)",
|
||||
"Bash(git show:*)", "Bash(git blame:*)", "Bash(git branch:*)",
|
||||
"Bash(git grep:*)", "Bash(git ls-remote:*)", "Bash(git ls-tree:*)",
|
||||
"Bash(git merge-base:*)", "Bash(git remote:*)", "Bash(git rev-parse:*)",
|
||||
"Bash(git show-ref:*)",
|
||||
"Bash(gh pr view:*)", "Bash(gh pr diff:*)", "Bash(gh pr list:*)",
|
||||
"Bash(gh api repos/*/*/pulls/*)", "Bash(gh api repos/*/*/pulls/*/files*)",
|
||||
// The tour prompt follows linked issues (`Fixes #123`, `Closes owner/repo#456`),
|
||||
// so the allowlist has to permit the issue-read commands.
|
||||
"Bash(gh issue view:*)", "Bash(gh api repos/*/*/issues/*)",
|
||||
"Bash(glab mr view:*)", "Bash(glab mr diff:*)",
|
||||
"Bash(glab issue view:*)",
|
||||
"Bash(wc:*)",
|
||||
].join(",");
|
||||
|
||||
const disallowedTools = [
|
||||
"Edit", "Write", "NotebookEdit", "WebFetch", "WebSearch",
|
||||
"Bash(python:*)", "Bash(python3:*)", "Bash(node:*)", "Bash(npx:*)",
|
||||
"Bash(bun:*)", "Bash(bunx:*)", "Bash(sh:*)", "Bash(bash:*)", "Bash(zsh:*)",
|
||||
"Bash(curl:*)", "Bash(wget:*)",
|
||||
].join(",");
|
||||
|
||||
return {
|
||||
command: [
|
||||
"claude", "-p",
|
||||
"--permission-mode", "dontAsk",
|
||||
"--output-format", "stream-json",
|
||||
"--verbose",
|
||||
"--json-schema", TOUR_SCHEMA_JSON,
|
||||
"--no-session-persistence",
|
||||
"--model", model,
|
||||
...(effort ? ["--effort", effort] : []),
|
||||
"--tools", "Agent,Bash,Read,Glob,Grep",
|
||||
"--allowedTools", allowedTools,
|
||||
"--disallowedTools", disallowedTools,
|
||||
],
|
||||
stdinPrompt: prompt,
|
||||
};
|
||||
}
|
||||
|
||||
const TOUR_SCHEMA_DIR = join(homedir(), ".plannotator");
|
||||
const TOUR_SCHEMA_FILE = join(TOUR_SCHEMA_DIR, "tour-schema.json");
|
||||
let tourSchemaMaterialized = false;
|
||||
|
||||
async function ensureTourSchemaFile(): Promise<string> {
|
||||
if (!tourSchemaMaterialized) {
|
||||
await mkdir(TOUR_SCHEMA_DIR, { recursive: true });
|
||||
await writeFile(TOUR_SCHEMA_FILE, TOUR_SCHEMA_JSON);
|
||||
tourSchemaMaterialized = true;
|
||||
}
|
||||
return TOUR_SCHEMA_FILE;
|
||||
}
|
||||
|
||||
export function generateTourOutputPath(): string {
|
||||
return join(tmpdir(), `plannotator-tour-${crypto.randomUUID()}.json`);
|
||||
}
|
||||
|
||||
export async function buildTourCodexCommand(options: {
|
||||
cwd: string;
|
||||
outputPath: string;
|
||||
prompt: string;
|
||||
model?: string;
|
||||
reasoningEffort?: string;
|
||||
fastMode?: boolean;
|
||||
}): Promise<string[]> {
|
||||
const { cwd, outputPath, prompt, model, reasoningEffort, fastMode } = options;
|
||||
const schemaPath = await ensureTourSchemaFile();
|
||||
|
||||
const command = [
|
||||
"codex",
|
||||
// Global flags must precede the "exec" subcommand for the Codex CLI.
|
||||
...(model ? ["-m", model] : []),
|
||||
...(reasoningEffort ? ["-c", `model_reasoning_effort=${reasoningEffort}`] : []),
|
||||
...(fastMode ? ["-c", "service_tier=fast"] : []),
|
||||
"exec",
|
||||
"--output-schema", schemaPath,
|
||||
"-o", outputPath,
|
||||
"--full-auto", "--ephemeral",
|
||||
"-C", cwd,
|
||||
prompt,
|
||||
];
|
||||
|
||||
return command;
|
||||
}
|
||||
|
||||
export function parseTourStreamOutput(stdout: string): CodeTourOutput | null {
|
||||
if (!stdout.trim()) return null;
|
||||
|
||||
const lines = stdout.trim().split('\n');
|
||||
for (let i = lines.length - 1; i >= 0; i--) {
|
||||
const line = lines[i].trim();
|
||||
if (!line) continue;
|
||||
|
||||
try {
|
||||
const event = JSON.parse(line);
|
||||
if (event.type === 'result') {
|
||||
if (event.is_error) return null;
|
||||
const output = event.structured_output;
|
||||
// A tour with no stops isn't a tour — treat as invalid so the UI
|
||||
// error state fires instead of rendering an empty walkthrough.
|
||||
if (!output || !Array.isArray(output.stops) || output.stops.length === 0) return null;
|
||||
return output as CodeTourOutput;
|
||||
}
|
||||
} catch {
|
||||
// Not valid JSON — skip
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
export async function parseTourFileOutput(outputPath: string): Promise<CodeTourOutput | null> {
|
||||
try {
|
||||
const text = await readFile(outputPath, "utf-8");
|
||||
try { await unlink(outputPath); } catch { /* ignore */ }
|
||||
if (!text.trim()) return null;
|
||||
const parsed = JSON.parse(text);
|
||||
// A tour with no stops isn't a tour — treat as invalid so the UI
|
||||
// error state fires instead of rendering an empty walkthrough.
|
||||
if (!parsed || !Array.isArray(parsed.stops) || parsed.stops.length === 0) return null;
|
||||
return parsed as CodeTourOutput;
|
||||
} catch {
|
||||
try { await unlink(outputPath); } catch { /* ignore */ }
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/** Inputs needed to construct a tour-generation CLI command. */
export interface TourSessionBuildCommandOptions {
  /** Working directory the agent CLI is spawned in. */
  cwd: string;
  /** Unified diff text; inlined into the prompt only when the agent cannot reproduce it (see buildTourUserMessage). */
  patch: string;
  /** Which diff the reviewer selected ("staged", "branch", "worktree:...", etc.). */
  diffType: DiffType;
  /** Prompt-shaping flags forwarded to buildTourUserMessage. */
  options?: { defaultBranch?: string; hasLocalAccess?: boolean };
  /** Present when the tour targets a remote PR/MR. */
  prMetadata?: PRMetadata;
  /** Raw extension config (engine, model, effort, reasoningEffort, fastMode). */
  config?: Record<string, unknown>;
}

/** Fully-resolved spawn description handed back to the job runner. */
export interface TourSessionBuildCommandResult {
  /** argv to spawn. */
  command: string[];
  /** Codex only: file the CLI writes its JSON result to. */
  outputPath?: string;
  /** Claude only: capture stdout so the stream-json result can be parsed. */
  captureStdout?: boolean;
  /** Claude only: prompt text piped to the process's stdin. */
  stdinPrompt?: string;
  /** Working directory for the spawn (set on the Claude path). */
  cwd?: string;
  /** Human-readable job label ("Code Tour"). */
  label?: string;
  /** Full prompt that was sent to the agent. */
  prompt?: string;
  /** Which engine the command targets. */
  engine: "claude" | "codex";
  /** Resolved model name; empty string means "let the Codex CLI default pick". */
  model: string;
  /** Claude effort level, when configured. */
  effort?: string;
  /** Codex reasoning effort, when configured. */
  reasoningEffort?: string;
  /** Codex fast service tier, when enabled. */
  fastMode?: boolean;
}

/** Short status summary reported for a finished tour job. */
export interface TourSessionJobSummary {
  /** Headline, e.g. "Tour Generated". */
  correctness: string;
  /** One-line detail, e.g. "4 stops, 6 QA items". */
  explanation: string;
  /** Confidence score; this module always reports 1.0. */
  confidence: number;
}

/** Minimal reference to the job whose output is being processed. */
export interface TourSessionJobRef {
  /** Unique job identifier. */
  id: string;
  /** "codex" selects file-based output parsing; anything else parses captured stdout. */
  engine?: string;
}

/** Arguments delivered when a tour job finishes. */
export interface TourSessionOnJobCompleteOptions {
  job: TourSessionJobRef;
  /** Where the job's output landed: a file path (Codex) or captured stdout (Claude). */
  meta: { outputPath?: string; stdout?: string };
}

/** In-memory tour state for one server session: build commands, parse results, persist checklists. */
export interface TourSession {
  /** Parsed tour output keyed by job id. */
  tourResults: Map<string, CodeTourOutput>;
  /** Reviewer's QA-checkbox state keyed by job id. */
  tourChecklists: Map<string, boolean[]>;
  buildCommand(opts: TourSessionBuildCommandOptions): Promise<TourSessionBuildCommandResult>;
  onJobComplete(opts: TourSessionOnJobCompleteOptions): Promise<{ summary: TourSessionJobSummary | null }>;
  getTour(jobId: string): (CodeTourOutput & { checklist: boolean[] }) | null;
  saveChecklist(jobId: string, checked: boolean[]): void;
}
|
||||
|
||||
export function createTourSession(): TourSession {
|
||||
const tourResults = new Map<string, CodeTourOutput>();
|
||||
const tourChecklists = new Map<string, boolean[]>();
|
||||
|
||||
return {
|
||||
tourResults,
|
||||
tourChecklists,
|
||||
|
||||
async buildCommand({ cwd, patch, diffType, options, prMetadata, config }) {
|
||||
const engine = (typeof config?.engine === "string" ? config.engine : "claude") as "claude" | "codex";
|
||||
const explicitModel = typeof config?.model === "string" && config.model ? config.model : null;
|
||||
// "sonnet" is a Claude model, so we must NOT pass it to Codex when no model
|
||||
// is explicitly selected. Leave Codex model blank and let its CLI default pick.
|
||||
const model = explicitModel ?? (engine === "codex" ? "" : "sonnet");
|
||||
const reasoningEffort = typeof config?.reasoningEffort === "string" && config.reasoningEffort ? config.reasoningEffort : undefined;
|
||||
const effort = typeof config?.effort === "string" && config.effort ? config.effort : undefined;
|
||||
const fastMode = config?.fastMode === true;
|
||||
const userMessage = buildTourUserMessage(patch, diffType, options, prMetadata);
|
||||
const prompt = TOUR_REVIEW_PROMPT + "\n\n---\n\n" + userMessage;
|
||||
|
||||
if (engine === "codex") {
|
||||
const outputPath = generateTourOutputPath();
|
||||
const command = await buildTourCodexCommand({ cwd, outputPath, prompt, model: model || undefined, reasoningEffort, fastMode });
|
||||
return { command, outputPath, prompt, label: "Code Tour", engine: "codex", model, reasoningEffort, fastMode: fastMode || undefined };
|
||||
}
|
||||
|
||||
const { command, stdinPrompt } = buildTourClaudeCommand(prompt, model, effort);
|
||||
return { command, stdinPrompt, prompt, cwd, label: "Code Tour", captureStdout: true, engine: "claude", model, effort };
|
||||
},
|
||||
|
||||
async onJobComplete({ job, meta }) {
|
||||
let output: CodeTourOutput | null = null;
|
||||
if (job.engine === "codex" && meta.outputPath) {
|
||||
output = await parseTourFileOutput(meta.outputPath);
|
||||
} else if (meta.stdout) {
|
||||
output = parseTourStreamOutput(meta.stdout);
|
||||
}
|
||||
|
||||
if (!output) {
|
||||
console.error(`[tour] Failed to parse output for job ${job.id}`);
|
||||
return { summary: null };
|
||||
}
|
||||
|
||||
tourResults.set(job.id, output);
|
||||
const summary: TourSessionJobSummary = {
|
||||
correctness: "Tour Generated",
|
||||
explanation: `${output.stops.length} stop${output.stops.length !== 1 ? "s" : ""}, ${output.qa_checklist.length} QA item${output.qa_checklist.length !== 1 ? "s" : ""}`,
|
||||
confidence: 1.0,
|
||||
};
|
||||
return { summary };
|
||||
},
|
||||
|
||||
getTour(jobId) {
|
||||
const tour = tourResults.get(jobId);
|
||||
if (!tour) return null;
|
||||
return { ...tour, checklist: tourChecklists.get(jobId) ?? [] };
|
||||
},
|
||||
|
||||
saveChecklist(jobId, checked) {
|
||||
tourChecklists.set(jobId, checked);
|
||||
},
|
||||
};
|
||||
}
|
||||
62
extensions/plannotator/generated/tour.ts
Normal file
62
extensions/plannotator/generated/tour.ts
Normal file
@@ -0,0 +1,62 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/tour.ts
|
||||
/** A concrete diff location that a tour stop points at. */
export interface TourDiffAnchor {
  /** Relative file path within the repo. */
  file: string;
  /** Start line in the new file (post-change). */
  line: number;
  /** End line in the new file. */
  end_line: number;
  /** Raw unified diff hunk for this anchor. */
  hunk: string;
  /** One-line chip label, e.g. "Add retry logic". */
  label: string;
}

/** A single scannable takeaway displayed above the stops. */
export interface TourKeyTakeaway {
  /** One sentence — the takeaway. */
  text: string;
  /** Severity for visual styling. */
  severity: "info" | "important" | "warning";
}

/** One chapter of the tour: narrative text plus the code it references. */
export interface TourStop {
  /** Short chapter title, friendly tone. */
  title: string;
  /** ONE sentence — the headline for this stop. Scannable without expanding. */
  gist: string;
  /** 2-3 sentences of additional context. Only shown when expanded. */
  detail: string;
  /** Connective phrase to the next stop, e.g. "Building on that..." (empty for last stop). */
  transition: string;
  /** Diff anchors — the code locations this stop references. */
  anchors: TourDiffAnchor[];
}

/** One human-answerable verification question from the QA checklist. */
export interface TourQAItem {
  /** Product-level verification question. */
  question: string;
  /** Indices into stops[] that this question relates to. */
  stop_indices: number[];
}

/** Complete structured tour as produced by the agent (see TOUR_SCHEMA_JSON). */
export interface CodeTourOutput {
  /** One-line title for the entire tour. */
  title: string;
  /** 1-2 sentence friendly greeting + summary. Conversational, not formal. */
  greeting: string;
  /** 1-3 sentences: why this changeset exists — the motivation/problem being solved. */
  intent: string;
  /** What things looked like before this changeset — one sentence. */
  before: string;
  /** What things look like after — one sentence. */
  after: string;
  /** 3-5 key takeaways — the most critical info, scannable at a glance. */
  key_takeaways: TourKeyTakeaway[];
  /** Ordered tour stops — the detailed walk-through. */
  stops: TourStop[];
  /** Product-level QA checklist. */
  qa_checklist: TourQAItem[];
}

/** UI-side tour shape: server output extended with persisted checklist state. */
export type CodeTourData = CodeTourOutput & { checklist: boolean[] };
|
||||
352
extensions/plannotator/generated/url-to-markdown.ts
Normal file
352
extensions/plannotator/generated/url-to-markdown.ts
Normal file
@@ -0,0 +1,352 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/url-to-markdown.ts
|
||||
/**
|
||||
* URL-to-Markdown conversion.
|
||||
*
|
||||
* Fetches a URL via Jina Reader (default) or plain fetch + Turndown,
|
||||
* returning clean markdown for the annotation pipeline.
|
||||
*/
|
||||
|
||||
import { htmlToMarkdown } from "./html-to-markdown";
|
||||
|
||||
/** Caller-tunable conversion behavior. */
export interface UrlToMarkdownOptions {
  /** Whether to use Jina Reader (true) or plain fetch+Turndown (false). */
  useJina: boolean;
}

/** Conversion result plus which pipeline produced it. */
export interface UrlToMarkdownResult {
  /** The page content as markdown. */
  markdown: string;
  /** Pipeline that produced the markdown (see isConvertedSource). */
  source: "jina" | "fetch+turndown" | "fetch-raw" | "content-negotiation";
}
|
||||
|
||||
/** True when the source indicates the markdown was converted from HTML,
|
||||
* not returned as-is from the origin. */
|
||||
export const isConvertedSource = (source: UrlToMarkdownResult["source"]): boolean =>
|
||||
source === "jina" || source === "fetch+turndown";
|
||||
|
||||
// Guards for outbound fetches: a 30-second timeout and a response-size cap.
const FETCH_TIMEOUT_MS = 30_000;
const MAX_BODY_BYTES = 10 * 1024 * 1024; // 10 MB — matches local HTML file guard
|
||||
|
||||
/**
|
||||
* Skip Jina for local/private URLs — fetch them directly instead.
|
||||
*
|
||||
* IMPORTANT — IPv6 hostname format (verified empirically in Bun 1.3.11 and Node 22):
|
||||
* The WHATWG URL `hostname` getter returns IPv6 addresses WITH brackets.
|
||||
* This is why PRIVATE_IPV6 uses `^\[` — it matches the actual runtime output.
|
||||
*
|
||||
* Verified outputs (both Bun and Node return identical results):
|
||||
* new URL("http://[::1]:3000/").hostname → "[::1]"
|
||||
* new URL("http://[fe80::1]/").hostname → "[fe80::1]"
|
||||
* new URL("http://[fc00::1]/").hostname → "[fc00::1]"
|
||||
* new URL("http://[fd12::1]/").hostname → "[fd12::1]"
|
||||
* new URL("http://[::ffff:192.168.0.1]/").hostname → "[::ffff:c0a8:1]"
|
||||
* new URL("http://[::ffff:169.254.169.254]/").hostname → "[::ffff:a9fe:a9fe]"
|
||||
*
|
||||
* The unbracketed "::1" check (line below) covers the edge case defensively.
|
||||
*/
|
||||
const PRIVATE_IPV4 = /^(10\.\d{1,3}|192\.168|172\.(1[6-9]|2\d|3[01])|169\.254)\.\d{1,3}\.\d{1,3}$/;
|
||||
// Bracketed IPv6 private/reserved prefixes (matches WHATWG URL hostname getter output).
|
||||
// fc00::/7 covers fc00:: through fdff::, so match [fc or [fd prefix.
|
||||
const PRIVATE_IPV6 = /^\[(::1|::ffff:|fe80:|fc[0-9a-f]{2}:|fd[0-9a-f]{2}:)/i;
|
||||
function isLocalUrl(url: string): boolean {
|
||||
try {
|
||||
const { hostname } = new URL(url);
|
||||
if (
|
||||
hostname === "localhost" ||
|
||||
hostname === "::1" ||
|
||||
hostname === "[::1]" ||
|
||||
hostname === "0.0.0.0" ||
|
||||
hostname.endsWith(".local") ||
|
||||
/^127\./.test(hostname) ||
|
||||
PRIVATE_IPV4.test(hostname)
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
// IPv6 private ranges: link-local (fe80::), unique-local (fc00::/fd00::),
|
||||
// and IPv4-mapped (::ffff:) which embeds private IPv4 in hex notation
|
||||
if (PRIVATE_IPV6.test(hostname)) return true;
|
||||
return false;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch a URL and return its content as markdown.
|
||||
*
|
||||
* When `useJina` is true, attempts Jina Reader first (returns markdown
|
||||
* directly, handles JS-rendered pages). On failure, warns to stderr
|
||||
* and falls back to plain fetch + Turndown.
|
||||
*/
|
||||
export async function urlToMarkdown(
|
||||
url: string,
|
||||
options: UrlToMarkdownOptions,
|
||||
): Promise<UrlToMarkdownResult> {
|
||||
// URLs pointing to markdown files — fetch raw if the server returns plain text.
|
||||
// If the server returns HTML (e.g. GitHub's .md viewer), fall through to Jina/Turndown.
|
||||
const urlPath = url.split("?")[0].split("#")[0];
|
||||
if (/\.mdx?$/i.test(urlPath)) {
|
||||
const text = await fetchRawText(url);
|
||||
if (text !== null) {
|
||||
return { markdown: text, source: "fetch-raw" };
|
||||
}
|
||||
// Server returned HTML for this .md URL — fall through to normal conversion
|
||||
}
|
||||
|
||||
// Content negotiation fast path — if the server natively returns markdown
|
||||
// (e.g. Cloudflare's Markdown for Agents), skip Jina/Turndown entirely.
|
||||
const local = isLocalUrl(url);
|
||||
if (!local) {
|
||||
const negotiated = await fetchViaContentNegotiation(url);
|
||||
if (negotiated !== null) {
|
||||
return { markdown: negotiated, source: "content-negotiation" };
|
||||
}
|
||||
}
|
||||
|
||||
if (options.useJina && !local) {
|
||||
try {
|
||||
const markdown = await fetchViaJina(url);
|
||||
return { markdown, source: "jina" };
|
||||
} catch (err) {
|
||||
process.stderr.write(
|
||||
`[plannotator] Warning: Jina Reader failed (${err instanceof Error ? err.message : String(err)}), falling back to direct fetch...\n`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const markdown = await fetchViaTurndown(url);
|
||||
return { markdown, source: "fetch+turndown" };
|
||||
}
|
||||
|
||||
/** Read response body with a size limit. Throws if the body exceeds MAX_BODY_BYTES. */
|
||||
async function readBodyWithLimit(res: Response): Promise<string> {
|
||||
const contentLength = res.headers.get("content-length");
|
||||
if (contentLength) {
|
||||
const bytes = parseInt(contentLength, 10);
|
||||
if (bytes > MAX_BODY_BYTES) {
|
||||
res.body?.cancel();
|
||||
throw new Error(`Response too large (${Math.round(bytes / 1024 / 1024)}MB, max 10MB)`);
|
||||
}
|
||||
}
|
||||
const reader = res.body?.getReader();
|
||||
if (!reader) {
|
||||
// Null body is rare (e.g. manually constructed Response). Still enforce
|
||||
// the size limit via the text result length as a best-effort fallback.
|
||||
const text = await res.text();
|
||||
if (text.length > MAX_BODY_BYTES) {
|
||||
throw new Error(`Response too large (>${Math.round(MAX_BODY_BYTES / 1024 / 1024)}MB, max 10MB)`);
|
||||
}
|
||||
return text;
|
||||
}
|
||||
|
||||
const chunks: Uint8Array[] = [];
|
||||
let totalBytes = 0;
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
totalBytes += value.byteLength;
|
||||
if (totalBytes > MAX_BODY_BYTES) {
|
||||
reader.cancel();
|
||||
throw new Error(`Response too large (>${Math.round(MAX_BODY_BYTES / 1024 / 1024)}MB, max 10MB)`);
|
||||
}
|
||||
chunks.push(value);
|
||||
}
|
||||
return new TextDecoder().decode(Buffer.concat(chunks));
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch a URL as raw text — for .md/.mdx URLs that are already markdown.
|
||||
* Returns null if the server returns HTML (e.g. GitHub's viewer page for
|
||||
* a .md file), signaling the caller to fall through to Jina/Turndown.
|
||||
*
|
||||
* Uses redirect: "manual" with isLocalUrl validation on each hop —
|
||||
* same SSRF protection as fetchViaTurndown.
|
||||
*/
|
||||
async function fetchRawText(url: string): Promise<string | null> {
|
||||
const controller = new AbortController();
|
||||
const timer = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);
|
||||
const headers = { "User-Agent": "Mozilla/5.0 (compatible; Plannotator/1.0; +https://plannotator.ai)" };
|
||||
try {
|
||||
let currentUrl = url;
|
||||
let res = await fetch(currentUrl, { headers, redirect: "manual", signal: controller.signal });
|
||||
|
||||
for (let i = 0; i < MAX_REDIRECTS && REDIRECT_STATUSES.has(res.status); i++) {
|
||||
const location = res.headers.get("location");
|
||||
if (!location) break;
|
||||
currentUrl = new URL(location, currentUrl).href;
|
||||
if (isLocalUrl(currentUrl)) {
|
||||
throw new Error(`Redirect to private/local URL blocked: ${currentUrl}`);
|
||||
}
|
||||
res.body?.cancel();
|
||||
res = await fetch(currentUrl, { headers, redirect: "manual", signal: controller.signal });
|
||||
}
|
||||
|
||||
if (REDIRECT_STATUSES.has(res.status)) {
|
||||
res.body?.cancel();
|
||||
throw new Error("Too many redirects");
|
||||
}
|
||||
if (!res.ok) {
|
||||
res.body?.cancel();
|
||||
throw new Error(`HTTP ${res.status} ${res.statusText}`);
|
||||
}
|
||||
// If server returns HTML (e.g. GitHub's .md viewer), signal caller to
|
||||
// fall through to Jina/Turndown instead of using raw content
|
||||
const ct = res.headers.get("content-type") || "";
|
||||
if (ct.includes("text/html") || ct.includes("application/xhtml+xml")) {
|
||||
res.body?.cancel();
|
||||
return null;
|
||||
}
|
||||
return await readBodyWithLimit(res);
|
||||
} catch (err) {
|
||||
if (err instanceof Error && err.name === "AbortError") {
|
||||
throw new Error(`Timed out fetching ${url}`);
|
||||
}
|
||||
throw err;
|
||||
} finally {
|
||||
clearTimeout(timer);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Content negotiation fast path — request `text/markdown` via the Accept header.
|
||||
* Sites that support Cloudflare's "Markdown for Agents" (or similar) will return
|
||||
* markdown directly, letting us skip Jina and Turndown entirely.
|
||||
* Returns null if the server doesn't serve markdown.
|
||||
*/
|
||||
const NEGOTIATION_TIMEOUT_MS = 5_000; // Short timeout — this is a best-effort optimization
|
||||
|
||||
async function fetchViaContentNegotiation(url: string): Promise<string | null> {
|
||||
const controller = new AbortController();
|
||||
const timer = setTimeout(() => controller.abort(), NEGOTIATION_TIMEOUT_MS);
|
||||
const headers = {
|
||||
"User-Agent": "Mozilla/5.0 (compatible; Plannotator/1.0; +https://plannotator.ai)",
|
||||
Accept: "text/markdown, text/html;q=0.9",
|
||||
};
|
||||
|
||||
try {
|
||||
let currentUrl = url;
|
||||
let res = await fetch(currentUrl, { headers, redirect: "manual", signal: controller.signal });
|
||||
|
||||
for (let i = 0; i < MAX_REDIRECTS && REDIRECT_STATUSES.has(res.status); i++) {
|
||||
const location = res.headers.get("location");
|
||||
if (!location) break;
|
||||
currentUrl = new URL(location, currentUrl).href;
|
||||
if (isLocalUrl(currentUrl)) {
|
||||
res.body?.cancel();
|
||||
return null;
|
||||
}
|
||||
res.body?.cancel();
|
||||
res = await fetch(currentUrl, { headers, redirect: "manual", signal: controller.signal });
|
||||
}
|
||||
|
||||
if (!res.ok) {
|
||||
res.body?.cancel();
|
||||
return null;
|
||||
}
|
||||
|
||||
const ct = res.headers.get("content-type") || "";
|
||||
if (!ct.includes("text/markdown")) {
|
||||
res.body?.cancel();
|
||||
return null;
|
||||
}
|
||||
|
||||
return await readBodyWithLimit(res);
|
||||
} catch {
|
||||
return null;
|
||||
} finally {
|
||||
clearTimeout(timer);
|
||||
}
|
||||
}
|
||||
|
||||
/** Fetch via Jina Reader — returns markdown directly. */
|
||||
async function fetchViaJina(url: string): Promise<string> {
|
||||
// Strip fragment (never sent to server) and encode for Jina's path-based API
|
||||
const cleanUrl = url.split("#")[0];
|
||||
const jinaUrl = `https://r.jina.ai/${cleanUrl}`;
|
||||
const headers: Record<string, string> = {
|
||||
Accept: "text/plain",
|
||||
};
|
||||
|
||||
const apiKey = process.env.JINA_API_KEY;
|
||||
if (apiKey) {
|
||||
headers.Authorization = `Bearer ${apiKey}`;
|
||||
}
|
||||
|
||||
const controller = new AbortController();
|
||||
const timer = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);
|
||||
|
||||
try {
|
||||
const res = await fetch(jinaUrl, { headers, signal: controller.signal });
|
||||
if (!res.ok) {
|
||||
res.body?.cancel();
|
||||
throw new Error(`HTTP ${res.status} ${res.statusText}`);
|
||||
}
|
||||
return await readBodyWithLimit(res);
|
||||
} catch (err) {
|
||||
if (err instanceof Error && err.name === "AbortError") {
|
||||
throw new Error("timed out");
|
||||
}
|
||||
throw err;
|
||||
} finally {
|
||||
clearTimeout(timer);
|
||||
}
|
||||
}
|
||||
|
||||
// Cap on manually-followed redirect hops before giving up.
const MAX_REDIRECTS = 10;
// HTTP statuses treated as redirects when following hops manually.
const REDIRECT_STATUSES = new Set([301, 302, 303, 307, 308]);

/** Fetch raw HTML and convert via Turndown. Follows redirects manually to validate each hop. */
async function fetchViaTurndown(url: string): Promise<string> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);

  const headers = {
    "User-Agent":
      "Mozilla/5.0 (compatible; Plannotator/1.0; +https://plannotator.ai)",
    Accept: "text/html,application/xhtml+xml",
  };

  try {
    let currentUrl = url;
    // redirect: "manual" so each Location target can be screened below.
    let res = await fetch(currentUrl, { headers, redirect: "manual", signal: controller.signal });

    for (let i = 0; i < MAX_REDIRECTS && REDIRECT_STATUSES.has(res.status); i++) {
      const location = res.headers.get("location");
      if (!location) break;

      // Resolve relative Location headers against the current URL, then
      // block redirects that would land on a private/local host (SSRF guard).
      currentUrl = new URL(location, currentUrl).href;
      if (isLocalUrl(currentUrl)) {
        throw new Error(`Redirect to private/local URL blocked: ${currentUrl}`);
      }
      res.body?.cancel();
      res = await fetch(currentUrl, { headers, redirect: "manual", signal: controller.signal });
    }

    // Still a redirect status after MAX_REDIRECTS hops → redirect loop.
    if (REDIRECT_STATUSES.has(res.status)) {
      res.body?.cancel();
      throw new Error("Too many redirects");
    }
    if (!res.ok) {
      res.body?.cancel();
      throw new Error(`HTTP ${res.status} ${res.statusText}`);
    }
    // Turndown only makes sense for HTML input — reject anything else.
    const contentType = res.headers.get("content-type") || "";
    if (
      !contentType.includes("text/html") &&
      !contentType.includes("application/xhtml+xml")
    ) {
      res.body?.cancel();
      throw new Error(
        `Not an HTML page (content-type: ${contentType})`,
      );
    }
    const html = await readBodyWithLimit(res);
    return htmlToMarkdown(html);
  } catch (err) {
    // AbortController fires as AbortError on timeout — rewrap with the URL.
    if (err instanceof Error && err.name === "AbortError") {
      throw new Error(`Timed out fetching ${url}`);
    }
    throw err;
  } finally {
    clearTimeout(timer);
  }
}
|
||||
104
extensions/plannotator/generated/worktree-pool.ts
Normal file
104
extensions/plannotator/generated/worktree-pool.ts
Normal file
@@ -0,0 +1,104 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/worktree-pool.ts
|
||||
/**
|
||||
* Worktree Pool — manages a set of per-PR git worktrees for a review session.
|
||||
*
|
||||
* Runtime-agnostic. Uses ReviewGitRuntime for all git operations.
|
||||
* Both Bun and Pi servers import this module (Pi via vendor.sh).
|
||||
*
|
||||
* Each PR visited during a session gets its own worktree, created on first
|
||||
* access and cached for the session lifetime. Agents run in their PR's
|
||||
* worktree undisturbed by PR switches.
|
||||
*/
|
||||
|
||||
import { join } from "node:path";
|
||||
import type { ReviewGitRuntime } from "./review-core";
|
||||
import type { PRMetadata } from "./pr-provider";
|
||||
import { createWorktree, removeWorktree, fetchRef, ensureObjectAvailable } from "./worktree";
|
||||
|
||||
/** One PR's cached worktree within a session pool. */
export interface PoolEntry {
  /** Absolute path of the worktree (created under `<sessionDir>/pool/pr-<number>`). */
  path: string;
  /** PR/MR URL this worktree was created for — the pool's lookup key. */
  prUrl: string;
  /** PR number (GitHub) or MR iid (GitLab). */
  number: number;
  /** True once the worktree is checked out and usable; `resolve()` only returns ready entries. */
  ready: boolean;
}

/** Static configuration for a worktree pool. */
export interface WorktreePoolConfig {
  /** Session directory under which per-PR worktrees are created. */
  sessionDir: string;
  /** Source repository the worktrees are added from (cwd for all git commands). */
  repoDir: string;
  /** When false, `ensure()` refuses to create worktrees for other PRs (cross-repo case). */
  isSameRepo: boolean;
}

/** Per-session pool of PR worktrees, keyed by PR URL. */
export interface WorktreePool {
  /** Look up an entry by PR URL (ready or not). */
  get(prUrl: string): PoolEntry | undefined;
  /** Whether any entry (ready or not) exists for this PR URL. */
  has(prUrl: string): boolean;
  /** Worktree path for this PR URL, or undefined if absent or not yet ready. */
  resolve(prUrl: string): string | undefined;
  /** Get or create the worktree for a PR; concurrent calls for the same PR share one creation. */
  ensure(runtime: ReviewGitRuntime, metadata: PRMetadata): Promise<PoolEntry>;
  /** Iterate all pool entries. */
  entries(): IterableIterator<PoolEntry>;
  /** Force-remove every worktree in the pool and clear it. */
  cleanup(runtime: ReviewGitRuntime): Promise<void>;
}
|
||||
|
||||
/**
 * Create a per-session worktree pool.
 *
 * @param config  Session/repo paths and the same-repo flag.
 * @param initial Optional pre-existing entry (e.g. the session's primary
 *                worktree) seeded into the pool up front.
 */
export function createWorktreePool(config: WorktreePoolConfig, initial?: PoolEntry): WorktreePool {
  // Completed entries, keyed by PR URL.
  const pool = new Map<string, PoolEntry>();
  // In-flight creations, so concurrent ensure() calls share one promise.
  const pending = new Map<string, Promise<PoolEntry>>();
  if (initial) pool.set(initial.prUrl, initial);

  return {
    get(prUrl) { return pool.get(prUrl); },
    has(prUrl) { return pool.has(prUrl); },
    resolve(prUrl) {
      // Only hand out paths for worktrees that finished setup.
      const entry = pool.get(prUrl);
      return entry?.ready ? entry.path : undefined;
    },

    async ensure(runtime, metadata) {
      const existing = pool.get(metadata.url);
      if (existing?.ready) return existing;

      // Deduplicate concurrent creations for the same PR.
      const inflight = pending.get(metadata.url);
      if (inflight) return inflight;

      if (!config.isSameRepo) {
        throw new Error("Cross-repo pool cannot create worktrees for other PRs");
      }

      const promise = (async (): Promise<PoolEntry> => {
        // GitHub uses `number`; GitLab uses `iid` — both feed the same path/ref scheme.
        const number = metadata.platform === "github" ? metadata.number : metadata.iid;
        const worktreePath = join(config.sessionDir, "pool", `pr-${number}`);
        const refSpec = metadata.platform === "github"
          ? `refs/pull/${number}/head`
          : `refs/merge-requests/${number}/head`;

        // Fetch base branch + base SHA first, then the PR head ref last so
        // FETCH_HEAD points at the PR head when the worktree is created.
        await fetchRef(runtime, metadata.baseBranch, { cwd: config.repoDir });
        await ensureObjectAvailable(runtime, metadata.baseSha, { cwd: config.repoDir });
        await fetchRef(runtime, refSpec, { cwd: config.repoDir });

        await createWorktree(runtime, {
          ref: "FETCH_HEAD",
          path: worktreePath,
          detach: true,
          cwd: config.repoDir,
        });

        const entry: PoolEntry = { path: worktreePath, prUrl: metadata.url, number, ready: true };
        pool.set(metadata.url, entry);
        return entry;
      })();

      // Register before awaiting so later callers join this creation;
      // always clear afterwards (success or failure) so retries are possible.
      pending.set(metadata.url, promise);
      try {
        return await promise;
      } finally {
        pending.delete(metadata.url);
      }
    },

    entries() { return pool.values(); },

    async cleanup(runtime) {
      // removeWorktree is best-effort and never throws, so one failure
      // doesn't stop cleanup of the remaining entries.
      for (const entry of pool.values()) {
        await removeWorktree(runtime, entry.path, { force: true, cwd: config.repoDir });
      }
      pool.clear();
    },
  };
}
|
||||
120
extensions/plannotator/generated/worktree.ts
Normal file
120
extensions/plannotator/generated/worktree.ts
Normal file
@@ -0,0 +1,120 @@
|
||||
// @generated — DO NOT EDIT. Source: packages/shared/worktree.ts
|
||||
/**
|
||||
* Worktree — runtime-agnostic git worktree primitives.
|
||||
*
|
||||
* Uses ReviewGitRuntime so both Bun and Node runtimes can use the same logic.
|
||||
* Lives in packages/shared/ and gets vendored to Pi via vendor.sh.
|
||||
*
|
||||
* Designed as composable primitives, not tied to any specific use case.
|
||||
* PR local checkout, agent sandboxes, parallel sessions — all compose from these.
|
||||
*/
|
||||
|
||||
import type { ReviewGitRuntime } from "./review-core";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Options for {@link createWorktree}. */
export interface CreateWorktreeOptions {
  /** Git ref to check out (branch name, SHA, FETCH_HEAD, etc.) */
  ref: string;
  /** Absolute path where the worktree will be created. */
  path: string;
  /** Create in detached HEAD mode (no branch). Default: false. */
  detach?: boolean;
  /** CWD of the source repository. Defaults to process.cwd(). */
  cwd?: string;
}

/** Options for {@link removeWorktree}. */
export interface RemoveWorktreeOptions {
  /** Force removal even if the worktree has modifications. Default: false. */
  force?: boolean;
  /** CWD of the source repository. Required if the worktree was created from a different cwd. */
  cwd?: string;
}
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Primitives
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Fetch a ref from origin.
|
||||
* Runs: `git fetch origin <ref>`
|
||||
* Throws on failure.
|
||||
*/
|
||||
export async function fetchRef(
|
||||
runtime: ReviewGitRuntime,
|
||||
ref: string,
|
||||
options?: { cwd?: string },
|
||||
): Promise<void> {
|
||||
const result = await runtime.runGit(["fetch", "origin", "--", ref], { cwd: options?.cwd });
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`git fetch origin ${ref} failed: ${result.stderr.trim() || `exit code ${result.exitCode}`}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure a git object (commit SHA) is available locally.
|
||||
* Checks with `git cat-file -t`, fetches from origin if missing.
|
||||
* Returns true if the object is available after the attempt.
|
||||
*/
|
||||
export async function ensureObjectAvailable(
|
||||
runtime: ReviewGitRuntime,
|
||||
sha: string,
|
||||
options?: { cwd?: string },
|
||||
): Promise<boolean> {
|
||||
const check = await runtime.runGit(["cat-file", "-t", sha], { cwd: options?.cwd });
|
||||
if (check.exitCode === 0) return true;
|
||||
|
||||
// Object missing locally — try fetching it
|
||||
const fetch = await runtime.runGit(["fetch", "origin", "--", sha], { cwd: options?.cwd });
|
||||
if (fetch.exitCode !== 0) return false;
|
||||
|
||||
// Verify it's now available
|
||||
const recheck = await runtime.runGit(["cat-file", "-t", sha], { cwd: options?.cwd });
|
||||
return recheck.exitCode === 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a git worktree.
|
||||
* Runs: `git worktree add [--detach] <path> <ref>`
|
||||
* Throws on failure with a descriptive error.
|
||||
*/
|
||||
export async function createWorktree(
|
||||
runtime: ReviewGitRuntime,
|
||||
options: CreateWorktreeOptions,
|
||||
): Promise<{ worktreePath: string }> {
|
||||
const args = ["worktree", "add"];
|
||||
if (options.detach) args.push("--detach");
|
||||
args.push(options.path, options.ref);
|
||||
|
||||
const result = await runtime.runGit(args, { cwd: options.cwd });
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`git worktree add failed: ${result.stderr.trim() || `exit code ${result.exitCode}`}`);
|
||||
}
|
||||
|
||||
return { worktreePath: options.path };
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a git worktree. Best-effort — logs errors but does not throw.
|
||||
* Runs: `git worktree remove [--force] <path>`
|
||||
*/
|
||||
export async function removeWorktree(
|
||||
runtime: ReviewGitRuntime,
|
||||
worktreePath: string,
|
||||
options?: RemoveWorktreeOptions,
|
||||
): Promise<void> {
|
||||
const args = ["worktree", "remove"];
|
||||
if (options?.force) args.push("--force");
|
||||
args.push(worktreePath);
|
||||
|
||||
try {
|
||||
const result = await runtime.runGit(args, { cwd: options?.cwd });
|
||||
if (result.exitCode !== 0) {
|
||||
console.error(`Warning: git worktree remove failed for ${worktreePath}: ${result.stderr.trim()}`);
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(`Warning: worktree cleanup error: ${err instanceof Error ? err.message : String(err)}`);
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user