file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
src/memory/types.ts | TypeScript | /**
* Working Memory Types
*
* Types for the working memory store that allows Claude to remember
* facts, decisions, and context across conversation boundaries.
*/
/**
 * A single item stored in working memory
 */
export interface WorkingMemoryItem {
  /** Unique identifier for this item. */
  id: string;
  /** Lookup key the item is stored and recalled under. */
  key: string;
  /** The remembered content. */
  value: string;
  /** Optional free-form context explaining why the item was stored. */
  context?: string;
  /** Tags used to filter/group items on recall. */
  tags: string[];
  /** Session that created the item, when known. */
  sessionId?: string;
  /** Project the item belongs to. */
  projectPath: string;
  /** Creation timestamp (same numeric unit as updatedAt). */
  createdAt: number;
  /** Last-modified timestamp. */
  updatedAt: number;
  /** Optional expiry timestamp; absent means the item does not expire. */
  expiresAt?: number;
}
/**
 * Database row representation of a working memory item
 * (snake_case columns mirroring WorkingMemoryItem fields).
 */
export interface WorkingMemoryRow {
  id: string;
  key: string;
  value: string;
  context: string | null;
  tags: string | null; // JSON array
  session_id: string | null;
  project_path: string;
  created_at: number;
  updated_at: number;
  expires_at: number | null;
  // Serialized embedding blob; presumably consumed by semantic recall — confirm with the store implementation.
  embedding: Buffer | null;
}
/**
 * Options for storing a memory item
 */
export interface RememberOptions {
  key: string;
  value: string;
  context?: string;
  tags?: string[];
  sessionId?: string;
  projectPath: string;
  ttl?: number; // Time-to-live in seconds
}
/**
 * Options for recalling memory items.
 * All filters are optional; omitted fields do not constrain the query.
 */
export interface RecallOptions {
  key?: string;
  tags?: string[];
  sessionId?: string;
  projectPath?: string;
  /** When true, expired items are included in the results. */
  includeExpired?: boolean;
}
/**
 * Options for semantic recall
 */
export interface SemanticRecallOptions {
  /** Natural-language query to match against stored items. */
  query: string;
  projectPath: string;
  /** Maximum number of results to return. */
  limit?: number;
  /** Minimum similarity score required for a match. */
  threshold?: number;
}
/**
 * Result from semantic recall with similarity score
 */
export interface SemanticRecallResult extends WorkingMemoryItem {
  /** Similarity between the query and this item (higher is more similar). */
  similarity: number;
}
/**
 * Session handoff document for transferring context between conversations
 */
export interface SessionHandoff {
  id: string;
  /** Session that produced this handoff. */
  fromSessionId: string;
  projectPath: string;
  createdAt: number;
  // Extracted content
  decisions: HandoffDecision[];
  activeFiles: ActiveFile[];
  pendingTasks: PendingTask[];
  workingMemory: WorkingMemoryItem[];
  // Summary for quick injection
  contextSummary: string;
  // Tracking
  /** Session that resumed from this handoff, once consumed. */
  resumedBy?: string;
  /** When the handoff was resumed, once consumed. */
  resumedAt?: number;
}
/**
 * Decision included in a handoff
 */
export interface HandoffDecision {
  id: string;
  /** The decision itself. */
  text: string;
  /** Why the decision was made, when captured. */
  rationale?: string;
  context?: string;
  timestamp: number;
}
/**
 * File that was actively worked on
 */
export interface ActiveFile {
  path: string;
  /** Most recent operation performed on the file. */
  lastAction: "read" | "edit" | "create" | "delete";
  summary?: string;
  timestamp: number;
}
/**
 * Task that was in progress
 */
export interface PendingTask {
  description: string;
  status: "in_progress" | "blocked" | "pending";
  context?: string;
}
/**
 * Database row for session handoff.
 * The extracted content of the handoff is stored denormalized as JSON in handoff_data.
 */
export interface SessionHandoffRow {
  id: string;
  from_session_id: string;
  project_path: string;
  created_at: number;
  handoff_data: string; // JSON
  resumed_by_session_id: string | null;
  resumed_at: number | null;
}
/**
 * Live checkpoint for real-time session tracking
 */
export interface SessionCheckpoint {
  id: string;
  sessionId: string;
  projectPath: string;
  /** Sequence number of this checkpoint within the session. */
  checkpointNumber: number;
  createdAt: number;
  decisions: HandoffDecision[];
  activeFiles: ActiveFile[];
  /** Arbitrary task-tracking state captured at checkpoint time. */
  taskState?: Record<string, unknown>;
  contextSummary: string;
}
/**
 * Database row for session checkpoint
 */
export interface SessionCheckpointRow {
  id: string;
  session_id: string;
  project_path: string;
  checkpoint_number: number;
  created_at: number;
  decisions: string; // JSON array
  active_files: string; // JSON array
  task_state: string | null; // JSON
  context_summary: string;
}
/**
 * Configuration for real-time watching
 */
export interface RealtimeConfig {
  /** Master switch for real-time tracking. */
  enabled: boolean;
  /** Paths to watch for changes. */
  watchPaths: string[];
  extractionInterval: number; // ms between extractions
  checkpointInterval: number; // ms between auto-checkpoints
  /** Which event categories are automatically stored as memories. */
  autoRemember: {
    decisions: boolean;
    fileEdits: boolean;
    errors: boolean;
  };
}
/**
 * Context injection result
 */
export interface InjectedContext {
  /** Handoff included in the injection, when one was found. */
  handoff?: SessionHandoff;
  decisions: HandoffDecision[];
  memory: WorkingMemoryItem[];
  recentFiles: ActiveFile[];
  /** Human-readable summary of the injected context. */
  summary: string;
  /** Estimated token count of the injected content. */
  tokenEstimate: number;
}
/**
 * Options for context injection
 */
export interface ContextInjectionOptions {
  /** Optional query used to select the most relevant context. */
  query?: string;
  projectPath: string;
  /** Upper bound on tokens to inject. */
  maxTokens?: number;
  /** Subset of data sources to draw context from. */
  sources?: Array<"history" | "decisions" | "memory" | "handoffs">;
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/CodexConversationParser.ts | TypeScript | /**
* Codex Conversation Parser for MCP integration.
*
* This parser reads conversation history from Codex's storage location
* (~/.codex/sessions) and converts it to the same format as ConversationParser.
*
* Codex stores conversations in a date-hierarchical structure:
* ~/.codex/sessions/YYYY/MM/DD/rollout-{timestamp}-{uuid}.jsonl
*
* Each line in a Codex session file has the structure:
* {
* timestamp: string,
* type: "session_meta" | "response_item" | "event_msg" | "turn_context",
* payload: { ... }
* }
*
* @example
* ```typescript
* const parser = new CodexConversationParser();
 * const result = parser.parseSession('/Users/username/.codex');
* console.error(`Parsed ${result.conversations.length} Codex sessions`);
* ```
*/
import { readFileSync, readdirSync, existsSync, statSync } from "fs";
import { basename, dirname, isAbsolute, join, resolve } from "path";
import { nanoid } from "nanoid";
import type {
  Conversation,
  Message,
  ToolUse,
  ToolResult,
  FileEdit,
  ThinkingBlock,
  ParseResult,
} from "./ConversationParser.js";
import { getCanonicalProjectPath } from "../utils/worktree.js";
// Codex-specific type definitions
/** One JSONL line in a Codex session file. */
interface CodexEntry {
  /** Entry timestamp string (parsed with `new Date` downstream). */
  timestamp: string;
  /** Entry discriminator; payload shape depends on this value. */
  type: "session_meta" | "response_item" | "event_msg" | "turn_context";
  payload: Record<string, unknown>;
}
/** Payload of a "session_meta" entry. */
interface CodexSessionMeta {
  id: string;
  timestamp: string;
  /** Working directory the session ran in, when recorded. */
  cwd?: string;
  originator?: string;
  cli_version?: string;
  instructions?: string;
  source?: string;
  model_provider?: string;
  /** Git state recorded in the session metadata, when available. */
  git?: {
    commit_hash?: string;
    branch?: string;
    repository_url?: string;
  };
}
/**
 * Loosely-typed item inside a response_item payload's content array.
 * A single shape covering the text / thinking / tool_use / tool_result variants.
 */
interface CodexContentItem {
  type?: string;
  text?: string;
  thinking?: string;
  signature?: string;
  /** Tool name (tool_use items). */
  name?: string;
  /** Tool input (tool_use items). */
  input?: Record<string, unknown>;
  id?: string;
  /** Id of the originating tool_use (tool_result items). */
  tool_use_id?: string;
  content?: unknown;
  is_error?: boolean;
  stdout?: string;
  stderr?: string;
}
/**
 * Parser for Codex conversation history.
 *
 * Converts Codex session files into the same format as ConversationParser
 * so they can be stored in the same database and searched together.
 */
export class CodexConversationParser {
  /** Memoized cwd -> canonical project path lookups (one resolution per distinct cwd). */
  private cwdCache = new Map<string, { canonicalPath: string; isGitRepo: boolean }>();

  /**
   * Resolve a session's working directory to a canonical project path.
   *
   * Falls back to the Codex home directory when no usable cwd is available.
   * Relative cwds are resolved against the Codex home directory.
   *
   * @param cwd - Working directory recorded in the session (may be relative or empty)
   * @param codexPath - Codex home directory, used as fallback and resolution base
   */
  private resolveProjectPath(
    cwd: string | undefined,
    codexPath: string
  ): { rawCwd?: string; projectPath: string; isGitRepo: boolean } {
    if (!cwd) {
      return { projectPath: codexPath, isGitRepo: false };
    }
    const rawCwd = cwd.trim();
    if (!rawCwd) {
      return { projectPath: codexPath, isGitRepo: false };
    }
    const resolvedCwd = isAbsolute(rawCwd) ? resolve(rawCwd) : resolve(codexPath, rawCwd);
    const cached = this.cwdCache.get(resolvedCwd);
    if (cached) {
      return { rawCwd: resolvedCwd, projectPath: cached.canonicalPath, isGitRepo: cached.isGitRepo };
    }
    const { canonicalPath, isGitRepo } = getCanonicalProjectPath(resolvedCwd);
    this.cwdCache.set(resolvedCwd, { canonicalPath, isGitRepo });
    return { rawCwd: resolvedCwd, projectPath: canonicalPath, isGitRepo };
  }

  /**
   * Find the first entry whose payload carries a non-empty cwd string.
   * Used as a fallback when session_meta does not record a cwd.
   */
  private findCwd(entries: CodexEntry[]): string | undefined {
    for (const entry of entries) {
      const payload = entry.payload as { cwd?: unknown } | undefined;
      if (!payload || typeof payload.cwd !== "string") {
        continue;
      }
      const trimmed = payload.cwd.trim();
      if (trimmed) {
        return trimmed;
      }
    }
    return undefined;
  }

  /**
   * Parse all Codex sessions.
   *
   * Recursively scans the sessions directory for JSONL files and parses them.
   *
   * @param codexPath - Path to Codex home directory (default: ~/.codex)
   * @param sessionId - Optional specific session ID to parse
   * @param lastIndexedMs - Optional mtime cutoff; files unchanged since then are skipped
   * @returns ParseResult with all extracted entities
   * @throws Error when `${codexPath}/sessions` does not exist
   */
  parseSession(
    codexPath: string,
    sessionId?: string,
    lastIndexedMs?: number
  ): ParseResult {
    const sessionsDir = join(codexPath, "sessions");
    if (!existsSync(sessionsDir)) {
      throw new Error(`Codex sessions directory not found: ${sessionsDir}`);
    }
    const conversations: Conversation[] = [];
    const messages: Message[] = [];
    const tool_uses: ToolUse[] = [];
    const tool_results: ToolResult[] = [];
    const file_edits: FileEdit[] = [];
    const thinking_blocks: ThinkingBlock[] = [];
    const indexed_folders: string[] = [];
    let skippedCount = 0;
    // Recursively find all .jsonl files in the date-hierarchical structure
    const sessionFiles = this.findSessionFiles(sessionsDir);
    for (const sessionFile of sessionFiles) {
      try {
        // Skip unchanged files in incremental mode
        if (lastIndexedMs) {
          const stats = statSync(sessionFile);
          if (stats.mtimeMs < lastIndexedMs) {
            skippedCount++;
            continue;
          }
        }
        // Extract session ID from filename: rollout-{timestamp}-{uuid}.jsonl
        // basename() keeps this correct regardless of platform path separator
        // (the previous split("/") broke on Windows, where join() uses "\").
        const filename = basename(sessionFile);
        if (!filename) {
          continue;
        }
        // Match rollout-{timestamp}-{uuid}.jsonl where timestamp is like 2025-11-03T20-35-04
        // UUID format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
        const match = filename.match(/rollout-.+-([0-9a-f]+-[0-9a-f]+-[0-9a-f]+-[0-9a-f]+-[0-9a-f]+)\.jsonl$/i);
        let extractedSessionId: string;
        if (match) {
          extractedSessionId = match[1];
        } else {
          // Fallback: just strip "rollout-" prefix and ".jsonl" suffix
          const fallbackId = filename.replace(/^rollout-/, "").replace(/\.jsonl$/, "");
          if (!fallbackId) {
            continue;
          }
          extractedSessionId = fallbackId;
        }
        // Skip if filtering by session ID
        if (sessionId && extractedSessionId !== sessionId) {
          continue;
        }
        const result = this.parseSessionFile(sessionFile, codexPath);
        conversations.push(...result.conversations);
        messages.push(...result.messages);
        tool_uses.push(...result.tool_uses);
        tool_results.push(...result.tool_results);
        file_edits.push(...result.file_edits);
        thinking_blocks.push(...result.thinking_blocks);
        // Track indexed folder (platform-independent, unlike lastIndexOf("/"))
        const sessionDir = dirname(sessionFile);
        if (!indexed_folders.includes(sessionDir)) {
          indexed_folders.push(sessionDir);
        }
      } catch (error) {
        // One bad file must not abort the whole scan
        console.error(`Failed to parse Codex session file ${sessionFile}:`, error);
      }
    }
    if (skippedCount > 0) {
      console.error(`⏭ Skipped ${skippedCount} unchanged Codex session file(s)`);
    }
    return {
      conversations,
      messages,
      tool_uses,
      tool_results,
      file_edits,
      thinking_blocks,
      indexed_folders,
    };
  }

  /**
   * Recursively find all .jsonl session files under dir.
   *
   * Entries that cannot be stat'd (e.g. broken symlinks, permission errors)
   * are skipped instead of aborting the entire scan.
   */
  private findSessionFiles(dir: string): string[] {
    const files: string[] = [];
    if (!existsSync(dir)) {
      return files;
    }
    const entries = readdirSync(dir);
    for (const entry of entries) {
      const fullPath = join(dir, entry);
      let stat;
      try {
        stat = statSync(fullPath);
      } catch (_error) {
        // Unreadable entry — skip it rather than throwing out of the scan
        continue;
      }
      if (stat.isDirectory()) {
        files.push(...this.findSessionFiles(fullPath));
      } else if (entry.endsWith(".jsonl") && entry.startsWith("rollout-")) {
        files.push(fullPath);
      }
    }
    return files;
  }

  /**
   * Parse a single Codex session file.
   *
   * Returns empty results when the file is empty or lacks a session_meta
   * entry (without session metadata no conversation record can be built).
   */
  private parseSessionFile(filePath: string, codexPath: string): ParseResult {
    const conversations: Conversation[] = [];
    const messages: Message[] = [];
    const tool_uses: ToolUse[] = [];
    const tool_results: ToolResult[] = [];
    const file_edits: FileEdit[] = [];
    const thinking_blocks: ThinkingBlock[] = [];
    const content = readFileSync(filePath, "utf-8");
    const lines = content.trim().split("\n").filter((line) => line.trim());
    if (lines.length === 0) {
      return {
        conversations,
        messages,
        tool_uses,
        tool_results,
        file_edits,
        thinking_blocks,
      };
    }
    // Parse all entries, skipping malformed JSON lines
    const entries: CodexEntry[] = [];
    for (const line of lines) {
      try {
        const entry = JSON.parse(line) as CodexEntry;
        entries.push(entry);
      } catch (_error) {
        // Skip malformed lines
        continue;
      }
    }
    // Extract session metadata
    const sessionMetaEntry = entries.find((e) => e.type === "session_meta");
    if (!sessionMetaEntry) {
      return {
        conversations,
        messages,
        tool_uses,
        tool_results,
        file_edits,
        thinking_blocks,
      };
    }
    const sessionMeta = sessionMetaEntry.payload as unknown as CodexSessionMeta;
    const sessionId = sessionMeta.id;
    const sessionTimestamp = new Date(sessionMeta.timestamp).getTime();
    // Prefer the cwd in session_meta, otherwise the first cwd seen in any entry
    const inferredCwd = sessionMeta.cwd || this.findCwd(entries);
    const { rawCwd, projectPath, isGitRepo } = this.resolveProjectPath(inferredCwd, codexPath);
    // Create conversation record (timestamps widened as entries are processed)
    const conversation: Conversation = {
      id: sessionId,
      project_path: projectPath,
      source_type: "codex",
      first_message_at: sessionTimestamp,
      last_message_at: sessionTimestamp,
      message_count: 0,
      git_branch: sessionMeta.git?.branch,
      claude_version: sessionMeta.cli_version,
      metadata: {
        source: "codex",
        originator: sessionMeta.originator,
        model_provider: sessionMeta.model_provider,
        git_commit: sessionMeta.git?.commit_hash,
        git_repo: sessionMeta.git?.repository_url,
        cwd: rawCwd,
        canonical_project_path: projectPath,
        is_git_repo: isGitRepo,
      },
      created_at: sessionTimestamp,
      updated_at: sessionTimestamp,
    };
    // Process response_item entries (these contain user/assistant messages and tools)
    const responseItems = entries.filter((e) => e.type === "response_item");
    for (const entry of responseItems) {
      const timestamp = new Date(entry.timestamp).getTime();
      const payload = entry.payload;
      // Widen conversation time range to cover this entry
      if (timestamp < conversation.first_message_at) {
        conversation.first_message_at = timestamp;
      }
      if (timestamp > conversation.last_message_at) {
        conversation.last_message_at = timestamp;
      }
      // Extract message role and content
      const role = payload.role as string | undefined;
      const content = payload.content as unknown[] | string | undefined;
      if (!role || !content) {
        continue;
      }
      // Create message record (generate an id when the payload has none)
      const messageId = payload.id as string || `${sessionId}-${nanoid()}`;
      const parentId = payload.parent_message_id as string | undefined;
      const message: Message = {
        id: messageId,
        conversation_id: sessionId,
        parent_id: parentId,
        message_type: role === "user" ? "user" : "assistant",
        role,
        content: typeof content === "string" ? content : JSON.stringify(content),
        timestamp,
        is_sidechain: false,
        metadata: payload as Record<string, unknown>,
      };
      messages.push(message);
      conversation.message_count++;
      // Extract tool uses and results from content array
      if (Array.isArray(content)) {
        for (const item of content) {
          const contentItem = item as CodexContentItem;
          // Extract thinking blocks
          if (contentItem.type === "thinking" && contentItem.thinking) {
            const thinkingBlock: ThinkingBlock = {
              id: `${messageId}-thinking-${nanoid()}`,
              message_id: messageId,
              thinking_content: contentItem.thinking,
              signature: contentItem.signature,
              timestamp,
            };
            thinking_blocks.push(thinkingBlock);
          }
          // Extract tool uses
          if (contentItem.type === "tool_use" && contentItem.name && contentItem.id) {
            const toolUse: ToolUse = {
              id: contentItem.id,
              message_id: messageId,
              tool_name: contentItem.name,
              tool_input: (contentItem.input || {}) as Record<string, unknown>,
              timestamp,
            };
            tool_uses.push(toolUse);
          }
          // Extract tool results
          if (contentItem.type === "tool_result" && contentItem.tool_use_id) {
            const toolResult: ToolResult = {
              id: `${contentItem.tool_use_id}-result`,
              tool_use_id: contentItem.tool_use_id,
              message_id: messageId,
              content: typeof contentItem.content === "string" ? contentItem.content : JSON.stringify(contentItem.content),
              is_error: Boolean(contentItem.is_error),
              stdout: contentItem.stdout,
              stderr: contentItem.stderr,
              is_image: false,
              timestamp,
            };
            tool_results.push(toolResult);
          }
        }
      }
    }
    conversations.push(conversation);
    return {
      conversations,
      messages,
      tool_uses,
      tool_results,
      file_edits,
      thinking_blocks,
    };
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/ConversationParser.ts | TypeScript | /**
* Multi-pass JSONL Conversation Parser for Claude Code history.
*
* This parser reads conversation history from Claude Code's storage locations
* (~/.claude/projects) and extracts structured data including messages, tool uses,
* file edits, and thinking blocks.
*
* The parser handles two directory structures:
* - Modern: ~/.claude/projects/{sanitized-path}
* - Legacy: ~/.claude/projects/{original-project-name}
*
* It performs a multi-pass parsing approach:
* 1. First pass: Extract conversations and messages
* 2. Second pass: Link tool uses and results
* 3. Third pass: Extract file edits from snapshots
* 4. Fourth pass: Extract thinking blocks
*
* @example
* ```typescript
* const parser = new ConversationParser();
* const result = parser.parseProject('/path/to/project');
* console.error(`Parsed ${result.conversations.length} conversations`);
* console.error(`Found ${result.messages.length} messages`);
* console.error(`Extracted ${result.tool_uses.length} tool uses`);
* ```
*/
import { readFileSync, readdirSync, existsSync, statSync, createReadStream } from "fs";
import { createInterface } from "readline";
import { join } from "path";
import { nanoid } from "nanoid";
import { pathToProjectFolderName } from "../utils/sanitization.js";
// Helper types for parsing dynamic JSON data
/** Minimal shape of the nested `message` object on a conversation record. */
interface MessageData {
  role?: string;
  content?: unknown[] | string;
}
/** Minimal shape of a `toolUseResult` payload. */
interface ToolUseResultData {
  stdout?: string;
  stderr?: string;
  isImage?: boolean;
}
/**
 * Loosely-typed item inside a message content array.
 * A single shape covering text / thinking / tool_use / tool_result variants.
 */
interface ContentItem {
  type?: string;
  id?: string;
  name?: string;
  input?: Record<string, unknown>;
  tool_use_id?: string;
  content?: unknown;
  is_error?: boolean;
  text?: string;
  thinking?: string;
  signature?: string;
}
/** Minimal shape of a file-history snapshot payload. */
interface SnapshotData {
  trackedFileBackups?: Record<string, unknown>;
  timestamp?: string;
}
// Type definitions based on investigation of conversation data
/**
 * Raw record as it appears on one line of a Claude Code .jsonl file.
 * Most fields are optional because different record kinds (messages,
 * snapshots, summaries, system events) populate different subsets.
 */
export interface ConversationMessage {
  /** Record kind discriminator. */
  type: string;
  uuid?: string;
  parentUuid?: string | null;
  sessionId?: string;
  timestamp?: string;
  isSidechain?: boolean;
  agentId?: string;
  userType?: string;
  cwd?: string;
  version?: string;
  gitBranch?: string;
  message?: unknown;
  requestId?: string;
  // File history snapshot fields
  messageId?: string;
  snapshot?: unknown;
  // Summary fields
  summary?: string;
  leafUuid?: string;
  // System message fields
  subtype?: string;
  level?: string;
  content?: string | unknown[];
  error?: unknown;
  // Tool use result fields
  toolUseResult?: unknown;
  // Other fields — the on-disk schema is open-ended, so allow arbitrary extras
  [key: string]: unknown;
}
/** A parsed conversation (one session file) with aggregate metadata. */
export interface Conversation {
  /** Unique conversation/session id. */
  id: string;
  project_path: string;
  /** Which tool produced the conversation. */
  source_type?: 'claude-code' | 'codex';
  first_message_at: number;
  last_message_at: number;
  message_count: number;
  git_branch?: string;
  claude_version?: string;
  metadata: Record<string, unknown>;
  created_at: number;
  updated_at: number;
}
/** A single message within a conversation. */
export interface Message {
  id: string;
  conversation_id: string;
  /** Parent message id, when the source records threading. */
  parent_id?: string;
  message_type: string;
  role?: string;
  /** Message content; structured content may be stored JSON-serialized. */
  content?: string;
  timestamp: number;
  is_sidechain: boolean;
  agent_id?: string;
  request_id?: string;
  git_branch?: string;
  cwd?: string;
  metadata: Record<string, unknown>;
}
/** A tool invocation extracted from an assistant message. */
export interface ToolUse {
  id: string;
  message_id: string;
  tool_name: string;
  tool_input: Record<string, unknown>;
  timestamp: number;
}
/** The result of a tool invocation, linked back to its ToolUse. */
export interface ToolResult {
  id: string;
  /** Id of the ToolUse this result answers. */
  tool_use_id: string;
  message_id: string;
  content?: string;
  is_error: boolean;
  stdout?: string;
  stderr?: string;
  is_image: boolean;
  timestamp: number;
}
/** A file edit recorded via a file-history snapshot. */
export interface FileEdit {
  id: string;
  conversation_id: string;
  file_path: string;
  message_id: string;
  backup_version?: number;
  backup_time?: number;
  snapshot_timestamp: number;
  metadata: Record<string, unknown>;
}
/** A thinking block (Claude's internal reasoning) attached to a message. */
export interface ThinkingBlock {
  id: string;
  message_id: string;
  thinking_content: string;
  signature?: string;
  timestamp: number;
}
/**
 * Information about a parsing error
 */
export interface ParseError {
  /** File path where error occurred */
  file: string;
  /** Line number (1-based) */
  line: number;
  /** Error message */
  error: string;
}
/**
 * Result of parsing conversation history.
 *
 * Contains all extracted entities from conversation files.
 */
export interface ParseResult {
  /** Parsed conversations with metadata */
  conversations: Conversation[];
  /** All messages from conversations */
  messages: Message[];
  /** Tool invocations extracted from assistant messages */
  tool_uses: ToolUse[];
  /** Results from tool executions */
  tool_results: ToolResult[];
  /** File edit records from snapshots */
  file_edits: FileEdit[];
  /** Thinking blocks (Claude's internal reasoning) */
  thinking_blocks: ThinkingBlock[];
  /** Folders that were actually indexed */
  indexed_folders?: string[];
  /** Parsing errors encountered (bad JSON lines, etc.) */
  parse_errors?: ParseError[];
}
/**
* Parser for Claude Code conversation history.
*
* Extracts structured data from JSONL conversation files stored in
* ~/.claude/projects. Handles both modern and legacy naming conventions.
*/
export class ConversationParser {
  /**
   * Parse all conversations for a project.
   *
   * Searches for conversation files in Claude's storage directories and
   * parses them into structured entities. Supports filtering by session ID
   * and handles both modern and legacy directory naming conventions.
   *
   * @param projectPath - Absolute path to the project (used for folder lookup)
   * @param sessionId - Optional session ID to filter for a single conversation
   * @param projectIdentifier - Optional identifier to store as project_path
   * @param lastIndexedMs - Optional timestamp to skip unchanged files (mtime)
   * @returns ParseResult containing all extracted entities
   *
   * @example
   * ```typescript
   * const parser = new ConversationParser();
   *
   * // Parse all conversations
   * const allResults = parser.parseProject('/Users/me/my-project');
   *
   * // Parse specific session
   * const sessionResults = parser.parseProject('/Users/me/my-project', 'session-123');
   * ```
   */
  parseProject(
    projectPath: string,
    sessionId?: string,
    projectIdentifier?: string,
    lastIndexedMs?: number
  ): ParseResult {
    // NOTE: all progress logging uses console.error (stderr) — presumably so
    // stdout stays clean for protocol traffic; keep it that way.
    console.error(`Parsing conversations for project: ${projectPath}`);
    if (sessionId) {
      console.error(`Filtering for session: ${sessionId}`);
    }
    // Convert project path to Claude projects directory name
    const projectDirName = pathToProjectFolderName(projectPath);
    const homeDir = process.env.HOME || process.env.USERPROFILE;
    if (!homeDir) {
      throw new Error("HOME or USERPROFILE environment variable is not set");
    }
    const projectsBaseDir = join(homeDir, ".claude", "projects");
    // Generate path variants to handle Claude Code's potential encoding differences
    // Claude Code may encode hyphens as underscores or vice versa in path components
    const pathVariants = this.generatePathVariants(projectDirName);
    // Collect directories that exist
    const dirsToCheck: string[] = [];
    const checkedPaths: string[] = [];
    for (const variant of pathVariants) {
      const variantDir = join(projectsBaseDir, variant);
      checkedPaths.push(variantDir);
      if (existsSync(variantDir)) {
        // Check if this directory has any .jsonl files
        try {
          const files = readdirSync(variantDir).filter(f => f.endsWith(".jsonl"));
          if (files.length > 0 && !dirsToCheck.includes(variantDir)) {
            dirsToCheck.push(variantDir);
            console.error(`Found conversation directory: ${variant}`);
          }
        } catch (_e) {
          // Directory exists but can't be read, skip it
        }
      }
    }
    if (dirsToCheck.length === 0) {
      // Nothing found — report (a sample of) the probed paths to aid debugging
      console.error(`⚠️ No conversation directories found`);
      console.error(` Checked ${checkedPaths.length} path variants:`);
      for (const path of checkedPaths.slice(0, 5)) {
        console.error(` - ${path}`);
      }
      if (checkedPaths.length > 5) {
        console.error(` ... and ${checkedPaths.length - 5} more`);
      }
      return {
        conversations: [],
        messages: [],
        tool_uses: [],
        tool_results: [],
        file_edits: [],
        thinking_blocks: [],
        indexed_folders: [],
      };
    }
    console.error(`Looking in ${dirsToCheck.length} director(ies): ${dirsToCheck.join(", ")}`);
    // Collect all .jsonl files from all directories
    const fileMap = new Map<string, string>(); // filename -> full path
    for (const dir of dirsToCheck) {
      const dirFiles = readdirSync(dir).filter((f) => f.endsWith(".jsonl"));
      for (const file of dirFiles) {
        const fullPath = join(dir, file);
        // If file already exists in map, keep the one from the first directory (modern takes precedence)
        if (!fileMap.has(file)) {
          fileMap.set(file, fullPath);
        }
      }
    }
    let files = Array.from(fileMap.keys());
    // If session_id provided, filter to only that session file
    if (sessionId) {
      files = files.filter((f) => f === `${sessionId}.jsonl`);
      if (files.length === 0) {
        console.error(`⚠️ Session file not found: ${sessionId}.jsonl`);
        console.error(`Available sessions: ${Array.from(fileMap.keys()).join(", ")}`);
      }
    }
    console.error(`Found ${files.length} conversation file(s) to parse`);
    // Parse each file
    const result: ParseResult = {
      conversations: [],
      messages: [],
      tool_uses: [],
      tool_results: [],
      file_edits: [],
      thinking_blocks: [],
      indexed_folders: dirsToCheck,
    };
    const projectPathForRecords = projectIdentifier || projectPath;
    let skippedCount = 0;
    for (const file of files) {
      const filePath = fileMap.get(file);
      if (filePath) {
        // Incremental mode: skip files whose mtime predates the last index run
        if (lastIndexedMs) {
          try {
            const stats = statSync(filePath);
            if (stats.mtimeMs < lastIndexedMs) {
              skippedCount++;
              continue;
            }
          } catch (_e) {
            // If we can't stat the file, try to parse it anyway
          }
        }
        this.parseFile(filePath, result, projectPathForRecords);
      }
    }
    if (skippedCount > 0) {
      console.error(`⏭ Skipped ${skippedCount} unchanged file(s)`);
    }
    console.error(
      `Parsed ${result.conversations.length} conversations, ${result.messages.length} messages`
    );
    return result;
  }
/**
* Parse conversations across multiple project paths and merge results.
*
* @param projectPaths - Project paths to scan for conversation folders
* @param sessionId - Optional session ID to filter for a single conversation
* @param projectIdentifier - Optional identifier to store as project_path
*/
parseProjects(
projectPaths: string[],
sessionId?: string,
projectIdentifier?: string,
lastIndexedMs?: number
): ParseResult {
const combined: ParseResult = {
conversations: [],
messages: [],
tool_uses: [],
tool_results: [],
file_edits: [],
thinking_blocks: [],
indexed_folders: [],
parse_errors: [],
};
const seen = {
conversations: new Set<string>(),
messages: new Set<string>(),
toolUses: new Set<string>(),
toolResults: new Set<string>(),
fileEdits: new Set<string>(),
thinkingBlocks: new Set<string>(),
};
const indexedFolders = new Set<string>();
for (const path of projectPaths) {
const result = this.parseProject(path, sessionId, projectIdentifier, lastIndexedMs);
this.mergeParseResults(combined, result, seen, indexedFolders);
}
combined.indexed_folders = Array.from(indexedFolders);
if (combined.parse_errors && combined.parse_errors.length === 0) {
delete combined.parse_errors;
}
return combined;
}
private mergeParseResults(
target: ParseResult,
source: ParseResult,
seen: {
conversations: Set<string>;
messages: Set<string>;
toolUses: Set<string>;
toolResults: Set<string>;
fileEdits: Set<string>;
thinkingBlocks: Set<string>;
},
indexedFolders: Set<string>
): void {
for (const item of source.conversations) {
if (!seen.conversations.has(item.id)) {
seen.conversations.add(item.id);
target.conversations.push(item);
}
}
for (const item of source.messages) {
if (!seen.messages.has(item.id)) {
seen.messages.add(item.id);
target.messages.push(item);
}
}
for (const item of source.tool_uses) {
if (!seen.toolUses.has(item.id)) {
seen.toolUses.add(item.id);
target.tool_uses.push(item);
}
}
for (const item of source.tool_results) {
if (!seen.toolResults.has(item.id)) {
seen.toolResults.add(item.id);
target.tool_results.push(item);
}
}
for (const item of source.file_edits) {
if (!seen.fileEdits.has(item.id)) {
seen.fileEdits.add(item.id);
target.file_edits.push(item);
}
}
for (const item of source.thinking_blocks) {
if (!seen.thinkingBlocks.has(item.id)) {
seen.thinkingBlocks.add(item.id);
target.thinking_blocks.push(item);
}
}
if (source.indexed_folders) {
for (const folder of source.indexed_folders) {
indexedFolders.add(folder);
}
}
if (source.parse_errors && source.parse_errors.length > 0) {
if (!target.parse_errors) {
target.parse_errors = [];
}
target.parse_errors.push(...source.parse_errors);
}
}
/**
* Parse conversations directly from a Claude projects folder.
*
* This method is used when you already have the path to the conversation
* folder (e.g., ~/.claude/projects/-Users-me-my-project) rather than
* a project path that needs to be converted.
*
* @param folderPath - Absolute path to the Claude projects folder
* @param projectIdentifier - Optional identifier to use as project_path in records (defaults to folder path)
* @returns ParseResult containing all extracted entities
*
* @example
* ```typescript
* const parser = new ConversationParser();
* const result = parser.parseFromFolder('~/.claude/projects/-Users-me-my-project');
* ```
*/
parseFromFolder(
folderPath: string,
projectIdentifier?: string,
lastIndexedMs?: number
): ParseResult {
const result: ParseResult = {
conversations: [],
messages: [],
tool_uses: [],
tool_results: [],
file_edits: [],
thinking_blocks: [],
indexed_folders: [folderPath],
};
// Use folder path as project identifier if not provided
const projectPath = projectIdentifier || folderPath;
if (!existsSync(folderPath)) {
console.error(`⚠️ Folder does not exist: ${folderPath}`);
return result;
}
// Get all .jsonl files in the folder
const files = readdirSync(folderPath).filter((f) => f.endsWith(".jsonl"));
console.error(`Found ${files.length} conversation file(s) in ${folderPath}`);
// Parse each file, optionally skipping unchanged files in incremental mode
let skippedCount = 0;
for (const file of files) {
const filePath = join(folderPath, file);
// Skip unchanged files in incremental mode
if (lastIndexedMs) {
try {
const stats = statSync(filePath);
if (stats.mtimeMs < lastIndexedMs) {
skippedCount++;
continue;
}
} catch (_e) {
// If we can't stat the file, try to parse it anyway
}
}
this.parseFile(filePath, result, projectPath);
}
if (skippedCount > 0) {
console.error(`⏭ Skipped ${skippedCount} unchanged file(s)`);
}
console.error(
`Parsed ${result.conversations.length} conversations, ${result.messages.length} messages`
);
return result;
}
/**
* Parse conversations from a Claude projects folder using streaming.
*
* This async method uses line-by-line streaming to efficiently handle
* large JSONL files without loading the entire file into memory.
* Use this method for large conversation histories.
*
* @param folderPath - Absolute path to the Claude projects folder
* @param projectIdentifier - Optional identifier to use as project_path in records
* @param lastIndexedMs - Optional timestamp for incremental indexing (skip unchanged files)
* @returns Promise<ParseResult> containing all extracted entities
*
* @example
* ```typescript
* const parser = new ConversationParser();
* const result = await parser.parseFromFolderAsync('~/.claude/projects/-Users-me-my-project');
* ```
*/
async parseFromFolderAsync(
folderPath: string,
projectIdentifier?: string,
lastIndexedMs?: number
): Promise<ParseResult> {
const result: ParseResult = {
conversations: [],
messages: [],
tool_uses: [],
tool_results: [],
file_edits: [],
thinking_blocks: [],
indexed_folders: [folderPath],
};
// Use folder path as project identifier if not provided
const projectPath = projectIdentifier || folderPath;
if (!existsSync(folderPath)) {
console.error(`⚠️ Folder does not exist: ${folderPath}`);
return result;
}
// Get all .jsonl files in the folder
const files = readdirSync(folderPath).filter((f) => f.endsWith(".jsonl"));
console.error(`Found ${files.length} conversation file(s) in ${folderPath}`);
// Parse each file, optionally skipping unchanged files in incremental mode
let skippedCount = 0;
for (const file of files) {
const filePath = join(folderPath, file);
// Skip unchanged files in incremental mode
if (lastIndexedMs) {
try {
const stats = statSync(filePath);
if (stats.mtimeMs < lastIndexedMs) {
skippedCount++;
continue;
}
} catch (_e) {
// If we can't stat the file, try to parse it anyway
}
}
await this.parseFileAsync(filePath, result, projectPath);
}
if (skippedCount > 0) {
console.error(`⏭ Skipped ${skippedCount} unchanged file(s)`);
}
console.error(
`Parsed ${result.conversations.length} conversations, ${result.messages.length} messages`
);
return result;
}
/**
* Parse a single .jsonl file using streaming (async).
*
* Uses readline interface with createReadStream to read the file
* line by line, avoiding loading the entire file into memory.
*/
private async parseFileAsync(
filePath: string,
result: ParseResult,
projectPath: string
): Promise<void> {
const fileMessages: ConversationMessage[] = [];
// Create readline interface for streaming
const fileStream = createReadStream(filePath, { encoding: "utf-8" });
const rl = createInterface({
input: fileStream,
crlfDelay: Infinity, // Handle both \n and \r\n
});
let lineNumber = 0;
for await (const line of rl) {
lineNumber++;
const trimmedLine = line.trim();
if (!trimmedLine) {
continue;
}
try {
const msg = JSON.parse(trimmedLine);
fileMessages.push(msg);
} catch (error) {
const errorMsg = error instanceof Error ? error.message : String(error);
console.error(`Error parsing line ${lineNumber} in ${filePath}: ${errorMsg}`);
// Track the error
if (!result.parse_errors) {
result.parse_errors = [];
}
result.parse_errors.push({
file: filePath,
line: lineNumber,
error: errorMsg,
});
}
}
if (fileMessages.length === 0) {
return;
}
// Run multi-pass extraction (same as sync version)
this.extractConversation(fileMessages, result, projectPath);
this.extractMessages(fileMessages, result);
this.extractToolCalls(fileMessages, result);
this.extractFileEdits(fileMessages, result);
this.extractThinkingBlocks(fileMessages, result);
}
/**
* Parse a single .jsonl file
*/
private parseFile(
filePath: string,
result: ParseResult,
projectPath: string
): void {
const content = readFileSync(filePath, "utf-8");
const lines = content.split("\n").filter((l) => l.trim());
// Parse messages from this file
const fileMessages: ConversationMessage[] = [];
for (let i = 0; i < lines.length; i++) {
const line = lines[i];
try {
const msg = JSON.parse(line);
fileMessages.push(msg);
} catch (error) {
const errorMsg = error instanceof Error ? error.message : String(error);
console.error(`Error parsing line ${i + 1} in ${filePath}: ${errorMsg}`);
// Track the error
if (!result.parse_errors) {
result.parse_errors = [];
}
result.parse_errors.push({
file: filePath,
line: i + 1,
error: errorMsg,
});
}
}
if (fileMessages.length === 0) {return;}
// Pass 1: Extract conversation info
this.extractConversation(fileMessages, result, projectPath);
// Pass 2: Extract messages
this.extractMessages(fileMessages, result);
// Pass 3: Extract tool uses and results
this.extractToolCalls(fileMessages, result);
// Pass 4: Extract file edits
this.extractFileEdits(fileMessages, result);
// Pass 5: Extract thinking blocks
this.extractThinkingBlocks(fileMessages, result);
}
/**
* Pass 1: Extract conversation metadata
*/
private extractConversation(
messages: ConversationMessage[],
result: ParseResult,
projectPath: string
): void {
// Get sessionId from first message
const firstMsg = messages.find((m) => m.sessionId);
if (!firstMsg || !firstMsg.sessionId) {return;}
const sessionId = firstMsg.sessionId;
// Check if conversation already exists
if (result.conversations.some((c) => c.id === sessionId)) {
return;
}
// Find timestamps (filter out invalid/NaN timestamps)
const timestamps = messages
.filter((m): m is typeof m & { timestamp: string } => !!m.timestamp)
.map((m) => new Date(m.timestamp).getTime())
.filter((t) => !isNaN(t))
.sort((a, b) => a - b);
// Use fallback timestamp if no valid timestamps found
// This prevents FK constraint failures when messages reference this conversation
if (timestamps.length === 0) {
const fallbackTimestamp = Date.now();
timestamps.push(fallbackTimestamp);
console.error(`⚠️ No valid timestamps in conversation ${sessionId}, using current time as fallback`);
}
// Get most common git branch and version
const branches = messages
.filter((m): m is typeof m & { gitBranch: string } => !!m.gitBranch)
.map((m) => m.gitBranch);
const versions = messages
.filter((m): m is typeof m & { version: string } => !!m.version)
.map((m) => m.version);
// Detect MCP tool usage
const mcpUsage = this.detectMcpUsage(messages);
const conversation: Conversation = {
id: sessionId,
project_path: projectPath,
first_message_at: timestamps[0],
last_message_at: timestamps[timestamps.length - 1],
message_count: messages.filter((m) => m.type === "user" || m.type === "assistant").length,
git_branch: branches[branches.length - 1],
claude_version: versions[versions.length - 1],
metadata: {
total_messages: messages.length,
mcp_usage: mcpUsage,
},
created_at: timestamps[0],
updated_at: Date.now(),
};
result.conversations.push(conversation);
}
/**
* Detect MCP tool usage in conversation messages
*/
private detectMcpUsage(messages: ConversationMessage[]): {
detected: boolean;
servers: string[];
} {
const servers = new Set<string>();
for (const msg of messages) {
const messageData = msg.message as MessageData | undefined;
if (!messageData?.content || !Array.isArray(messageData.content)) {
continue;
}
for (const item of messageData.content) {
const contentItem = item as ContentItem;
if (contentItem.type === "tool_use" && contentItem.name?.startsWith("mcp__")) {
// Extract server name from tool name
// Format: mcp__server-name__tool-name
const parts = contentItem.name.split("__");
if (parts.length >= 2) {
servers.add(parts[1]);
}
}
}
}
return {
detected: servers.size > 0,
servers: Array.from(servers),
};
}
/**
* Pass 2: Extract individual messages
*/
private extractMessages(
messages: ConversationMessage[],
result: ParseResult
): void {
for (const msg of messages) {
if (!msg.uuid || !msg.sessionId) {continue;}
const message: Message = {
id: msg.uuid,
conversation_id: msg.sessionId,
parent_id: msg.parentUuid || undefined,
message_type: msg.type,
role: (msg.message as MessageData | undefined)?.role,
content: this.extractContent(msg),
timestamp: msg.timestamp ? new Date(msg.timestamp).getTime() : Date.now(),
is_sidechain: msg.isSidechain || false,
agent_id: msg.agentId,
request_id: msg.requestId,
git_branch: msg.gitBranch,
cwd: msg.cwd,
metadata: msg,
};
result.messages.push(message);
}
}
/**
* Pass 3: Extract tool uses and results
*/
private extractToolCalls(
messages: ConversationMessage[],
result: ParseResult
): void {
// Only store tool calls for messages that will be stored (FK to messages)
const storedMessageIds = new Set(result.messages.map((m) => m.id));
const toolUseIds = new Set<string>();
for (const msg of messages) {
const messageData = msg.message as MessageData | undefined;
if (
!messageData?.content ||
!Array.isArray(messageData.content) ||
!msg.uuid ||
!storedMessageIds.has(msg.uuid)
) {
continue;
}
const timestamp = msg.timestamp
? new Date(msg.timestamp).getTime()
: Date.now();
for (const item of messageData.content) {
const contentItem = item as ContentItem;
// Tool use
if (contentItem.type === "tool_use") {
if (!contentItem.id) {continue;}
const toolUse: ToolUse = {
id: contentItem.id,
message_id: msg.uuid,
tool_name: contentItem.name || "",
tool_input: contentItem.input || {},
timestamp,
};
toolUseIds.add(toolUse.id);
result.tool_uses.push(toolUse);
}
}
}
// Second pass: Tool results (require valid tool_use_id to avoid FK errors)
for (const msg of messages) {
const messageData = msg.message as MessageData | undefined;
if (
!messageData?.content ||
!Array.isArray(messageData.content) ||
!msg.uuid ||
!storedMessageIds.has(msg.uuid)
) {
continue;
}
const timestamp = msg.timestamp
? new Date(msg.timestamp).getTime()
: Date.now();
for (const item of messageData.content) {
const contentItem = item as ContentItem;
if (contentItem.type === "tool_result") {
const toolUseId = contentItem.tool_use_id;
if (!toolUseId || !toolUseIds.has(toolUseId)) {continue;}
const toolUseResult = msg.toolUseResult as ToolUseResultData | undefined;
const toolResult: ToolResult = {
id: nanoid(),
tool_use_id: toolUseId,
message_id: msg.uuid,
content: typeof contentItem.content === "string"
? contentItem.content
: JSON.stringify(contentItem.content),
is_error: contentItem.is_error || false,
stdout: toolUseResult?.stdout,
stderr: toolUseResult?.stderr,
is_image: toolUseResult?.isImage || false,
timestamp,
};
result.tool_results.push(toolResult);
}
}
}
}
/**
* Pass 4: Extract file edits from snapshots
*/
private extractFileEdits(
messages: ConversationMessage[],
result: ParseResult
): void {
// Build a Set of stored message IDs for quick lookup
const storedMessageIds = new Set(result.messages.map(m => m.id));
for (const msg of messages) {
if (msg.type !== "file-history-snapshot" || !msg.snapshot) {
continue;
}
// Get the message ID that would reference this snapshot
const messageId = msg.messageId || msg.uuid;
if (!messageId) {
continue; // No message ID to reference
}
// Skip if the message wasn't stored (e.g., lacks uuid or sessionId)
if (!storedMessageIds.has(messageId)) {
// This is expected for file-history-snapshot messages that don't have uuid/sessionId
continue;
}
const snapshot = msg.snapshot as SnapshotData;
const trackedFiles = snapshot.trackedFileBackups || {};
const conversationId = msg.sessionId;
if (!conversationId) {
continue; // Need conversation ID for foreign key
}
for (const [filePath, fileInfo] of Object.entries(trackedFiles)) {
const info = fileInfo as Record<string, unknown>;
const fileEdit: FileEdit = {
id: nanoid(),
conversation_id: conversationId,
file_path: filePath,
message_id: messageId,
backup_version: info.version as number | undefined,
backup_time: info.backupTime
? new Date(info.backupTime as string).getTime()
: undefined,
snapshot_timestamp: snapshot.timestamp
? new Date(snapshot.timestamp).getTime()
: Date.now(),
metadata: info,
};
result.file_edits.push(fileEdit);
}
}
}
/**
* Pass 5: Extract thinking blocks
*/
private extractThinkingBlocks(
messages: ConversationMessage[],
result: ParseResult
): void {
for (const msg of messages) {
const messageData = msg.message as MessageData | undefined;
if (!messageData?.content || !Array.isArray(messageData.content) || !msg.uuid) {
continue;
}
const timestamp = msg.timestamp
? new Date(msg.timestamp).getTime()
: Date.now();
for (const item of messageData.content) {
const contentItem = item as ContentItem;
if (contentItem.type === "thinking") {
const thinking: ThinkingBlock = {
id: nanoid(),
message_id: msg.uuid,
thinking_content: contentItem.thinking || "",
signature: contentItem.signature,
timestamp,
};
result.thinking_blocks.push(thinking);
}
}
}
}
/**
* Generate path variants to handle potential encoding differences.
*
* Claude Code may encode paths differently than expected:
* - Hyphens in path components might become underscores
* - Underscores might become hyphens
* - Dots might become hyphens (legacy)
*
* This method generates multiple variants to try when searching for directories.
*
* @example
* Input: "-Users-myid-GIT-projects-myProject"
* Output: [
* "-Users-myid-GIT-projects-myProject", // Original
* "-Users-myid-GIT_projects-myProject", // Hyphens in components -> underscores
* "-Users-myid-GIT-projects-myProject", // Dots -> hyphens (legacy)
* ]
*/
private generatePathVariants(projectDirName: string): string[] {
const variants = new Set<string>();
// 1. Original encoding (as computed by pathToProjectFolderName)
variants.add(projectDirName);
// 2. Legacy: dots replaced with hyphens
const legacyVariant = projectDirName.replace(/\./g, '-');
variants.add(legacyVariant);
// 3. Try swapping hyphens and underscores within path components
// Path format: "-Component1-Component2-Component3" or "Drive-Component1-Component2"
// We need to be careful not to change the leading hyphen or the separating hyphens
// Split into components by hyphen (the first element might be empty for Unix paths starting with -)
const parts = projectDirName.split('-');
// Try converting internal hyphens within multi-hyphen component names to underscores
// This handles cases like "GIT-projects" becoming "GIT_projects"
// Strategy: For each part that looks like it might have been originally hyphenated,
// create a variant with underscores
const hyphenToUnderscoreVariant = parts
.map((part) => {
// Skip empty parts and single chars (likely path separators)
if (part.length === 0) {
return part;
}
// Convert any underscores in parts to hyphens (in case source had underscores)
return part.replace(/_/g, '-');
})
.join('-');
const underscoreToHyphenVariant = parts
.map((part) => {
if (part.length === 0) {
return part;
}
// Convert any hyphens that might be internal to underscores
// This is tricky because hyphens are also used as separators
return part;
})
.join('-');
variants.add(hyphenToUnderscoreVariant);
variants.add(underscoreToHyphenVariant);
// 4. Try a variant where we replace all underscores with hyphens
const allUnderscoresToHyphens = projectDirName.replace(/_/g, '-');
variants.add(allUnderscoresToHyphens);
// 5. Try a variant where path components with hyphens have them as underscores
// e.g., "GIT-projects" -> "GIT_projects"
// We need to identify which consecutive hyphens are part of component names vs separators
// A simple heuristic: look for patterns like "X-Y" where X and Y are both alphanumeric
const internalHyphensToUnderscores = projectDirName.replace(
/([a-zA-Z0-9])[-]([a-zA-Z0-9])/g,
'$1_$2'
);
variants.add(internalHyphensToUnderscores);
// 6. Also try the reverse: convert underscores to hyphens
const internalUnderscoresToHyphens = projectDirName.replace(
/([a-zA-Z0-9])[_]([a-zA-Z0-9])/g,
'$1-$2'
);
variants.add(internalUnderscoresToHyphens);
// Apply the same transformations to the legacy variant
const legacyInternalHyphensToUnderscores = legacyVariant.replace(
/([a-zA-Z0-9])[-]([a-zA-Z0-9])/g,
'$1_$2'
);
variants.add(legacyInternalHyphensToUnderscores);
return Array.from(variants);
}
/**
* Extract text content from message
*/
private extractContent(msg: ConversationMessage): string | undefined {
// System messages
if (msg.type === "system" && typeof msg.content === "string") {
return msg.content;
}
// Summary messages
if (msg.type === "summary" && msg.summary) {
return msg.summary;
}
// User/Assistant messages
const messageData = msg.message as MessageData | undefined;
if (messageData?.content) {
if (typeof messageData.content === "string") {
return messageData.content;
}
if (Array.isArray(messageData.content)) {
// Extract text blocks
const textBlocks = messageData.content.filter(
(item: unknown) => (item as ContentItem).type === "text"
);
return textBlocks.map((item: unknown) => (item as ContentItem).text || "").join("\n");
}
}
return undefined;
}
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/DecisionExtractor.ts | TypeScript | /**
* Decision Extractor - Identifies and extracts decisions from conversations.
*
* This extractor analyzes conversation messages and thinking blocks to identify
* technical and architectural decisions made during development. It captures:
* - What decision was made
* - Why it was made (rationale)
* - What alternatives were considered
* - Why alternatives were rejected
* - Context (what the decision was about)
*
* Uses pattern matching to detect decision indicators like "we decided to",
* "using X instead of Y because", and user corrections.
*
* @example
* ```typescript
* const extractor = new DecisionExtractor();
* const decisions = extractor.extractDecisions(messages, thinkingBlocks);
* console.log(`Found ${decisions.length} decisions`);
* decisions.forEach(d => {
* console.log(`Decision: ${d.decision_text}`);
* console.log(`Rationale: ${d.rationale}`);
* });
* ```
*/
import { nanoid } from "nanoid";
import type { Message, ThinkingBlock } from "./ConversationParser.js";
/**
 * Represents a technical or architectural decision made during development.
 *
 * Produced by DecisionExtractor; each record references the conversation
 * and message the decision was found in.
 */
export interface Decision {
  /** Unique decision identifier (generated with nanoid) */
  id: string;
  /** Conversation where this decision was made */
  conversation_id: string;
  /** Message containing the decision */
  message_id: string;
  /** The decision that was made */
  decision_text: string;
  /** Why this decision was made (absent when no rationale was detected) */
  rationale?: string;
  /** Alternative approaches that were considered */
  alternatives_considered: string[];
  /** Map of rejected alternative -> reason it was rejected */
  rejected_reasons: Record<string, string>;
  /** Context/domain of the decision (e.g., 'database', 'authentication') */
  context?: string;
  /** Files affected by this decision */
  related_files: string[];
  /** Git commits implementing this decision (filled in later by GitIntegrator) */
  related_commits: string[];
  /** When the decision was made (epoch milliseconds, from the message timestamp) */
  timestamp: number;
}
/**
 * Extracts technical and architectural decisions from conversation history.
 *
 * Analyzes messages and thinking blocks using pattern matching to identify
 * decisions, rationale, alternatives, and context.
 */
export class DecisionExtractor {
  // Minimum quality score required to store a decision
  private readonly MIN_QUALITY_SCORE = 2;
  // Patterns that indicate noise/garbage to filter out
  private readonly NOISE_PATTERNS = [
    /this session is being continued/i,
    /conversation is summarized below/i,
    /previous conversation that ran out of context/i,
    /here is the summary/i,
    /summary of the conversation/i,
    /context from previous session/i,
    /let me summarize/i,
    /to summarize what we've done/i,
    /^I'll help you/i,
    /^Let me help you/i,
    /^Sure,? I can/i,
    /^I understand/i,
    /^Great question/i,
  ];
  // Decision pattern indicators - more focused on technical decisions
  private readonly DECISION_PATTERNS = [
    // Technical decision with comparison and rationale
    /(?:using|use|implement|adopt)\s+(.+?)\s+(?:instead of|over|rather than)\s+(.+?)\s+(?:because|since|as|for)\s+(.+?)(?:\.|$)/gi,
    // Explicit architectural decision
    /(?:architectural|technical|design)\s+decision:\s*(.+?)(?:\.|$)/gi,
    // We decided with clear rationale
    /(?:we|i)\s+(?:decided|chose)\s+(?:to\s+)?(.+?)\s+(?:because|since|due to|for)\s+(.+?)(?:\.|$)/gi,
    // Rejected alternative with reason
    /(?:rejected|dismissed|avoided|ruled out)\s+(.+?)\s+(?:because|due to|since|as)\s+(.+?)(?:\.|$)/gi,
  ];
  // Correction patterns - stricter, require technical context
  private readonly CORRECTION_PATTERNS = [
    // "No, use X instead of Y"
    /^no[,\s]+(?:use|implement|go with)\s+(.+?)\s+(?:instead|rather)/i,
    // "Actually, we should use X because Y"
    /^actually[,\s]+(?:we should|you should|use|implement)\s+(.+?)\s+(?:because|since)/i,
    // "That's wrong, the correct approach is X"
    /that'?s?\s+(?:wrong|incorrect)[,\s]+(?:the correct|use|we should)\s+(.+)/i,
    // "Don't use X, use Y instead"
    /don't\s+(?:use|implement)\s+(.+?)[,\s]+(?:use|implement)\s+(.+)/i,
  ];
  // Context keywords to identify what the decision is about
  private readonly CONTEXT_KEYWORDS = [
    "authentication",
    "auth",
    "database",
    "api",
    "frontend",
    "backend",
    "testing",
    "deployment",
    "security",
    "performance",
    "architecture",
    "design pattern",
    "library",
    "framework",
    "optimization",
  ];
  /**
   * Extract decisions from messages and thinking blocks.
   *
   * Analyzes conversation messages to identify decisions using pattern matching.
   * Looks for explicit decision statements, user corrections, and thinking blocks
   * that contain decision-making processes.
   *
   * @param messages - Array of conversation messages to analyze
   * @param thinkingBlocks - Array of thinking blocks (Claude's internal reasoning)
   * @returns Array of extracted Decision objects
   *
   * @example
   * ```typescript
   * const extractor = new DecisionExtractor();
   * const decisions = extractor.extractDecisions(messages, thinkingBlocks);
   *
   * // Find decisions about databases
   * const dbDecisions = decisions.filter(d => d.context?.includes('database'));
   * ```
   */
  extractDecisions(
    messages: Message[],
    thinkingBlocks: ThinkingBlock[]
  ): Decision[] {
    const decisions: Decision[] = [];
    // Extract from assistant messages with thinking blocks
    for (const message of messages) {
      if (message.role === "assistant" && message.content) {
        // Skip messages that are noise (session summaries, etc.)
        if (this.isNoiseContent(message.content)) {
          continue;
        }
        const thinking = thinkingBlocks.find((t) => t.message_id === message.id);
        // Check for explicit decisions in message content
        const explicitDecisions = this.extractExplicitDecisions(message, thinking);
        decisions.push(...explicitDecisions);
      }
      // Extract from user corrections
      if (message.role === "user" && message.content) {
        // Skip noise content
        if (this.isNoiseContent(message.content)) {
          continue;
        }
        const corrections = this.extractCorrections(message);
        decisions.push(...corrections);
      }
    }
    // Deduplicate similar decisions
    const deduplicated = this.deduplicateDecisions(decisions);
    // Filter by quality score
    return deduplicated.filter(
      (d) => this.scoreDecisionImportance(d) >= this.MIN_QUALITY_SCORE
    );
  }
  /**
   * Check if content is noise that should be filtered out.
   * Only the first 500 characters are checked, since the noise markers
   * (session summaries, greetings) appear at the start of a message.
   */
  private isNoiseContent(content: string): boolean {
    const firstChunk = content.substring(0, 500);
    return this.NOISE_PATTERNS.some((pattern) => pattern.test(firstChunk));
  }
  /**
   * Extract explicit decisions from assistant messages
   */
  private extractExplicitDecisions(
    message: Message,
    thinkingBlock?: ThinkingBlock
  ): Decision[] {
    const decisions: Decision[] = [];
    const content = message.content || "";
    const thinkingContent = thinkingBlock?.thinking_content || "";
    const combinedContent = `${content}\n${thinkingContent}`;
    // Look for decision patterns
    for (const pattern of this.DECISION_PATTERNS) {
      const matches = Array.from(combinedContent.matchAll(pattern));
      for (const match of matches) {
        const decision = this.parseDecisionMatch(match, message, thinkingBlock);
        if (decision) {
          decisions.push(decision);
        }
      }
    }
    // Extract from structured decision statements
    const structuredDecisions = this.extractStructuredDecisions(
      combinedContent,
      message,
      thinkingBlock
    );
    decisions.push(...structuredDecisions);
    return decisions;
  }
  /**
   * Parse a regex match into a Decision object
   */
  private parseDecisionMatch(
    match: RegExpMatchArray,
    message: Message,
    _thinking?: ThinkingBlock
  ): Decision | null {
    if (!match[0]) {return null;}
    const fullText = match[0];
    const decisionText = this.extractDecisionText(fullText);
    const rationale = this.extractRationale(fullText);
    // Extract context (what this decision is about)
    const context = this.identifyContext(fullText);
    // Extract related files from message metadata
    const relatedFiles = this.extractRelatedFiles(message);
    return {
      id: nanoid(),
      conversation_id: message.conversation_id,
      message_id: message.id,
      decision_text: decisionText,
      rationale,
      alternatives_considered: this.extractAlternatives(fullText),
      rejected_reasons: this.extractRejectedReasons(fullText),
      context,
      related_files: relatedFiles,
      related_commits: [], // Will be filled by GitIntegrator
      timestamp: message.timestamp,
    };
  }
  /**
   * Extract structured decisions (e.g., "Decision: ..." format)
   */
  private extractStructuredDecisions(
    content: string,
    message: Message,
    _thinking?: ThinkingBlock
  ): Decision[] {
    const decisions: Decision[] = [];
    // Look for structured decision blocks
    const decisionBlockPattern =
      /(?:Decision|Chose|Selected|Using):\s*([^\n]+)(?:\s*Rationale:\s*([^\n]+))?(?:\s*Alternatives:\s*([^\n]+))?/gi;
    const matches = Array.from(content.matchAll(decisionBlockPattern));
    for (const match of matches) {
      const decisionText = match[1]?.trim();
      if (!decisionText) {continue;}
      const rationale = match[2]?.trim();
      const alternativesText = match[3]?.trim();
      const alternatives = alternativesText
        ? alternativesText.split(/,|;/).map((a) => a.trim())
        : [];
      decisions.push({
        id: nanoid(),
        conversation_id: message.conversation_id,
        message_id: message.id,
        decision_text: decisionText,
        rationale,
        alternatives_considered: alternatives,
        rejected_reasons: {},
        context: this.identifyContext(content),
        related_files: this.extractRelatedFiles(message),
        related_commits: [],
        timestamp: message.timestamp,
      });
    }
    return decisions;
  }
  /**
   * Extract decisions from user corrections
   */
  private extractCorrections(message: Message): Decision[] {
    const content = message.content || "";
    const decisions: Decision[] = [];
    // Check each correction pattern and extract structured data
    for (const pattern of this.CORRECTION_PATTERNS) {
      const match = content.match(pattern);
      if (match) {
        // Extract the decision from the capture groups
        const decisionText = match[1]?.trim() || match[0];
        const alternative = match[2]?.trim();
        // Must have technical context to be a valid correction
        const context = this.identifyContext(content);
        if (!context && !this.hasTechnicalKeywords(content)) {
          continue;
        }
        decisions.push({
          id: nanoid(),
          conversation_id: message.conversation_id,
          message_id: message.id,
          decision_text: alternative
            ? `Use ${alternative} instead of ${decisionText}`
            : decisionText,
          rationale: "User correction - previous approach was incorrect",
          alternatives_considered: alternative ? [decisionText] : [],
          rejected_reasons: alternative
            ? { [decisionText]: "user rejected" }
            : { "previous approach": "user rejected" },
          context,
          related_files: this.extractRelatedFiles(message),
          related_commits: [],
          timestamp: message.timestamp,
        });
        break; // Only extract one correction per message
      }
    }
    return decisions;
  }
  /**
   * Check if content contains technical keywords suggesting a real decision
   */
  private hasTechnicalKeywords(content: string): boolean {
    const technicalKeywords = [
      "function",
      "class",
      "method",
      "variable",
      "import",
      "export",
      "component",
      "module",
      "package",
      "library",
      "framework",
      "database",
      "query",
      "api",
      "endpoint",
      "route",
      "config",
      "setting",
      "type",
      "interface",
      "schema",
    ];
    const lowerContent = content.toLowerCase();
    return technicalKeywords.some((kw) => lowerContent.includes(kw));
  }
  /**
   * Extract decision text from matched pattern
   */
  private extractDecisionText(text: string): string {
    // Remove common prefixes
    let cleaned = text.replace(
      /^(?:we|i|let's)\s+(?:decided|choose|chose|went with|picked|selected)\s+(?:to\s+)?/i,
      ""
    );
    // Remove trailing explanation
    cleaned = cleaned.replace(/\s+(?:because|since|as|for|due to).+$/i, "");
    return cleaned.trim();
  }
  /**
   * Extract rationale from decision text
   */
  private extractRationale(text: string): string | undefined {
    const rationaleMatch = text.match(/(?:because|since|as|for|due to)\s+(.+?)(?:\.|$)/i);
    return rationaleMatch?.[1]?.trim();
  }
  /**
   * Extract alternative approaches that were considered
   */
  private extractAlternatives(text: string): string[] {
    const alternatives: string[] = [];
    // Look for "instead of X" patterns
    const insteadOfMatch = text.match(/(?:instead of|over|rather than)\s+(.+?)(?:\s+because|$)/i);
    if (insteadOfMatch) {
      alternatives.push(insteadOfMatch[1].trim());
    }
    // Look for "considered X, Y, and Z"
    const consideredMatch = text.match(/considered\s+(.+?)(?:\s+but|$)/i);
    if (consideredMatch) {
      // Split on commas or the standalone word "and". The word boundaries
      // are required: the previous /,|and/ split on "and" anywhere, which
      // mangled words like "sandbox" or "standard".
      const items = consideredMatch[1].split(/,|\band\b/).map((s) => s.trim());
      alternatives.push(...items);
    }
    return alternatives;
  }
  /**
   * Extract reasons for rejecting alternatives
   */
  private extractRejectedReasons(text: string): Record<string, string> {
    const reasons: Record<string, string> = {};
    // Look for "rejected X because Y" patterns
    const rejectedPattern =
      /(?:rejected|dismissed|avoided|didn't use)\s+(.+?)\s+(?:because|due to|since)\s+(.+?)(?:\.|$)/gi;
    const matches = Array.from(text.matchAll(rejectedPattern));
    for (const match of matches) {
      const alternative = match[1]?.trim();
      const reason = match[2]?.trim();
      if (alternative && reason) {
        reasons[alternative] = reason;
      }
    }
    return reasons;
  }
  /**
   * Identify what context/area this decision relates to
   */
  private identifyContext(text: string): string | undefined {
    const lowerText = text.toLowerCase();
    for (const keyword of this.CONTEXT_KEYWORDS) {
      if (lowerText.includes(keyword)) {
        return keyword;
      }
    }
    return undefined;
  }
  /**
   * Extract related files from message metadata
   */
  private extractRelatedFiles(message: Message): string[] {
    const files: string[] = [];
    // Check message metadata for file references
    if (message.metadata) {
      // Look for file paths in various metadata fields
      const metadataStr = JSON.stringify(message.metadata);
      const filePathPattern = /(?:\/[\w-]+)+\.[\w]+/g;
      const matches = metadataStr.match(filePathPattern);
      if (matches) {
        files.push(...matches);
      }
    }
    return [...new Set(files)]; // Deduplicate
  }
  /**
   * Deduplicate similar decisions
   */
  private deduplicateDecisions(decisions: Decision[]): Decision[] {
    const unique: Decision[] = [];
    const seen = new Set<string>();
    for (const decision of decisions) {
      // Create a signature including message_id to avoid collisions
      // between different decisions with similar text in the same conversation
      const textPrefix = decision.decision_text.toLowerCase().substring(0, 100);
      const signature = `${decision.message_id}_${textPrefix}_${decision.timestamp}`;
      if (!seen.has(signature)) {
        seen.add(signature);
        unique.push(decision);
      }
    }
    return unique;
  }
  /**
   * Score a decision's importance (for prioritization)
   */
  scoreDecisionImportance(decision: Decision): number {
    let score = 0;
    // Has rationale
    if (decision.rationale) {score += 2;}
    // Has alternatives considered
    if (decision.alternatives_considered.length > 0) {score += 3;}
    // Has rejected reasons
    if (Object.keys(decision.rejected_reasons).length > 0) {score += 3;}
    // Has related files
    if (decision.related_files.length > 0) {score += 2;}
    // Has context
    if (decision.context) {score += 1;}
    // Is a correction (high importance)
    if (decision.rationale?.includes("User correction")) {score += 5;}
    return score;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/ExtractionValidator.ts | TypeScript | /**
* Extraction Validator
* Validates extracted decisions and mistakes to reduce false positives
*/
/**
 * Result of validating a single extracted decision or mistake.
 */
export interface ValidationResult {
  /** Whether the extraction is valid */
  isValid: boolean;
  /** Confidence score (0-1); starts at 1.0 and is scaled down per failed check */
  confidence: number;
  /** Reasons for the validation result (one entry per failed check) */
  reasons: string[];
  /** Suggestions for improvement (present only when the extraction is invalid) */
  suggestions?: string[];
}
/**
 * Configuration for validation.
 *
 * Separate defaults exist for decisions and mistakes; see
 * DEFAULT_DECISION_VALIDATION_CONFIG and DEFAULT_MISTAKE_VALIDATION_CONFIG.
 */
export interface ValidationConfig {
  /** Minimum text length (characters) */
  minLength: number;
  /** Maximum text length (characters) */
  maxLength: number;
  /** Required actionable keywords for decisions */
  actionableKeywords: string[];
  /** Session summary artifact patterns to exclude */
  summaryPatterns: RegExp[];
  /** Noise patterns (greetings, bare list markers, ...) to exclude */
  noisePatterns: RegExp[];
  /** Minimum confidence threshold for an extraction to count as valid */
  minConfidence: number;
}
/**
 * Default validation config for extracted decisions.
 *
 * Requires at least one action verb, rejects session-summary artifacts,
 * greetings, and bare list markers, and enforces a 20-500 character range.
 */
export const DEFAULT_DECISION_VALIDATION_CONFIG: ValidationConfig = {
  minLength: 20,
  maxLength: 500,
  // Action verbs a genuine decision statement is expected to contain
  actionableKeywords: [
    "use",
    "implement",
    "choose",
    "adopt",
    "prefer",
    "select",
    "create",
    "build",
    "switch",
    "migrate",
    "upgrade",
    "configure",
    "enable",
    "disable",
    "add",
    "remove",
    "install",
    "setup",
    "design",
    "architect",
    "structure",
    "organize",
    "refactor",
    "optimize",
  ],
  // Openings typical of session/conversation summaries, not real decisions
  summaryPatterns: [
    /^session summary/i,
    /^conversation summary/i,
    /^in this session/i,
    /^today we/i,
    /^we discussed/i,
    /^the following/i,
    /^here's what/i,
    /^recap:/i,
    /^summary:/i,
  ],
  // Greetings, acknowledgements, and bare list markers (e.g. "1.", "a)", "-")
  noisePatterns: [
    /^(yes|no|ok|okay|sure|thanks|thank you|got it)/i,
    /^(hi|hello|hey|good morning|good evening)/i,
    /^(bye|goodbye|see you|later)/i,
    /^\d+\.?\s*$/,
    /^[a-z]\)?\s*$/i,
    /^•\s*$/,
    /^-\s*$/,
  ],
  minConfidence: 0.5,
};
/**
 * Default validation config for extracted mistakes.
 *
 * Looser than the decision config: shorter minimum length, longer maximum,
 * fewer exclusion patterns, and a lower confidence threshold.
 */
export const DEFAULT_MISTAKE_VALIDATION_CONFIG: ValidationConfig = {
  minLength: 15,
  maxLength: 600,
  // Failure-related vocabulary a genuine mistake report should contain
  actionableKeywords: [
    "error",
    "bug",
    "issue",
    "problem",
    "wrong",
    "incorrect",
    "failed",
    "failure",
    "crash",
    "exception",
    "fix",
    "fixed",
    "broke",
    "broken",
    "mistake",
    "misunderstanding",
    "typo",
    "missing",
    "forgot",
    "overlooked",
  ],
  summaryPatterns: [
    /^session summary/i,
    /^here's what happened/i,
    /^in this session/i,
  ],
  noisePatterns: [
    /^(yes|no|ok|okay|sure|thanks)/i,
    /^\d+\.?\s*$/,
  ],
  minConfidence: 0.4,
};
/**
 * Extraction Validator class
 *
 * Scores extracted decision/mistake texts against a ValidationConfig.
 * Confidence starts at 1.0 and is multiplied by a penalty factor for
 * each failed check; a result is valid when the final confidence meets
 * the configured threshold AND the text is neither noise nor a
 * session-summary artifact.
 */

/** Internal accumulator shared by the validation entry points. */
interface CheckState {
  reasons: string[];
  confidence: number;
  isNoise: boolean;
  isSummary: boolean;
}

export class ExtractionValidator {
  private config: ValidationConfig;

  constructor(config: ValidationConfig) {
    this.config = config;
  }

  /**
   * Validate an extracted decision.
   *
   * Applies the shared checks (length bounds, keyword presence,
   * summary/noise patterns, optional source verification) plus a
   * sentence-structure check that only applies to decisions.
   *
   * @param decisionText - The extracted decision text
   * @param originalContent - Optional source text the extraction came from
   * @returns Verdict with confidence score, reasons, and (when invalid) suggestions
   */
  validateDecision(
    decisionText: string,
    originalContent?: string
  ): ValidationResult {
    const checks = this.runCommonChecks(
      decisionText,
      originalContent,
      "Missing actionable keywords"
    );
    // Decisions must read like sentences, not bare keyword lists
    if (!this.hasProperStructure(decisionText)) {
      checks.reasons.push("Lacks proper sentence structure");
      checks.confidence *= 0.7;
    }
    return this.buildResult(checks);
  }

  /**
   * Validate an extracted mistake.
   *
   * Same shared checks as decisions, but without the sentence-structure
   * requirement (error snippets are often fragments).
   *
   * @param mistakeText - The extracted mistake text
   * @param originalContent - Optional source text the extraction came from
   * @returns Verdict with confidence score, reasons, and (when invalid) suggestions
   */
  validateMistake(
    mistakeText: string,
    originalContent?: string
  ): ValidationResult {
    const checks = this.runCommonChecks(
      mistakeText,
      originalContent,
      "Missing error/mistake keywords"
    );
    return this.buildResult(checks);
  }

  /**
   * Run the checks shared by decision and mistake validation.
   *
   * Each failed check appends a reason and multiplies confidence by a
   * penalty factor; noise (x0.1) and summary (x0.2) matches are the
   * harshest because they also force invalidity in buildResult.
   *
   * @param text - Text under validation
   * @param originalContent - Optional source to verify the text against
   * @param missingKeywordReason - Reason string used when no configured
   *   keyword is found (differs between decisions and mistakes)
   */
  private runCommonChecks(
    text: string,
    originalContent: string | undefined,
    missingKeywordReason: string
  ): CheckState {
    const reasons: string[] = [];
    let confidence = 1.0;
    // Length bounds
    if (text.length < this.config.minLength) {
      reasons.push(`Too short (${text.length} < ${this.config.minLength} chars)`);
      confidence *= 0.3;
    }
    if (text.length > this.config.maxLength) {
      reasons.push(`Too long (${text.length} > ${this.config.maxLength} chars)`);
      confidence *= 0.7;
    }
    // Configured keywords (action verbs for decisions, error terms for mistakes)
    if (!this.hasActionableKeywords(text)) {
      reasons.push(missingKeywordReason);
      confidence *= 0.5;
    }
    // Session-summary artifacts are disqualifying
    const isSummary = this.isSummaryArtifact(text);
    if (isSummary) {
      reasons.push("Appears to be a session summary artifact");
      confidence *= 0.2;
    }
    // Noise (greetings, bare list markers) is disqualifying
    const isNoise = this.isNoisePattern(text);
    if (isNoise) {
      reasons.push("Matches noise pattern");
      confidence *= 0.1;
    }
    // Soft check that the extraction actually appears in the source
    if (originalContent && !this.verifyInSource(text, originalContent)) {
      reasons.push("Content not found in source");
      confidence *= 0.6;
    }
    return { reasons, confidence, isNoise, isSummary };
  }

  /**
   * Assemble the final ValidationResult from accumulated check state.
   * Noise/summary matches force invalidity regardless of confidence.
   */
  private buildResult(checks: CheckState): ValidationResult {
    const isValid =
      checks.confidence >= this.config.minConfidence &&
      !checks.isNoise &&
      !checks.isSummary;
    return {
      isValid,
      confidence: checks.confidence,
      reasons:
        checks.reasons.length > 0
          ? checks.reasons
          : ["Passed all validation checks"],
      suggestions: isValid ? undefined : this.generateSuggestions(checks.reasons),
    };
  }

  /**
   * Check if text contains any configured keyword
   * (case-insensitive substring match).
   */
  private hasActionableKeywords(text: string): boolean {
    const lowerText = text.toLowerCase();
    return this.config.actionableKeywords.some((keyword) =>
      lowerText.includes(keyword)
    );
  }

  /**
   * Check if text is a session summary artifact.
   */
  private isSummaryArtifact(text: string): boolean {
    return this.config.summaryPatterns.some((pattern) => pattern.test(text));
  }

  /**
   * Check if text matches noise patterns (tested against trimmed text).
   */
  private isNoisePattern(text: string): boolean {
    const trimmed = text.trim();
    return this.config.noisePatterns.some((pattern) => pattern.test(trimmed));
  }

  /**
   * Verify that extracted content appears in source.
   *
   * Verified when at least half of the extraction's words occur in the
   * source. Only words longer than 3 characters can count as found, so
   * extractions made mostly of short words may fail this check even when
   * present in the source — an intentional bias toward substantive terms.
   */
  private verifyInSource(extracted: string, source: string): boolean {
    // Normalize both for comparison
    const normalizedExtracted = extracted.toLowerCase().replace(/\s+/g, " ");
    const normalizedSource = source.toLowerCase().replace(/\s+/g, " ");
    // Check for substantial overlap (at least 50% of extracted text)
    const words = normalizedExtracted.split(" ");
    const minWords = Math.ceil(words.length * 0.5);
    let foundWords = 0;
    for (const word of words) {
      if (word.length > 3 && normalizedSource.includes(word)) {
        foundWords++;
      }
    }
    return foundWords >= minWords;
  }

  /**
   * Check if text has proper sentence structure: at least one common
   * verb/auxiliary and three or more words.
   */
  private hasProperStructure(text: string): boolean {
    // Should have at least one verb-like word
    const verbPatterns = /\b(is|are|was|were|be|been|use|implement|choose|create|make|do|does|did|has|have|had|will|would|could|should|can|may|might)\b/i;
    // Should have reasonable word count
    const words = text.split(/\s+/).filter((w) => w.length > 0);
    return verbPatterns.test(text) && words.length >= 3;
  }

  /**
   * Generate improvement suggestions keyed off failure-reason substrings.
   */
  private generateSuggestions(reasons: string[]): string[] {
    const suggestions: string[] = [];
    for (const reason of reasons) {
      if (reason.includes("Too short")) {
        suggestions.push("Provide more context about the decision/mistake");
      }
      if (reason.includes("Too long")) {
        suggestions.push("Consider breaking into multiple, focused extractions");
      }
      if (reason.includes("Missing actionable keywords")) {
        suggestions.push("Include specific technical terms or action verbs");
      }
      if (reason.includes("summary artifact")) {
        suggestions.push("Extract specific decisions, not summaries");
      }
      if (reason.includes("noise pattern")) {
        suggestions.push("Focus on substantive technical content");
      }
    }
    return suggestions;
  }
}
/**
 * Create a validator preconfigured for decision extraction.
 *
 * Any fields supplied in `config` are shallow-merged over
 * DEFAULT_DECISION_VALIDATION_CONFIG.
 *
 * @param config - Optional partial configuration overrides
 * @returns An ExtractionValidator using the merged configuration
 */
export function getDecisionValidator(
  config?: Partial<ValidationConfig>
): ExtractionValidator {
  const merged: ValidationConfig = {
    ...DEFAULT_DECISION_VALIDATION_CONFIG,
    ...config,
  };
  return new ExtractionValidator(merged);
}
/**
 * Create a validator preconfigured for mistake extraction.
 *
 * Any fields supplied in `config` are shallow-merged over
 * DEFAULT_MISTAKE_VALIDATION_CONFIG.
 *
 * @param config - Optional partial configuration overrides
 * @returns An ExtractionValidator using the merged configuration
 */
export function getMistakeValidator(
  config?: Partial<ValidationConfig>
): ExtractionValidator {
  const merged: ValidationConfig = {
    ...DEFAULT_MISTAKE_VALIDATION_CONFIG,
    ...config,
  };
  return new ExtractionValidator(merged);
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/GitIntegrator.ts | TypeScript | /**
* Git Integrator - Links git commits to conversations based on temporal and contextual analysis.
*
* This integrator connects git repository history with conversation history by:
* - Parsing git log to extract commits
* - Matching commits to conversations using multiple signals:
* - Temporal proximity (commit time vs conversation time)
* - File overlap (files changed in commit vs files edited in conversation)
* - Branch matching (git branch in commit vs conversation metadata)
* - Decision matching (commit message mentions decisions from conversation)
*
* Provides confidence scores (0-1) for each linkage based on:
* - Exact timestamp match (highest confidence)
* - File overlap percentage
* - Branch name match
* - Decision keyword presence
*
* Helps answer "WHY was this code changed?" by linking code changes to their
* discussion context.
*
* @example
* ```typescript
* const integrator = new GitIntegrator('/path/to/project');
* const linkedCommits = await integrator.linkCommitsToConversations(
* conversations,
* fileEdits,
* decisions
* );
* console.error(`Linked ${linkedCommits.filter(c => c.conversation_id).length} commits`);
* ```
*/
import simpleGit, { SimpleGit, DefaultLogFields, LogResult } from "simple-git";
import type { Conversation, FileEdit } from "./ConversationParser.js";
import type { Decision } from "./DecisionExtractor.js";
/**
 * Represents a git commit with conversation linkage.
 */
export interface GitCommit {
  /** Commit hash as returned by git log (consumers shorten to 7 chars for display) */
  hash: string;
  /** Commit message; the full body, when captured, is in metadata.body */
  message: string;
  /** Commit author name */
  author?: string;
  /** Commit timestamp in epoch milliseconds */
  timestamp: number;
  /** Git branch name (best-effort; only populated by the full history parse) */
  branch?: string;
  /** Files modified in this commit (for per-file queries, contains only the queried file) */
  files_changed: string[];
  /** Linked conversation ID; set only when match confidence exceeds 0.3 */
  conversation_id?: string;
  /** Related message ID within the conversation (reserved — not populated by the linking logic in this file) */
  related_message_id?: string;
  /** Additional commit metadata (author_email, refs, body) */
  metadata: Record<string, unknown>;
}
/**
 * Represents a commit-to-conversation linkage with confidence score.
 * @internal
 */
export interface CommitLinkage {
  /** The git commit */
  commit: GitCommit;
  /** The matched conversation */
  conversation: Conversation;
  /** Confidence score (0-1): points earned divided by the 10 possible scoring points */
  confidence: number;
  /** Human-readable reasons why this match was made (used for logging) */
  reasons: string[];
}
/**
 * Integrates git repository history with conversation history.
 *
 * Links commits to conversations using temporal and contextual analysis.
 * Scoring budget per commit/conversation pair: 3 points timestamp
 * proximity + 4 points file overlap + 1 point branch match + 2 points
 * commit-message keywords = 10; confidence = score / 10.
 */
export class GitIntegrator {
  // simple-git handle bound to the project path at construction
  private git: SimpleGit;
  /**
   * Create a new GitIntegrator.
   *
   * @param projectPath - Path to the git repository
   * @throws {Error} If the directory is not a git repository
   */
  constructor(projectPath: string) {
    this.git = simpleGit(projectPath);
  }
  /**
   * Parse git history and link commits to conversations.
   *
   * Analyzes git log and matches commits to conversations using multiple signals:
   * - Temporal proximity (commits made during conversation timeframe)
   * - File overlap (files changed in commit match files edited in conversation)
   * - Branch matching (git branch matches conversation metadata)
   * - Decision context (commit message references decisions from conversation)
   *
   * Only creates links with confidence > 0.3 to avoid false positives.
   *
   * @param conversations - Array of conversations to match against
   * @param fileEdits - Array of file edits from conversations
   * @param decisions - Array of decisions that may be referenced in commits
   * @returns Array of GitCommit objects with conversation_id set for matches
   *
   * @example
   * ```typescript
   * const integrator = new GitIntegrator('/path/to/project');
   * const commits = await integrator.linkCommitsToConversations(
   *   conversations,
   *   fileEdits,
   *   decisions
   * );
   *
   * // Find commits linked to a specific conversation
   * const convCommits = commits.filter(c => c.conversation_id === 'conv-123');
   * console.error(`${convCommits.length} commits for this conversation`);
   * ```
   */
  async linkCommitsToConversations(
    conversations: Conversation[],
    fileEdits: FileEdit[],
    decisions: Decision[]
  ): Promise<GitCommit[]> {
    console.error("Parsing git history...");
    // Get git log
    const commits = await this.parseGitHistory();
    console.error(`Found ${commits.length} commits`);
    // Link commits to conversations
    const linkedCommits: GitCommit[] = [];
    for (const commit of commits) {
      const linkage = this.findBestConversationMatch(
        commit,
        conversations,
        fileEdits,
        decisions
      );
      // 0.3 threshold = more than 3 of the 10 possible scoring points
      if (linkage && linkage.confidence > 0.3) {
        commit.conversation_id = linkage.conversation.id;
        console.error(
          `Linked commit ${commit.hash.substring(0, 7)} to conversation (confidence: ${(linkage.confidence * 100).toFixed(0)}%)`
        );
        console.error(`  Reasons: ${linkage.reasons.join(", ")}`);
      }
      // Every commit is returned, matched or not
      linkedCommits.push(commit);
    }
    console.error(
      `Linked ${linkedCommits.filter((c) => c.conversation_id).length} commits to conversations`
    );
    return linkedCommits;
  }
  /**
   * Parse git history into GitCommit objects.
   *
   * Runs `git log --all --name-only`, then per commit resolves a branch
   * name and the list of changed files.
   *
   * NOTE(review): this issues two additional git subprocess calls per
   * commit (branch + show), so the whole parse is O(n) in subprocess
   * invocations — fine for small repos, slow for large histories.
   *
   * Returns [] (after logging the error) if the history cannot be read.
   */
  private async parseGitHistory(): Promise<GitCommit[]> {
    try {
      const log: LogResult<DefaultLogFields> = await this.git.log({
        "--all": null,
        "--name-only": null,
      });
      const commits: GitCommit[] = [];
      for (const entry of log.all) {
        // Get current branch (if available)
        let branch: string | undefined;
        try {
          const branches = await this.git.branch(["--contains", entry.hash]);
          // NOTE(review): branches.current is the repo's checked-out branch,
          // which may not be one of the "--contains" results — confirm intent
          branch = branches.current || branches.all[0];
        } catch (_e) {
          // Branch info not available
        }
        // Parse changed files from diff
        const files = await this.getChangedFiles(entry.hash);
        commits.push({
          hash: entry.hash,
          message: entry.message,
          author: entry.author_name,
          // git log dates converted to epoch milliseconds
          timestamp: new Date(entry.date).getTime(),
          branch,
          files_changed: files,
          metadata: {
            author_email: entry.author_email,
            refs: entry.refs,
            body: entry.body,
          },
        });
      }
      return commits;
    } catch (error) {
      console.error("Error parsing git history:", error);
      return [];
    }
  }
  /**
   * Get files changed in a commit.
   *
   * Uses `git show --name-only --format=` — the empty format string
   * suppresses the commit header, leaving only file paths (one per line).
   * Returns [] on any git error.
   */
  private async getChangedFiles(commitHash: string): Promise<string[]> {
    try {
      const diff = await this.git.show([
        "--name-only",
        "--format=",
        commitHash,
      ]);
      return diff
        .split("\n")
        .map((f) => f.trim())
        .filter((f) => f.length > 0);
    } catch (_error) {
      return [];
    }
  }
  /**
   * Find best conversation match for a commit.
   *
   * Scores the commit against every conversation and keeps the highest
   * confidence. Returns null when the list is empty or every candidate
   * scores 0 (a zero-confidence linkage is never selected).
   */
  private findBestConversationMatch(
    commit: GitCommit,
    conversations: Conversation[],
    fileEdits: FileEdit[],
    decisions: Decision[]
  ): CommitLinkage | null {
    let bestMatch: CommitLinkage | null = null;
    let highestConfidence = 0;
    for (const conversation of conversations) {
      const linkage = this.scoreCommitConversationMatch(
        commit,
        conversation,
        fileEdits,
        decisions
      );
      if (linkage.confidence > highestConfidence) {
        highestConfidence = linkage.confidence;
        bestMatch = linkage;
      }
    }
    return bestMatch;
  }
  /**
   * Score how well a commit matches a conversation.
   *
   * Sums four signals (3 + 4 + 1 + 2 = 10 points max) and returns the
   * normalized confidence plus the human-readable reasons for each
   * signal that contributed.
   */
  private scoreCommitConversationMatch(
    commit: GitCommit,
    conversation: Conversation,
    fileEdits: FileEdit[],
    decisions: Decision[]
  ): CommitLinkage {
    let score = 0;
    const reasons: string[] = [];
    const maxScore = 10; // Total possible points
    // 1. Timestamp proximity (3 points max)
    const timestampScore = this.scoreTimestampProximity(commit, conversation);
    score += timestampScore;
    if (timestampScore > 0) {
      reasons.push(`timestamp proximity (${timestampScore.toFixed(1)}/3)`);
    }
    // 2. File overlap (4 points max)
    const fileScore = this.scoreFileOverlap(commit, conversation, fileEdits);
    score += fileScore;
    if (fileScore > 0) {
      reasons.push(`file overlap (${fileScore.toFixed(1)}/4)`);
    }
    // 3. Branch match (1 point)
    if (commit.branch && commit.branch === conversation.git_branch) {
      score += 1;
      reasons.push("branch match");
    }
    // 4. Commit message keywords (2 points max)
    const keywordScore = this.scoreCommitMessageKeywords(
      commit,
      conversation,
      decisions
    );
    score += keywordScore;
    if (keywordScore > 0) {
      reasons.push(`message keywords (${keywordScore.toFixed(1)}/2)`);
    }
    return {
      commit,
      conversation,
      confidence: score / maxScore,
      reasons,
    };
  }
  /**
   * Score based on timestamp proximity.
   * Returns 0-3 points: 3 inside the conversation timespan, 2 within
   * 5 minutes of either end, 1 within 1 hour, else 0.
   */
  private scoreTimestampProximity(
    commit: GitCommit,
    conversation: Conversation
  ): number {
    const { first_message_at, last_message_at } = conversation;
    // Check if commit is within conversation timespan
    if (
      commit.timestamp >= first_message_at &&
      commit.timestamp <= last_message_at
    ) {
      return 3; // Perfect match
    }
    // Check if commit is within 5 minutes before/after conversation
    const fiveMinutes = 5 * 60 * 1000;
    const timeDelta = Math.min(
      Math.abs(commit.timestamp - first_message_at),
      Math.abs(commit.timestamp - last_message_at)
    );
    if (timeDelta < fiveMinutes) {
      return 2; // Very close
    }
    // Check if commit is within 1 hour
    const oneHour = 60 * 60 * 1000;
    if (timeDelta < oneHour) {
      return 1; // Somewhat close
    }
    return 0; // Too far away
  }
  /**
   * Score based on file overlap.
   * Returns 0-4 points, proportional to overlap between the commit's
   * changed files and the conversation's edited files.
   *
   * NOTE(review): this compares paths by exact string equality, so it
   * assumes FileEdit.file_path uses the same format as git's
   * repo-relative paths — absolute editor paths would never match.
   * Confirm upstream normalization.
   */
  private scoreFileOverlap(
    commit: GitCommit,
    conversation: Conversation,
    fileEdits: FileEdit[]
  ): number {
    // Get files discussed in conversation
    const conversationFiles = fileEdits
      .filter((edit) => edit.conversation_id === conversation.id)
      .map((edit) => edit.file_path);
    if (conversationFiles.length === 0) {return 0;}
    // Calculate overlap
    const commitFilesSet = new Set(commit.files_changed);
    const overlappingFiles = conversationFiles.filter((file) =>
      commitFilesSet.has(file)
    );
    // Ratio is relative to the larger of the two file lists
    const overlapRatio =
      overlappingFiles.length /
      Math.max(commit.files_changed.length, conversationFiles.length);
    // 4 points for 100% overlap, scaling down
    return overlapRatio * 4;
  }
  /**
   * Score based on commit message keywords.
   * Returns 0-2 points: full 2 points at 3+ keyword matches, scaling
   * linearly below that. Returns 0 when the conversation has no decisions.
   *
   * NOTE(review): the 3+ character word filter still admits common stop
   * words ("the", "and", "was"), which can inflate the match count.
   */
  private scoreCommitMessageKeywords(
    commit: GitCommit,
    conversation: Conversation,
    decisions: Decision[]
  ): number {
    const commitMessage = commit.message.toLowerCase();
    // Get keywords from conversation decisions
    const conversationDecisions = decisions.filter(
      (d) => d.conversation_id === conversation.id
    );
    if (conversationDecisions.length === 0) {return 0;}
    // Extract keywords from decisions
    const keywords = new Set<string>();
    for (const decision of conversationDecisions) {
      // Extract significant words (3+ characters)
      const words = decision.decision_text
        .toLowerCase()
        .split(/\s+/)
        .filter((w) => w.length >= 3);
      words.forEach((w) => keywords.add(w));
      if (decision.context) {
        keywords.add(decision.context.toLowerCase());
      }
    }
    // Count keyword matches
    let matches = 0;
    for (const keyword of keywords) {
      if (commitMessage.includes(keyword)) {
        matches++;
      }
    }
    // 2 points for 3+ matches, scaling down
    return Math.min(matches / 3, 1) * 2;
  }
  /**
   * Get recent commits (last N days).
   *
   * Unlike the full history parse, branch and body are not captured.
   * Returns [] (after logging) on git errors.
   *
   * @param days - Lookback window in days (default 30)
   */
  async getRecentCommits(days: number = 30): Promise<GitCommit[]> {
    const sinceDate = new Date();
    sinceDate.setDate(sinceDate.getDate() - days);
    try {
      const log = await this.git.log({
        "--since": sinceDate.toISOString(),
      });
      const commits: GitCommit[] = [];
      for (const entry of log.all) {
        // One extra `git show` per commit to collect changed files
        const files = await this.getChangedFiles(entry.hash);
        commits.push({
          hash: entry.hash,
          message: entry.message,
          author: entry.author_name,
          timestamp: new Date(entry.date).getTime(),
          files_changed: files,
          metadata: {
            author_email: entry.author_email,
            refs: entry.refs,
          },
        });
      }
      return commits;
    } catch (error) {
      console.error("Error getting recent commits:", error);
      return [];
    }
  }
  /**
   * Get commits affecting a specific file.
   *
   * files_changed on each returned commit contains ONLY the queried
   * file, not the commit's full change list. Returns [] (after logging)
   * on git errors.
   */
  async getCommitsForFile(filePath: string): Promise<GitCommit[]> {
    try {
      const log = await this.git.log({
        file: filePath,
      });
      return log.all.map((entry) => ({
        hash: entry.hash,
        message: entry.message,
        author: entry.author_name,
        timestamp: new Date(entry.date).getTime(),
        files_changed: [filePath],
        metadata: {
          author_email: entry.author_email,
          refs: entry.refs,
        },
      }));
    } catch (error) {
      console.error(`Error getting commits for ${filePath}:`, error);
      return [];
    }
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/MethodologyExtractor.ts | TypeScript | /**
* Methodology Extractor - Identifies problem-solving approaches from conversations.
*
* This extractor analyzes conversation history to identify how AI solved problems,
* capturing the methodology, steps taken, and tools used. It helps trace:
* - Problem statement / initial understanding
* - Approach taken (exploration, research, implementation)
* - Steps / sequence of actions
* - Tools and commands used
* - Files explored
* - Outcome (success, partial, failed)
*
* @example
* ```typescript
* const extractor = new MethodologyExtractor();
* const methodologies = extractor.extractMethodologies(messages, toolUses, toolResults);
* methodologies.forEach(m => {
* console.log(`Problem: ${m.problem_statement}`);
* console.log(`Approach: ${m.approach}`);
* console.log(`Steps: ${m.steps_taken.length}`);
* });
* ```
*/
import { nanoid } from "nanoid";
import type { Message, ToolUse, ToolResult } from "./ConversationParser.js";
/**
 * Represents a problem-solving methodology extracted from conversation history.
 */
export interface Methodology {
  /** Unique methodology identifier (nanoid) */
  id: string;
  /** Conversation where this methodology was used */
  conversation_id: string;
  /** ID of the first message in the problem-solving segment */
  start_message_id: string;
  /** ID of the last message in the problem-solving segment */
  end_message_id: string;
  /** The problem being solved (fallback extraction is truncated to 200 chars) */
  problem_statement: string;
  /** High-level approach category */
  approach: "exploration" | "research" | "implementation" | "debugging" | "refactoring" | "testing";
  /** Sequence of steps taken (one per tool invocation in the segment) */
  steps_taken: MethodologyStep[];
  /** Unique tool names used in the segment */
  tools_used: string[];
  /** Files explored or modified (from tool inputs plus path-like strings in messages) */
  files_involved: string[];
  /** Outcome of the approach */
  outcome: "success" | "partial" | "failed" | "ongoing";
  /** First sentence matching a "what worked" pattern (up to 200 chars) */
  what_worked?: string;
  /** First sentence matching a "what failed" pattern (up to 200 chars) */
  what_didnt_work?: string;
  /** Timestamp of the first message in the segment */
  started_at: number;
  /** Timestamp of the last message in the segment */
  ended_at: number;
}
/**
 * A single step in the methodology.
 */
export interface MethodologyStep {
  /** 1-based step number */
  order: number;
  /** What was done (human-readable description of the tool action) */
  action: string;
  /** Tool used (if any) */
  tool?: string;
  /** Result of the action (summary, truncated to ~100 chars) */
  result?: string;
  /** True when no error result was recorded (also true when no matching result was found) */
  succeeded: boolean;
}
/**
 * Extracts problem-solving methodologies from conversation history.
 *
 * All pattern fields below are compiled once per instance and none use
 * the /g flag, so repeated .test() calls are stateless.
 */
export class MethodologyExtractor {
  // Patterns indicating start of problem-solving
  // (capture group 1, where present, becomes the problem statement)
  private readonly PROBLEM_START_PATTERNS = [
    /(?:I need to|help me|can you|let's|we need to|I want to)\s+(.+?)(?:\.|$)/i,
    /(?:how do I|how can I|what's the best way to)\s+(.+?)(?:\?|$)/i,
    /(?:fix|solve|resolve|implement|create|build|add|update|refactor|debug)\s+(.+?)(?:\.|$)/i,
    /(?:there's (?:a|an)|I have (?:a|an))\s+(?:bug|error|issue|problem)\s+(?:in|with)\s+(.+?)(?:\.|$)/i,
  ];
  // Patterns indicating approach type
  // (checked in declaration order; first category with any match wins)
  private readonly APPROACH_PATTERNS: Record<string, RegExp[]> = {
    exploration: [
      /let me (?:look|check|explore|examine|see)/i,
      /first,? (?:I'll|let me) (?:understand|read|examine)/i,
      /exploring the codebase/i,
    ],
    research: [
      /(?:searching|looking up|researching)/i,
      /(?:documentation|docs) (?:says|shows|indicates)/i,
      /according to/i,
      /best practices? (?:suggest|recommend)/i,
    ],
    implementation: [
      /(?:I'll|let me) (?:implement|create|write|add)/i,
      /(?:implementing|creating|writing|adding)/i,
      /here's the (?:code|implementation)/i,
    ],
    debugging: [
      /(?:debugging|investigating|tracing)/i,
      /the (?:error|bug|issue) (?:is|was|occurs)/i,
      /root cause/i,
      /stack trace/i,
    ],
    refactoring: [
      /(?:refactoring|restructuring|reorganizing)/i,
      /(?:cleaning up|simplifying|improving)/i,
      /better (?:structure|organization|design)/i,
    ],
    testing: [
      /(?:testing|running tests|verifying)/i,
      /test (?:passes|fails|results)/i,
      /npm (?:test|run test)/i,
    ],
  };
  // Patterns indicating outcome
  // (success takes precedence over failed, then partial — see determineOutcome)
  private readonly OUTCOME_PATTERNS = {
    success: [
      /(?:done|complete|finished|working|fixed|resolved|implemented)/i,
      /(?:successfully|correctly) (?:implemented|fixed|added)/i,
      /(?:all tests pass|tests are passing)/i,
      /✓|✅/,
    ],
    failed: [
      /(?:failed|error|broken|doesn't work)/i,
      /(?:still|continues to) (?:fail|error)/i,
      /cannot|could not|unable to/i,
      /✗|❌/,
    ],
    partial: [
      /(?:partially|almost|mostly) (?:done|working)/i,
      /(?:some|few) (?:issues|problems) remain/i,
      /needs? more work/i,
    ],
  };
  /**
   * Extract methodologies from conversation history.
   *
   * NOTE(review): the per-conversation `.some` filters make this
   * O(messages × tool records) per conversation — acceptable for modest
   * histories, worth indexing by message_id if it becomes hot.
   *
   * @param messages - Array of conversation messages
   * @param toolUses - Array of tool uses
   * @param toolResults - Array of tool results
   * @returns Array of extracted Methodology objects
   */
  extractMethodologies(
    messages: Message[],
    toolUses: ToolUse[],
    toolResults: ToolResult[]
  ): Methodology[] {
    const methodologies: Methodology[] = [];
    // Group messages by conversation
    const conversationMessages = this.groupByConversation(messages);
    for (const [conversationId, convMessages] of conversationMessages) {
      // Get tool uses and results for this conversation
      const convToolUses = toolUses.filter((tu) =>
        convMessages.some((m) => m.id === tu.message_id)
      );
      const convToolResults = toolResults.filter((tr) =>
        convMessages.some((m) => m.id === tr.message_id)
      );
      // Find problem-solving segments
      const segments = this.identifyProblemSegments(convMessages);
      for (const segment of segments) {
        const methodology = this.extractMethodologyFromSegment(
          conversationId,
          segment,
          convToolUses,
          convToolResults
        );
        if (methodology) {
          methodologies.push(methodology);
        }
      }
    }
    return methodologies;
  }
  /**
   * Group messages by conversation ID.
   * Messages within each group are sorted by timestamp ascending.
   */
  private groupByConversation(messages: Message[]): Map<string, Message[]> {
    const groups = new Map<string, Message[]>();
    for (const message of messages) {
      const convId = message.conversation_id;
      const existing = groups.get(convId);
      if (existing) {
        existing.push(message);
      } else {
        groups.set(convId, [message]);
      }
    }
    // Sort each group by timestamp
    for (const [, msgs] of groups) {
      msgs.sort((a, b) => a.timestamp - b.timestamp);
    }
    return groups;
  }
  /**
   * Identify segments of messages that represent problem-solving.
   *
   * State machine: a user message matching a problem-start pattern opens
   * a segment; an assistant message matching a success or failure outcome
   * pattern closes it. Segments with fewer than 2 messages are discarded;
   * messages outside any open segment are dropped.
   */
  private identifyProblemSegments(messages: Message[]): Message[][] {
    const segments: Message[][] = [];
    let currentSegment: Message[] = [];
    let inProblem = false;
    for (const message of messages) {
      const content = message.content || "";
      // Check if this starts a new problem
      if (message.role === "user") {
        const isProblemStart = this.PROBLEM_START_PATTERNS.some((p) =>
          p.test(content)
        );
        if (isProblemStart) {
          // Save previous segment if exists
          if (currentSegment.length >= 2) {
            segments.push(currentSegment);
          }
          currentSegment = [message];
          inProblem = true;
          continue;
        }
      }
      // Continue collecting messages for current problem
      if (inProblem) {
        currentSegment.push(message);
        // Check if problem is resolved
        if (message.role === "assistant") {
          const isResolved = this.OUTCOME_PATTERNS.success.some((p) =>
            p.test(content)
          );
          const isFailed = this.OUTCOME_PATTERNS.failed.some((p) =>
            p.test(content)
          );
          if ((isResolved || isFailed) && currentSegment.length >= 2) {
            segments.push(currentSegment);
            currentSegment = [];
            inProblem = false;
          }
        }
      }
    }
    // Don't forget last segment
    if (currentSegment.length >= 2) {
      segments.push(currentSegment);
    }
    return segments;
  }
  /**
   * Extract methodology from a problem-solving segment.
   * Returns null when the segment is too short or no problem statement
   * can be derived from it.
   */
  private extractMethodologyFromSegment(
    conversationId: string,
    segment: Message[],
    toolUses: ToolUse[],
    toolResults: ToolResult[]
  ): Methodology | null {
    if (segment.length < 2) {
      return null;
    }
    const firstMessage = segment[0];
    const lastMessage = segment[segment.length - 1];
    // Extract problem statement
    const problemStatement = this.extractProblemStatement(segment);
    if (!problemStatement) {
      return null;
    }
    // Identify approach
    const approach = this.identifyApproach(segment);
    // Extract steps
    const steps = this.extractSteps(segment, toolUses, toolResults);
    // Get tools used
    const toolsUsed = this.extractToolsUsed(segment, toolUses);
    // Get files involved
    const filesInvolved = this.extractFilesInvolved(segment, toolUses, toolResults);
    // Determine outcome
    const outcome = this.determineOutcome(segment);
    // Extract what worked/didn't work
    const { whatWorked, whatDidntWork } = this.extractLessonsLearned(segment);
    return {
      id: nanoid(),
      conversation_id: conversationId,
      start_message_id: firstMessage.id,
      end_message_id: lastMessage.id,
      problem_statement: problemStatement,
      approach,
      steps_taken: steps,
      tools_used: toolsUsed,
      files_involved: filesInvolved,
      outcome,
      what_worked: whatWorked,
      what_didnt_work: whatDidntWork,
      started_at: firstMessage.timestamp,
      ended_at: lastMessage.timestamp,
    };
  }
  /**
   * Extract the problem statement from the first user message.
   *
   * Prefers the capture group of a problem-start pattern; falls back to
   * the first sentence (only if longer than 10 chars, truncated to 200).
   */
  private extractProblemStatement(segment: Message[]): string | null {
    const userMessages = segment.filter((m) => m.role === "user" && m.content);
    if (userMessages.length === 0) {
      return null;
    }
    const firstUser = userMessages[0];
    const content = firstUser.content || "";
    // Try to extract a clean problem statement
    for (const pattern of this.PROBLEM_START_PATTERNS) {
      const match = content.match(pattern);
      if (match && match[1]) {
        return match[1].trim();
      }
    }
    // Fall back to first sentence
    const sentences = content.split(/[.!?]/);
    const firstSentence = sentences[0]?.trim();
    return firstSentence && firstSentence.length > 10
      ? firstSentence.substring(0, 200)
      : null;
  }
  /**
   * Identify the primary approach used.
   *
   * Tests the whole segment's combined text; the first category (in
   * APPROACH_PATTERNS declaration order) with any matching pattern wins,
   * defaulting to "implementation".
   */
  private identifyApproach(
    segment: Message[]
  ): Methodology["approach"] {
    const combinedContent = segment
      .map((m) => m.content || "")
      .join("\n");
    for (const [approach, patterns] of Object.entries(this.APPROACH_PATTERNS)) {
      for (const pattern of patterns) {
        if (pattern.test(combinedContent)) {
          return approach as Methodology["approach"];
        }
      }
    }
    return "implementation"; // Default
  }
  /**
   * Extract steps taken during problem-solving.
   * One step per tool use in the segment, numbered in the input order
   * of the toolUses array.
   */
  private extractSteps(
    segment: Message[],
    toolUses: ToolUse[],
    toolResults: ToolResult[]
  ): MethodologyStep[] {
    const steps: MethodologyStep[] = [];
    let order = 1;
    // Get message IDs in this segment
    const segmentMessageIds = new Set(segment.map((m) => m.id));
    // Get tool uses for this segment
    const segmentToolUses = toolUses.filter((tu) =>
      segmentMessageIds.has(tu.message_id)
    );
    for (const toolUse of segmentToolUses) {
      // Find the corresponding result
      const result = toolResults.find((tr) => tr.tool_use_id === toolUse.id);
      // Describe the action
      const action = this.describeToolAction(toolUse);
      const resultSummary = result
        ? this.summarizeToolResult(result)
        : undefined;
      steps.push({
        order: order++,
        action,
        tool: toolUse.tool_name,
        result: resultSummary,
        // Treated as succeeded when no result was found at all
        succeeded: !result?.is_error,
      });
    }
    return steps;
  }
  /**
   * Describe what a tool action did.
   * Maps known tool names to a short human-readable sentence; unknown
   * tools fall back to "Use tool: <name>".
   */
  private describeToolAction(toolUse: ToolUse): string {
    const toolName = toolUse.tool_name;
    const input = toolUse.tool_input || {};
    switch (toolName) {
      case "Read":
        return `Read file: ${input.file_path || "unknown"}`;
      case "Write":
        return `Write file: ${input.file_path || "unknown"}`;
      case "Edit":
        return `Edit file: ${input.file_path || "unknown"}`;
      case "Glob":
        return `Search files matching: ${input.pattern || "unknown"}`;
      case "Grep":
        return `Search content for: ${input.pattern || "unknown"}`;
      case "Bash": {
        const cmd = String(input.command || "");
        return `Execute: ${cmd.substring(0, 100)}`;
      }
      case "WebSearch":
        return `Search web for: ${input.query || "unknown"}`;
      case "WebFetch":
        return `Fetch URL: ${input.url || "unknown"}`;
      default:
        return `Use tool: ${toolName}`;
    }
  }
  /**
   * Summarize a tool result.
   * Errors are prefixed "Error:"; success output is truncated to 100
   * chars with an ellipsis, or reported as "Success" when empty.
   */
  private summarizeToolResult(result: ToolResult): string {
    if (result.is_error) {
      const error = result.stderr || result.content || "Error occurred";
      return `Error: ${error.substring(0, 100)}`;
    }
    const content = result.content || result.stdout || "";
    if (content.length > 100) {
      return content.substring(0, 100) + "...";
    }
    return content || "Success";
  }
  /**
   * Extract unique tool names used in the segment.
   */
  private extractToolsUsed(segment: Message[], toolUses: ToolUse[]): string[] {
    const segmentMessageIds = new Set(segment.map((m) => m.id));
    const tools = new Set<string>();
    for (const toolUse of toolUses) {
      if (segmentMessageIds.has(toolUse.message_id)) {
        tools.add(toolUse.tool_name);
      }
    }
    return Array.from(tools);
  }
  /**
   * Extract files involved in the segment.
   * Combines file_path/path tool inputs with path-like strings found in
   * message text. The regex only matches paths that start with "/", so
   * relative paths mentioned in prose are not captured.
   */
  private extractFilesInvolved(
    segment: Message[],
    toolUses: ToolUse[],
    _toolResults: ToolResult[]
  ): string[] {
    const files = new Set<string>();
    const segmentMessageIds = new Set(segment.map((m) => m.id));
    // Extract from tool uses
    for (const toolUse of toolUses) {
      if (!segmentMessageIds.has(toolUse.message_id)) {
        continue;
      }
      const input = toolUse.tool_input || {};
      if (input.file_path) {
        files.add(String(input.file_path));
      }
      if (input.path) {
        files.add(String(input.path));
      }
    }
    // Extract file paths from message content
    const filePattern = /(?:\/[\w.-]+)+\.[\w]+/g;
    for (const message of segment) {
      const matches = (message.content || "").match(filePattern);
      if (matches) {
        for (const match of matches) {
          files.add(match);
        }
      }
    }
    return Array.from(files);
  }
  /**
   * Determine the outcome of the problem-solving.
   * Only the last 3 messages are inspected; success patterns take
   * precedence over failed, then partial; otherwise "ongoing".
   */
  private determineOutcome(segment: Message[]): Methodology["outcome"] {
    // Check last few messages for outcome indicators
    const lastMessages = segment.slice(-3);
    const combinedContent = lastMessages
      .map((m) => m.content || "")
      .join("\n");
    for (const pattern of this.OUTCOME_PATTERNS.success) {
      if (pattern.test(combinedContent)) {
        return "success";
      }
    }
    for (const pattern of this.OUTCOME_PATTERNS.failed) {
      if (pattern.test(combinedContent)) {
        return "failed";
      }
    }
    for (const pattern of this.OUTCOME_PATTERNS.partial) {
      if (pattern.test(combinedContent)) {
        return "partial";
      }
    }
    return "ongoing";
  }
  /**
   * Extract lessons learned from the segment.
   * Scans assistant messages sentence by sentence and keeps the FIRST
   * sentence matching a "worked" pattern and the FIRST matching a
   * "failed" pattern (each truncated to 200 chars).
   */
  private extractLessonsLearned(
    segment: Message[]
  ): { whatWorked?: string; whatDidntWork?: string } {
    const assistantMessages = segment.filter(
      (m) => m.role === "assistant" && m.content
    );
    let whatWorked: string | undefined;
    let whatDidntWork: string | undefined;
    // Look for patterns indicating what worked
    const workedPatterns = [
      /(?:this|that) (?:works?|solved|fixed)/i,
      /(?:the|this) (?:solution|fix|approach) (?:is|was)/i,
      /(?:successfully|correctly) (?:implemented|fixed)/i,
    ];
    const failedPatterns = [
      /(?:didn't|doesn't|won't) work/i,
      /(?:this|that) (?:failed|broke|caused)/i,
      /(?:the|this) (?:issue|problem|error) (?:is|was)/i,
    ];
    for (const message of assistantMessages) {
      const content = message.content || "";
      const sentences = content.split(/[.!?]/);
      for (const sentence of sentences) {
        if (!whatWorked) {
          for (const pattern of workedPatterns) {
            if (pattern.test(sentence)) {
              whatWorked = sentence.trim().substring(0, 200);
              break;
            }
          }
        }
        if (!whatDidntWork) {
          for (const pattern of failedPatterns) {
            if (pattern.test(sentence)) {
              whatDidntWork = sentence.trim().substring(0, 200);
              break;
            }
          }
        }
      }
    }
    return { whatWorked, whatDidntWork };
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/MistakeExtractor.ts | TypeScript | /**
* Mistake Extractor - Identifies errors and how they were corrected.
*
* This extractor analyzes conversation messages and tool results to identify
* mistakes made during development and how they were corrected. It helps prevent
* repeating the same errors by documenting:
* - What went wrong
* - How it was corrected
* - Files affected
* - Type of mistake (logic error, wrong approach, etc.)
*
* Sources of mistakes:
* - Tool execution errors (failed commands, syntax errors)
* - User corrections ("that's wrong", "don't do that")
* - Error discussions (debugging conversations)
*
* @example
* ```typescript
* const extractor = new MistakeExtractor();
* const mistakes = extractor.extractMistakes(messages, toolResults);
* console.log(`Found ${mistakes.length} mistakes`);
* mistakes.forEach(m => {
* console.log(`${m.mistake_type}: ${m.what_went_wrong}`);
* if (m.correction) console.log(`Fixed by: ${m.correction}`);
* });
* ```
*/
import { nanoid } from "nanoid";
import type { Message, ToolResult } from "./ConversationParser.js";
/**
 * Represents a mistake made during development and how it was corrected.
 *
 * Produced by {@link MistakeExtractor} from three sources: failed tool
 * executions, explicit user corrections, and error discussions in
 * assistant messages.
 */
export interface Mistake {
  /** Unique mistake identifier (generated with nanoid) */
  id: string;
  /** Conversation where the mistake occurred */
  conversation_id: string;
  /** Message containing or referencing the mistake */
  message_id: string;
  /** Category of mistake; "tool_error" is the fallback for unclassified tool failures */
  mistake_type: "logic_error" | "wrong_approach" | "misunderstanding" | "tool_error" | "syntax_error";
  /** Description of what went wrong (truncated to ~300 characters by the extractor) */
  what_went_wrong: string;
  /** How the mistake was corrected, when a fix could be located */
  correction?: string;
  /** Full text of the user's message correcting the mistake (user-correction source only) */
  user_correction_message?: string;
  /** Files affected by this mistake (deduplicated paths) */
  files_affected: string[];
  /** When the mistake occurred (copied from the source message/tool-result timestamp) */
  timestamp: number;
}
/**
 * Extracts mistakes and corrections from conversation history.
 *
 * Analyzes tool errors, user corrections, and error discussions to document
 * mistakes and prevent repetition.
 */
export class MistakeExtractor {
  // Minimum severity score required to store a mistake
  // Score 1 keeps any classified mistake - filtering is done by stricter patterns
  private readonly MIN_SEVERITY_SCORE = 1;
  // Noise patterns to filter out (injected session-continuation summaries)
  private readonly NOISE_PATTERNS = [
    /this session is being continued/i,
    /conversation is summarized below/i,
    /previous conversation that ran out of context/i,
    /here is the summary/i,
    /summary of the conversation/i,
  ];
  // User correction indicators - must be explicit corrections about code/approach
  private readonly CORRECTION_INDICATORS = [
    // "That's wrong, you should use X"
    /that'?s?\s+(?:wrong|incorrect|a mistake)[,\s]+(?:you should|use|the correct)/i,
    // "You made an error in the code/implementation"
    /(?:you|that)\s+(?:made|caused|introduced)\s+(?:a|an)\s+(?:error|bug)\s+(?:in|with|when)/i,
    // "Don't do that because X"
    /don't\s+do\s+that[,\s]+(?:because|it will|it causes)/i,
    // "You shouldn't have done/used X because Y"
    /(?:should not|shouldn't)\s+(?:have\s+)?(?:done|used)\s+(.+?)\s+(?:because|since)/i,
    // "Fix the bug/error in X"
    /(?:fix|correct)\s+(?:the|that)\s+(?:bug|error)\s+(?:in|with)/i,
  ];
  // Error indicators - stricter, real errors only
  private readonly ERROR_INDICATORS = [
    /error:\s*\w+/i, // error: SomeError
    /Error:\s*\w+/, // Error: SomeError
    /failed:\s*.+/i, // failed: something
    /exception:\s*.+/i, // exception: something
    /TypeError:|ReferenceError:|SyntaxError:/i, // JS errors
    /ENOENT|EACCES|EPERM/i, // Node.js file errors
    /command\s+failed/i, // command failed
    /exit\s+code\s+[1-9]/i, // non-zero exit code
  ];
  // Mistake type patterns, checked in declaration order by classifyMistakeType
  private readonly MISTAKE_PATTERNS = {
    logic_error: [/logic\s+error/i, /incorrect\s+logic/i, /wrong\s+condition/i],
    wrong_approach: [
      /wrong\s+approach/i,
      /better\s+way/i,
      /should\s+(?:have\s+)?use(?:d)?/i,
    ],
    misunderstanding: [
      /misunderstood/i,
      /(?:didn't|don't)\s+understand/i,
      /confused\s+about/i,
    ],
    syntax_error: [/syntax\s+error/i, /parse\s+error/i, /invalid\s+syntax/i],
  };
  /**
   * Extract mistakes from messages and tool results.
   *
   * Analyzes three sources to identify mistakes:
   * 1. Tool execution errors (failed commands, syntax errors)
   * 2. User corrections (explicit corrections by the user)
   * 3. Error discussions (conversations about bugs and fixes)
   *
   * @param messages - Array of conversation messages
   * @param toolResults - Array of tool execution results
   * @returns Array of extracted Mistake objects, deduplicated and filtered
   *   by {@link scoreMistakeSeverity} against MIN_SEVERITY_SCORE
   *
   * @example
   * ```typescript
   * const extractor = new MistakeExtractor();
   * const mistakes = extractor.extractMistakes(messages, toolResults);
   *
   * // Find logic errors
   * const logicErrors = mistakes.filter(m => m.mistake_type === 'logic_error');
   * ```
   */
  extractMistakes(messages: Message[], toolResults: ToolResult[]): Mistake[] {
    const mistakes: Mistake[] = [];
    // Filter messages to exclude noise (session-continuation summaries)
    const cleanMessages = messages.filter((m) => !this.isNoiseContent(m.content || ""));
    // Extract from tool errors (tool errors are real, keep them — so they
    // are sourced from the unfiltered message list)
    const toolErrors = this.extractToolErrors(toolResults, messages);
    mistakes.push(...toolErrors);
    // Extract from user corrections
    const userCorrections = this.extractUserCorrections(cleanMessages);
    mistakes.push(...userCorrections);
    // Extract from error discussions
    const errorDiscussions = this.extractErrorDiscussions(cleanMessages);
    mistakes.push(...errorDiscussions);
    // Deduplicate and filter by severity
    const deduplicated = this.deduplicateMistakes(mistakes);
    return deduplicated.filter(
      (m) => this.scoreMistakeSeverity(m) >= this.MIN_SEVERITY_SCORE
    );
  }
  /**
   * Check if content is noise that should be filtered out.
   * Only the first 500 characters are inspected, since continuation markers
   * appear at the top of injected summaries.
   */
  private isNoiseContent(content: string): boolean {
    const firstChunk = content.substring(0, 500);
    return this.NOISE_PATTERNS.some((pattern) => pattern.test(firstChunk));
  }
  /**
   * Extract mistakes from tool execution errors.
   *
   * @param toolResults - All tool results; only those with is_error set are used
   * @param messages - Messages used to resolve the owning conversation/message
   * @returns One Mistake per failed tool result that maps to a known message
   */
  private extractToolErrors(
    toolResults: ToolResult[],
    messages: Message[]
  ): Mistake[] {
    const mistakes: Mistake[] = [];
    for (const result of toolResults) {
      if (!result.is_error) {continue;}
      const message = messages.find((m) => m.id === result.message_id);
      if (!message) {continue;}
      // Combine ALL error sources for better context
      // stderr might just have "exit code 255" while content has the actual error
      const errorParts: string[] = [];
      if (result.stderr) {errorParts.push(result.stderr);}
      if (result.stdout) {errorParts.push(result.stdout);}
      if (result.content) {errorParts.push(result.content);}
      const errorContent = errorParts.join("\n");
      const mistakeType = this.classifyMistakeType(errorContent);
      mistakes.push({
        id: nanoid(),
        conversation_id: message.conversation_id,
        message_id: message.id,
        mistake_type: mistakeType || "tool_error",
        what_went_wrong: this.summarizeError(errorContent),
        correction: this.findCorrection(message, messages),
        files_affected: this.extractFilesFromError(errorContent),
        timestamp: result.timestamp,
      });
    }
    return mistakes;
  }
  /**
   * Extract mistakes from user corrections.
   *
   * A user message matching a CORRECTION_INDICATORS pattern is attributed to
   * the nearest preceding assistant message (the message being corrected).
   */
  private extractUserCorrections(messages: Message[]): Mistake[] {
    const mistakes: Mistake[] = [];
    for (let i = 0; i < messages.length; i++) {
      const message = messages[i];
      if (message.role !== "user" || !message.content) {
        continue;
      }
      const content = message.content;
      // Check if this is a correction
      const isCorrection = this.CORRECTION_INDICATORS.some((pattern) =>
        pattern.test(content)
      );
      if (!isCorrection) {continue;}
      // Find the previous assistant message — that is what was corrected
      const previousAssistant = this.findPreviousAssistantMessage(messages, i);
      if (!previousAssistant) {continue;}
      const mistakeType = this.classifyMistakeType(message.content);
      mistakes.push({
        id: nanoid(),
        conversation_id: message.conversation_id,
        message_id: previousAssistant.id,
        mistake_type: mistakeType || "misunderstanding",
        what_went_wrong: this.extractWhatWentWrong(message.content),
        correction: this.extractCorrection(message.content),
        user_correction_message: message.content,
        files_affected: this.extractFilesFromMessage(message),
        timestamp: message.timestamp,
      });
    }
    return mistakes;
  }
  /**
   * Extract mistakes from error discussions in assistant messages.
   * Both the error description and (when present) the fix are taken from
   * the same message.
   */
  private extractErrorDiscussions(messages: Message[]): Mistake[] {
    const mistakes: Mistake[] = [];
    for (const message of messages) {
      if (message.role !== "assistant" || !message.content) {
        continue;
      }
      const content = message.content;
      // Check if message discusses an error
      const hasErrorDiscussion = this.ERROR_INDICATORS.some((pattern) =>
        pattern.test(content)
      );
      if (!hasErrorDiscussion) {continue;}
      // Extract error discussion
      const errorText = this.extractErrorDiscussion(message.content);
      if (!errorText) {continue;}
      const mistakeType = this.classifyMistakeType(errorText);
      mistakes.push({
        id: nanoid(),
        conversation_id: message.conversation_id,
        message_id: message.id,
        mistake_type: mistakeType || "logic_error",
        what_went_wrong: errorText,
        correction: this.extractSolutionFromSameMessage(message.content),
        files_affected: this.extractFilesFromMessage(message),
        timestamp: message.timestamp,
      });
    }
    return mistakes;
  }
  /**
   * Classify the type of mistake from its text, or return null when no
   * MISTAKE_PATTERNS entry matches (callers supply a per-source default).
   */
  private classifyMistakeType(
    text: string
  ):
    | "logic_error"
    | "wrong_approach"
    | "misunderstanding"
    | "syntax_error"
    | null {
    for (const [type, patterns] of Object.entries(this.MISTAKE_PATTERNS)) {
      for (const pattern of patterns) {
        if (pattern.test(text)) {
          return type as "logic_error" | "wrong_approach" | "misunderstanding" | "syntax_error";
        }
      }
    }
    return null;
  }
  /**
   * Summarize error message - find the most descriptive error line.
   * Falls back to the first non-"exit code N" line; lines are capped at
   * 300 characters.
   */
  private summarizeError(errorText: string): string {
    const lines = errorText.split("\n").map(l => l.trim()).filter(l => l.length > 0);
    if (lines.length === 0) {return "Unknown error";}
    // Patterns that indicate a descriptive error (prioritize these)
    const descriptivePatterns = [
      /permission denied/i,
      /host key verification failed/i,
      /connection refused/i,
      /no such file or directory/i,
      /command not found/i,
      /authentication failed/i,
      /timeout/i,
      /could not resolve/i,
      /network is unreachable/i,
      /operation not permitted/i,
      /access denied/i,
      /invalid argument/i,
      /no space left/i,
      /disk quota exceeded/i,
      /broken pipe/i,
      /connection reset/i,
      /refused|denied|failed|error:|fatal:/i,
    ];
    // Find the first line matching a descriptive pattern
    for (const line of lines) {
      for (const pattern of descriptivePatterns) {
        if (pattern.test(line)) {
          return line.length > 300 ? line.substring(0, 300) + "..." : line;
        }
      }
    }
    // Skip generic "exit code" lines if there's more content
    const nonExitCodeLines = lines.filter(l => !/^exit\s*code\s*\d+$/i.test(l));
    const bestLine = nonExitCodeLines.length > 0 ? nonExitCodeLines[0] : lines[0];
    return bestLine.length > 300 ? bestLine.substring(0, 300) + "..." : bestLine;
  }
  /**
   * Find a correction in subsequent messages: scans up to 4 messages after
   * the error for an assistant message containing a fix indicator and
   * returns its first 500 characters.
   */
  private findCorrection(errorMessage: Message, allMessages: Message[]): string | undefined {
    const index = allMessages.findIndex((m) => m.id === errorMessage.id);
    if (index === -1) {return undefined;}
    // Look at next few messages for a fix
    const nextMessages = allMessages.slice(index + 1, index + 5);
    for (const msg of nextMessages) {
      if (msg.role === "assistant" && msg.content) {
        // Look for fix indicators
        if (
          /(?:fixed|resolved|corrected|solved)/i.test(msg.content)
        ) {
          return msg.content.substring(0, 500);
        }
      }
    }
    return undefined;
  }
  /**
   * Extract file paths mentioned in error text (deduplicated).
   */
  private extractFilesFromError(errorText: string): string[] {
    const files: string[] = [];
    // Common file path patterns
    const filePathPattern = /(?:\/|\.\/|\.\.\/)?(?:[\w-]+\/)*[\w-]+\.[\w]+/g;
    const matches = errorText.match(filePathPattern);
    if (matches) {
      files.push(...matches);
    }
    return [...new Set(files)];
  }
  /**
   * Extract absolute file paths from message metadata (scanned as JSON text).
   */
  private extractFilesFromMessage(message: Message): string[] {
    const files: string[] = [];
    if (message.metadata) {
      const metadataStr = JSON.stringify(message.metadata);
      const filePathPattern = /(?:\/[\w-]+)+\.[\w]+/g;
      const matches = metadataStr.match(filePathPattern);
      if (matches) {
        files.push(...matches);
      }
    }
    return [...new Set(files)];
  }
  /**
   * Find the nearest assistant message before currentIndex, if any.
   */
  private findPreviousAssistantMessage(
    messages: Message[],
    currentIndex: number
  ): Message | undefined {
    for (let i = currentIndex - 1; i >= 0; i--) {
      if (messages[i].role === "assistant") {
        return messages[i];
      }
    }
    return undefined;
  }
  /**
   * Extract what went wrong from a correction message: strips the
   * correction-indicator phrasing and returns the first sentence (or the
   * first 300 characters when no sentence boundary exists).
   */
  private extractWhatWentWrong(correctionText: string): string {
    // Remove correction indicators
    let cleaned = correctionText;
    for (const pattern of this.CORRECTION_INDICATORS) {
      cleaned = cleaned.replace(pattern, "");
    }
    // Take first sentence or up to 300 characters
    const sentences = cleaned.split(/\.|!|\?/);
    const firstSentence = sentences[0]?.trim();
    return firstSentence && firstSentence.length > 0
      ? firstSentence.substring(0, 300)
      : cleaned.substring(0, 300);
  }
  /**
   * Extract the suggested correction from a user message by looking for
   * "instead ..." or "should (have) ..." phrasing.
   */
  private extractCorrection(correctionText: string): string | undefined {
    // Look for "instead" or "should" patterns
    const insteadMatch = correctionText.match(/instead[,\s]+(.+?)(?:\.|$)/i);
    if (insteadMatch) {
      return insteadMatch[1].trim();
    }
    const shouldMatch = correctionText.match(/should\s+(?:have\s+)?(.+?)(?:\.|$)/i);
    if (shouldMatch) {
      return shouldMatch[1].trim();
    }
    return undefined;
  }
  /**
   * Extract the first sentence of a message that matches an error indicator.
   */
  private extractErrorDiscussion(content: string): string | undefined {
    // Find sentences containing error indicators
    const sentences = content.split(/\.|!|\?/);
    for (const sentence of sentences) {
      if (this.ERROR_INDICATORS.some((pattern) => pattern.test(sentence))) {
        return sentence.trim();
      }
    }
    return undefined;
  }
  /**
   * Extract a solution phrase ("to fix ...", "fixed by ...") from the same
   * message that discusses the error.
   */
  private extractSolutionFromSameMessage(content: string): string | undefined {
    // Look for solution indicators
    const solutionPattern = /(?:to fix|solution|resolved by|corrected by|fixed by)\s+(.+?)(?:\.|$)/i;
    const match = content.match(solutionPattern);
    return match?.[1]?.trim();
  }
  /**
   * Deduplicate similar mistakes. First occurrence wins.
   */
  private deduplicateMistakes(mistakes: Mistake[]): Mistake[] {
    const unique: Mistake[] = [];
    const seen = new Set<string>();
    for (const mistake of mistakes) {
      // Create signature including message_id to avoid collisions
      // between different mistakes with similar text in the same conversation
      const textPrefix = mistake.what_went_wrong.substring(0, 100);
      const signature = `${mistake.message_id}_${textPrefix}_${mistake.timestamp}`;
      if (!seen.has(signature)) {
        seen.add(signature);
        unique.push(mistake);
      }
    }
    return unique;
  }
  /**
   * Score mistake severity (for prioritization).
   *
   * Combines evidence weight (correction found, explicit user correction,
   * affected files) with a base score per mistake type.
   */
  scoreMistakeSeverity(mistake: Mistake): number {
    let score = 0;
    // Has correction
    if (mistake.correction) {score += 2;}
    // User explicitly corrected
    if (mistake.user_correction_message) {score += 3;}
    // Affects files
    if (mistake.files_affected.length > 0) {score += 2;}
    // Type-based severity
    switch (mistake.mistake_type) {
      case "logic_error":
        score += 3;
        break;
      case "wrong_approach":
        score += 2;
        break;
      // BUGFIX: "misunderstanding" previously had no case, so such mistakes
      // got a base score of 0; error-discussion findings classified as
      // misunderstanding with no correction and no affected files fell below
      // MIN_SEVERITY_SCORE and were silently dropped — contradicting the
      // "Score 1 keeps any classified mistake" intent documented above.
      case "misunderstanding":
        score += 2;
        break;
      case "syntax_error":
        score += 1;
        break;
      case "tool_error":
        score += 1;
        break;
    }
    return score;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/RequirementsExtractor.ts | TypeScript | /**
* Requirements and Validations Extractor - Tracks constraints, dependencies, and testing context.
*
* This extractor analyzes conversation messages and tool executions to identify:
* - Requirements (dependencies, performance, compatibility, business constraints)
* - Validations (test runs, results, and performance data)
*
* Helps document:
* - What dependencies are required and why
* - Performance requirements and constraints
* - Compatibility requirements (versions, platforms)
* - Business rules and limitations
* - Test executions and their results
*
* @example
* ```typescript
* const extractor = new RequirementsExtractor();
* const requirements = extractor.extractRequirements(messages);
* const validations = extractor.extractValidations(toolUses, toolResults, messages);
*
* console.log(`Found ${requirements.length} requirements`);
* console.log(`Found ${validations.length} test validations`);
* ```
*/
import { nanoid } from "nanoid";
import type { Message, ToolUse, ToolResult } from "./ConversationParser.js";
/**
 * Represents a requirement or constraint for the system.
 *
 * Produced by {@link RequirementsExtractor.extractRequirements} via pattern
 * matching against conversation messages.
 */
export interface Requirement {
  /** Unique requirement identifier (generated with nanoid) */
  id: string;
  /** Category of requirement */
  type: "dependency" | "performance" | "compatibility" | "business";
  /** Description of the requirement (text captured by the matching pattern) */
  description: string;
  /** Why this requirement exists, when a "because/since/for" clause was found */
  rationale?: string;
  /** Components affected by this requirement (keyword-based, e.g. "frontend", "api") */
  affects_components: string[];
  /** Conversation where requirement was discussed */
  conversation_id: string;
  /** Message containing the requirement */
  message_id: string;
  /** When the requirement was documented (copied from the source message timestamp) */
  timestamp: number;
}
/**
 * Represents a test validation or verification.
 *
 * Produced by {@link RequirementsExtractor.extractValidations} from Bash
 * tool invocations that match known test-runner commands.
 */
export interface Validation {
  /** Unique validation identifier (generated with nanoid) */
  id: string;
  /** Conversation where test was run */
  conversation_id: string;
  /** Description of what was tested (test file, suite name, or the raw command) */
  what_was_tested: string;
  /** Command used to run the test */
  test_command?: string;
  /** Test result status; "error" means the tool invocation itself failed */
  result: "passed" | "failed" | "error";
  /** Performance metrics parsed from output (keys: duration_ms, tests_passed, tests_failed) */
  performance_data?: Record<string, unknown>;
  /** Files that were tested (parsed from PASS/FAIL lines in the output) */
  files_tested: string[];
  /** When the test was run (copied from the tool-use timestamp) */
  timestamp: number;
}
/**
 * Extracts requirements and validations from conversation history.
 *
 * Analyzes messages for requirement patterns and tool executions for test results.
 */
export class RequirementsExtractor {
  // Requirement indicators, grouped by Requirement["type"]
  private readonly REQUIREMENT_PATTERNS = {
    dependency: [
      /(?:need|require|must use|depends on)\s+(.+?)\s+(?:library|package|module|dependency)/gi,
      /(?:install|add)\s+(.+?)\s+(?:for|to)/gi,
    ],
    performance: [
      /(?:must|should|need to)\s+(?:be|run)\s+(?:faster|slower|within|under)\s+(.+)/gi,
      /response time\s+(?:must|should)\s+(?:be\s+)?(?:under|less than|within)\s+(.+)/gi,
      /(?:latency|throughput|performance)\s+requirement:\s*(.+)/gi,
    ],
    compatibility: [
      /(?:must|should|need to)\s+(?:support|work with|be compatible with)\s+(.+)/gi,
      /(?:requires?|needs?)\s+(.+?)\s+(?:version|or higher|or later)/gi,
    ],
    business: [
      /business requirement:\s*(.+)/gi,
      /(?:must|cannot|can't)\s+(?:exceed|violate|break)\s+(.+)/gi,
      /(?:constraint|limitation):\s*(.+)/gi,
    ],
  };
  // Test/validation indicators — commands recognized as test runs
  private readonly TEST_PATTERNS = [
    /(?:npm|yarn|pnpm)\s+test/,
    /(?:npm|yarn|pnpm)\s+run\s+test/,
    /pytest/,
    /jest/,
    /mocha/,
    /cargo\s+test/,
    /go\s+test/,
  ];
  /**
   * Get combined output from tool result (stdout + stderr + content).
   * Test output may go to either stdout or stderr depending on the tool.
   */
  private getToolOutput(result: ToolResult): string {
    const parts: string[] = [];
    if (result.stdout) {parts.push(result.stdout);}
    if (result.stderr) {parts.push(result.stderr);}
    if (result.content) {parts.push(result.content);}
    return parts.join("\n");
  }
  /**
   * Extract requirements from conversation messages.
   *
   * Analyzes messages using pattern matching to identify four types of requirements:
   * - Dependency: Required libraries, packages, modules
   * - Performance: Speed, latency, throughput constraints
   * - Compatibility: Version requirements, platform support
   * - Business: Business rules, limitations, constraints
   *
   * @param messages - Array of conversation messages to analyze
   * @returns Array of extracted Requirement objects, deduplicated by
   *   type + description prefix
   *
   * @example
   * ```typescript
   * const extractor = new RequirementsExtractor();
   * const requirements = extractor.extractRequirements(messages);
   *
   * // Find all dependency requirements
   * const deps = requirements.filter(r => r.type === 'dependency');
   * deps.forEach(d => console.log(`${d.description} - ${d.rationale}`));
   * ```
   */
  extractRequirements(messages: Message[]): Requirement[] {
    const requirements: Requirement[] = [];
    for (const message of messages) {
      if (!message.content) {continue;}
      // Check each requirement type
      for (const [type, patterns] of Object.entries(
        this.REQUIREMENT_PATTERNS
      )) {
        for (const pattern of patterns) {
          const matches = Array.from(message.content.matchAll(pattern));
          for (const match of matches) {
            const requirement = this.parseRequirement(
              type as Requirement["type"],
              match,
              message
            );
            if (requirement) {
              requirements.push(requirement);
            }
          }
        }
      }
    }
    return this.deduplicateRequirements(requirements);
  }
  /**
   * Extract validations from tool executions.
   *
   * Analyzes Bash tool uses to identify test command executions and their results.
   * Captures test runs including pass/fail status, performance data, and files tested.
   *
   * Recognized test commands:
   * - npm/yarn/pnpm test
   * - pytest
   * - jest/mocha
   * - cargo test (Rust)
   * - go test (Go)
   *
   * @param toolUses - Array of tool invocations
   * @param toolResults - Array of tool execution results
   * @param messages - Array of conversation messages for context
   * @returns Array of extracted Validation objects
   *
   * @example
   * ```typescript
   * const extractor = new RequirementsExtractor();
   * const validations = extractor.extractValidations(toolUses, toolResults, messages);
   *
   * // Find failed tests
   * const failures = validations.filter(v => v.result === 'failed');
   * console.log(`${failures.length} test failures found`);
   * ```
   */
  extractValidations(
    toolUses: ToolUse[],
    toolResults: ToolResult[],
    messages: Message[]
  ): Validation[] {
    const validations: Validation[] = [];
    for (const toolUse of toolUses) {
      // Only Bash invocations can be test commands
      if (toolUse.tool_name !== "Bash") {
        continue;
      }
      const command = toolUse.tool_input.command;
      if (!command || typeof command !== "string") {
        continue;
      }
      const isTest = this.TEST_PATTERNS.some((pattern) =>
        pattern.test(command)
      );
      if (!isTest) {
        continue;
      }
      // Find corresponding result and owning message; skip when either is missing
      const result = toolResults.find((r) => r.tool_use_id === toolUse.id);
      const message = messages.find((m) => m.id === toolUse.message_id);
      if (result && message) {
        validations.push({
          id: nanoid(),
          conversation_id: message.conversation_id,
          what_was_tested: this.extractWhatWasTested(command, result),
          test_command: command,
          result: this.determineTestResult(result),
          performance_data: this.extractPerformanceData(result),
          files_tested: this.extractTestedFiles(result),
          timestamp: toolUse.timestamp,
        });
      }
    }
    return validations;
  }
  /**
   * Parse a requirement from a pattern match.
   *
   * @param type - Requirement category the matching pattern belongs to
   * @param match - Regex match; capture group 1 is the description
   * @param message - Message the match came from
   * @returns A Requirement, or null when the capture is empty
   */
  private parseRequirement(
    type: Requirement["type"],
    match: RegExpMatchArray,
    message: Message
  ): Requirement | null {
    const description = match[1]?.trim();
    if (!description) {
      return null;
    }
    if (!message.content) {
      return null;
    }
    // Extract rationale from message context
    const rationale = this.extractRationale(match[0], message.content);
    // Extract affected components
    const components = this.extractAffectedComponents(message);
    return {
      id: nanoid(),
      type,
      description,
      rationale,
      affects_components: components,
      conversation_id: message.conversation_id,
      message_id: message.id,
      timestamp: message.timestamp,
    };
  }
  /**
   * Extract rationale ("because/since/for/due to ...") from the requirement
   * text itself, falling back to the surrounding message content.
   */
  private extractRationale(
    requirementText: string,
    fullContent: string
  ): string | undefined {
    // Look for "because", "since", "for" explanations
    const rationaleMatch = requirementText.match(
      /(?:because|since|for|due to)\s+(.+?)(?:\.|$)/i
    );
    if (rationaleMatch) {
      return rationaleMatch[1].trim();
    }
    // Look in surrounding context; the requirement text is regex-escaped
    // before being embedded in the lookup pattern
    const contextMatch = fullContent.match(
      new RegExp(
        `${requirementText.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}[^.]*?(?:because|since|for|due to)\\s+(.+?)(?:\\.|$)`,
        "i"
      )
    );
    return contextMatch?.[1]?.trim();
  }
  /**
   * Extract affected components by scanning message content for a fixed
   * list of component keywords.
   */
  private extractAffectedComponents(message: Message): string[] {
    const components: string[] = [];
    // Common component keywords
    const componentKeywords = [
      "frontend",
      "backend",
      "api",
      "database",
      "auth",
      "ui",
      "server",
      "client",
    ];
    const content = message.content?.toLowerCase() || "";
    for (const keyword of componentKeywords) {
      if (content.includes(keyword)) {
        components.push(keyword);
      }
    }
    return [...new Set(components)];
  }
  /**
   * Extract what was tested from the command (test file path) or the
   * result output (suite name), falling back to the raw command.
   */
  private extractWhatWasTested(
    command: string,
    result: ToolResult
  ): string {
    // Try to extract test file/suite name
    const fileMatch = command.match(/test[/\\](.+?)(?:\s|$)/);
    if (fileMatch) {
      return fileMatch[1];
    }
    // Try to extract from result (check all output sources)
    const resultContent = this.getToolOutput(result);
    const suiteMatch = resultContent.match(/(?:Test Suite|Describe):\s*(.+)/i);
    if (suiteMatch) {
      return suiteMatch[1].trim();
    }
    // Fallback to command
    return command;
  }
  /**
   * Determine test result from tool result output.
   *
   * BUGFIX: failure indicators are now checked BEFORE pass indicators. The
   * previous order classified any output containing a "✓" mark as "passed",
   * even when the same run also reported failures (runners print per-test
   * check marks alongside failure summaries). Failure counts require a
   * non-zero leading digit so "0 failed" summaries don't misclassify a
   * green run.
   */
  private determineTestResult(
    result: ToolResult
  ): "passed" | "failed" | "error" {
    if (result.is_error) {
      return "error";
    }
    const output = this.getToolOutput(result).toLowerCase();
    // Check for fail indicators first
    if (
      /(?:test failed|✗|✘)/i.test(output) ||
      /[1-9]\d*\s+fail(?:ed|ures?|ing)/i.test(output)
    ) {
      return "failed";
    }
    // Check for pass indicators
    if (
      /(?:all tests? passed|✓|✔|success)/i.test(output) ||
      /\d+\s+passed/i.test(output)
    ) {
      return "passed";
    }
    // No recognizable marker and the tool reported no error: assume passed.
    // (The original ended with `result.is_error ? "error" : "passed"`, whose
    // error arm was unreachable — is_error is handled at the top.)
    return "passed";
  }
  /**
   * Extract performance data (duration and pass/fail counts) from test output.
   *
   * @returns An object with any of duration_ms / tests_passed / tests_failed,
   *   or undefined when nothing was detected
   */
  private extractPerformanceData(
    result: ToolResult
  ): Record<string, unknown> | undefined {
    const output = this.getToolOutput(result);
    const data: Record<string, unknown> = {};
    // Extract timing information; seconds are normalized to milliseconds
    const timeMatch = output.match(/(\d+(?:\.\d+)?)\s*(ms|s|seconds?|milliseconds?)/i);
    if (timeMatch) {
      const value = parseFloat(timeMatch[1]);
      const unit = timeMatch[2].toLowerCase();
      data.duration_ms = unit.startsWith("s") ? value * 1000 : value;
    }
    // Extract test counts (explicit radix to avoid legacy octal parsing)
    const passedMatch = output.match(/(\d+)\s+passed/i);
    if (passedMatch) {
      data.tests_passed = parseInt(passedMatch[1], 10);
    }
    const failedMatch = output.match(/(\d+)\s+failed/i);
    if (failedMatch) {
      data.tests_failed = parseInt(failedMatch[1], 10);
    }
    return Object.keys(data).length > 0 ? data : undefined;
  }
  /**
   * Extract test files from PASS/FAIL/ERROR lines in the output (deduplicated).
   */
  private extractTestedFiles(result: ToolResult): string[] {
    const output = this.getToolOutput(result);
    const files: string[] = [];
    // Look for file paths
    const filePattern = /(?:PASS|FAIL|ERROR)\s+([\w/.-]+\.(?:test|spec)\.[\w]+)/gi;
    const matches = Array.from(output.matchAll(filePattern));
    for (const match of matches) {
      files.push(match[1]);
    }
    return [...new Set(files)];
  }
  /**
   * Deduplicate similar requirements by type + first 50 chars of description.
   * First occurrence wins.
   */
  private deduplicateRequirements(requirements: Requirement[]): Requirement[] {
    const unique: Requirement[] = [];
    const seen = new Set<string>();
    for (const req of requirements) {
      const signature = `${req.type}_${req.description.substring(0, 50)}`;
      if (!seen.has(signature)) {
        seen.add(signature);
        unique.push(req);
      }
    }
    return unique;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/ResearchExtractor.ts | TypeScript | /**
* Research Extractor - Identifies discoveries and findings from conversations.
*
* This extractor analyzes conversation history to identify research activities
* and their discoveries. It captures:
* - What was being researched
* - Discovery / finding
* - Source of the discovery (code, docs, web, experimentation)
* - Relevance to the current problem
* - Confidence level
*
* @example
* ```typescript
* const extractor = new ResearchExtractor();
* const findings = extractor.extractFindings(messages, toolUses, toolResults);
* findings.forEach(f => {
* console.log(`Topic: ${f.topic}`);
* console.log(`Discovery: ${f.discovery}`);
* console.log(`Source: ${f.source_type}`);
* });
* ```
*/
import { nanoid } from "nanoid";
import type { Message, ToolUse, ToolResult } from "./ConversationParser.js";
/**
 * Represents a research finding extracted from conversation history.
 *
 * Produced by {@link ResearchExtractor} from assistant messages that match
 * discovery-indicator patterns.
 */
export interface ResearchFinding {
  /** Unique finding identifier (generated with nanoid) */
  id: string;
  /** Conversation where this finding was made */
  conversation_id: string;
  /** Message containing the finding */
  message_id: string;
  /** Topic being researched */
  topic: string;
  /** The actual discovery/finding */
  discovery: string;
  /** Type of source the discovery came from */
  source_type: "code" | "documentation" | "web" | "experimentation" | "user_input";
  /** Specific source reference (file path, URL, etc.) */
  source_reference?: string;
  /** How relevant this is to the problem */
  relevance: "high" | "medium" | "low";
  /** Confidence in this finding */
  confidence: "verified" | "likely" | "uncertain";
  /** Related files or components */
  related_to: string[];
  /** When the finding was made (presumably the source message timestamp — extractor body not in view; TODO confirm) */
  timestamp: number;
}
/**
 * Extracts research findings from conversation history.
 *
 * Purely heuristic: regexes locate "discovery" statements in assistant
 * messages, then the surrounding text and associated tool uses are scanned
 * to classify topic, source type, relevance, and confidence. Results are
 * best-effort pattern matches, not semantic analysis.
 */
export class ResearchExtractor {
  // Noise patterns to filter out (session-continuation boilerplate that
  // appears when a conversation is resumed or summarized).
  private readonly NOISE_PATTERNS = [
    /this session is being continued/i,
    /conversation is summarized below/i,
    /previous conversation that ran out of context/i,
  ];
  // Discovery indicator patterns. Each has exactly one capture group:
  // the discovery text up to the next period or end of input.
  private readonly DISCOVERY_PATTERNS = [
    /(?:I (?:found|discovered|noticed|see) that)\s+(.+?)(?:\.|$)/i,
    /(?:it (?:appears|seems|looks like))\s+(.+?)(?:\.|$)/i,
    /(?:the (?:code|file|function|class|module) (?:shows|indicates|reveals))\s+(.+?)(?:\.|$)/i,
    /(?:according to (?:the|this))\s+(.+?)(?:\.|$)/i,
    /(?:based on (?:my|the) (?:analysis|reading|exploration))[,\s]+(.+?)(?:\.|$)/i,
    /(?:this (?:means|indicates|suggests|shows))\s+(.+?)(?:\.|$)/i,
    /(?:the (?:issue|problem|error) is)\s+(.+?)(?:\.|$)/i,
    /(?:(?:here's|here is) what I (?:found|learned))[:\s]+(.+?)(?:\.|$)/i,
  ];
  // Source type patterns, consulted only when no tool-use evidence is
  // available (see identifySourceType).
  private readonly SOURCE_PATTERNS: Record<ResearchFinding["source_type"], RegExp[]> = {
    code: [
      /(?:in|from) the (?:code|file|source|implementation)/i,
      /(?:looking at|reading|examining) (?:the )?[\w./]+\.\w+/i,
      /(?:the function|class|method|variable)/i,
    ],
    documentation: [
      /(?:according to|from|in) (?:the )?(?:docs|documentation|readme|guide)/i,
      /(?:the documentation (?:says|shows|indicates))/i,
      /(?:as (?:documented|specified|described) in)/i,
    ],
    web: [
      /(?:according to|from|based on) (?:the )?(?:web|online|internet)/i,
      /(?:searching|searched|googled|looked up)/i,
      /https?:\/\//i,
    ],
    experimentation: [
      /(?:testing|tried|tested|experimented)/i,
      /(?:running|ran) (?:the )?(?:code|test|command)/i,
      /(?:the (?:result|output) (?:shows|is))/i,
    ],
    user_input: [
      /(?:you (?:said|mentioned|asked|told me))/i,
      /(?:based on (?:your|the user's) (?:input|request|question))/i,
    ],
  };
  // Topic extraction patterns; one capture group with the topic text.
  private readonly TOPIC_PATTERNS = [
    /(?:looking (?:at|into)|investigating|researching|exploring)\s+(.+?)(?:\.|,|$)/i,
    /(?:understanding|learning about|figuring out)\s+(.+?)(?:\.|,|$)/i,
    /(?:how (?:to|does)|what is|why does)\s+(.+?)(?:\?|$)/i,
  ];
  // Relevance indicators. Only high and low are checked explicitly;
  // "medium" is the fall-through default (see determineRelevance).
  private readonly RELEVANCE_INDICATORS = {
    high: [
      /(?:this is (?:critical|crucial|essential|important|key))/i,
      /(?:(?:directly|exactly) (?:what|how|why))/i,
      /(?:the (?:root cause|main issue|solution|answer))/i,
    ],
    medium: [
      /(?:(?:related|relevant) to)/i,
      /(?:this (?:helps|explains|clarifies))/i,
      /(?:might|could|may) (?:be|help)/i,
    ],
    low: [
      /(?:for (?:reference|future|later))/i,
      /(?:not (?:directly|immediately) (?:relevant|needed))/i,
      /(?:tangentially|incidentally)/i,
    ],
  };
  // Confidence indicators. Only verified and uncertain are checked
  // explicitly; "likely" is the fall-through default (see determineConfidence).
  private readonly CONFIDENCE_INDICATORS = {
    verified: [
      /(?:confirmed|verified|proven|definitely|certainly)/i,
      /(?:this is correct|I'm certain|no doubt)/i,
      /(?:tests? (?:pass|confirm)|output shows)/i,
    ],
    likely: [
      /(?:likely|probably|most likely|appears to be)/i,
      /(?:seems|looks like|I (?:think|believe))/i,
    ],
    uncertain: [
      /(?:might|could|may|possibly|perhaps)/i,
      /(?:I'm not sure|unclear|uncertain)/i,
      /(?:need(?:s)? to (?:verify|confirm|check))/i,
    ],
  };
  /**
   * Extract research findings from conversation history.
   *
   * Only assistant messages with content are scanned; noise messages
   * (session-continuation boilerplate) are dropped first, and near-duplicate
   * findings from the same message are collapsed at the end.
   *
   * @param messages - Array of conversation messages
   * @param toolUses - Array of tool uses
   * @param toolResults - Array of tool results
   * @returns Array of extracted ResearchFinding objects
   */
  extractFindings(
    messages: Message[],
    toolUses: ToolUse[],
    toolResults: ToolResult[]
  ): ResearchFinding[] {
    const findings: ResearchFinding[] = [];
    // Filter out noise
    const cleanMessages = messages.filter(
      (m) => !this.isNoiseContent(m.content || "")
    );
    // Create lookup maps
    const toolUsesByMessage = this.groupToolUsesByMessage(toolUses);
    const toolResultsByUse = this.mapToolResultsByUse(toolResults);
    for (const message of cleanMessages) {
      if (message.role !== "assistant" || !message.content) {
        continue;
      }
      // Extract findings from assistant messages
      const messageFindings = this.extractFromMessage(
        message,
        toolUsesByMessage.get(message.id) || [],
        toolResultsByUse
      );
      findings.push(...messageFindings);
    }
    // Deduplicate similar findings
    return this.deduplicateFindings(findings);
  }
  /**
   * Check if content is noise that should be filtered out.
   * Only the first 500 characters are examined, since the continuation
   * banners appear at the top of a message.
   */
  private isNoiseContent(content: string): boolean {
    const firstChunk = content.substring(0, 500);
    return this.NOISE_PATTERNS.some((pattern) => pattern.test(firstChunk));
  }
  /**
   * Group tool uses by message ID.
   */
  private groupToolUsesByMessage(toolUses: ToolUse[]): Map<string, ToolUse[]> {
    const groups = new Map<string, ToolUse[]>();
    for (const toolUse of toolUses) {
      const existing = groups.get(toolUse.message_id);
      if (existing) {
        existing.push(toolUse);
      } else {
        groups.set(toolUse.message_id, [toolUse]);
      }
    }
    return groups;
  }
  /**
   * Map tool results by tool use ID.
   */
  private mapToolResultsByUse(toolResults: ToolResult[]): Map<string, ToolResult> {
    const resultMap = new Map<string, ToolResult>();
    for (const result of toolResults) {
      resultMap.set(result.tool_use_id, result);
    }
    return resultMap;
  }
  /**
   * Extract findings from a single message.
   *
   * Each discovery pattern is re-wrapped with the "g" flag because
   * matchAll requires a global regex. Discoveries shorter than 20
   * characters are discarded as too trivial to record.
   */
  private extractFromMessage(
    message: Message,
    messageToolUses: ToolUse[],
    toolResultsByUse: Map<string, ToolResult>
  ): ResearchFinding[] {
    const findings: ResearchFinding[] = [];
    const content = message.content || "";
    // Find discovery statements
    for (const pattern of this.DISCOVERY_PATTERNS) {
      const matches = content.matchAll(new RegExp(pattern, "gi"));
      for (const match of matches) {
        const discovery = match[1]?.trim();
        if (!discovery || discovery.length < 20) {
          continue;
        }
        // Extract context around the discovery
        const context = this.getContextAround(content, match.index || 0);
        // Identify topic
        const topic = this.identifyTopic(context, message);
        // Identify source type
        const sourceType = this.identifySourceType(
          context,
          messageToolUses,
          toolResultsByUse
        );
        // Get source reference
        const sourceReference = this.extractSourceReference(
          context,
          messageToolUses
        );
        // Determine relevance
        const relevance = this.determineRelevance(context);
        // Determine confidence
        const confidence = this.determineConfidence(context);
        // Extract related files/components
        const relatedTo = this.extractRelatedItems(context, messageToolUses);
        findings.push({
          id: nanoid(),
          conversation_id: message.conversation_id,
          message_id: message.id,
          topic,
          // Discovery text is capped at 500 chars for storage.
          discovery: discovery.substring(0, 500),
          source_type: sourceType,
          source_reference: sourceReference,
          relevance,
          confidence,
          related_to: relatedTo,
          timestamp: message.timestamp,
        });
      }
    }
    return findings;
  }
  /**
   * Get context around a match position: up to 200 characters before and
   * 400 characters after, clamped to the content bounds.
   */
  private getContextAround(content: string, position: number): string {
    const start = Math.max(0, position - 200);
    const end = Math.min(content.length, position + 400);
    return content.substring(start, end);
  }
  /**
   * Identify the topic being researched. Falls back to the context's first
   * sentence, then to the literal "General exploration".
   */
  private identifyTopic(context: string, _message: Message): string {
    // Try topic patterns
    for (const pattern of this.TOPIC_PATTERNS) {
      const match = context.match(pattern);
      if (match && match[1]) {
        return match[1].trim().substring(0, 100);
      }
    }
    // Fall back to extracting from first sentence
    const firstSentence = context.split(/[.!?]/)[0]?.trim();
    return firstSentence?.substring(0, 100) || "General exploration";
  }
  /**
   * Identify the source type of the finding.
   *
   * Tool-use evidence takes precedence over textual patterns: the first
   * tool in the list that maps to a source type wins. When neither tools
   * nor patterns match, "code" is returned as the default.
   */
  private identifySourceType(
    context: string,
    toolUses: ToolUse[],
    _toolResultsByUse: Map<string, ToolResult>
  ): ResearchFinding["source_type"] {
    // Check tool uses first
    for (const toolUse of toolUses) {
      const toolName = toolUse.tool_name;
      if (["Read", "Glob", "Grep"].includes(toolName)) {
        return "code";
      }
      if (toolName === "WebFetch") {
        return "documentation";
      }
      if (toolName === "WebSearch") {
        return "web";
      }
      if (toolName === "Bash") {
        return "experimentation";
      }
    }
    // Check content patterns
    for (const [sourceType, patterns] of Object.entries(this.SOURCE_PATTERNS)) {
      for (const pattern of patterns) {
        if (pattern.test(context)) {
          return sourceType as ResearchFinding["source_type"];
        }
      }
    }
    return "code"; // Default
  }
  /**
   * Extract source reference (file path, URL, etc.).
   *
   * Priority order: tool input `file_path`, then `url`, then `path`, then
   * the first absolute file path found in the context, then the first URL.
   */
  private extractSourceReference(
    context: string,
    toolUses: ToolUse[]
  ): string | undefined {
    // Check tool uses for file paths or URLs
    for (const toolUse of toolUses) {
      const input = toolUse.tool_input || {};
      if (input.file_path && typeof input.file_path === "string") {
        return input.file_path;
      }
      if (input.url && typeof input.url === "string") {
        return input.url;
      }
      if (input.path && typeof input.path === "string") {
        return input.path;
      }
    }
    // Look for file paths in context
    const fileMatch = context.match(/(?:\/[\w.-]+)+\.[\w]+/);
    if (fileMatch) {
      return fileMatch[0];
    }
    // Look for URLs in context
    const urlMatch = context.match(/https?:\/\/[^\s)]+/);
    if (urlMatch) {
      return urlMatch[0];
    }
    return undefined;
  }
  /**
   * Determine relevance of the finding.
   * High indicators win over low; anything else is "medium".
   */
  private determineRelevance(context: string): ResearchFinding["relevance"] {
    for (const pattern of this.RELEVANCE_INDICATORS.high) {
      if (pattern.test(context)) {
        return "high";
      }
    }
    for (const pattern of this.RELEVANCE_INDICATORS.low) {
      if (pattern.test(context)) {
        return "low";
      }
    }
    return "medium"; // Default
  }
  /**
   * Determine confidence level of the finding.
   * Verified indicators win over uncertain; anything else is "likely".
   */
  private determineConfidence(context: string): ResearchFinding["confidence"] {
    for (const pattern of this.CONFIDENCE_INDICATORS.verified) {
      if (pattern.test(context)) {
        return "verified";
      }
    }
    for (const pattern of this.CONFIDENCE_INDICATORS.uncertain) {
      if (pattern.test(context)) {
        return "uncertain";
      }
    }
    return "likely"; // Default
  }
  /**
   * Extract related files or components: file paths from the message's
   * tool inputs plus any absolute file paths mentioned in the context.
   */
  private extractRelatedItems(
    context: string,
    toolUses: ToolUse[]
  ): string[] {
    const items = new Set<string>();
    // Extract from tool uses
    for (const toolUse of toolUses) {
      const input = toolUse.tool_input || {};
      if (input.file_path && typeof input.file_path === "string") {
        items.add(input.file_path);
      }
      if (input.path && typeof input.path === "string") {
        items.add(input.path);
      }
    }
    // Extract file paths from context
    const filePattern = /(?:\/[\w.-]+)+\.[\w]+/g;
    const matches = context.match(filePattern);
    if (matches) {
      for (const match of matches) {
        items.add(match);
      }
    }
    return Array.from(items);
  }
  /**
   * Deduplicate similar findings. Two findings are considered duplicates
   * when they come from the same message and share the same (lowercased)
   * first 100 characters of discovery text; the first occurrence wins.
   */
  private deduplicateFindings(findings: ResearchFinding[]): ResearchFinding[] {
    const unique: ResearchFinding[] = [];
    const seen = new Set<string>();
    for (const finding of findings) {
      // Create signature
      const discoveryPrefix = finding.discovery.substring(0, 100).toLowerCase();
      const signature = `${finding.message_id}_${discoveryPrefix}`;
      if (!seen.has(signature)) {
        seen.add(signature);
        unique.push(finding);
      }
    }
    return unique;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/parsers/SolutionPatternExtractor.ts | TypeScript | /**
* Solution Pattern Extractor - Identifies reusable solution patterns from conversations.
*
* This extractor analyzes conversation history to identify successful solutions
* that can be reused for similar problems. It captures:
* - Problem type/category
* - Solution approach
* - Code pattern or technique used
* - Prerequisites/dependencies
* - When to apply this solution
* - When NOT to apply this solution
*
* @example
* ```typescript
* const extractor = new SolutionPatternExtractor();
* const patterns = extractor.extractPatterns(messages, toolUses, toolResults);
* patterns.forEach(p => {
* console.log(`Problem: ${p.problem_category}`);
* console.log(`Solution: ${p.solution_summary}`);
* console.log(`Applies when: ${p.applies_when}`);
* });
* ```
*/
import { nanoid } from "nanoid";
import type { Message, ToolUse, ToolResult } from "./ConversationParser.js";
/**
 * Represents a reusable solution pattern extracted from conversation history.
 *
 * Produced by SolutionPatternExtractor; all fields are heuristic regex
 * extractions from message text and tool-use records.
 */
export interface SolutionPattern {
  /** Unique pattern identifier (nanoid) */
  id: string;
  /** Conversation where this pattern was identified */
  conversation_id: string;
  /** Message where solution was applied */
  message_id: string;
  /** Category of problem this solves (e.g. "error-handling"); "general" when unclassified */
  problem_category: string;
  /** Brief description of the problem (truncated to 300 chars) */
  problem_description: string;
  /** Summary of the solution (truncated to 300 chars) */
  solution_summary: string;
  /** Detailed solution steps (numbered/bulleted lines, at most 10) */
  solution_steps: string[];
  /** Code snippet or technique — first fenced code block, truncated to 500 chars */
  code_pattern?: string;
  /** Technology/framework involved (lowercased mentions, at most 5) */
  technology: string[];
  /** Prerequisites for this solution (at most 5) */
  prerequisites: string[];
  /** When to apply this solution; falls back to the problem description */
  applies_when: string;
  /** When NOT to apply this solution; absent if no "avoid" phrasing was found */
  avoid_when?: string;
  /** Files where this pattern was applied (from Write/Edit/MultiEdit tool inputs) */
  applied_to_files: string[];
  /** How well did this work */
  effectiveness: "excellent" | "good" | "moderate" | "poor";
  /** When the pattern was identified (copied from the solution message's timestamp) */
  timestamp: number;
}
/**
 * Extracts reusable solution patterns from conversation history.
 *
 * Works in two phases: (1) split the conversation into "segments" that
 * contain a problem/solution pair, then (2) run regex heuristics over each
 * segment to pull out category, summary, steps, code, technology,
 * prerequisites, applicability, and effectiveness. Everything is
 * best-effort pattern matching, not semantic analysis.
 */
export class SolutionPatternExtractor {
  // Noise patterns to filter out (session-continuation boilerplate).
  private readonly NOISE_PATTERNS = [
    /this session is being continued/i,
    /conversation is summarized below/i,
    /previous conversation that ran out of context/i,
  ];
  // Problem category patterns; first category whose pattern matches wins
  // (object insertion order — see identifyCategory).
  private readonly CATEGORY_PATTERNS: Record<string, RegExp[]> = {
    "error-handling": [
      /(?:error|exception|try|catch|throw)/i,
      /(?:handling errors?|error handling)/i,
    ],
    "performance": [
      /(?:performance|optimization|slow|fast|efficient)/i,
      /(?:caching|memoization|lazy)/i,
    ],
    "authentication": [
      /(?:auth|authentication|authorization|login|session|token)/i,
      /(?:jwt|oauth|credentials)/i,
    ],
    "database": [
      /(?:database|db|sql|query|migration)/i,
      /(?:sqlite|postgres|mysql|mongodb)/i,
    ],
    "api-design": [
      /(?:api|endpoint|route|rest|graphql)/i,
      /(?:request|response|http)/i,
    ],
    "testing": [
      /(?:test|testing|spec|jest|mocha)/i,
      /(?:unit test|integration test|e2e)/i,
    ],
    "refactoring": [
      /(?:refactor|restructure|reorganize|clean)/i,
      /(?:extract|inline|rename)/i,
    ],
    "configuration": [
      /(?:config|configuration|settings|environment)/i,
      // NOTE(review): the '.' in '.env' is unescaped, so this alternative
      // matches any character followed by "env" — confirm intent.
      /(?:env|dotenv|.env)/i,
    ],
    "file-operations": [
      /(?:file|read|write|path|directory)/i,
      /(?:fs|filesystem|io)/i,
    ],
    "async-patterns": [
      /(?:async|await|promise|callback)/i,
      /(?:concurrent|parallel|sequential)/i,
    ],
  };
  // Solution indicator patterns. One capture group: the solution text up
  // to the next period or end of input.
  private readonly SOLUTION_PATTERNS = [
    /(?:the (?:solution|fix|answer) is to)\s+(.+?)(?:\.|$)/i,
    /(?:(?:to )?(?:solve|fix|resolve) this)[,\s]+(.+?)(?:\.|$)/i,
    /(?:the (?:correct|right|proper) (?:way|approach) is to)\s+(.+?)(?:\.|$)/i,
    /(?:(?:you (?:can|should)|we (?:can|should)) (?:use|apply|implement))\s+(.+?)(?:\.|$)/i,
    /(?:here's how to (?:fix|solve|handle) it)[:\s]+(.+?)(?:\.|$)/i,
    /(?:the (?:trick|key) is to)\s+(.+?)(?:\.|$)/i,
  ];
  // Applies-when patterns; one capture group with the condition text.
  private readonly APPLIES_WHEN_PATTERNS = [
    /(?:when (?:you|we) (?:need|want) to)\s+(.+?)(?:\.|,|$)/i,
    /(?:if (?:you|we) (?:have|encounter|see))\s+(.+?)(?:\.|,|$)/i,
    /(?:for (?:situations|cases) (?:where|when))\s+(.+?)(?:\.|,|$)/i,
    /(?:this is useful when)\s+(.+?)(?:\.|,|$)/i,
  ];
  // Avoid-when patterns; one capture group with the condition text.
  private readonly AVOID_WHEN_PATTERNS = [
    /(?:(?:don't|do not) use this (?:when|if))\s+(.+?)(?:\.|,|$)/i,
    /(?:avoid (?:this|using this) (?:when|if))\s+(.+?)(?:\.|,|$)/i,
    /(?:this (?:won't|doesn't) work (?:when|if))\s+(.+?)(?:\.|,|$)/i,
    /(?:not (?:suitable|appropriate) (?:for|when))\s+(.+?)(?:\.|,|$)/i,
  ];
  // Technology extraction patterns. Some capture a name in group 1; others
  // match the tech name directly (extractTechnology falls back to match[0]).
  private readonly TECH_PATTERNS = [
    /(?:using|with|via)\s+([\w.-]+(?:\s+[\w.-]+)?)/gi,
    /(?:in|for)\s+(typescript|javascript|python|ruby|go|rust)/gi,
    /(?:react|vue|angular|svelte|next\.?js|node\.?js|express|fastify)/gi,
    /(?:jest|mocha|pytest|rspec|vitest)/gi,
  ];
  // Effectiveness indicators. Checked in order excellent → poor → moderate;
  // "good" is the fall-through default (see determineEffectiveness).
  private readonly EFFECTIVENESS_PATTERNS = {
    excellent: [
      /(?:works? (?:perfectly|great|excellently))/i,
      /(?:exactly what (?:we|I) needed)/i,
      /(?:solves? the problem completely)/i,
    ],
    good: [
      /(?:works? (?:well|correctly|fine))/i,
      /(?:this (?:fixes|solves|resolves) it)/i,
      /(?:successfully (?:implemented|fixed))/i,
    ],
    moderate: [
      /(?:mostly works?|works? for (?:most|some))/i,
      /(?:partial(?:ly)? (?:fixes|solves))/i,
      /(?:could be better)/i,
    ],
    poor: [
      /(?:still (?:has|have) (?:issues|problems))/i,
      /(?:doesn't (?:fully|completely) (?:work|solve))/i,
      /(?:needs? more work)/i,
    ],
  };
  /**
   * Extract solution patterns from conversation history.
   *
   * @param messages - Array of conversation messages
   * @param toolUses - Array of tool uses
   * @param toolResults - Array of tool results
   * @returns Array of extracted SolutionPattern objects
   */
  extractPatterns(
    messages: Message[],
    toolUses: ToolUse[],
    toolResults: ToolResult[]
  ): SolutionPattern[] {
    const patterns: SolutionPattern[] = [];
    // Filter out noise
    const cleanMessages = messages.filter(
      (m) => !this.isNoiseContent(m.content || "")
    );
    // Find solution segments (problem + solution pairs)
    const segments = this.findSolutionSegments(cleanMessages);
    for (const segment of segments) {
      const pattern = this.extractPatternFromSegment(
        segment,
        toolUses,
        toolResults
      );
      if (pattern) {
        patterns.push(pattern);
      }
    }
    return this.deduplicatePatterns(patterns);
  }
  /**
   * Check if content is noise. Only the first 500 characters are examined.
   */
  private isNoiseContent(content: string): boolean {
    const firstChunk = content.substring(0, 500);
    return this.NOISE_PATTERNS.some((pattern) => pattern.test(firstChunk));
  }
  /**
   * Find message segments that contain problem + solution pairs.
   *
   * Accumulates messages until a solution indicator has been seen AND a
   * completion phrase appears ("done", "fixed", ...), then closes the
   * segment. Segments are hard-capped at 10 messages; a capped segment is
   * kept only if a solution was seen, otherwise it is discarded.
   */
  private findSolutionSegments(messages: Message[]): Message[][] {
    const segments: Message[][] = [];
    let currentSegment: Message[] = [];
    let hasSolution = false;
    for (const message of messages) {
      currentSegment.push(message);
      // Check if this message contains a solution
      if (message.role === "assistant" && message.content) {
        const hasSolutionIndicator = this.SOLUTION_PATTERNS.some((p) =>
          p.test(message.content || "")
        );
        if (hasSolutionIndicator) {
          hasSolution = true;
        }
      }
      // Check for segment end (solution applied or conversation shift)
      if (hasSolution && currentSegment.length >= 2) {
        const content = message.content || "";
        const isComplete =
          /(?:done|complete|finished|working|fixed)/i.test(content) ||
          /(?:successfully|correctly) (?:implemented|fixed)/i.test(content);
        if (isComplete) {
          segments.push(currentSegment);
          currentSegment = [];
          hasSolution = false;
        }
      }
      // Limit segment size
      if (currentSegment.length > 10) {
        if (hasSolution) {
          segments.push(currentSegment);
        }
        currentSegment = [];
        hasSolution = false;
      }
    }
    // Don't forget last segment
    if (hasSolution && currentSegment.length >= 2) {
      segments.push(currentSegment);
    }
    return segments;
  }
  /**
   * Extract a solution pattern from a message segment.
   *
   * Returns null when the segment is too short, has no user message, or no
   * problem description could be derived; otherwise assembles a complete
   * SolutionPattern from the first user problem and the solution message.
   */
  private extractPatternFromSegment(
    segment: Message[],
    toolUses: ToolUse[],
    _toolResults: ToolResult[]
  ): SolutionPattern | null {
    if (segment.length < 2) {
      return null;
    }
    // Find the user problem
    const userMessages = segment.filter((m) => m.role === "user" && m.content);
    if (userMessages.length === 0) {
      return null;
    }
    const problemMessage = userMessages[0];
    const problemDescription = this.extractProblemDescription(problemMessage);
    if (!problemDescription) {
      return null;
    }
    // Find the solution message
    const assistantMessages = segment.filter(
      (m) => m.role === "assistant" && m.content
    );
    const solutionMessage = this.findSolutionMessage(assistantMessages);
    if (!solutionMessage) {
      return null;
    }
    // Extract all components
    const problemCategory = this.identifyCategory(problemDescription);
    const solutionSummary = this.extractSolutionSummary(solutionMessage);
    const solutionSteps = this.extractSolutionSteps(segment);
    const codePattern = this.extractCodePattern(solutionMessage);
    const technology = this.extractTechnology(segment);
    const prerequisites = this.extractPrerequisites(solutionMessage);
    const appliesWhen = this.extractAppliesWhen(segment, problemDescription);
    const avoidWhen = this.extractAvoidWhen(segment);
    const appliedToFiles = this.extractAppliedFiles(segment, toolUses);
    const effectiveness = this.determineEffectiveness(segment);
    return {
      id: nanoid(),
      conversation_id: solutionMessage.conversation_id,
      message_id: solutionMessage.id,
      problem_category: problemCategory,
      problem_description: problemDescription,
      solution_summary: solutionSummary,
      solution_steps: solutionSteps,
      code_pattern: codePattern,
      technology,
      prerequisites,
      applies_when: appliesWhen,
      avoid_when: avoidWhen,
      applied_to_files: appliedToFiles,
      effectiveness,
      timestamp: solutionMessage.timestamp,
    };
  }
  /**
   * Extract problem description from user message: the first sentence if
   * it is at least 15 characters, otherwise the first 300 characters of
   * the message; both are capped at 300 characters.
   */
  private extractProblemDescription(message: Message): string | null {
    const content = message.content || "";
    const sentences = content.split(/[.!?]/);
    const firstSentence = sentences[0]?.trim();
    if (firstSentence && firstSentence.length >= 15) {
      return firstSentence.substring(0, 300);
    }
    return content.substring(0, 300) || null;
  }
  /**
   * Find the message containing the solution.
   *
   * Preference order: first message matching a solution pattern; then the
   * last message if it contains a completion phrase; then the first
   * message; null only when the list is empty.
   */
  private findSolutionMessage(messages: Message[]): Message | null {
    for (const message of messages) {
      const content = message.content || "";
      const hasSolution = this.SOLUTION_PATTERNS.some((p) => p.test(content));
      if (hasSolution) {
        return message;
      }
    }
    // Fall back to last assistant message if it looks complete
    const lastMessage = messages[messages.length - 1];
    if (lastMessage) {
      const content = lastMessage.content || "";
      const isComplete = /(?:done|complete|fixed|working)/i.test(content);
      if (isComplete) {
        return lastMessage;
      }
    }
    return messages[0] || null;
  }
  /**
   * Identify problem category; "general" when no pattern matches.
   */
  private identifyCategory(problem: string): string {
    for (const [category, patterns] of Object.entries(this.CATEGORY_PATTERNS)) {
      for (const pattern of patterns) {
        if (pattern.test(problem)) {
          return category;
        }
      }
    }
    return "general";
  }
  /**
   * Extract solution summary: the first solution-pattern capture, or the
   * message's first sentence, or the literal "Solution applied".
   */
  private extractSolutionSummary(message: Message): string {
    const content = message.content || "";
    for (const pattern of this.SOLUTION_PATTERNS) {
      const match = content.match(pattern);
      if (match && match[1]) {
        return match[1].trim().substring(0, 300);
      }
    }
    // Fall back to first sentence of solution
    const sentences = content.split(/[.!?]/);
    return sentences[0]?.trim().substring(0, 300) || "Solution applied";
  }
  /**
   * Extract solution steps from messages: numbered items ("1." / "1)")
   * first, then bullet items ("-", "*", "•"), capped at 10 total.
   */
  private extractSolutionSteps(segment: Message[]): string[] {
    const steps: string[] = [];
    for (const message of segment) {
      if (message.role !== "assistant" || !message.content) {
        continue;
      }
      const content = message.content;
      // Look for numbered steps
      const numberedPattern = /(?:^|\n)\s*\d+[.)]\s*(.+?)(?:\n|$)/g;
      const matches = content.matchAll(numberedPattern);
      for (const match of matches) {
        if (match[1]) {
          steps.push(match[1].trim());
        }
      }
      // Look for bullet points
      const bulletPattern = /(?:^|\n)\s*[-*•]\s*(.+?)(?:\n|$)/g;
      const bulletMatches = content.matchAll(bulletPattern);
      for (const match of bulletMatches) {
        if (match[1] && steps.length < 10) {
          steps.push(match[1].trim());
        }
      }
    }
    return steps.slice(0, 10);
  }
  /**
   * Extract code pattern if present: the first fenced code block,
   * truncated to 500 characters with a "// ..." marker when cut.
   */
  private extractCodePattern(message: Message): string | undefined {
    const content = message.content || "";
    // Look for code blocks
    const codeBlockPattern = /```(?:\w+)?\n([\s\S]+?)```/;
    const match = content.match(codeBlockPattern);
    if (match && match[1]) {
      const code = match[1].trim();
      // Return code if it's not too long
      if (code.length <= 500) {
        return code;
      }
      return code.substring(0, 500) + "\n// ...";
    }
    return undefined;
  }
  /**
   * Extract technology mentions (lowercased, 2-30 chars, at most 5).
   */
  private extractTechnology(segment: Message[]): string[] {
    const tech = new Set<string>();
    const combinedContent = segment.map((m) => m.content || "").join("\n");
    for (const pattern of this.TECH_PATTERNS) {
      const matches = combinedContent.matchAll(new RegExp(pattern, "gi"));
      for (const match of matches) {
        const techName = (match[1] || match[0]).trim().toLowerCase();
        if (techName.length >= 2 && techName.length <= 30) {
          tech.add(techName);
        }
      }
    }
    return Array.from(tech).slice(0, 5);
  }
  /**
   * Extract prerequisites (at most 5, each capped at 100 chars).
   */
  private extractPrerequisites(message: Message): string[] {
    const prereqs: string[] = [];
    const content = message.content || "";
    // Look for prerequisite patterns
    const prereqPatterns = [
      /(?:(?:first|before),? (?:you need|ensure|make sure))\s+(.+?)(?:\.|$)/gi,
      /(?:requires?|needs?)\s+(.+?)(?:\.|$)/gi,
      /(?:install|setup|configure)\s+(.+?)(?:\.|$)/gi,
    ];
    for (const pattern of prereqPatterns) {
      const matches = content.matchAll(pattern);
      for (const match of matches) {
        if (match[1] && prereqs.length < 5) {
          prereqs.push(match[1].trim().substring(0, 100));
        }
      }
    }
    return prereqs;
  }
  /**
   * Extract when the solution applies; falls back to the problem
   * description when no applies-when phrasing is found.
   */
  private extractAppliesWhen(segment: Message[], problem: string): string {
    const combinedContent = segment.map((m) => m.content || "").join("\n");
    for (const pattern of this.APPLIES_WHEN_PATTERNS) {
      const match = combinedContent.match(pattern);
      if (match && match[1]) {
        return match[1].trim().substring(0, 200);
      }
    }
    // Fall back to problem description
    return problem.substring(0, 200);
  }
  /**
   * Extract when to avoid this solution; undefined when no avoid-when
   * phrasing is found.
   */
  private extractAvoidWhen(segment: Message[]): string | undefined {
    const combinedContent = segment.map((m) => m.content || "").join("\n");
    for (const pattern of this.AVOID_WHEN_PATTERNS) {
      const match = combinedContent.match(pattern);
      if (match && match[1]) {
        return match[1].trim().substring(0, 200);
      }
    }
    return undefined;
  }
  /**
   * Extract files where solution was applied: file_path inputs of
   * Write/Edit/MultiEdit tool uses belonging to the segment's messages.
   */
  private extractAppliedFiles(segment: Message[], toolUses: ToolUse[]): string[] {
    const files = new Set<string>();
    const segmentMessageIds = new Set(segment.map((m) => m.id));
    // Extract from tool uses
    for (const toolUse of toolUses) {
      if (!segmentMessageIds.has(toolUse.message_id)) {
        continue;
      }
      if (["Write", "Edit", "MultiEdit"].includes(toolUse.tool_name)) {
        const input = toolUse.tool_input || {};
        if (input.file_path && typeof input.file_path === "string") {
          files.add(input.file_path);
        }
      }
    }
    return Array.from(files);
  }
  /**
   * Determine effectiveness of the solution from the segment's final three
   * messages. Checked in order excellent → poor → moderate; "good" is the
   * default when nothing matches.
   */
  private determineEffectiveness(segment: Message[]): SolutionPattern["effectiveness"] {
    const lastMessages = segment.slice(-3);
    const combinedContent = lastMessages.map((m) => m.content || "").join("\n");
    for (const pattern of this.EFFECTIVENESS_PATTERNS.excellent) {
      if (pattern.test(combinedContent)) {
        return "excellent";
      }
    }
    for (const pattern of this.EFFECTIVENESS_PATTERNS.poor) {
      if (pattern.test(combinedContent)) {
        return "poor";
      }
    }
    for (const pattern of this.EFFECTIVENESS_PATTERNS.moderate) {
      if (pattern.test(combinedContent)) {
        return "moderate";
      }
    }
    return "good"; // Default
  }
  /**
   * Deduplicate similar patterns by (category, first 50 chars of summary
   * lowercased); the first occurrence wins.
   */
  private deduplicatePatterns(patterns: SolutionPattern[]): SolutionPattern[] {
    const unique: SolutionPattern[] = [];
    const seen = new Set<string>();
    for (const pattern of patterns) {
      const signature = `${pattern.problem_category}_${pattern.solution_summary.substring(0, 50).toLowerCase()}`;
      if (!seen.has(signature)) {
        seen.add(signature);
        unique.push(pattern);
      }
    }
    return unique;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/realtime/ConversationWatcher.ts | TypeScript | /**
* Conversation Watcher
*
* Watches Claude conversation JSONL files for changes using chokidar.
* Triggers incremental parsing and extraction when files are modified.
*/
import { watch, type FSWatcher } from "chokidar";
import { join } from "path";
import { homedir } from "os";
import { EventEmitter } from "events";
import { IncrementalParser, type ParsedMessage } from "./IncrementalParser.js";
import { LiveExtractor, type ExtractionResult } from "./LiveExtractor.js";
import type { Database } from "better-sqlite3";
import type { RealtimeConfig } from "../memory/types.js";
/**
 * Events emitted by the watcher.
 */
export interface WatcherEvents {
  /** A new message was parsed from a watched JSONL file. */
  message: (filePath: string, message: ParsedMessage) => void;
  /** The live extractor produced at least one decision/file/error for a file. */
  extraction: (filePath: string, result: ExtractionResult) => void;
  /** A watcher or processing error occurred (processing continues). */
  error: (error: Error) => void;
  /** Watching has started. */
  started: () => void;
  /** Watching has stopped. */
  stopped: () => void;
}
/**
 * Watcher status snapshot returned by ConversationWatcher.getStatus().
 */
export interface WatcherStatus {
  /** Whether the watcher is currently running. */
  isRunning: boolean;
  /** Directories currently being watched. */
  watchedPaths: string[];
  /** Number of files the incremental parser is tracking. */
  trackedFiles: number;
  /** Number of extraction runs currently in flight. */
  extractionsPending: number;
  /** Timestamp (Date.now()) of the most recent completed extraction, if any. */
  lastExtraction?: number;
}
/**
 * Watches Claude conversation JSONL files for changes and reacts to them.
 *
 * File-system events are debounced per file (config.extractionInterval ms);
 * after the debounce window, new JSONL lines are parsed incrementally and
 * fed to the LiveExtractor. Emits "message", "extraction", "error",
 * "started", and "stopped" events (see WatcherEvents).
 */
export class ConversationWatcher extends EventEmitter {
  private watcher: FSWatcher | null = null;
  private parser: IncrementalParser;
  private extractor: LiveExtractor;
  private config: RealtimeConfig;
  private isRunning = false;
  // Pending debounce timers, keyed by watched file path.
  private extractionQueue: Map<string, ReturnType<typeof setTimeout>> = new Map();
  // Number of extractor runs currently in flight (reported via getStatus()).
  private pendingExtractions = 0;
  private lastExtractionTime?: number;

  /**
   * @param db - Open better-sqlite3 database handed to the LiveExtractor.
   * @param config - Partial overrides merged over the defaults below.
   */
  constructor(db: Database, config?: Partial<RealtimeConfig>) {
    super();
    this.config = {
      enabled: true,
      watchPaths: this.getDefaultWatchPaths(),
      extractionInterval: 1000, // Debounce interval (ms)
      checkpointInterval: 60000,
      autoRemember: {
        decisions: true,
        fileEdits: true,
        errors: true,
      },
      ...config,
    };
    this.parser = new IncrementalParser();
    this.extractor = new LiveExtractor(db, this.config);
  }

  /**
   * Get default watch paths for Claude conversations (~/.claude/projects).
   */
  private getDefaultWatchPaths(): string[] {
    const claudeProjectsPath = join(homedir(), ".claude", "projects");
    return [claudeProjectsPath];
  }

  /**
   * Start watching for conversation changes. No-op if already running.
   */
  start(): void {
    if (this.isRunning) {
      return;
    }
    // Initialize chokidar watcher
    this.watcher = watch(this.config.watchPaths, {
      persistent: true,
      ignoreInitial: true, // Don't process existing files on start
      followSymlinks: false,
      depth: 10, // Limit depth for performance
      awaitWriteFinish: {
        stabilityThreshold: 500,
        pollInterval: 100,
      },
      // Only watch JSONL files
      ignored: (path: string) => {
        // Only watch .jsonl files in expected locations
        if (path.endsWith(".jsonl")) {
          return false;
        }
        // Allow directories to be traversed
        return !path.includes("projects");
      },
    });
    // Set up event handlers: "add" and "change" are handled identically
    // since both mean there may be new lines to parse.
    this.watcher.on("change", (path) => this.handleFileChange(path));
    this.watcher.on("add", (path) => this.handleFileChange(path));
    this.watcher.on("error", (error) => this.emit("error", error));
    this.isRunning = true;
    this.emit("started");
    console.error(
      `[Watcher] Started watching ${this.config.watchPaths.length} path(s)`
    );
  }

  /**
   * Stop watching. Cancels any pending debounce timers first.
   */
  async stop(): Promise<void> {
    if (!this.isRunning || !this.watcher) {
      return;
    }
    // Clear pending extractions
    for (const timeout of this.extractionQueue.values()) {
      clearTimeout(timeout);
    }
    this.extractionQueue.clear();
    await this.watcher.close();
    this.watcher = null;
    this.isRunning = false;
    this.emit("stopped");
    console.error("[Watcher] Stopped");
  }

  /**
   * Handle a file change event by (re)arming a per-file debounce timer.
   */
  private handleFileChange(filePath: string): void {
    // Debounce extraction for the same file
    const existingTimeout = this.extractionQueue.get(filePath);
    if (existingTimeout) {
      clearTimeout(existingTimeout);
    }
    const timeout = setTimeout(() => {
      this.extractionQueue.delete(filePath);
      // Fire-and-forget: processFileChange handles its own errors.
      void this.processFileChange(filePath);
    }, this.config.extractionInterval);
    this.extractionQueue.set(filePath, timeout);
  }

  /**
   * Process a file change after the debounce window: parse new JSONL
   * content, emit one "message" event per parsed message, run the live
   * extractor, and emit "extraction" when anything was extracted.
   * All failures are routed to the "error" event.
   */
  private async processFileChange(filePath: string): Promise<void> {
    try {
      // Parse new messages
      const messages = this.parser.parseNewContent(filePath);
      if (messages.length === 0) {
        return;
      }
      // Emit message events
      for (const message of messages) {
        this.emit("message", filePath, message);
      }
      // Extract decisions, file edits, etc.
      // BUG FIX: the decrement lives in `finally` so a throwing extractor
      // cannot leave pendingExtractions permanently inflated (the outer
      // catch would otherwise skip it and getStatus() would over-report).
      this.pendingExtractions++;
      let result: ExtractionResult;
      try {
        result = await this.extractor.processMessages(filePath, messages);
      } finally {
        this.pendingExtractions--;
      }
      this.lastExtractionTime = Date.now();
      if (result.decisionsExtracted > 0 || result.filesTracked > 0 || result.errorsDetected > 0) {
        this.emit("extraction", filePath, result);
        console.error(
          `[Watcher] Extracted from ${filePath}: ` +
            `${result.decisionsExtracted} decisions, ` +
            `${result.filesTracked} files, ` +
            `${result.errorsDetected} errors`
        );
      }
    } catch (error) {
      this.emit("error", error instanceof Error ? error : new Error(String(error)));
    }
  }

  /**
   * Get current watcher status.
   */
  getStatus(): WatcherStatus {
    return {
      isRunning: this.isRunning,
      watchedPaths: this.config.watchPaths,
      trackedFiles: this.parser.getTrackedFiles().length,
      extractionsPending: this.pendingExtractions,
      lastExtraction: this.lastExtractionTime,
    };
  }

  /**
   * Add a path to watch (no-op if already watched).
   */
  addPath(path: string): void {
    if (!this.config.watchPaths.includes(path)) {
      this.config.watchPaths.push(path);
      if (this.watcher) {
        this.watcher.add(path);
      }
    }
  }

  /**
   * Remove a path from watching (no-op if not watched).
   */
  removePath(path: string): void {
    const index = this.config.watchPaths.indexOf(path);
    if (index !== -1) {
      this.config.watchPaths.splice(index, 1);
      if (this.watcher) {
        this.watcher.unwatch(path);
      }
    }
  }

  /**
   * Force re-process a specific file from the beginning.
   */
  async reprocessFile(filePath: string): Promise<ExtractionResult> {
    // Reset parser tracking for this file
    this.parser.resetFile(filePath);
    // Parse all content
    const messages = this.parser.parseNewContent(filePath);
    // Extract
    return this.extractor.processMessages(filePath, messages);
  }

  /**
   * Get parser for testing.
   */
  getParser(): IncrementalParser {
    return this.parser;
  }

  /**
   * Get extractor for testing.
   */
  getExtractor(): LiveExtractor {
    return this.extractor;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/realtime/IncrementalParser.ts | TypeScript | /**
* Incremental Parser
*
* Parses new lines from Claude conversation JSONL files as they are appended.
* Maintains file positions to only process new content.
*/
import { readFileSync, statSync, existsSync } from "fs";
/**
 * Parsed message from a JSONL file.
 */
export interface ParsedMessage {
  /** Role of the message author. */
  type: "user" | "assistant" | "system";
  /** Text content of the message. */
  content: string;
  /** Message timestamp, when present in the JSONL record. */
  timestamp?: number;
  /** Tool invocation attached to the message, if any. */
  toolUse?: {
    name: string;
    input: Record<string, unknown>;
  };
  /** Result of a tool invocation, if any. */
  toolResult?: {
    name: string;
    output: string;
    isError?: boolean;
  };
  /** Extended-thinking content, if present in the record. */
  thinkingContent?: string;
}
/**
 * Per-file tracking info kept by the incremental parser.
 */
interface FileInfo {
  /** Absolute path of the tracked file. */
  path: string;
  // NOTE(review): lastPosition is set from the decoded string's length
  // (UTF-16 code units) but compared against fs stats.size (bytes) in
  // parseNewContent — the two diverge for non-ASCII content; confirm.
  lastPosition: number;
  /** mtimeMs of the file at the last parse. */
  lastModified: number;
  /** Number of non-empty lines seen at the last parse (resume point). */
  lineCount: number;
}
export class IncrementalParser {
private filePositions: Map<string, FileInfo> = new Map();
constructor() {
// No initialization needed - file positions are initialized inline
}
/**
* Parse new content from a file since last read
*/
parseNewContent(filePath: string): ParsedMessage[] {
if (!existsSync(filePath)) {
return [];
}
const stats = statSync(filePath);
const fileInfo = this.filePositions.get(filePath);
// Check if file has been modified (mtime granularity can be coarse)
if (fileInfo) {
const sizeUnchanged = stats.size <= fileInfo.lastPosition;
const mtimeUnchanged = stats.mtimeMs <= fileInfo.lastModified;
if (mtimeUnchanged && sizeUnchanged) {
return []; // No changes
}
}
// Read file content
const content = readFileSync(filePath, "utf-8");
const lines = content.split("\n").filter((line) => line.trim());
// Determine where to start parsing (reset if file was truncated)
const startLine =
fileInfo && stats.size >= fileInfo.lastPosition ? fileInfo.lineCount : 0;
// Parse new lines
const messages: ParsedMessage[] = [];
for (let i = startLine; i < lines.length; i++) {
const parsed = this.parseLine(lines[i]);
if (parsed) {
messages.push(parsed);
}
}
// Update tracking info
this.filePositions.set(filePath, {
path: filePath,
lastPosition: content.length,
lastModified: stats.mtimeMs,
lineCount: lines.length,
});
return messages;
}
/**
* Parse a single JSONL line
*/
private parseLine(line: string): ParsedMessage | null {
try {
const data = JSON.parse(line) as Record<string, unknown>;
return this.extractMessage(data);
} catch (_error) {
// Invalid JSON - skip
return null;
}
}
/**
* Extract message from parsed JSON data
*/
private extractMessage(data: Record<string, unknown>): ParsedMessage | null {
// Handle different Claude message formats
// Standard message format
if (data.type === "message" || data.role) {
const role = (data.role || data.type) as string;
let messageType: ParsedMessage["type"] = "user";
if (role === "assistant" || role === "model") {
messageType = "assistant";
} else if (role === "system") {
messageType = "system";
}
const message: ParsedMessage = {
type: messageType,
content: this.extractContent(data.content),
timestamp: typeof data.timestamp === "number" ? data.timestamp : Date.now(),
};
// Extract tool use if present
const toolUse = this.extractToolUse(data);
if (toolUse) {
message.toolUse = toolUse;
}
// Extract tool result if present
const toolResult = this.extractToolResult(data);
if (toolResult) {
message.toolResult = toolResult;
}
// Extract thinking content
const thinking = this.extractThinking(data);
if (thinking) {
message.thinkingContent = thinking;
}
return message;
}
// Tool use block format
if (data.type === "tool_use") {
return {
type: "assistant",
content: "",
toolUse: {
name: typeof data.name === "string" ? data.name : "unknown",
input: (data.input || {}) as Record<string, unknown>,
},
timestamp: Date.now(),
};
}
// Tool result format
if (data.type === "tool_result") {
return {
type: "user",
content: "",
toolResult: {
name: typeof data.tool_use_id === "string" ? data.tool_use_id : "unknown",
output: this.extractContent(data.content),
isError: data.is_error === true,
},
timestamp: Date.now(),
};
}
return null;
}
/**
* Extract text content from various content formats
*/
private extractContent(content: unknown): string {
if (typeof content === "string") {
return content;
}
if (Array.isArray(content)) {
return content
.map((block) => {
if (typeof block === "string") {
return block;
}
if (typeof block === "object" && block !== null) {
const b = block as Record<string, unknown>;
if (b.type === "text" && typeof b.text === "string") {
return b.text;
}
}
return "";
})
.join("\n");
}
return "";
}
/**
* Extract tool use information
*/
private extractToolUse(
data: Record<string, unknown>
): { name: string; input: Record<string, unknown> } | null {
// Check content array for tool_use blocks
if (Array.isArray(data.content)) {
for (const block of data.content) {
if (typeof block === "object" && block !== null) {
const b = block as Record<string, unknown>;
if (b.type === "tool_use" && typeof b.name === "string") {
return {
name: b.name,
input: (b.input || {}) as Record<string, unknown>,
};
}
}
}
}
return null;
}
/**
* Extract tool result information
*/
private extractToolResult(
data: Record<string, unknown>
): { name: string; output: string; isError?: boolean } | null {
// Check content array for tool_result blocks
if (Array.isArray(data.content)) {
for (const block of data.content) {
if (typeof block === "object" && block !== null) {
const b = block as Record<string, unknown>;
if (b.type === "tool_result") {
return {
name: typeof b.tool_use_id === "string" ? b.tool_use_id : "unknown",
output: this.extractContent(b.content),
isError: b.is_error === true,
};
}
}
}
}
return null;
}
/**
* Extract thinking/reasoning content
*/
private extractThinking(data: Record<string, unknown>): string | null {
if (Array.isArray(data.content)) {
for (const block of data.content) {
if (typeof block === "object" && block !== null) {
const b = block as Record<string, unknown>;
if (b.type === "thinking" && typeof b.thinking === "string") {
return b.thinking;
}
}
}
}
return null;
}
/**
* Reset tracking for a specific file
*/
resetFile(filePath: string): void {
this.filePositions.delete(filePath);
}
/**
* Reset all tracking
*/
resetAll(): void {
this.filePositions.clear();
}
/**
* Get current tracking info for a file
*/
getFileInfo(filePath: string): FileInfo | undefined {
return this.filePositions.get(filePath);
}
/**
* Get all tracked files
*/
getTrackedFiles(): string[] {
return Array.from(this.filePositions.keys());
}
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/realtime/LiveExtractor.ts | TypeScript | /**
* Live Extractor
*
* Extracts decisions, file operations, and errors from parsed messages
* in real-time and stores them in working memory.
*/
import type { Database } from "better-sqlite3";
import type { ParsedMessage } from "./IncrementalParser.js";
import type { RealtimeConfig } from "../memory/types.js";
import { WorkingMemoryStore } from "../memory/WorkingMemoryStore.js";
import { dirname } from "path";
import { getCanonicalProjectPath } from "../utils/worktree.js";
/**
 * Result of extraction processing
 */
export interface ExtractionResult {
  // Number of messages examined in this batch
  messagesProcessed: number;
  // Decisions that passed the confidence filter and were stored
  decisionsExtracted: number;
  // File operations recognized from tool uses and stored
  filesTracked: number;
  // Errors found in tool results or message content and stored
  errorsDetected: number;
  // Total working-memory writes performed (sum of the three above)
  memoryItemsCreated: number;
}
/**
 * Detected decision pattern
 */
interface DetectedDecision {
  // The captured decision phrase (regex capture group, trimmed)
  text: string;
  // Optional supporting rationale (not populated by the current patterns)
  rationale?: string;
  // Heuristic confidence in [0, 1]; items must exceed 0.5 to be stored
  confidence: number;
}
/**
 * Detected file operation
 */
interface DetectedFileOp {
  path: string;
  action: "read" | "edit" | "create" | "delete";
  // Wall-clock time of detection (not the original message time)
  timestamp: number;
}
/**
 * Scans parsed conversation messages for decisions, file operations, and
 * errors, and persists matches to the working-memory store with TTLs.
 */
export class LiveExtractor {
  private memoryStore: WorkingMemoryStore;
  private config: RealtimeConfig;
  // Decision detection patterns
  // NOTE: these RegExp objects carry the /g flag and are shared across
  // calls; extractDecisions() resets lastIndex before each scan.
  private decisionPatterns = [
    /(?:I'll|I will|Let's|We should|I've decided to|Going to) ([^.]+)/gi,
    /(?:decided|choosing|opting|selecting) (?:to )?([^.]+)/gi,
    /(?:using|implementing|adopting) ([^.]+?) (?:for|because|since)/gi,
    /(?:the (?:best|right|correct) (?:approach|solution|way) is) ([^.]+)/gi,
  ];
  // Error detection patterns
  private errorPatterns = [
    /error[:\s]+(.+)/gi,
    /failed[:\s]+(.+)/gi,
    /exception[:\s]+(.+)/gi,
    /cannot (.+)/gi,
    /unable to (.+)/gi,
  ];
  // Accepts a live sqlite handle; config overrides are shallow-merged over
  // defaults (note: a partial `autoRemember` replaces the whole sub-object).
  constructor(db: Database, config?: Partial<RealtimeConfig>) {
    this.memoryStore = new WorkingMemoryStore(db);
    this.config = {
      enabled: true,
      watchPaths: [],
      extractionInterval: 1000,
      checkpointInterval: 60000,
      autoRemember: {
        decisions: true,
        fileEdits: true,
        errors: true,
      },
      ...config,
    };
  }
  /**
   * Process a batch of messages and extract relevant information
   *
   * Declared async for interface stability, but all stores below are
   * synchronous; the returned promise resolves immediately with counts.
   */
  async processMessages(
    filePath: string,
    messages: ParsedMessage[]
  ): Promise<ExtractionResult> {
    const projectPath = this.extractProjectPath(filePath);
    const result: ExtractionResult = {
      messagesProcessed: messages.length,
      decisionsExtracted: 0,
      filesTracked: 0,
      errorsDetected: 0,
      memoryItemsCreated: 0,
    };
    for (const message of messages) {
      // Process assistant messages for decisions
      if (message.type === "assistant" && this.config.autoRemember.decisions) {
        const decisions = this.extractDecisions(message.content);
        for (const decision of decisions) {
          // Strictly greater than the 0.5 base score: a decision must earn
          // at least one confidence bonus to be stored.
          if (decision.confidence > 0.5) {
            this.storeDecision(projectPath, decision);
            result.decisionsExtracted++;
            result.memoryItemsCreated++;
          }
        }
      }
      // Process tool uses for file operations
      if (message.toolUse && this.config.autoRemember.fileEdits) {
        const fileOp = this.extractFileOperation(message.toolUse);
        if (fileOp) {
          this.storeFileOperation(projectPath, fileOp);
          result.filesTracked++;
          result.memoryItemsCreated++;
        }
      }
      // Process tool results for errors
      if (message.toolResult?.isError && this.config.autoRemember.errors) {
        this.storeError(projectPath, message.toolResult.output);
        result.errorsDetected++;
        result.memoryItemsCreated++;
      }
      // Also check content for errors
      if (this.config.autoRemember.errors) {
        const errors = this.extractErrors(message.content);
        for (const error of errors) {
          this.storeError(projectPath, error);
          result.errorsDetected++;
          result.memoryItemsCreated++;
        }
      }
    }
    return result;
  }
  /**
   * Extract project path from file path
   */
  private extractProjectPath(filePath: string): string {
    // Claude conversation paths are typically:
    // ~/.claude/projects/-Users-name-project/.../conversation.jsonl
    // We want to extract the actual project path
    const match = filePath.match(/projects\/(.+?)\//);
    if (match) {
      // Convert the encoded path back to real path
      // NOTE(review): since the encoded segment itself starts with "-", the
      // "-" -> "/" replacement already yields a leading slash, so prepending
      // "/" appears to produce "//Users/...". Hyphens that were part of real
      // directory names are also indistinguishable from separators here.
      // This seems to rely on getCanonicalProjectPath() to normalize — TODO
      // confirm against that helper's behavior.
      const encoded = match[1];
      const decodedPath = "/" + encoded.replace(/-/g, "/").replace(/\/\//g, "-");
      return getCanonicalProjectPath(decodedPath).canonicalPath;
    }
    return getCanonicalProjectPath(dirname(filePath)).canonicalPath;
  }
  /**
   * Extract decisions from message content
   *
   * Runs every decision pattern over the text, filters out captures that
   * are implausibly short/long, scores each, and deduplicates.
   */
  private extractDecisions(content: string): DetectedDecision[] {
    const decisions: DetectedDecision[] = [];
    for (const pattern of this.decisionPatterns) {
      // Reset regex lastIndex (patterns are shared /g objects)
      pattern.lastIndex = 0;
      let match;
      while ((match = pattern.exec(content)) !== null) {
        const decisionText = match[1].trim();
        // Skip if too short or too long
        if (decisionText.length < 10 || decisionText.length > 200) {
          continue;
        }
        // Calculate confidence based on context
        const confidence = this.calculateDecisionConfidence(decisionText, content);
        decisions.push({
          text: decisionText,
          confidence,
        });
      }
    }
    // Deduplicate similar decisions
    return this.deduplicateDecisions(decisions);
  }
  /**
   * Calculate confidence score for a decision
   *
   * Starts at 0.5; +0.1 per matched tech-keyword group, +0.1 for long
   * surrounding context, -0.2 if the text looks like a question. Clamped
   * to [0, 1].
   */
  private calculateDecisionConfidence(decision: string, context: string): number {
    let confidence = 0.5;
    // Higher confidence if it mentions specific tech/patterns
    const techPatterns = [
      /typescript|javascript|python|rust|go/i,
      /react|vue|angular|svelte/i,
      /sqlite|postgres|mongodb|redis/i,
      /api|rest|graphql|grpc/i,
      /pattern|architecture|design/i,
    ];
    for (const pattern of techPatterns) {
      if (pattern.test(decision)) {
        confidence += 0.1;
      }
    }
    // Higher confidence if in a longer context
    if (context.length > 500) {
      confidence += 0.1;
    }
    // Lower confidence if it looks like a question
    if (decision.includes("?")) {
      confidence -= 0.2;
    }
    return Math.min(1, Math.max(0, confidence));
  }
  /**
   * Deduplicate similar decisions
   *
   * "Similar" here means identical after lowercasing and whitespace
   * collapsing; the first occurrence wins.
   */
  private deduplicateDecisions(decisions: DetectedDecision[]): DetectedDecision[] {
    const seen = new Set<string>();
    const unique: DetectedDecision[] = [];
    for (const decision of decisions) {
      const normalized = decision.text.toLowerCase().replace(/\s+/g, " ");
      if (!seen.has(normalized)) {
        seen.add(normalized);
        unique.push(decision);
      }
    }
    return unique;
  }
  /**
   * Extract file operation from tool use
   *
   * Maps the Read/Edit/Write tools directly; for Bash, only `rm` commands
   * are recognized (first non-flag argument becomes the deleted path).
   */
  private extractFileOperation(toolUse: {
    name: string;
    input: Record<string, unknown>;
  }): DetectedFileOp | null {
    const { name, input } = toolUse;
    let filePath: string | undefined;
    let action: DetectedFileOp["action"] = "read";
    switch (name) {
      case "Read":
        filePath = input.file_path as string | undefined;
        action = "read";
        break;
      case "Edit":
        filePath = input.file_path as string | undefined;
        action = "edit";
        break;
      case "Write":
        filePath = input.file_path as string | undefined;
        action = "create";
        break;
      case "Bash": {
        // Try to extract file paths from bash commands
        const cmd = input.command as string | undefined;
        if (cmd) {
          if (cmd.includes("rm ") || cmd.includes("rm -")) {
            const rmMatch = cmd.match(/rm\s+(?:-\w+\s+)*(\S+)/);
            if (rmMatch) {
              filePath = rmMatch[1];
              action = "delete";
            }
          }
        }
        break;
      }
    }
    if (filePath) {
      return {
        path: filePath,
        action,
        timestamp: Date.now(),
      };
    }
    return null;
  }
  /**
   * Extract errors from content
   *
   * Returns the first few plausible error snippets (length-bounded
   * captures), capped at 5 per message to limit memory churn.
   */
  private extractErrors(content: string): string[] {
    const errors: string[] = [];
    for (const pattern of this.errorPatterns) {
      pattern.lastIndex = 0;
      let match;
      while ((match = pattern.exec(content)) !== null) {
        const errorText = match[1].trim();
        if (errorText.length > 5 && errorText.length < 500) {
          errors.push(errorText);
        }
      }
    }
    return errors.slice(0, 5); // Limit to 5 errors per message
  }
  /**
   * Store a detected decision in working memory
   *
   * Keys include a timestamp + random suffix, so every decision becomes a
   * distinct memory item (expires after 7 days).
   */
  private storeDecision(projectPath: string, decision: DetectedDecision): void {
    const key = `decision_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`;
    this.memoryStore.remember({
      key,
      value: decision.text,
      context: decision.rationale,
      tags: ["decision", "auto-extracted"],
      projectPath,
      ttl: 86400 * 7, // 7 days
    });
  }
  /**
   * Store a file operation in working memory
   */
  private storeFileOperation(projectPath: string, fileOp: DetectedFileOp): void {
    // Use a stable key for the file to update rather than create new entries
    const key = `file_${fileOp.path.replace(/[^a-zA-Z0-9]/g, "_")}`;
    this.memoryStore.remember({
      key,
      value: `${fileOp.action}: ${fileOp.path}`,
      tags: ["file", fileOp.action, "auto-extracted"],
      projectPath,
      ttl: 86400, // 1 day
    });
  }
  /**
   * Store an error in working memory
   *
   * Like decisions, each error gets a unique timestamped key (expires
   * after 3 days).
   */
  private storeError(projectPath: string, error: string): void {
    const key = `error_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`;
    this.memoryStore.remember({
      key,
      value: error,
      tags: ["error", "auto-extracted"],
      projectPath,
      ttl: 86400 * 3, // 3 days
    });
  }
  /**
   * Get the working memory store for direct access
   */
  getMemoryStore(): WorkingMemoryStore {
    return this.memoryStore;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/search/HybridReranker.ts | TypeScript | /**
* Hybrid Re-Ranker using Reciprocal Rank Fusion (RRF)
* Combines vector search results with FTS5 results for better ranking
*/
/**
 * Configuration for hybrid re-ranking
 */
export interface RerankConfig {
  /** RRF constant k - higher values reduce the impact of rank differences (default: 60) */
  rrfK: number;
  /** Weight for vector search results (0-1, default: 0.7) */
  vectorWeight: number;
  /** Weight for FTS results (0-1, default: 0.3) */
  ftsWeight: number;
  /** Whether re-ranking is enabled */
  enabled: boolean;
}
export const DEFAULT_RERANK_CONFIG: RerankConfig = {
  rrfK: 60,
  vectorWeight: 0.7,
  ftsWeight: 0.3,
  enabled: true,
};
/**
 * Generic result with ID and score
 */
export interface RankableResult {
  id: number | string;
  score: number;
}
/**
 * Re-ranked result with combined score
 */
export interface RerankResult {
  id: number | string;
  combinedScore: number;
  vectorRank: number | null;
  ftsRank: number | null;
  vectorScore: number | null;
  ftsScore: number | null;
}
/**
 * Fuses vector-search and FTS rankings via weighted Reciprocal Rank Fusion.
 */
export class HybridReranker {
  private config: RerankConfig;

  constructor(config?: Partial<RerankConfig>) {
    this.config = { ...DEFAULT_RERANK_CONFIG, ...config };
  }

  /** A defensive copy of the active configuration. */
  getConfig(): RerankConfig {
    return { ...this.config };
  }

  /** RRF contribution of a single 1-based rank: 1 / (k + rank). */
  private calculateRrfScore(rank: number): number {
    return 1 / (this.config.rrfK + rank);
  }

  /** Build an id -> { rank, score } lookup from an ordered result list. */
  private indexByRank(
    results: RankableResult[]
  ): Map<number | string, { rank: number; score: number }> {
    const lookup = new Map<number | string, { rank: number; score: number }>();
    results.forEach((result, idx) => {
      lookup.set(result.id, { rank: idx + 1, score: result.score });
    });
    return lookup;
  }

  /**
   * Fuse vector and FTS rankings with weighted RRF.
   *
   * @param vectorResults - Vector/semantic results, best-first
   * @param ftsResults - Full-text results, best-first
   * @param limit - Maximum fused results to return
   */
  rerank(
    vectorResults: RankableResult[],
    ftsResults: RankableResult[],
    limit: number
  ): RerankResult[] {
    // Disabled: pass the vector ordering through untouched, FTS is ignored.
    if (!this.config.enabled) {
      return vectorResults.slice(0, limit).map((r, idx) => ({
        id: r.id,
        combinedScore: r.score,
        vectorRank: idx + 1,
        ftsRank: null,
        vectorScore: r.score,
        ftsScore: null,
      }));
    }
    const vectorRanks = this.indexByRank(vectorResults);
    const ftsRanks = this.indexByRank(ftsResults);
    // Union of ids from both sources, vector ids first.
    const allIds = new Set<number | string>([
      ...vectorRanks.keys(),
      ...ftsRanks.keys(),
    ]);
    const fused: RerankResult[] = [];
    for (const id of allIds) {
      const vectorData = vectorRanks.get(id);
      const ftsData = ftsRanks.get(id);
      let combinedScore = 0;
      if (vectorData) {
        combinedScore += this.config.vectorWeight * this.calculateRrfScore(vectorData.rank);
      }
      if (ftsData) {
        combinedScore += this.config.ftsWeight * this.calculateRrfScore(ftsData.rank);
      }
      fused.push({
        id,
        combinedScore,
        vectorRank: vectorData?.rank ?? null,
        ftsRank: ftsData?.rank ?? null,
        vectorScore: vectorData?.score ?? null,
        ftsScore: ftsData?.score ?? null,
      });
    }
    fused.sort((a, b) => b.combinedScore - a.combinedScore);
    return fused.slice(0, limit);
  }

  /**
   * Same as rerank(), but results present in BOTH sources get an extra
   * multiplicative bonus before the final sort.
   *
   * @param overlapBoost - Score multiplier for overlapping results (default: 1.2)
   */
  rerankWithOverlapBoost(
    vectorResults: RankableResult[],
    ftsResults: RankableResult[],
    limit: number,
    overlapBoost: number = 1.2
  ): RerankResult[] {
    const boosted = this.rerank(vectorResults, ftsResults, limit);
    for (const result of boosted) {
      if (result.vectorRank !== null && result.ftsRank !== null) {
        result.combinedScore *= overlapBoost;
      }
    }
    boosted.sort((a, b) => b.combinedScore - a.combinedScore);
    return boosted;
  }
}
/**
 * Construct a hybrid reranker with the given overrides.
 */
export function getHybridReranker(config?: Partial<RerankConfig>): HybridReranker {
  return new HybridReranker(config);
}
/**
 * Build a rerank config from environment variables, falling back to defaults.
 * CCCMEMORY_RERANK_ENABLED toggles; CCCMEMORY_RERANK_WEIGHT (0-1) sets the
 * vector weight (FTS gets the remainder); CCCMEMORY_RRF_K sets k.
 */
export function getRerankConfig(): RerankConfig {
  const config = { ...DEFAULT_RERANK_CONFIG };
  const { CCCMEMORY_RERANK_ENABLED, CCCMEMORY_RERANK_WEIGHT, CCCMEMORY_RRF_K } = process.env;
  if (CCCMEMORY_RERANK_ENABLED !== undefined) {
    config.enabled = CCCMEMORY_RERANK_ENABLED === "true";
  }
  if (CCCMEMORY_RERANK_WEIGHT) {
    const weight = parseFloat(CCCMEMORY_RERANK_WEIGHT);
    if (!Number.isNaN(weight) && weight >= 0 && weight <= 1) {
      config.vectorWeight = weight;
      config.ftsWeight = 1 - weight;
    }
  }
  if (CCCMEMORY_RRF_K) {
    const k = parseInt(CCCMEMORY_RRF_K, 10);
    if (!Number.isNaN(k) && k > 0) {
      config.rrfK = k;
    }
  }
  return config;
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/search/QueryExpander.ts | TypeScript | /**
* Query Expander
* Expands search queries with domain-specific synonyms
*/
/**
 * Configuration for query expansion
 */
export interface QueryExpansionConfig {
  /** Whether query expansion is enabled */
  enabled: boolean;
  /** Maximum number of expanded queries to generate */
  maxVariants: number;
  /** Custom synonym map (key → alternatives) */
  customSynonyms?: Map<string, string[]>;
}
export const DEFAULT_EXPANSION_CONFIG: QueryExpansionConfig = {
  enabled: false, // Disabled by default, can be enabled via env
  maxVariants: 3,
};
/**
 * Domain-specific synonym map for development contexts
 */
const DOMAIN_SYNONYMS: Map<string, string[]> = new Map([
  // Errors and bugs
  ["error", ["bug", "issue", "problem", "exception", "failure"]],
  ["bug", ["error", "issue", "defect", "problem"]],
  ["issue", ["problem", "bug", "error", "concern"]],
  ["exception", ["error", "crash", "failure", "thrown"]],
  ["crash", ["exception", "failure", "error", "abort"]],
  // API and endpoints
  ["api", ["endpoint", "interface", "service", "route"]],
  ["endpoint", ["api", "route", "path", "url"]],
  ["route", ["endpoint", "path", "url", "api"]],
  // Functions and methods
  ["function", ["method", "procedure", "routine", "handler"]],
  ["method", ["function", "procedure", "operation"]],
  ["handler", ["callback", "listener", "function"]],
  ["callback", ["handler", "listener", "hook"]],
  // Data structures
  ["array", ["list", "collection", "sequence"]],
  ["list", ["array", "collection", "items"]],
  ["object", ["instance", "entity", "record"]],
  ["map", ["dictionary", "hash", "hashmap"]],
  ["dictionary", ["map", "hash", "object"]],
  // Database terms
  ["database", ["db", "datastore", "storage"]],
  ["query", ["search", "lookup", "fetch", "retrieve"]],
  ["schema", ["structure", "model", "definition"]],
  ["migration", ["upgrade", "change", "update"]],
  // UI terms
  ["component", ["widget", "element", "module"]],
  ["button", ["btn", "control", "action"]],
  ["modal", ["dialog", "popup", "overlay"]],
  ["form", ["input", "fields", "submission"]],
  // Testing
  ["test", ["spec", "check", "verify", "validate"]],
  ["unit test", ["spec", "test case"]],
  ["mock", ["stub", "fake", "double"]],
  // Configuration
  ["config", ["configuration", "settings", "options"]],
  ["settings", ["config", "preferences", "options"]],
  ["option", ["setting", "parameter", "flag"]],
  // Authentication
  ["auth", ["authentication", "login", "authorization"]],
  ["authentication", ["auth", "login", "signin"]],
  ["authorization", ["auth", "permissions", "access"]],
  ["login", ["signin", "authenticate", "auth"]],
  // Security
  ["password", ["credential", "secret", "passphrase"]],
  ["token", ["jwt", "key", "credential"]],
  ["encryption", ["crypto", "cipher", "encrypt"]],
  // Performance
  ["performance", ["speed", "optimization", "efficiency"]],
  ["optimize", ["improve", "enhance", "speed up"]],
  ["cache", ["memoize", "store", "buffer"]],
  // State management
  ["state", ["data", "store", "context"]],
  ["store", ["state", "repository", "cache"]],
  // File operations
  ["file", ["document", "asset", "resource"]],
  ["upload", ["import", "submit", "send"]],
  ["download", ["export", "fetch", "retrieve"]],
  // Misc development terms
  ["deploy", ["release", "publish", "ship"]],
  ["build", ["compile", "bundle", "package"]],
  ["install", ["setup", "configure", "add"]],
  ["dependency", ["package", "module", "library"]],
  ["refactor", ["restructure", "rewrite", "improve"]],
]);
/**
 * Expands a query into up to `maxVariants` alternatives by substituting one
 * known synonym at a time. The original query is always the first variant.
 */
export class QueryExpander {
  private config: QueryExpansionConfig;
  private synonyms: Map<string, string[]>;

  constructor(config?: Partial<QueryExpansionConfig>) {
    this.config = { ...DEFAULT_EXPANSION_CONFIG, ...config };
    // Start from the built-in domain map, then fold in any custom synonyms,
    // deduplicating the alternatives per key.
    this.synonyms = new Map(DOMAIN_SYNONYMS);
    const custom = this.config.customSynonyms;
    if (custom) {
      for (const [key, values] of custom) {
        const current = this.synonyms.get(key) ?? [];
        this.synonyms.set(key, [...new Set([...current, ...values])]);
      }
    }
  }

  /** A defensive copy of the active configuration. */
  getConfig(): QueryExpansionConfig {
    return { ...this.config };
  }

  /**
   * Expand a query into multiple variants (original query first). Returns
   * just the original query when expansion is disabled.
   */
  expand(query: string): string[] {
    if (!this.config.enabled) {
      return [query];
    }
    const variants = new Set<string>([query]);
    const words = query.toLowerCase().split(/\s+/);
    // Positions in the query whose word has known alternatives.
    const slots: Array<{ index: number; synonyms: string[] }> = [];
    words.forEach((word, index) => {
      const alternatives = this.synonyms.get(word);
      if (alternatives && alternatives.length > 0) {
        slots.push({ index, synonyms: alternatives });
      }
    });
    // Substitute one word at a time until the variant budget is exhausted.
    outer: for (const { index, synonyms } of slots) {
      for (const alternative of synonyms) {
        if (variants.size >= this.config.maxVariants) {
          break outer;
        }
        const candidate = [...words];
        candidate[index] = alternative;
        variants.add(candidate.join(" "));
      }
    }
    return [...variants].slice(0, this.config.maxVariants);
  }

  /** Synonyms for a word (case-insensitive); empty array when unknown. */
  getSynonyms(word: string): string[] {
    return this.synonyms.get(word.toLowerCase()) ?? [];
  }

  /** Whether the word (case-insensitive) has any registered synonyms. */
  hasSynonyms(word: string): boolean {
    return this.synonyms.has(word.toLowerCase());
  }

  /** Register extra synonyms for a word, merged and deduplicated. */
  addSynonyms(word: string, synonyms: string[]): void {
    const key = word.toLowerCase();
    const current = this.synonyms.get(key) ?? [];
    this.synonyms.set(key, [...new Set([...current, ...synonyms])]);
  }
}
/**
 * Construct a query expander with the given overrides.
 */
export function getQueryExpander(config?: Partial<QueryExpansionConfig>): QueryExpander {
  return new QueryExpander(config);
}
/**
 * Build an expansion config from environment variables, falling back to
 * defaults. CCCMEMORY_QUERY_EXPANSION ("true") enables expansion;
 * CCCMEMORY_MAX_QUERY_VARIANTS caps the number of variants.
 */
export function getExpansionConfig(): QueryExpansionConfig {
  const config = { ...DEFAULT_EXPANSION_CONFIG };
  const { CCCMEMORY_QUERY_EXPANSION, CCCMEMORY_MAX_QUERY_VARIANTS } = process.env;
  if (CCCMEMORY_QUERY_EXPANSION !== undefined) {
    config.enabled = CCCMEMORY_QUERY_EXPANSION === "true";
  }
  if (CCCMEMORY_MAX_QUERY_VARIANTS) {
    const max = parseInt(CCCMEMORY_MAX_QUERY_VARIANTS, 10);
    if (!Number.isNaN(max) && max > 0) {
      config.maxVariants = max;
    }
  }
  return config;
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/search/ResultAggregator.ts | TypeScript | /**
* Result Aggregator
* Combines chunk search results back to message level
*/
import type { ChunkSearchResult } from "../embeddings/VectorStore.js";
/**
 * Match info from a single chunk
 */
export interface ChunkMatch {
  chunkId: string;
  // Position of this chunk within the parent message's chunk sequence
  chunkIndex: number;
  totalChunks: number;
  content: string;
  // Character offsets of the chunk within the original message text
  startOffset: number;
  endOffset: number;
  similarity: number;
}
/**
 * Aggregated result combining multiple chunks from the same message
 */
export interface AggregatedResult {
  messageId: number;
  similarity: number; // Max chunk similarity
  // All matching chunks for this message, sorted best-first
  matchedChunks: ChunkMatch[];
  bestSnippet: string; // From highest-scoring chunk
  totalChunks: number;
}
/**
 * Configuration for result aggregation
 */
export interface AggregationConfig {
  /** Minimum similarity threshold */
  minSimilarity: number;
  /** Maximum results to return after aggregation */
  limit: number;
  /** Whether to deduplicate similar content within conversation */
  deduplicate: boolean;
  /** Jaccard similarity threshold for deduplication (0-1) */
  deduplicationThreshold: number;
}
export const DEFAULT_AGGREGATION_CONFIG: AggregationConfig = {
  minSimilarity: 0.30,
  limit: 10,
  deduplicate: true,
  deduplicationThreshold: 0.7,
};
/**
 * Word-level Jaccard similarity between two strings.
 *
 * Tokens are lowercased, whitespace-split, and words of three characters or
 * fewer are ignored. Two empty token sets are considered identical (1.0);
 * exactly one empty set yields 0.0.
 */
function jaccardSimilarity(a: string, b: string): number {
  const tokenize = (text: string): Set<string> =>
    new Set(text.toLowerCase().split(/\s+/).filter((w) => w.length > 2));
  const wordsA = tokenize(a);
  const wordsB = tokenize(b);
  if (wordsA.size === 0 && wordsB.size === 0) {
    return 1.0;
  }
  if (wordsA.size === 0 || wordsB.size === 0) {
    return 0.0;
  }
  let shared = 0;
  for (const word of wordsA) {
    if (wordsB.has(word)) {
      shared++;
    }
  }
  const unionSize = new Set([...wordsA, ...wordsB]).size;
  return shared / unionSize;
}
/**
 * Groups chunk-level search hits by their parent message, scores each
 * message by its best chunk, and optionally deduplicates near-identical
 * snippets.
 */
export class ResultAggregator {
  private config: AggregationConfig;

  constructor(config?: Partial<AggregationConfig>) {
    this.config = { ...DEFAULT_AGGREGATION_CONFIG, ...config };
  }

  /**
   * Aggregate chunk search results by parent message. Chunks below the
   * similarity floor are discarded; each message's score is its best
   * chunk's similarity.
   */
  aggregate(chunkResults: ChunkSearchResult[]): AggregatedResult[] {
    // Bucket surviving chunks by parent message id.
    const byMessage = new Map<number, ChunkSearchResult[]>();
    for (const chunk of chunkResults) {
      if (chunk.similarity < this.config.minSimilarity) {
        continue;
      }
      const bucket = byMessage.get(chunk.messageId);
      if (bucket) {
        bucket.push(chunk);
      } else {
        byMessage.set(chunk.messageId, [chunk]);
      }
    }
    // Build one aggregated entry per message, best chunk first.
    const aggregated: AggregatedResult[] = [];
    for (const [messageId, chunks] of byMessage) {
      chunks.sort((a, b) => b.similarity - a.similarity);
      const best = chunks[0];
      aggregated.push({
        messageId,
        similarity: best.similarity,
        matchedChunks: chunks.map((c) => ({
          chunkId: c.chunkId,
          chunkIndex: c.chunkIndex,
          totalChunks: c.totalChunks,
          content: c.content,
          startOffset: c.startOffset,
          endOffset: c.endOffset,
          similarity: c.similarity,
        })),
        bestSnippet: best.content,
        totalChunks: best.totalChunks,
      });
    }
    aggregated.sort((a, b) => b.similarity - a.similarity);
    const finalResults = this.config.deduplicate
      ? this.deduplicateResults(aggregated)
      : aggregated;
    return finalResults.slice(0, this.config.limit);
  }

  /**
   * Greedy deduplication: walk results best-first and drop any whose best
   * snippet is too similar (Jaccard) to an already-kept snippet.
   */
  private deduplicateResults(results: AggregatedResult[]): AggregatedResult[] {
    const kept: AggregatedResult[] = [];
    for (const candidate of results) {
      const clashes = kept.some(
        (accepted) =>
          jaccardSimilarity(candidate.bestSnippet, accepted.bestSnippet) >=
          this.config.deduplicationThreshold
      );
      if (!clashes) {
        kept.push(candidate);
      }
    }
    return kept;
  }

  /**
   * Merge results from multiple search sources (e.g. chunk + message
   * embeddings). For message ids present in both, the higher similarity
   * (and its snippet) wins; chunk entries are updated in place.
   */
  mergeResults(
    chunkResults: AggregatedResult[],
    messageResults: Array<{ messageId: number; content: string; similarity: number }>
  ): AggregatedResult[] {
    const merged = new Map<number, AggregatedResult>();
    for (const result of chunkResults) {
      merged.set(result.messageId, result);
    }
    for (const msgResult of messageResults) {
      const existing = merged.get(msgResult.messageId);
      if (!existing) {
        // Message-only hit: synthesize an entry with no chunk detail.
        merged.set(msgResult.messageId, {
          messageId: msgResult.messageId,
          similarity: msgResult.similarity,
          matchedChunks: [],
          bestSnippet: msgResult.content,
          totalChunks: 1,
        });
      } else if (msgResult.similarity > existing.similarity) {
        existing.similarity = msgResult.similarity;
        existing.bestSnippet = msgResult.content;
      }
    }
    const combined = Array.from(merged.values());
    combined.sort((a, b) => b.similarity - a.similarity);
    return combined.slice(0, this.config.limit);
  }
}
/**
 * Get or create a result aggregator with given config
 *
 * Always returns a fresh instance; partial config is merged over
 * DEFAULT_AGGREGATION_CONFIG by the constructor.
 */
export function getResultAggregator(config?: Partial<AggregationConfig>): ResultAggregator {
  return new ResultAggregator(config);
}
// ============================================================================
// File: src/search/SemanticSearch.ts
// ============================================================================
/**
* Semantic Search Interface
* Combines vector store and embedding generation for conversation search
*/
import type { SQLiteManager } from "../storage/SQLiteManager.js";
import { VectorStore } from "../embeddings/VectorStore.js";
import { getEmbeddingGenerator, EmbeddingGenerator } from "../embeddings/EmbeddingGenerator.js";
import type { Message, Conversation } from "../parsers/ConversationParser.js";
import type { Decision } from "../parsers/DecisionExtractor.js";
import type { Mistake } from "../parsers/MistakeExtractor.js";
import type { MessageRow, DecisionRow, ConversationRow } from "../types/ToolTypes.js";
import { safeJsonParse } from "../utils/safeJson.js";
import { getTextChunker, getChunkingConfig } from "../chunking/index.js";
import { ResultAggregator } from "./ResultAggregator.js";
import { HybridReranker, getRerankConfig } from "./HybridReranker.js";
import { SnippetGenerator } from "./SnippetGenerator.js";
/** Optional constraints applied when searching messages. */
export interface SearchFilter {
  date_range?: [number, number]; // [start, end] timestamps, inclusive (SQL BETWEEN)
  message_type?: string[]; // allow-list of message types
  conversation_id?: string; // external conversation id to restrict to
}
/** A message hit enriched with its conversation and a query-relevant snippet. */
export interface SearchResult {
  message: Message;
  conversation: Conversation;
  similarity: number; // higher is closer; fixed at 0.5 for text-search fallback
  snippet: string;
}
/** A decision hit with its source conversation. */
export interface DecisionSearchResult {
  decision: Decision;
  conversation: Conversation;
  similarity: number;
}
/** A mistake hit with its source conversation. */
export interface MistakeSearchResult {
  mistake: Mistake;
  conversation: Conversation;
  similarity: number;
}
export class SemanticSearch {
private vectorStore: VectorStore;
private db: SQLiteManager;
  /**
   * @param sqliteManager - Shared database handle; the vector store is
   *   layered on top of the same connection.
   */
  constructor(sqliteManager: SQLiteManager) {
    this.db = sqliteManager;
    this.vectorStore = new VectorStore(sqliteManager);
  }
/**
* Index all messages for semantic search
* Uses chunking for long messages that exceed the embedding model's token limit
* @param messages - Messages to index
* @param incremental - If true, skip messages that already have embeddings (default: true for fast re-indexing)
*/
async indexMessages(
messages: Array<{ id: number; content?: string }>,
incremental: boolean = true
): Promise<void> {
console.error(`Indexing ${messages.length} messages...`);
const embedder = await getEmbeddingGenerator();
if (!embedder.isAvailable()) {
console.error("Embeddings not available - skipping indexing");
return;
}
// Filter messages with content
const messagesWithContent = messages.filter(
(m): m is { id: number; content: string } => !!m.content && m.content.trim().length > 0
);
// In incremental mode, skip messages that already have embeddings
let messagesToIndex = messagesWithContent;
if (incremental) {
const existingIds = this.vectorStore.getExistingMessageEmbeddingIds();
messagesToIndex = messagesWithContent.filter((m) => !existingIds.has(m.id));
if (messagesToIndex.length === 0) {
console.error(`⏭ All ${messagesWithContent.length} messages already have embeddings`);
return;
}
if (existingIds.size > 0) {
console.error(`⏭ Skipping ${messagesWithContent.length - messagesToIndex.length} already-embedded messages`);
}
}
console.error(`Generating embeddings for ${messagesToIndex.length} ${incremental ? "new " : ""}messages...`);
// Get model name from embedder info
const embedderInfo = EmbeddingGenerator.getInfo();
const modelName = embedderInfo?.model || "all-MiniLM-L6-v2";
// Check if chunking is enabled and supported
const chunkingConfig = getChunkingConfig();
const useChunking = chunkingConfig.enabled && this.vectorStore.hasChunkEmbeddingsTable();
if (useChunking) {
await this.indexMessagesWithChunking(messagesToIndex, embedder, modelName);
} else {
// Original behavior: embed full messages
const texts = messagesToIndex.map((m) => m.content);
const embeddings = await embedder.embedBatch(texts, 32);
for (let i = 0; i < messagesToIndex.length; i++) {
await this.vectorStore.storeMessageEmbedding(
messagesToIndex[i].id,
messagesToIndex[i].content,
embeddings[i],
modelName
);
}
}
console.error("✓ Indexing complete");
}
/**
* Index messages using chunking for long content
*/
  private async indexMessagesWithChunking(
    messages: Array<{ id: number; content: string }>,
    embedder: Awaited<ReturnType<typeof getEmbeddingGenerator>>,
    modelName: string
  ): Promise<void> {
    const chunker = getTextChunker();
    let totalChunks = 0;
    let chunkedMessages = 0;
    // Process each message; chunker decides per-message whether splitting is needed.
    for (const message of messages) {
      const chunkResult = chunker.chunk(message.content);
      if (chunkResult.wasChunked) {
        // Message was chunked - store chunk embeddings
        chunkedMessages++;
        // Generate embeddings for all chunks (batched, 32 at a time)
        const chunkTexts = chunkResult.chunks.map((c) => c.content);
        const chunkEmbeddings = await embedder.embedBatch(chunkTexts, 32);
        // Store chunk embeddings, each linked back to the parent message id
        for (let i = 0; i < chunkResult.chunks.length; i++) {
          await this.vectorStore.storeChunkEmbedding({
            messageId: message.id,
            chunk: chunkResult.chunks[i],
            embedding: chunkEmbeddings[i],
            modelName,
          });
        }
        totalChunks += chunkResult.chunks.length;
        // Also store the first chunk as the "representative" message embedding
        // This ensures backwards compatibility with non-chunk-aware search
        // (reuses chunkEmbeddings[0] rather than re-embedding).
        await this.vectorStore.storeMessageEmbedding(
          message.id,
          chunkResult.chunks[0].content,
          chunkEmbeddings[0],
          modelName
        );
      } else {
        // Message fits in single embedding - use standard approach
        const embedding = await embedder.embed(message.content);
        await this.vectorStore.storeMessageEmbedding(
          message.id,
          message.content,
          embedding,
          modelName
        );
      }
    }
    // Summary log only when chunking actually happened.
    if (chunkedMessages > 0) {
      console.error(`📦 Chunked ${chunkedMessages} long messages into ${totalChunks} chunks`);
    }
  }
/**
* Index decisions for semantic search
* @param decisions - Decisions to index
* @param incremental - If true, skip decisions that already have embeddings (default: true for fast re-indexing)
*/
async indexDecisions(
decisions: Array<{ id: number; decision_text: string; rationale?: string; context?: string | null }>,
incremental: boolean = true
): Promise<void> {
console.error(`Indexing ${decisions.length} decisions...`);
const embedder = await getEmbeddingGenerator();
if (!embedder.isAvailable()) {
console.error("Embeddings not available - skipping decision indexing");
return;
}
// In incremental mode, skip decisions that already have embeddings
let decisionsToIndex = decisions;
if (incremental) {
const existingIds = this.vectorStore.getExistingDecisionEmbeddingIds();
decisionsToIndex = decisions.filter((d) => !existingIds.has(d.id));
if (decisionsToIndex.length === 0) {
console.error(`⏭ All ${decisions.length} decisions already have embeddings`);
return;
}
if (existingIds.size > 0) {
console.error(`⏭ Skipping ${decisions.length - decisionsToIndex.length} already-embedded decisions`);
}
}
console.error(`Generating embeddings for ${decisionsToIndex.length} ${incremental ? "new " : ""}decisions...`);
// Generate embeddings for decision text + rationale
const texts = decisionsToIndex.map((d) => {
const parts = [d.decision_text];
if (d.rationale) {parts.push(d.rationale);}
if (d.context) {parts.push(d.context);}
return parts.join(" ");
});
const embeddings = await embedder.embedBatch(texts, 32);
// Store embeddings
for (let i = 0; i < decisionsToIndex.length; i++) {
await this.vectorStore.storeDecisionEmbedding(
decisionsToIndex[i].id,
embeddings[i]
);
}
console.error("✓ Decision indexing complete");
}
/**
* Index all decisions in the database that don't have embeddings.
* This catches decisions that were stored before embeddings were available.
*/
  async indexMissingDecisionEmbeddings(): Promise<number> {
    const embedder = await getEmbeddingGenerator();
    if (!embedder.isAvailable()) {
      console.error("Embeddings not available - skipping missing decision indexing");
      return 0;
    }
    // Get decisions without embeddings: load all, subtract already-embedded ids.
    const existingIds = this.vectorStore.getExistingDecisionEmbeddingIds();
    // Local row shape; shadows the imported DecisionRow from ToolTypes on purpose
    // (only these four columns are selected).
    interface DecisionRow {
      id: number;
      decision_text: string;
      rationale: string | null;
      context: string | null;
    }
    const allDecisions = this.db
      .prepare("SELECT id, decision_text, rationale, context FROM decisions")
      .all() as DecisionRow[];
    const missingDecisions = allDecisions.filter((d) => !existingIds.has(d.id));
    if (missingDecisions.length === 0) {
      return 0;
    }
    console.error(`Generating embeddings for ${missingDecisions.length} decisions missing embeddings...`);
    // Generate embeddings for decision text + rationale (+ context when present),
    // mirroring the text construction in indexDecisions.
    const texts = missingDecisions.map((d) => {
      const parts = [d.decision_text];
      if (d.rationale) {parts.push(d.rationale);}
      if (d.context) {parts.push(d.context);}
      return parts.join(" ");
    });
    const embeddings = await embedder.embedBatch(texts, 32);
    // Store embeddings
    for (let i = 0; i < missingDecisions.length; i++) {
      await this.vectorStore.storeDecisionEmbedding(
        missingDecisions[i].id,
        embeddings[i]
      );
    }
    console.error(`✓ Generated ${missingDecisions.length} missing decision embeddings`);
    return missingDecisions.length;
  }
/**
* Index mistakes for semantic search
* @param mistakes - Mistakes to index
* @param incremental - If true, skip mistakes that already have embeddings (default: true)
*/
  async indexMistakes(
    mistakes: Array<{ id: number; what_went_wrong: string; correction?: string | null; mistake_type: string }>,
    incremental: boolean = true
  ): Promise<void> {
    console.error(`Indexing ${mistakes.length} mistakes...`);
    const embedder = await getEmbeddingGenerator();
    if (!embedder.isAvailable()) {
      console.error("Embeddings not available - skipping mistake indexing");
      return;
    }
    // In incremental mode, skip mistakes that already have embeddings
    let mistakesToIndex = mistakes;
    if (incremental) {
      const existingIds = this.vectorStore.getExistingMistakeEmbeddingIds();
      mistakesToIndex = mistakes.filter((m) => !existingIds.has(m.id));
      if (mistakesToIndex.length === 0) {
        console.error(`⏭ All ${mistakes.length} mistakes already have embeddings`);
        return;
      }
      if (existingIds.size > 0) {
        console.error(`⏭ Skipping ${mistakes.length - mistakesToIndex.length} already-embedded mistakes`);
      }
    }
    console.error(`Generating embeddings for ${mistakesToIndex.length} ${incremental ? "new " : ""}mistakes...`);
    // Generate embeddings for mistake text + correction + type, concatenated
    // into a single embedding input (mirrors indexMissingMistakeEmbeddings).
    const texts = mistakesToIndex.map((m) => {
      const parts = [m.what_went_wrong];
      if (m.correction) {parts.push(m.correction);}
      if (m.mistake_type) {parts.push(m.mistake_type);}
      return parts.join(" ");
    });
    const embeddings = await embedder.embedBatch(texts, 32);
    // Store embeddings
    for (let i = 0; i < mistakesToIndex.length; i++) {
      await this.vectorStore.storeMistakeEmbedding(
        mistakesToIndex[i].id,
        embeddings[i]
      );
    }
    console.error("✓ Mistake indexing complete");
  }
/**
* Index all mistakes in the database that don't have embeddings.
* This catches mistakes that were stored before embeddings were available.
*/
async indexMissingMistakeEmbeddings(): Promise<number> {
const embedder = await getEmbeddingGenerator();
if (!embedder.isAvailable()) {
console.error("Embeddings not available - skipping missing mistake indexing");
return 0;
}
// Get mistakes without embeddings
const existingIds = this.vectorStore.getExistingMistakeEmbeddingIds();
interface MistakeRow {
id: number;
what_went_wrong: string;
correction: string | null;
mistake_type: string;
}
const allMistakes = this.db
.prepare("SELECT id, what_went_wrong, correction, mistake_type FROM mistakes")
.all() as MistakeRow[];
const missingMistakes = allMistakes.filter((m) => !existingIds.has(m.id));
if (missingMistakes.length === 0) {
return 0;
}
console.error(`Generating embeddings for ${missingMistakes.length} mistakes missing embeddings...`);
// Generate embeddings for mistake text + correction
const texts = missingMistakes.map((m) => {
const parts = [m.what_went_wrong];
if (m.correction) {parts.push(m.correction);}
if (m.mistake_type) {parts.push(m.mistake_type);}
return parts.join(" ");
});
const embeddings = await embedder.embedBatch(texts, 32);
// Store embeddings
for (let i = 0; i < missingMistakes.length; i++) {
await this.vectorStore.storeMistakeEmbedding(
missingMistakes[i].id,
embeddings[i]
);
}
console.error(`✓ Generated ${missingMistakes.length} missing mistake embeddings`);
return missingMistakes.length;
}
/**
* Search for mistakes using semantic search
*/
  async searchMistakes(
    query: string,
    limit: number = 10
  ): Promise<MistakeSearchResult[]> {
    const embedder = await getEmbeddingGenerator();
    if (!embedder.isAvailable()) {
      console.error("Embeddings not available - using text search");
      return this.fallbackMistakeSearch(query, limit);
    }
    try {
      // Generate query embedding
      const queryEmbedding = await embedder.embed(query);
      this.vectorStore.prepareVecTables(queryEmbedding.length);
      // Use vec_distance_cosine for efficient ANN search with JOINs
      // Note: Must include byteOffset/byteLength in case Float32Array is a view
      const queryBuffer = Buffer.from(queryEmbedding.buffer, queryEmbedding.byteOffset, queryEmbedding.byteLength);
      // Single query joins vec table -> embedding mapping -> mistakes ->
      // conversations (and optionally messages) to avoid N+1 lookups.
      const rows = this.db
        .prepare(
          `SELECT
            vec.id as vec_id,
            vec_distance_cosine(vec.embedding, ?) as distance,
            m.external_id as mistake_external_id,
            m.conversation_id,
            m.message_id,
            m.mistake_type,
            m.what_went_wrong,
            m.correction,
            m.user_correction_message,
            m.files_affected,
            m.timestamp,
            c.id as conv_id,
            c.external_id as conv_external_id,
            c.project_path,
            c.source_type,
            c.first_message_at,
            c.last_message_at,
            c.message_count,
            c.git_branch,
            c.claude_version,
            c.metadata as conv_metadata,
            c.created_at as conv_created_at,
            c.updated_at as conv_updated_at,
            msg.external_id as message_external_id
          FROM vec_mistake_embeddings vec
          JOIN mistake_embeddings me ON vec.id = me.id
          JOIN mistakes m ON me.mistake_id = m.id
          JOIN conversations c ON m.conversation_id = c.id
          LEFT JOIN messages msg ON m.message_id = msg.id
          ORDER BY distance
          LIMIT ?`
        )
        .all(queryBuffer, limit) as Array<{
          vec_id: string;
          distance: number;
          mistake_external_id: string;
          conversation_id: number;
          message_id: number;
          mistake_type: string;
          what_went_wrong: string;
          correction: string | null;
          user_correction_message: string | null;
          files_affected: string;
          timestamp: number;
          conv_id: number;
          conv_external_id: string;
          project_path: string;
          source_type: string;
          first_message_at: number;
          last_message_at: number;
          message_count: number;
          git_branch: string;
          claude_version: string;
          conv_metadata: string;
          conv_created_at: number;
          conv_updated_at: number;
          message_external_id: string | null;
        }>;
      // Fall back to FTS if vector search returned no results
      if (rows.length === 0) {
        if (process.env.NODE_ENV !== "test") {
          console.error("Vector search returned no mistake results - falling back to FTS");
        }
        return this.fallbackMistakeSearch(query, limit);
      }
      const results: MistakeSearchResult[] = [];
      for (const row of rows) {
        // LEFT JOIN may yield no message row; such mistakes are dropped,
        // so fewer than `limit` results can be returned.
        if (!row.message_external_id) {
          continue;
        }
        results.push({
          mistake: {
            id: row.mistake_external_id,
            conversation_id: row.conv_external_id,
            message_id: row.message_external_id,
            mistake_type: row.mistake_type as Mistake["mistake_type"],
            what_went_wrong: row.what_went_wrong,
            correction: row.correction || undefined,
            user_correction_message: row.user_correction_message || undefined,
            files_affected: safeJsonParse<string[]>(row.files_affected, []),
            timestamp: row.timestamp,
          },
          conversation: {
            id: row.conv_external_id,
            project_path: row.project_path,
            source_type: row.source_type as "claude-code" | "codex",
            first_message_at: row.first_message_at,
            last_message_at: row.last_message_at,
            message_count: row.message_count,
            git_branch: row.git_branch,
            claude_version: row.claude_version,
            metadata: safeJsonParse<Record<string, unknown>>(row.conv_metadata, {}),
            created_at: row.conv_created_at,
            updated_at: row.conv_updated_at,
          },
          similarity: 1 - row.distance, // Convert distance to similarity
        });
      }
      return results;
    } catch (error) {
      // Fallback to text search if vec search fails
      const message = error instanceof Error ? error.message : String(error);
      // A missing vec table is expected on DBs without the extension; stay quiet then.
      if (
        process.env.NODE_ENV !== "test" &&
        !message.includes("no such table: vec_mistake_embeddings")
      ) {
        console.error("Vec mistake search failed, falling back to text search:", message);
      }
      return this.fallbackMistakeSearch(query, limit);
    }
  }
/**
* Search conversations using natural language query
* Uses chunk search for better coverage of long messages
* @param query - The search query text
* @param limit - Maximum results to return
* @param filter - Optional filter criteria
* @param precomputedEmbedding - Optional pre-computed embedding to avoid re-embedding
*/
  async searchConversations(
    query: string,
    limit: number = 10,
    filter?: SearchFilter,
    precomputedEmbedding?: Float32Array
  ): Promise<SearchResult[]> {
    const embedder = await getEmbeddingGenerator();
    // A precomputed embedding lets us search even when the embedder is down.
    if (!embedder.isAvailable() && !precomputedEmbedding) {
      console.error("Embeddings not available - falling back to full-text search");
      return this.fallbackFullTextSearch(query, limit, filter);
    }
    try {
      // Use pre-computed embedding if provided, otherwise generate
      const queryEmbedding = precomputedEmbedding ?? await embedder.embed(query);
      // Check if chunk embeddings are available (table exists AND has rows)
      const useChunks = this.vectorStore.hasChunkEmbeddingsTable() &&
        this.vectorStore.getChunkEmbeddingCount() > 0;
      let enrichedResults: SearchResult[] = [];
      if (useChunks) {
        // Use hybrid search: chunks + messages
        enrichedResults = await this.searchWithChunkAggregation(
          queryEmbedding,
          query,
          limit,
          filter
        );
      } else {
        // Original behavior: search message embeddings only
        const vectorResults = await this.vectorStore.searchMessages(
          queryEmbedding,
          limit * 2 // Get more results for filtering
        );
        for (const vecResult of vectorResults) {
          const message = this.getMessage(vecResult.id);
          if (!message) {continue;}
          // Apply filters
          if (filter) {
            if (!this.applyFilter(message, filter)) {continue;}
          }
          const conversation = this.getConversation(message.conversation_internal_id);
          if (!conversation) {continue;}
          enrichedResults.push({
            message,
            conversation,
            similarity: vecResult.similarity,
            snippet: this.generateSnippet(vecResult.content, query),
          });
          if (enrichedResults.length >= limit) {break;}
        }
      }
      // Fall back to FTS if vector search returned no results
      if (enrichedResults.length === 0) {
        if (process.env.NODE_ENV !== "test") {
          console.error("Vector search returned no results - falling back to FTS");
        }
        return this.fallbackFullTextSearch(query, limit, filter);
      }
      return enrichedResults;
    } catch (error) {
      // If embedding fails, fall back to FTS
      console.error("Embedding error, falling back to FTS:", (error as Error).message);
      return this.fallbackFullTextSearch(query, limit, filter);
    }
  }
/**
* Search using chunk aggregation for better coverage of long messages
* Now includes hybrid re-ranking with FTS results
*/
  private async searchWithChunkAggregation(
    queryEmbedding: Float32Array,
    query: string,
    limit: number,
    filter?: SearchFilter
  ): Promise<SearchResult[]> {
    // Calculate dynamic similarity threshold (longer query => stricter cutoff)
    const minSimilarity = this.calculateDynamicThreshold(query);
    // Search chunks with 3x limit for aggregation headroom
    const chunkResults = await this.vectorStore.searchChunks(queryEmbedding, limit * 3);
    // Also search message embeddings for non-chunked messages
    const messageResults = await this.vectorStore.searchMessages(queryEmbedding, limit * 2);
    // Aggregate chunk results by message
    const aggregator = new ResultAggregator({
      minSimilarity,
      limit: limit * 2, // Get more for filtering/reranking
      deduplicate: true,
      deduplicationThreshold: 0.7,
    });
    const aggregatedChunks = aggregator.aggregate(chunkResults);
    // Merge with message results (max similarity wins on duplicate ids)
    const mergedResults = aggregator.mergeResults(
      aggregatedChunks,
      messageResults.map((r) => ({
        messageId: r.id,
        content: r.content,
        similarity: r.similarity,
      }))
    );
    // Check if hybrid re-ranking is enabled
    const rerankConfig = getRerankConfig();
    let rankedResults: Array<{ messageId: number; similarity: number; snippet: string }>;
    if (rerankConfig.enabled) {
      // Get FTS results for re-ranking
      const ftsMessageIds = this.getFtsMessageIds(query, limit * 2, filter);
      if (ftsMessageIds.length > 0) {
        // Create reranker
        const reranker = new HybridReranker(rerankConfig);
        // Prepare results for reranking
        const vectorRankable = mergedResults.map((r) => ({
          id: r.messageId,
          score: r.similarity,
        }));
        // FTS has no natural score; use reciprocal rank as a proxy.
        const ftsRankable = ftsMessageIds.map((r, idx) => ({
          id: r.id,
          score: 1 / (idx + 1), // Convert rank to score
        }));
        // Apply RRF (reciprocal rank fusion) with overlap boost
        const reranked = reranker.rerankWithOverlapBoost(
          vectorRankable,
          ftsRankable,
          limit * 2
        );
        // Map reranked results back to our format
        const resultMap = new Map(
          mergedResults.map((r) => [r.messageId, r])
        );
        rankedResults = reranked
          .map((rr) => {
            const original = resultMap.get(rr.id as number);
            if (original) {
              return {
                messageId: original.messageId,
                similarity: rr.combinedScore,
                snippet: original.bestSnippet,
              };
            }
            // FTS-only result - need to fetch content
            return {
              messageId: rr.id as number,
              similarity: rr.combinedScore,
              snippet: "", // Will be filled later
            };
          })
          // NOTE(review): defensive only — the map above never returns null.
          .filter((r) => r !== null);
      } else {
        // No FTS results, use vector-only
        rankedResults = mergedResults.map((r) => ({
          messageId: r.messageId,
          similarity: r.similarity,
          snippet: r.bestSnippet,
        }));
      }
    } else {
      // Re-ranking disabled, use merged results directly
      rankedResults = mergedResults.map((r) => ({
        messageId: r.messageId,
        similarity: r.similarity,
        snippet: r.bestSnippet,
      }));
    }
    // Enrich with message and conversation data
    const enrichedResults: SearchResult[] = [];
    for (const result of rankedResults) {
      const message = this.getMessage(result.messageId);
      if (!message) {continue;}
      // Apply filters
      if (filter) {
        if (!this.applyFilter(message, filter)) {continue;}
      }
      const conversation = this.getConversation(message.conversation_internal_id);
      if (!conversation) {continue;}
      // If snippet is empty (FTS-only result), generate it from full message content
      const snippet = result.snippet || message.content || "";
      enrichedResults.push({
        message,
        conversation,
        similarity: result.similarity,
        snippet: this.generateSnippet(snippet, query),
      });
      if (enrichedResults.length >= limit) {break;}
    }
    return enrichedResults;
  }
/**
* Get message IDs from FTS search (for re-ranking)
*/
  private getFtsMessageIds(
    query: string,
    limit: number,
    filter?: SearchFilter
  ): Array<{ id: number; content: string }> {
    // Quote words so FTS5 operators in the query are treated literally.
    const ftsQuery = this.sanitizeFtsQuery(query);
    try {
      // Filters below append "AND ..." after the IN (...) subquery, so the
      // base WHERE clause must stay first.
      let sql = `
        SELECT m.id, m.content
        FROM messages m
        WHERE m.id IN (
          SELECT id FROM messages_fts WHERE messages_fts MATCH ?
        )
      `;
      const params: (string | number)[] = [ftsQuery];
      // Apply filters
      if (filter) {
        if (filter.date_range) {
          sql += " AND m.timestamp BETWEEN ? AND ?";
          params.push(filter.date_range[0], filter.date_range[1]);
        }
        if (filter.message_type && filter.message_type.length > 0) {
          sql += ` AND m.message_type IN (${filter.message_type.map(() => "?").join(",")})`;
          params.push(...filter.message_type);
        }
        if (filter.conversation_id) {
          sql += " AND m.conversation_id IN (SELECT id FROM conversations WHERE external_id = ?)";
          params.push(filter.conversation_id);
        }
      }
      sql += " ORDER BY m.timestamp DESC LIMIT ?";
      params.push(limit);
      const rows = this.db.prepare(sql).all(...params) as Array<{
        id: number;
        content: string;
      }>;
      return rows;
    } catch (_e) {
      // FTS might not be available; re-ranking simply gets no FTS signal.
      return [];
    }
  }
/**
* Calculate dynamic similarity threshold based on query length
* Longer queries should have higher thresholds (more context = better matching)
*/
private calculateDynamicThreshold(query: string): number {
const baseThreshold = 0.30;
const maxThreshold = 0.55;
const words = query.trim().split(/\s+/).length;
// Add 0.01 per word, capped at maxThreshold
return Math.min(baseThreshold + words * 0.01, maxThreshold);
}
/**
* Search for decisions
*/
  async searchDecisions(
    query: string,
    limit: number = 10
  ): Promise<DecisionSearchResult[]> {
    const embedder = await getEmbeddingGenerator();
    if (!embedder.isAvailable()) {
      console.error("Embeddings not available - using text search");
      return this.fallbackDecisionSearch(query, limit);
    }
    try {
      // Generate query embedding
      const queryEmbedding = await embedder.embed(query);
      this.vectorStore.prepareVecTables(queryEmbedding.length);
      // Use vec_distance_cosine for efficient ANN search with JOINs to avoid N+1 queries
      // Note: Must include byteOffset/byteLength in case Float32Array is a view
      const queryBuffer = Buffer.from(queryEmbedding.buffer, queryEmbedding.byteOffset, queryEmbedding.byteLength);
      const rows = this.db
        .prepare(
          `SELECT
            vec.id as vec_id,
            vec_distance_cosine(vec.embedding, ?) as distance,
            d.external_id as decision_external_id,
            d.conversation_id,
            d.message_id,
            d.decision_text,
            d.rationale,
            d.alternatives_considered,
            d.rejected_reasons,
            d.context,
            d.related_files,
            d.related_commits,
            d.timestamp,
            c.id as conv_id,
            c.external_id as conv_external_id,
            c.project_path,
            c.source_type,
            c.first_message_at,
            c.last_message_at,
            c.message_count,
            c.git_branch,
            c.claude_version,
            c.metadata as conv_metadata,
            c.created_at as conv_created_at,
            c.updated_at as conv_updated_at,
            m.external_id as message_external_id
          FROM vec_decision_embeddings vec
          JOIN decision_embeddings de ON vec.id = de.id
          JOIN decisions d ON de.decision_id = d.id
          JOIN conversations c ON d.conversation_id = c.id
          LEFT JOIN messages m ON d.message_id = m.id
          ORDER BY distance
          LIMIT ?`
        )
        .all(queryBuffer, limit) as Array<{
          vec_id: string;
          distance: number;
          decision_external_id: string;
          conversation_id: number;
          message_id: number;
          decision_text: string;
          rationale: string;
          alternatives_considered: string;
          rejected_reasons: string;
          context: string;
          related_files: string;
          related_commits: string;
          timestamp: number;
          conv_id: number;
          conv_external_id: string;
          project_path: string;
          source_type: string;
          first_message_at: number;
          last_message_at: number;
          message_count: number;
          git_branch: string;
          claude_version: string;
          conv_metadata: string;
          conv_created_at: number;
          conv_updated_at: number;
          message_external_id: string | null;
        }>;
      // Fall back to FTS if vector search returned no results
      if (rows.length === 0) {
        if (process.env.NODE_ENV !== "test") {
          console.error("Vector search returned no decision results - falling back to FTS");
        }
        return this.fallbackDecisionSearch(query, limit);
      }
      const results: DecisionSearchResult[] = [];
      for (const row of rows) {
        // LEFT JOIN may yield no message row; such decisions are dropped,
        // so fewer than `limit` results can be returned.
        if (!row.message_external_id) {
          continue;
        }
        results.push({
          decision: {
            id: row.decision_external_id,
            conversation_id: row.conv_external_id,
            message_id: row.message_external_id,
            decision_text: row.decision_text,
            rationale: row.rationale,
            alternatives_considered: safeJsonParse<string[]>(row.alternatives_considered, []),
            rejected_reasons: safeJsonParse<Record<string, string>>(row.rejected_reasons, {}),
            context: row.context,
            related_files: safeJsonParse<string[]>(row.related_files, []),
            related_commits: safeJsonParse<string[]>(row.related_commits, []),
            timestamp: row.timestamp,
          },
          conversation: {
            id: row.conv_external_id,
            project_path: row.project_path,
            source_type: row.source_type as "claude-code" | "codex",
            first_message_at: row.first_message_at,
            last_message_at: row.last_message_at,
            message_count: row.message_count,
            git_branch: row.git_branch,
            claude_version: row.claude_version,
            metadata: safeJsonParse<Record<string, unknown>>(row.conv_metadata, {}),
            created_at: row.conv_created_at,
            updated_at: row.conv_updated_at,
          },
          similarity: 1 - row.distance, // Convert distance to similarity
        });
      }
      return results;
    } catch (error) {
      // Fallback to text search if vec search fails (e.g., table doesn't exist)
      const message = error instanceof Error ? error.message : String(error);
      // A missing vec table is expected on DBs without the extension; stay quiet then.
      if (
        process.env.NODE_ENV !== "test" &&
        !message.includes("no such table: vec_decision_embeddings")
      ) {
        console.error("Vec decision search failed, falling back to text search:", message);
      }
      return this.fallbackDecisionSearch(query, limit);
    }
  }
/**
* Sanitize query for FTS5 MATCH syntax.
* FTS5 has special characters that need escaping: . * " - + ( ) OR AND NOT
* We wrap each word in double quotes to treat them as literal strings.
*/
private sanitizeFtsQuery(query: string): string {
// Split into words and wrap each in double quotes to escape special chars
// Also escape any existing double quotes within words
const words = query.trim().split(/\s+/).filter(w => w.length > 0);
if (words.length === 0) {
return '""'; // Empty query
}
// Escape double quotes and wrap each word
const escapedWords = words.map(word => {
// Escape internal double quotes by doubling them
const escaped = word.replace(/"/g, '""');
return `"${escaped}"`;
});
// Join with space (implicit AND in FTS5)
return escapedWords.join(' ');
}
/**
* Fallback to full-text search when embeddings unavailable
*/
  private fallbackFullTextSearch(
    query: string,
    limit: number,
    filter?: SearchFilter
  ): SearchResult[] {
    // Sanitize the query for FTS5 syntax
    const ftsQuery = this.sanitizeFtsQuery(query);
    // Joined row shape shared by both the FTS and LIKE queries below.
    interface JoinedRow {
      internal_message_id: number;
      message_external_id: string;
      parent_external_id?: string | null;
      message_type: string;
      role?: string;
      content?: string;
      timestamp: number;
      is_sidechain: number;
      agent_id?: string;
      request_id?: string;
      git_branch?: string;
      cwd?: string;
      metadata: string;
      conv_internal_id: number;
      conv_external_id: string;
      project_path: string;
      first_message_at: number;
      last_message_at: number;
      conv_message_count: number;
      conv_git_branch?: string;
      claude_version?: string;
      conv_metadata: string;
      conv_created_at: number;
      conv_updated_at: number;
    }
    // Shared row -> SearchResult mapper so FTS and LIKE paths stay consistent.
    const mapRowToResult = (row: JoinedRow): SearchResult => {
      const conversation = {
        id: row.conv_external_id,
        project_path: row.project_path,
        first_message_at: row.first_message_at,
        last_message_at: row.last_message_at,
        message_count: row.conv_message_count,
        git_branch: row.conv_git_branch,
        claude_version: row.claude_version,
        metadata: safeJsonParse<Record<string, unknown>>(row.conv_metadata, {}),
        created_at: row.conv_created_at,
        updated_at: row.conv_updated_at,
      };
      return {
        message: {
          id: row.message_external_id,
          conversation_id: row.conv_external_id,
          parent_id: row.parent_external_id ?? undefined,
          message_type: row.message_type,
          role: row.role,
          content: row.content,
          timestamp: row.timestamp,
          is_sidechain: Boolean(row.is_sidechain),
          agent_id: row.agent_id,
          request_id: row.request_id,
          git_branch: row.git_branch,
          cwd: row.cwd,
          metadata: safeJsonParse<Record<string, unknown>>(row.metadata, {}),
        } as Message,
        conversation,
        similarity: 0.5, // Default similarity for FTS/LIKE
        snippet: this.generateSnippet(row.content || "", query),
      };
    };
    // Try FTS first, fall back to LIKE if FTS fails
    try {
      let sql = `
        SELECT
          m.id as internal_message_id,
          m.external_id as message_external_id,
          m.parent_external_id,
          m.message_type,
          m.role,
          m.content,
          m.timestamp,
          m.is_sidechain,
          m.agent_id,
          m.request_id,
          m.git_branch,
          m.cwd,
          m.metadata,
          c.id as conv_internal_id,
          c.external_id as conv_external_id,
          c.project_path,
          c.first_message_at,
          c.last_message_at,
          c.message_count as conv_message_count,
          c.git_branch as conv_git_branch,
          c.claude_version,
          c.metadata as conv_metadata,
          c.created_at as conv_created_at,
          c.updated_at as conv_updated_at
        FROM messages m
        JOIN conversations c ON m.conversation_id = c.id
        WHERE m.id IN (
          SELECT id FROM messages_fts WHERE messages_fts MATCH ?
        )
      `;
      const params: (string | number)[] = [ftsQuery];
      // Apply filters (appended as AND clauses after the base WHERE)
      if (filter) {
        if (filter.date_range) {
          sql += " AND m.timestamp BETWEEN ? AND ?";
          params.push(filter.date_range[0], filter.date_range[1]);
        }
        if (filter.message_type && filter.message_type.length > 0) {
          sql += ` AND m.message_type IN (${filter.message_type.map(() => "?").join(",")})`;
          params.push(...filter.message_type);
        }
        if (filter.conversation_id) {
          sql += " AND c.external_id = ?";
          params.push(filter.conversation_id);
        }
      }
      sql += " ORDER BY m.timestamp DESC LIMIT ?";
      params.push(limit);
      const rows = this.db.prepare(sql).all(...params) as JoinedRow[];
      return rows.map(mapRowToResult);
    } catch (_e) {
      // FTS table may not exist or be corrupted, fall back to LIKE search
      console.error("Messages FTS not available, using LIKE search");
      let sql = `
        SELECT
          m.id as internal_message_id,
          m.external_id as message_external_id,
          m.parent_external_id,
          m.message_type,
          m.role,
          m.content,
          m.timestamp,
          m.is_sidechain,
          m.agent_id,
          m.request_id,
          m.git_branch,
          m.cwd,
          m.metadata,
          c.id as conv_internal_id,
          c.external_id as conv_external_id,
          c.project_path,
          c.first_message_at,
          c.last_message_at,
          c.message_count as conv_message_count,
          c.git_branch as conv_git_branch,
          c.claude_version,
          c.metadata as conv_metadata,
          c.created_at as conv_created_at,
          c.updated_at as conv_updated_at
        FROM messages m
        JOIN conversations c ON m.conversation_id = c.id
        WHERE m.content LIKE ?
      `;
      // NOTE(review): the raw query is embedded in the LIKE pattern, so user
      // '%'/'_' characters act as wildcards here (parameterized, so no injection).
      const likeQuery = `%${query}%`;
      const params: (string | number)[] = [likeQuery];
      // Apply filters
      if (filter) {
        if (filter.date_range) {
          sql += " AND m.timestamp BETWEEN ? AND ?";
          params.push(filter.date_range[0], filter.date_range[1]);
        }
        if (filter.message_type && filter.message_type.length > 0) {
          sql += ` AND m.message_type IN (${filter.message_type.map(() => "?").join(",")})`;
          params.push(...filter.message_type);
        }
        if (filter.conversation_id) {
          sql += " AND c.external_id = ?";
          params.push(filter.conversation_id);
        }
      }
      sql += " ORDER BY m.timestamp DESC LIMIT ?";
      params.push(limit);
      const rows = this.db.prepare(sql).all(...params) as JoinedRow[];
      return rows.map(mapRowToResult);
    }
  }
/**
 * Fallback decision search
 *
 * Attempts an FTS5 MATCH query first; if the decisions_fts table is missing
 * or corrupted, degrades to a LIKE scan over decision_text/rationale/context.
 * Decisions whose source message no longer exists (LEFT JOIN yielded NULL)
 * are dropped. Similarity is fixed at 0.5 since no vector score exists here.
 */
private fallbackDecisionSearch(
  query: string,
  limit: number
): DecisionSearchResult[] {
  // Sanitize the query for FTS5 syntax
  const ftsQuery = this.sanitizeFtsQuery(query);

  type RawRow = DecisionRow & { message_external_id?: string | null };

  // Type guard: keep only rows whose linked message still exists.
  const hasMessageId = (
    row: RawRow
  ): row is DecisionRow & { message_external_id: string } =>
    Boolean(row.message_external_id);

  const toResult = (
    row: DecisionRow & { message_external_id: string }
  ): DecisionSearchResult => {
    const conversation = this.getConversation(row.conversation_id);
    if (!conversation) {
      console.error(`Warning: Conversation ${row.conversation_id} not found for decision ${row.id}`);
      throw new Error(`Data integrity error: Conversation ${row.conversation_id} not found`);
    }
    const decision = {
      id: row.external_id,
      conversation_id: conversation.id,
      message_id: row.message_external_id,
      decision_text: row.decision_text,
      rationale: row.rationale,
      alternatives_considered: safeJsonParse<string[]>(row.alternatives_considered, []),
      rejected_reasons: safeJsonParse<Record<string, string>>(row.rejected_reasons, {}),
      context: row.context,
      related_files: safeJsonParse<string[]>(row.related_files, []),
      related_commits: safeJsonParse<string[]>(row.related_commits, []),
      timestamp: row.timestamp,
    } as Decision;
    return { decision, conversation, similarity: 0.5 };
  };

  // Try FTS first, fall back to LIKE if FTS fails
  try {
    const rows = this.db
      .prepare(
        `SELECT d.*, m.external_id as message_external_id
         FROM decisions d
         LEFT JOIN messages m ON d.message_id = m.id
         WHERE d.id IN (
           SELECT id FROM decisions_fts WHERE decisions_fts MATCH ?
         )
         ORDER BY d.timestamp DESC
         LIMIT ?`
      )
      .all(ftsQuery, limit) as RawRow[];
    return rows.filter(hasMessageId).map(toResult);
  } catch (_e) {
    // FTS table may not exist or be corrupted, fall back to LIKE search
    console.error("Decisions FTS not available, using LIKE search");
    const likeQuery = `%${query}%`;
    const rows = this.db
      .prepare(
        `SELECT d.*, m.external_id as message_external_id
         FROM decisions d
         LEFT JOIN messages m ON d.message_id = m.id
         WHERE d.decision_text LIKE ? OR d.rationale LIKE ? OR d.context LIKE ?
         ORDER BY d.timestamp DESC
         LIMIT ?`
      )
      .all(likeQuery, likeQuery, likeQuery, limit) as RawRow[];
    return rows.filter(hasMessageId).map(toResult);
  }
}
/**
 * Fallback mistake search using FTS
 *
 * Attempts an FTS5 MATCH query first; if the mistakes_fts table is missing
 * or corrupted, degrades to a LIKE scan over what_went_wrong/correction.
 * Mistakes whose source message row was deleted (LEFT JOIN yielded NULL)
 * are skipped. Similarity is fixed at 0.5 since no vector score exists here.
 *
 * The row interface, SELECT column list, and row→result mapping were
 * previously duplicated verbatim across both paths; they are shared now.
 */
private fallbackMistakeSearch(
  query: string,
  limit: number
): MistakeSearchResult[] {
  // Row shape shared by both query paths: mistake columns, joined
  // conversation columns, and the message's external id.
  interface MistakeRowWithConv {
    id: number;
    mistake_external_id: string;
    conversation_id: number;
    message_id: number;
    mistake_type: string;
    what_went_wrong: string;
    correction: string | null;
    user_correction_message: string | null;
    files_affected: string;
    timestamp: number;
    project_path: string;
    git_branch: string;
    conv_external_id: string;
    first_message_at: number;
    last_message_at: number;
    message_count: number;
    claude_version: string;
    conv_metadata: string;
    conv_created_at: number;
    conv_updated_at: number;
    message_external_id: string | null;
  }

  // Column list and joins are identical for both paths; only WHERE differs.
  const baseSelect = `
    SELECT
      m.id,
      m.external_id as mistake_external_id,
      m.conversation_id,
      m.message_id,
      m.mistake_type,
      m.what_went_wrong,
      m.correction,
      m.user_correction_message,
      m.files_affected,
      m.timestamp,
      c.external_id as conv_external_id,
      c.project_path,
      c.git_branch,
      c.first_message_at,
      c.last_message_at,
      c.message_count,
      c.claude_version,
      c.metadata as conv_metadata,
      c.created_at as conv_created_at,
      c.updated_at as conv_updated_at,
      msg.external_id as message_external_id
    FROM mistakes m
    JOIN conversations c ON m.conversation_id = c.id
    LEFT JOIN messages msg ON m.message_id = msg.id
  `;

  // Shared row→result mapping for both query paths.
  const mapRows = (rows: MistakeRowWithConv[]): MistakeSearchResult[] => {
    const results: MistakeSearchResult[] = [];
    for (const row of rows) {
      // Skip mistakes whose message row was deleted (LEFT JOIN gave NULL).
      if (!row.message_external_id) {
        continue;
      }
      results.push({
        mistake: {
          id: row.mistake_external_id,
          conversation_id: row.conv_external_id,
          message_id: row.message_external_id,
          mistake_type: row.mistake_type as Mistake["mistake_type"],
          what_went_wrong: row.what_went_wrong,
          correction: row.correction || undefined,
          user_correction_message: row.user_correction_message || undefined,
          files_affected: safeJsonParse<string[]>(row.files_affected, []),
          timestamp: row.timestamp,
        },
        // NOTE(review): source_type is not selected here, unlike
        // getConversation — confirm Conversation allows omitting it.
        conversation: {
          id: row.conv_external_id,
          project_path: row.project_path,
          first_message_at: row.first_message_at,
          last_message_at: row.last_message_at,
          message_count: row.message_count,
          git_branch: row.git_branch,
          claude_version: row.claude_version,
          metadata: safeJsonParse<Record<string, unknown>>(row.conv_metadata, {}),
          created_at: row.conv_created_at,
          updated_at: row.conv_updated_at,
        },
        similarity: 0.5,
      });
    }
    return results;
  };

  // Sanitize the query for FTS5 syntax
  const ftsQuery = this.sanitizeFtsQuery(query);

  // Try FTS first, fall back to LIKE if FTS table doesn't exist
  try {
    const sql = `${baseSelect}
      WHERE m.id IN (
        SELECT id FROM mistakes_fts WHERE mistakes_fts MATCH ?
      )
      ORDER BY m.timestamp DESC
      LIMIT ?
    `;
    const rows = this.db.prepare(sql).all(ftsQuery, limit) as MistakeRowWithConv[];
    return mapRows(rows);
  } catch (_e) {
    // FTS table may not exist, fall back to LIKE search
    console.error("Mistakes FTS not available, using LIKE search");
    // NOTE(review): `query` is embedded raw, so user-supplied '%'/'_' act as
    // LIKE wildcards — consider sanitizeForLike if that is unintended.
    const likeQuery = `%${query}%`;
    const sql = `${baseSelect}
      WHERE m.what_went_wrong LIKE ? OR m.correction LIKE ?
      ORDER BY m.timestamp DESC
      LIMIT ?
    `;
    const rows = this.db.prepare(sql).all(likeQuery, likeQuery, limit) as MistakeRowWithConv[];
    return mapRows(rows);
  }
}
/**
 * Apply filter to message
 *
 * A message passes only when it satisfies every populated filter field:
 * timestamp within date_range (inclusive), message_type in the whitelist,
 * and matching conversation_id.
 */
private applyFilter(message: Message, filter: SearchFilter): boolean {
  const inDateRange =
    !filter.date_range ||
    (message.timestamp >= filter.date_range[0] &&
      message.timestamp <= filter.date_range[1]);

  const typeAllowed =
    !filter.message_type ||
    filter.message_type.includes(message.message_type);

  const sameConversation =
    !filter.conversation_id ||
    message.conversation_id === filter.conversation_id;

  return inDateRange && typeAllowed && sameConversation;
}
/**
 * Snippet generator instance
 *
 * Shared SnippetGenerator (default configuration) reused by all searches.
 */
private snippetGenerator: SnippetGenerator = new SnippetGenerator();
/**
 * Generate snippet from content using advanced snippet generation
 *
 * @param content - Full message content to excerpt
 * @param query - Search query whose terms are located in the content
 * @param _length - Unused; kept only for backward signature compatibility
 *                  (the SnippetGenerator's own config controls length)
 * @returns Context-aware snippet around the best query-term match
 */
private generateSnippet(content: string, query: string, _length: number = 150): string {
  return this.snippetGenerator.generate(content, query);
}
/**
 * Get message by ID
 *
 * Looks up a message by its internal numeric primary key and maps the row
 * to the public Message shape: external IDs replace internal ones, and the
 * conversation's internal ID is attached for callers that still need it.
 *
 * @param id - Internal messages.id primary key
 * @returns Mapped message plus conversation_internal_id, or null when no
 *          row matches
 */
private getMessage(id: number): (Message & { conversation_internal_id: number }) | null {
  const row = this.db
    .prepare(
      `SELECT
        m.id,
        m.external_id,
        m.conversation_id,
        c.external_id as conversation_external_id,
        m.parent_message_id,
        m.parent_external_id,
        m.message_type,
        m.role,
        m.content,
        m.timestamp,
        m.is_sidechain,
        m.agent_id,
        m.request_id,
        m.git_branch,
        m.cwd,
        m.metadata
      FROM messages m
      JOIN conversations c ON c.id = m.conversation_id
      WHERE m.id = ?`
    )
    .get(id) as (MessageRow & { conversation_external_id: string }) | undefined;
  if (!row) {
    return null;
  }
  return {
    // Public shape exposes external IDs; internal numeric IDs stay private.
    id: row.external_id,
    conversation_id: row.conversation_external_id,
    parent_id: row.parent_external_id ?? undefined,
    message_type: row.message_type,
    role: row.role,
    content: row.content,
    timestamp: row.timestamp,
    is_sidechain: Boolean(row.is_sidechain), // SQLite stores booleans as 0/1
    agent_id: row.agent_id,
    request_id: row.request_id,
    git_branch: row.git_branch,
    cwd: row.cwd,
    metadata: safeJsonParse<Record<string, unknown>>(row.metadata, {}),
    conversation_internal_id: row.conversation_id,
  } as Message & { conversation_internal_id: number };
}
/**
 * Get conversation by ID
 *
 * Fetches a conversation row by its internal numeric primary key and maps
 * it to the public Conversation shape (external_id exposed as `id`).
 */
private getConversation(id: number): Conversation | null {
  const sql = `SELECT
      id,
      external_id,
      project_path,
      source_type,
      first_message_at,
      last_message_at,
      message_count,
      git_branch,
      claude_version,
      metadata,
      created_at,
      updated_at
    FROM conversations WHERE id = ?`;

  const row = this.db.prepare(sql).get(id) as ConversationRow | undefined;
  if (row === undefined) {
    return null;
  }

  return {
    id: row.external_id,
    project_path: row.project_path,
    source_type: row.source_type as 'claude-code' | 'codex',
    first_message_at: row.first_message_at,
    last_message_at: row.last_message_at,
    message_count: row.message_count,
    git_branch: row.git_branch,
    claude_version: row.claude_version,
    metadata: safeJsonParse<Record<string, unknown>>(row.metadata, {}),
    created_at: row.created_at,
    updated_at: row.updated_at,
  };
}
/**
 * Get search statistics
 *
 * Reports the number of stored embeddings, whether the vector extension is
 * active, and the embedding model in use.
 */
getStats(): {
  total_embeddings: number;
  vec_enabled: boolean;
  model_info: Record<string, unknown>;
} {
  const modelInfo: Record<string, unknown> = {
    model: "all-MiniLM-L6-v2",
    dimensions: 384,
  };
  return {
    total_embeddings: this.vectorStore.getEmbeddingCount(),
    vec_enabled: this.vectorStore.isVecEnabled(),
    model_info: modelInfo,
  };
}
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/search/SnippetGenerator.ts | TypeScript | /**
* Snippet Generator
* Generates context-aware snippets with query term highlighting
*/
/**
 * Configuration for snippet generation
 */
export interface SnippetConfig {
  /** Target snippet length in characters */
  targetLength: number;
  /** Context before match (characters). NOTE: currently not consulted by the implementation. */
  contextBefore: number;
  /** Context after match (characters). NOTE: currently not consulted by the implementation. */
  contextAfter: number;
  /** Whether to highlight query terms */
  highlight: boolean;
  /** Highlight format (markdown bold by default) */
  highlightStart: string;
  highlightEnd: string;
  /** Ellipsis character for truncation */
  ellipsis: string;
  /** Whether to prefer sentence boundaries */
  preferSentenceBoundaries: boolean;
}

export const DEFAULT_SNIPPET_CONFIG: SnippetConfig = {
  targetLength: 200,
  contextBefore: 60,
  contextAfter: 120,
  highlight: true,
  highlightStart: "**",
  highlightEnd: "**",
  ellipsis: "...",
  preferSentenceBoundaries: true,
};

/**
 * Information about a snippet match
 */
export interface SnippetMatch {
  /** Start offset of the match within the content */
  start: number;
  /** End offset (exclusive) of the match within the content */
  end: number;
  /** The lowercased query term that matched */
  term: string;
}

/**
 * Common English stop words excluded from query tokenization.
 * Hoisted to module scope so the Set is built once, not per call.
 */
const STOP_WORDS = new Set([
  "the", "a", "an", "and", "or", "but", "in", "on", "at", "to", "for",
  "of", "with", "by", "is", "it", "as", "be", "was", "are", "were",
  "been", "has", "have", "had", "do", "does", "did", "will", "would",
  "could", "should", "may", "might", "can", "this", "that", "these",
  "those", "i", "you", "we", "they", "he", "she",
]);

/**
 * Snippet Generator class
 *
 * Produces context-aware excerpts: tokenizes the query, finds term matches
 * in the content, picks the densest window of matches, trims to word
 * boundaries, and optionally wraps matched terms in highlight markers.
 */
export class SnippetGenerator {
  private config: SnippetConfig;

  constructor(config?: Partial<SnippetConfig>) {
    this.config = { ...DEFAULT_SNIPPET_CONFIG, ...config };
  }

  /**
   * Generate a snippet from content highlighting query terms.
   *
   * Falls back to a plain truncated prefix of the content when the query
   * produces no usable terms or no term occurs in the content.
   */
  generate(content: string, query: string): string {
    if (!content || content.length === 0) {
      return "";
    }
    const queryTerms = this.tokenizeQuery(query);
    if (queryTerms.length === 0) {
      // No query terms - return beginning of content
      return this.truncateToLength(content, this.config.targetLength);
    }
    const matches = this.findMatches(content, queryTerms);
    if (matches.length === 0) {
      // No matches found - return beginning of content
      return this.truncateToLength(content, this.config.targetLength);
    }
    // Find the best region using sliding window
    const bestRegion = this.findBestRegion(content, matches);
    let snippet = this.extractRegion(content, bestRegion.start, bestRegion.end);
    if (this.config.highlight) {
      snippet = this.highlightTerms(snippet, queryTerms);
    }
    return snippet;
  }

  /**
   * Tokenize query into searchable terms: lowercase, split on whitespace,
   * drop words shorter than 2 characters and common stop words.
   */
  private tokenizeQuery(query: string): string[] {
    return query
      .toLowerCase()
      .split(/\s+/)
      .filter((w) => w.length >= 2 && !STOP_WORDS.has(w));
  }

  /**
   * Find all (possibly overlapping) case-insensitive occurrences of each
   * term in the content, sorted by start position.
   */
  private findMatches(content: string, terms: string[]): SnippetMatch[] {
    const matches: SnippetMatch[] = [];
    const lowerContent = content.toLowerCase();
    for (const term of terms) {
      let index = 0;
      while (true) {
        const pos = lowerContent.indexOf(term, index);
        if (pos === -1) {break;}
        matches.push({
          start: pos,
          end: pos + term.length,
          term,
        });
        // Advance by one (not term.length) so overlapping hits are found.
        index = pos + 1;
      }
    }
    matches.sort((a, b) => a.start - b.start);
    return matches;
  }

  /**
   * Find the best region using sliding-window match density. The window is
   * targetLength wide and advances in 10-char steps (an approximation that
   * trades exactness for speed on long content).
   */
  private findBestRegion(
    content: string,
    matches: SnippetMatch[]
  ): { start: number; end: number } {
    const windowSize = this.config.targetLength;
    if (content.length <= windowSize) {
      return { start: 0, end: content.length };
    }
    let bestStart = 0;
    let bestScore = 0;
    for (let start = 0; start <= content.length - windowSize; start += 10) {
      const end = Math.min(start + windowSize, content.length);
      // Full matches score 2, matches partially clipped by the window score 1.
      let score = 0;
      for (const match of matches) {
        if (match.start >= start && match.end <= end) {
          score += 2;
        } else if (
          (match.start >= start && match.start < end) ||
          (match.end > start && match.end <= end)
        ) {
          score += 1;
        }
      }
      // Small bonus for windows starting at a sentence boundary.
      if (this.config.preferSentenceBoundaries) {
        const before = content.slice(Math.max(0, start - 5), start);
        if (/[.!?]\s*$/.test(before) || start === 0) {
          score += 0.5;
        }
      }
      if (score > bestScore) {
        bestScore = score;
        bestStart = start;
      }
    }
    return {
      start: bestStart,
      end: Math.min(bestStart + windowSize, content.length),
    };
  }

  /**
   * Extract a region from content, expanding both edges to whole-word
   * boundaries and adding ellipses where the content was truncated.
   */
  private extractRegion(content: string, start: number, end: number): string {
    let adjustedStart = start;
    let adjustedEnd = end;
    // Walk backward to a whitespace boundary for the start...
    if (start > 0) {
      while (adjustedStart > 0 && !/\s/.test(content[adjustedStart - 1])) {
        adjustedStart--;
      }
      // ...then skip the leading whitespace itself.
      if (adjustedStart > 0) {
        while (adjustedStart < content.length && /\s/.test(content[adjustedStart])) {
          adjustedStart++;
        }
      }
    }
    // Walk forward to a whitespace boundary for the end.
    if (end < content.length) {
      while (adjustedEnd < content.length && !/\s/.test(content[adjustedEnd])) {
        adjustedEnd++;
      }
    }
    let snippet = content.slice(adjustedStart, adjustedEnd).trim();
    if (adjustedStart > 0) {
      snippet = this.config.ellipsis + snippet;
    }
    if (adjustedEnd < content.length) {
      snippet = snippet + this.config.ellipsis;
    }
    return snippet;
  }

  /**
   * Highlight query terms in the snippet.
   *
   * All terms are combined into a single alternation regex, longest term
   * first, and applied in ONE pass. (Replacing terms one at a time — the
   * previous approach — corrupted output when a shorter term matched inside
   * an already-inserted highlight marker, e.g. query "testing test".)
   */
  private highlightTerms(snippet: string, terms: string[]): string {
    if (terms.length === 0) {
      return snippet;
    }
    const sortedTerms = [...terms].sort((a, b) => b.length - a.length);
    const pattern = sortedTerms.map((t) => this.escapeRegex(t)).join("|");
    const regex = new RegExp(`(${pattern})`, "gi");
    return snippet.replace(
      regex,
      `${this.config.highlightStart}$1${this.config.highlightEnd}`
    );
  }

  /**
   * Escape special regex characters
   */
  private escapeRegex(str: string): string {
    return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  }

  /**
   * Truncate content to target length at a word boundary; falls back to a
   * hard cut at maxLength when no whitespace is found.
   */
  private truncateToLength(content: string, maxLength: number): string {
    if (content.length <= maxLength) {
      return content;
    }
    let end = maxLength;
    while (end > 0 && !/\s/.test(content[end])) {
      end--;
    }
    if (end === 0) {
      end = maxLength;
    }
    return content.slice(0, end).trim() + this.config.ellipsis;
  }
}
/**
 * Get or create a snippet generator
 *
 * Convenience factory. Despite the name, there is no caching here: every
 * call constructs a fresh SnippetGenerator.
 *
 * @param config - Optional partial config merged over DEFAULT_SNIPPET_CONFIG
 * @returns A newly constructed SnippetGenerator
 */
export function getSnippetGenerator(config?: Partial<SnippetConfig>): SnippetGenerator {
  return new SnippetGenerator(config);
}
/**
 * Generate a snippet using the default configuration.
 *
 * One-shot helper: builds a default-configured SnippetGenerator and
 * delegates to it.
 */
export function generateSnippet(content: string, query: string): string {
  return new SnippetGenerator().generate(content, query);
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/search/index.ts | TypeScript | /**
* Search Module
* Exports semantic search, re-ranking, aggregation, and snippet generation
*/
export { SemanticSearch } from "./SemanticSearch.js";
export type { SearchFilter, SearchResult, DecisionSearchResult, MistakeSearchResult } from "./SemanticSearch.js";
export { ResultAggregator, getResultAggregator } from "./ResultAggregator.js";
export type { AggregatedResult, ChunkMatch, AggregationConfig } from "./ResultAggregator.js";
export { HybridReranker, getHybridReranker, getRerankConfig } from "./HybridReranker.js";
export type { RerankConfig, RankableResult, RerankResult } from "./HybridReranker.js";
export { SnippetGenerator, getSnippetGenerator, generateSnippet } from "./SnippetGenerator.js";
export type { SnippetConfig, SnippetMatch } from "./SnippetGenerator.js";
export { QueryExpander, getQueryExpander, getExpansionConfig } from "./QueryExpander.js";
export type { QueryExpansionConfig } from "./QueryExpander.js";
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/storage/BackupManager.ts | TypeScript | /**
* BackupManager - Create and manage backups before deletion operations
* Exports affected data to JSON for potential restoration
*/
import { writeFileSync, mkdirSync, existsSync, chmodSync } from "fs";
import { join } from "path";
import { homedir } from "os";
import type Database from "better-sqlite3";
import { pathToProjectFolderName } from "../utils/sanitization.js";
import { getCanonicalProjectPath } from "../utils/worktree.js";
/**
 * Backup metadata
 *
 * Summary record describing one JSON backup file on disk.
 */
export interface BackupMetadata {
  /** Creation time (ms since epoch); also embedded in the backup filename */
  timestamp: number;
  /** Human-readable reason the backup was taken */
  description: string;
  /** Project the backed-up conversations belong to */
  projectPath: string;
  /** Names of the tables included (the keys of BackupData.data) */
  tables: string[];
  /** Row count per table, keyed by table name */
  recordCounts: Record<string, number>;
  /** Absolute path of the written backup file */
  backupPath: string;
}
/**
 * Backup data structure
 *
 * Full on-disk shape of a backup file: summary metadata plus the raw
 * exported rows keyed by table name.
 */
export interface BackupData {
  /** Summary describing when/why/where this backup was created */
  metadata: BackupMetadata;
  /** Raw table rows, keyed by table name (see metadata.tables) */
  data: Record<string, unknown[]>;
}
/**
 * BackupManager class
 *
 * Creates JSON backups of conversations and all dependent rows (messages,
 * tool uses/results, file edits, thinking blocks, decisions, mistakes,
 * requirements, validations, embedding metadata) before destructive
 * operations, so the data can be restored later.
 */
export class BackupManager {
  private db: Database.Database;

  constructor(db: Database.Database) {
    this.db = db;
  }

  /**
   * Create a backup of specific conversations and related data.
   *
   * @param conversationIds - IDs of the conversations to back up
   * @param description - Human-readable reason for the backup
   * @param projectPath - Project path used to locate the backup directory
   * @returns Metadata describing the written backup file
   * @throws Error when conversationIds is empty
   */
  createBackupForConversations(
    conversationIds: string[],
    description: string,
    projectPath: string
  ): BackupMetadata {
    if (conversationIds.length === 0) {
      throw new Error("No conversations to backup");
    }

    // Prepare backup directory and target path.
    const backupDir = this.getBackupDirectory(projectPath);
    this.ensureDirectoryExists(backupDir);
    const timestamp = Date.now();
    const backupPath = join(backupDir, `backup-${timestamp}.json`);

    // Collect rows table-by-table. Insertion order determines
    // metadata.tables, so it mirrors the original export order.
    const backupData: Record<string, unknown[]> = {};

    backupData.conversations = this.selectByColumn("conversations", "id", conversationIds);
    backupData.messages = this.selectByColumn("messages", "conversation_id", conversationIds);

    // Message IDs drive the lookups for message-scoped tables.
    const messageIds = backupData.messages.map(
      (m) => (m as Record<string, unknown>).id as string
    );

    backupData.tool_uses = this.selectByColumn("tool_uses", "message_id", messageIds);
    backupData.tool_results = this.selectByColumn("tool_results", "message_id", messageIds);
    backupData.file_edits = this.selectByColumn("file_edits", "conversation_id", conversationIds);
    backupData.thinking_blocks = this.selectByColumn("thinking_blocks", "message_id", messageIds);
    backupData.decisions = this.selectByColumn("decisions", "conversation_id", conversationIds);
    backupData.mistakes = this.selectByColumn("mistakes", "conversation_id", conversationIds);
    backupData.requirements = this.selectByColumn("requirements", "conversation_id", conversationIds);
    backupData.validations = this.selectByColumn("validations", "conversation_id", conversationIds);

    // Only embedding metadata is exported; the embedding BLOBs are too
    // large to serialize into JSON.
    backupData.message_embeddings = this.selectByColumn(
      "message_embeddings",
      "message_id",
      messageIds,
      "id, message_id, model_name, dimensions, created_at"
    );

    const decisionIds = backupData.decisions.map(
      (d) => (d as Record<string, unknown>).id as string
    );
    backupData.decision_embeddings = this.selectByColumn(
      "decision_embeddings",
      "decision_id",
      decisionIds,
      "id, decision_id, model_name, dimensions, created_at"
    );

    // Derive per-table counts from the collected data.
    const recordCounts: Record<string, number> = {};
    for (const [table, rows] of Object.entries(backupData)) {
      recordCounts[table] = rows.length;
    }

    const metadata: BackupMetadata = {
      timestamp,
      description,
      projectPath,
      tables: Object.keys(backupData),
      recordCounts,
      backupPath,
    };

    const fullBackup: BackupData = {
      metadata,
      data: backupData,
    };
    writeFileSync(backupPath, JSON.stringify(fullBackup, null, 2), "utf-8");

    // Set restrictive permissions (owner read/write only) to protect sensitive data
    // 0o600 = rw------- (only owner can read/write)
    try {
      chmodSync(backupPath, 0o600);
    } catch (_error) {
      // chmod may fail on some platforms (e.g., Windows), continue anyway
    }

    console.error(`✓ Backup created: ${backupPath}`);
    console.error(`  ${recordCounts.conversations} conversations`);
    console.error(`  ${recordCounts.messages} messages`);
    console.error(`  ${recordCounts.decisions} decisions`);
    console.error(`  ${recordCounts.mistakes} mistakes`);

    return metadata;
  }

  /**
   * Generic `SELECT <columns> FROM <table> WHERE <column> IN (...)` helper
   * shared by every table export (replaces eleven near-identical methods).
   *
   * Returns [] for an empty id list, since SQLite rejects an empty IN
   * clause. Table/column names are compile-time constants supplied by this
   * class — never user input — so interpolating them into SQL is safe here;
   * the id values themselves are bound as parameters.
   */
  private selectByColumn(
    table: string,
    column: string,
    ids: string[],
    columns = "*"
  ): unknown[] {
    if (ids.length === 0) {
      return [];
    }
    const placeholders = ids.map(() => "?").join(",");
    return this.db
      .prepare(`SELECT ${columns} FROM ${table} WHERE ${column} IN (${placeholders})`)
      .all(...ids);
  }

  /**
   * Get backup directory for project (under ~/.claude/backups, named from
   * the canonicalized project path).
   */
  private getBackupDirectory(projectPath: string): string {
    const canonicalPath = getCanonicalProjectPath(projectPath).canonicalPath;
    const projectFolderName = pathToProjectFolderName(canonicalPath);
    return join(homedir(), ".claude", "backups", projectFolderName);
  }

  /**
   * Ensure directory exists (created recursively when missing).
   */
  private ensureDirectoryExists(dir: string): void {
    if (!existsSync(dir)) {
      mkdirSync(dir, { recursive: true });
    }
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/storage/ConversationStorage.ts | TypeScript | /**
* Conversation Storage Layer - CRUD operations for all conversation-related data.
*
* This class provides the data access layer for the cccmemory system.
* It handles storing and retrieving conversations, messages, tool uses, decisions,
* mistakes, requirements, and git commits.
*
* All store operations use transactions for atomicity and performance.
* All JSON fields are automatically serialized/deserialized.
*
* @example
* ```typescript
* const storage = new ConversationStorage(sqliteManager);
* await storage.storeConversations(conversations);
* const conv = storage.getConversation('conv-123');
* const timeline = storage.getFileTimeline('src/index.ts');
* ```
*/
import type { SQLiteManager } from "./SQLiteManager.js";
import type {
Conversation,
Message,
ToolUse,
ToolResult,
FileEdit,
ThinkingBlock,
} from "../parsers/ConversationParser.js";
import type { Decision } from "../parsers/DecisionExtractor.js";
import type { Mistake } from "../parsers/MistakeExtractor.js";
import type { GitCommit } from "../parsers/GitIntegrator.js";
import type { Requirement, Validation } from "../parsers/RequirementsExtractor.js";
import type { Methodology } from "../parsers/MethodologyExtractor.js";
import type { ResearchFinding } from "../parsers/ResearchExtractor.js";
import type { SolutionPattern } from "../parsers/SolutionPatternExtractor.js";
import { sanitizeForLike } from "../utils/sanitization.js";
import type { GitCommitRow, ConversationRow } from "../types/ToolTypes.js";
import { QueryCache, type QueryCacheConfig, type CacheStats } from "../cache/QueryCache.js";
import { safeJsonParse } from "../utils/safeJson.js";
import { getCanonicalProjectPath } from "../utils/worktree.js";
/**
* Data access layer for conversation memory storage.
*
* Provides CRUD operations for all conversation-related entities using SQLite.
* Supports optional caching for frequently accessed queries.
*/
export class ConversationStorage {
private cache: QueryCache | null = null;
private projectIdCache = new Map<string, number>();
/**
* Create a new ConversationStorage instance.
*
* @param db - SQLiteManager instance for database access
*/
constructor(private db: SQLiteManager) {}
// ==================== Cache Management ====================
/**
* Enable query result caching.
*
* Caching improves performance for frequently accessed queries by storing
* results in memory. Cache is automatically invalidated when data changes.
*
* @param config - Cache configuration (maxSize and ttlMs)
*
* @example
* ```typescript
* storage.enableCache({ maxSize: 100, ttlMs: 300000 });
* ```
*/
enableCache(config: QueryCacheConfig): void {
this.cache = new QueryCache(config);
}
/**
* Disable query result caching.
*
* Clears all cached data and stops caching new queries.
*/
disableCache(): void {
this.cache = null;
}
/**
* Check if caching is enabled.
*
* @returns True if caching is enabled
*/
isCacheEnabled(): boolean {
return this.cache !== null;
}
/**
* Clear all cached query results.
*
* Clears the cache but keeps caching enabled.
*/
clearCache(): void {
if (this.cache) {
this.cache.clear();
this.cache.resetStats();
}
}
/**
* Get cache statistics.
*
* Returns performance metrics including hits, misses, hit rate, and evictions.
*
* @returns Cache statistics or null if caching is disabled
*
* @example
* ```typescript
* const stats = storage.getCacheStats();
* if (stats) {
* console.error(`Hit rate: ${(stats.hitRate * 100).toFixed(1)}%`);
* }
* ```
*/
getCacheStats(): CacheStats | null {
return this.cache ? this.cache.getStats() : null;
}
getProjectId(projectPath: string): number {
return this.ensureProjectId(projectPath);
}
private ensureProjectId(projectPath: string): number {
const canonicalPath = getCanonicalProjectPath(projectPath).canonicalPath;
const cached = this.projectIdCache.get(canonicalPath);
if (cached) {
return cached;
}
const existing = this.db
.prepare("SELECT id FROM projects WHERE canonical_path = ?")
.get(canonicalPath) as { id: number } | undefined;
if (existing) {
this.projectIdCache.set(canonicalPath, existing.id);
return existing.id;
}
const alias = this.db
.prepare("SELECT project_id FROM project_aliases WHERE alias_path = ?")
.get(canonicalPath) as { project_id: number } | undefined;
if (alias) {
this.projectIdCache.set(canonicalPath, alias.project_id);
return alias.project_id;
}
const now = Date.now();
const result = this.db
.prepare(
"INSERT INTO projects (canonical_path, display_path, created_at, updated_at) VALUES (?, ?, ?, ?)"
)
.run(canonicalPath, canonicalPath, now, now);
const id = Number(result.lastInsertRowid);
this.projectIdCache.set(canonicalPath, id);
return id;
}
// ==================== Conversations ====================
/**
* Store conversations in the database.
*
* Uses UPSERT (INSERT ON CONFLICT UPDATE) to handle both new and updated conversations.
* All operations are performed in a single transaction for atomicity.
*
* @param conversations - Array of conversation objects to store
* @returns Promise that resolves when all conversations are stored
*
* @example
* ```typescript
* await storage.storeConversations([
* {
* id: 'conv-123',
* project_path: '/path/to/project',
* first_message_at: Date.now(),
* last_message_at: Date.now(),
* message_count: 42,
* git_branch: 'main',
* claude_version: '3.5',
* metadata: {},
* created_at: Date.now(),
* updated_at: Date.now()
* }
* ]);
* ```
*/
  async storeConversations(conversations: Conversation[]): Promise<Map<string, number>> {
    // UPSERT keyed on (project_id, source_type, external_id). Note created_at
    // is deliberately NOT in the UPDATE list, so the original value survives.
    const stmt = this.db.prepare(`
      INSERT INTO conversations
        (project_id, project_path, source_type, external_id, first_message_at, last_message_at, message_count,
         git_branch, claude_version, metadata, created_at, updated_at)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(project_id, source_type, external_id) DO UPDATE SET
        project_path = excluded.project_path,
        first_message_at = excluded.first_message_at,
        last_message_at = excluded.last_message_at,
        message_count = excluded.message_count,
        git_branch = excluded.git_branch,
        claude_version = excluded.claude_version,
        metadata = excluded.metadata,
        updated_at = excluded.updated_at
    `);
    // Re-select after each upsert to learn the row's numeric id (works for
    // both the insert and the update path, unlike lastInsertRowid).
    const selectStmt = this.db.prepare(
      "SELECT id FROM conversations WHERE project_id = ? AND source_type = ? AND external_id = ?"
    );
    // Maps external conversation id -> numeric DB id, consumed by later store* calls.
    const conversationIdMap = new Map<string, number>();
    this.db.transaction(() => {
      for (const conv of conversations) {
        // Canonicalize the path, then resolve/create the owning project row.
        const canonicalProjectPath = getCanonicalProjectPath(conv.project_path).canonicalPath;
        const projectId = this.ensureProjectId(canonicalProjectPath);
        const sourceType = conv.source_type || "claude-code";
        stmt.run(
          projectId,
          canonicalProjectPath,
          sourceType,
          conv.id,
          conv.first_message_at,
          conv.last_message_at,
          conv.message_count,
          conv.git_branch,
          conv.claude_version,
          JSON.stringify(conv.metadata),
          conv.created_at,
          conv.updated_at
        );
        const row = selectStmt.get(projectId, sourceType, conv.id) as { id: number };
        conversationIdMap.set(conv.id, row.id);
      }
    });
    // Invalidate cache once after batch (not per-item)
    if (this.cache) {
      this.cache.clear();
    }
    // Logging goes to stderr throughout this module.
    console.error(`✓ Stored ${conversations.length} conversations`);
    return conversationIdMap;
  }
/**
* Retrieve a single conversation by ID.
*
* @param id - Conversation ID to retrieve
* @returns Conversation object if found, null otherwise
*
* @example
* ```typescript
* const conv = storage.getConversation('conv-123');
* if (conv) {
* console.error(`${conv.message_count} messages on ${conv.git_branch}`);
* }
* ```
*/
  getConversation(id: string, projectPath?: string): Conversation | null {
    // Cache key includes the (optional) project scope so scoped and unscoped
    // lookups for the same external id are cached independently.
    const cacheKey = `conversation:${id}:${projectPath ?? "any"}`;
    // Check cache first
    if (this.cache) {
      const cached = this.cache.get<Conversation | null>(cacheKey);
      // undefined = cache miss; null is a legitimate cached "not found" result.
      if (cached !== undefined) {
        return cached;
      }
    }
    let sql = "SELECT * FROM conversations WHERE external_id = ?";
    const params: (string | number)[] = [id];
    if (projectPath) {
      sql += " AND project_path = ?";
      params.push(projectPath);
    }
    // If the same external_id exists under several projects, prefer the most
    // recently active conversation.
    sql += " ORDER BY last_message_at DESC LIMIT 1";
    const row = this.db.prepare(sql).get(...params) as ConversationRow | undefined;
    if (!row) {
      // Cache null result to avoid repeated queries
      this.cache?.set(cacheKey, null);
      return null;
    }
    // Map the DB row back to the public Conversation shape: the external id is
    // exposed as `id`, and metadata is parsed from its JSON column.
    const result = {
      id: row.external_id,
      project_path: row.project_path,
      source_type: row.source_type as 'claude-code' | 'codex',
      first_message_at: row.first_message_at,
      last_message_at: row.last_message_at,
      message_count: row.message_count,
      git_branch: row.git_branch,
      claude_version: row.claude_version,
      metadata: safeJsonParse<Record<string, unknown>>(row.metadata, {}),
      created_at: row.created_at,
      updated_at: row.updated_at,
    };
    // Cache the result
    this.cache?.set(cacheKey, result);
    return result;
  }
// ==================== Messages ====================
/**
* Store messages in the database.
*
* Stores all messages from conversations including content, metadata, and relationships.
* Uses UPSERT (INSERT ON CONFLICT UPDATE) for idempotent storage.
*
* @param messages - Array of message objects to store
* @param skipFtsRebuild - Skip FTS rebuild (for batch operations, call rebuildAllFts() at end)
* @returns Promise that resolves when all messages are stored
*
* @example
* ```typescript
* await storage.storeMessages([
* {
* id: 'msg-123',
* conversation_id: 'conv-123',
* message_type: 'text',
* role: 'user',
* content: 'Hello',
* timestamp: Date.now(),
* is_sidechain: false,
* metadata: {}
* }
* ]);
* ```
*/
  async storeMessages(
    messages: Message[],
    options: { skipFtsRebuild?: boolean; conversationIdMap: Map<string, number> }
  ): Promise<Map<string, number>> {
    if (messages.length === 0) {
      return new Map();
    }
    const { skipFtsRebuild = false, conversationIdMap } = options;
    // UPSERT keyed on (conversation_id, external_id) so re-indexing is idempotent.
    const stmt = this.db.prepare(`
      INSERT INTO messages
        (conversation_id, external_id, parent_external_id, message_type, role, content,
         timestamp, is_sidechain, agent_id, request_id, git_branch, cwd, metadata)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(conversation_id, external_id) DO UPDATE SET
        parent_external_id = excluded.parent_external_id,
        message_type = excluded.message_type,
        role = excluded.role,
        content = excluded.content,
        timestamp = excluded.timestamp,
        is_sidechain = excluded.is_sidechain,
        agent_id = excluded.agent_id,
        request_id = excluded.request_id,
        git_branch = excluded.git_branch,
        cwd = excluded.cwd,
        metadata = excluded.metadata
      `);
    // Re-select to obtain the numeric row id (valid for both insert and update paths).
    const selectStmt = this.db.prepare(
      "SELECT id FROM messages WHERE conversation_id = ? AND external_id = ?"
    );
    // Maps external message id -> numeric DB id, consumed by later store* calls.
    const messageIdMap = new Map<string, number>();
    let skipped = 0;
    this.db.transaction(() => {
      for (const msg of messages) {
        // Skip messages whose conversation wasn't stored (no numeric id to link to).
        const convId = conversationIdMap.get(msg.conversation_id);
        if (!convId) {
          skipped += 1;
          continue;
        }
        stmt.run(
          convId,
          msg.id,
          msg.parent_id ?? null,
          msg.message_type,
          msg.role || null,
          msg.content || null,
          msg.timestamp,
          msg.is_sidechain ? 1 : 0,
          msg.agent_id || null,
          msg.request_id || null,
          msg.git_branch || null,
          msg.cwd || null,
          JSON.stringify(msg.metadata)
        );
        const row = selectStmt.get(convId, msg.id) as { id: number };
        messageIdMap.set(msg.id, row.id);
      }
    });
    if (skipped > 0) {
      console.error(`⚠️ Skipping ${skipped} message(s) with missing conversations`);
    }
    // Resolve parent_message_id after inserts: parents may arrive in any order,
    // so numeric parent ids are back-filled in a second pass from external ids.
    this.db.exec(`
      UPDATE messages
      SET parent_message_id = (
        SELECT m2.id FROM messages m2
        WHERE m2.conversation_id = messages.conversation_id
          AND m2.external_id = messages.parent_external_id
      )
      WHERE parent_external_id IS NOT NULL AND parent_message_id IS NULL
    `);
    // Batch callers pass skipFtsRebuild=true and call rebuildAllFts() once at the end.
    if (!skipFtsRebuild) {
      this.rebuildMessagesFts();
    }
    console.error(`✓ Stored ${messages.length - skipped} messages`);
    return messageIdMap;
  }
/**
* Rebuild the messages FTS index.
* Required for FTS5 external content tables after inserting data.
* Call this after batch operations that used skipFtsRebuild=true.
*/
rebuildMessagesFts(): void {
try {
this.db.getDatabase().exec("INSERT INTO messages_fts(messages_fts) VALUES('rebuild')");
} catch (error) {
// FTS rebuild may fail if table doesn't exist or schema mismatch
// Log but don't throw - FTS is optional fallback
console.error("FTS rebuild warning:", (error as Error).message);
}
}
// ==================== Tool Uses ====================
/**
* Store tool use records in the database.
*
* Records all tool invocations from assistant messages.
*
* @param toolUses - Array of tool use objects
* @returns Promise that resolves when stored
*/
  async storeToolUses(
    toolUses: ToolUse[],
    messageIdMap: Map<string, number>
  ): Promise<Map<string, number>> {
    if (toolUses.length === 0) {
      return new Map();
    }
    // UPSERT keyed on (message_id, external_id) so re-indexing is idempotent.
    const stmt = this.db.prepare(`
      INSERT INTO tool_uses
        (message_id, external_id, tool_name, tool_input, timestamp)
      VALUES (?, ?, ?, ?, ?)
      ON CONFLICT(message_id, external_id) DO UPDATE SET
        tool_name = excluded.tool_name,
        tool_input = excluded.tool_input,
        timestamp = excluded.timestamp
    `);
    // Re-select to obtain the numeric row id for the returned map.
    const selectStmt = this.db.prepare(
      "SELECT id FROM tool_uses WHERE message_id = ? AND external_id = ?"
    );
    // Maps external tool-use id -> numeric DB id, consumed by storeToolResults.
    const toolUseIdMap = new Map<string, number>();
    let skipped = 0;
    this.db.transaction(() => {
      for (const tool of toolUses) {
        // Skip tool uses whose owning message was not stored.
        const messageId = messageIdMap.get(tool.message_id);
        if (!messageId) {
          skipped += 1;
          continue;
        }
        stmt.run(
          messageId,
          tool.id,
          tool.tool_name,
          JSON.stringify(tool.tool_input),
          tool.timestamp
        );
        const row = selectStmt.get(messageId, tool.id) as { id: number };
        toolUseIdMap.set(tool.id, row.id);
      }
    });
    if (skipped > 0) {
      console.error(`⚠️ Skipping ${skipped} tool use(s) with missing messages`);
    }
    console.error(`✓ Stored ${toolUses.length - skipped} tool uses`);
    return toolUseIdMap;
  }
// ==================== Tool Results ====================
/**
* Store tool execution results in the database.
*
* Records the output/results from tool invocations.
*
* @param toolResults - Array of tool result objects
* @returns Promise that resolves when stored
*/
  async storeToolResults(
    toolResults: ToolResult[],
    messageIdMap: Map<string, number>,
    toolUseIdMap: Map<string, number>
  ): Promise<void> {
    if (toolResults.length === 0) {
      return;
    }
    // UPSERT keyed on (tool_use_id, external_id); booleans are stored as 0/1.
    const stmt = this.db.prepare(`
      INSERT INTO tool_results
        (tool_use_id, message_id, external_id, content, is_error, stdout, stderr, is_image, timestamp)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(tool_use_id, external_id) DO UPDATE SET
        message_id = excluded.message_id,
        content = excluded.content,
        is_error = excluded.is_error,
        stdout = excluded.stdout,
        stderr = excluded.stderr,
        is_image = excluded.is_image,
        timestamp = excluded.timestamp
    `);
    let skipped = 0;
    this.db.transaction(() => {
      for (const result of toolResults) {
        // A result needs BOTH its tool use and its message resolved to numeric ids.
        const toolUseId = toolUseIdMap.get(result.tool_use_id);
        const messageId = messageIdMap.get(result.message_id);
        if (!toolUseId || !messageId) {
          skipped += 1;
          continue;
        }
        stmt.run(
          toolUseId,
          messageId,
          result.id,
          result.content || null,
          result.is_error ? 1 : 0,
          result.stdout || null,
          result.stderr || null,
          result.is_image ? 1 : 0,
          result.timestamp
        );
      }
    });
    if (skipped > 0) {
      console.error(`⚠️ Skipping ${skipped} tool result(s) with missing refs`);
    }
    console.error(`✓ Stored ${toolResults.length - skipped} tool results`);
  }
// ==================== File Edits ====================
/**
* Store file edit records in the database.
*
* Records all file modifications made during conversations.
*
* @param fileEdits - Array of file edit objects
* @returns Promise that resolves when stored
*/
  async storeFileEdits(
    fileEdits: FileEdit[],
    conversationIdMap: Map<string, number>,
    messageIdMap: Map<string, number>
  ): Promise<void> {
    if (fileEdits.length === 0) {
      return;
    }
    // UPSERT keyed on (conversation_id, external_id) so re-indexing is idempotent.
    const stmt = this.db.prepare(`
      INSERT INTO file_edits
        (external_id, conversation_id, file_path, message_id, backup_version,
         backup_time, snapshot_timestamp, metadata)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(conversation_id, external_id) DO UPDATE SET
        file_path = excluded.file_path,
        message_id = excluded.message_id,
        backup_version = excluded.backup_version,
        backup_time = excluded.backup_time,
        snapshot_timestamp = excluded.snapshot_timestamp,
        metadata = excluded.metadata
    `);
    let skipped = 0;
    this.db.transaction(() => {
      for (const edit of fileEdits) {
        // Both the conversation and the message must have been stored already.
        const conversationId = conversationIdMap.get(edit.conversation_id);
        const messageId = messageIdMap.get(edit.message_id);
        if (!conversationId || !messageId) {
          skipped += 1;
          continue;
        }
        stmt.run(
          edit.id,
          conversationId,
          edit.file_path,
          messageId,
          edit.backup_version || null,
          edit.backup_time || null,
          edit.snapshot_timestamp,
          JSON.stringify(edit.metadata)
        );
        // Targeted invalidation: only the edited file's cached query results
        // are dropped (per-file edits list and combined timeline).
        if (this.cache) {
          this.cache.delete(`edits:${edit.file_path}`);
          this.cache.delete(`timeline:${edit.file_path}`);
        }
      }
    });
    if (skipped > 0) {
      console.error(`⚠️ Skipping ${skipped} file edit(s) with missing refs`);
    }
    console.error(`✓ Stored ${fileEdits.length - skipped} file edits`);
  }
/**
* Retrieve all edits for a specific file.
*
* @param filePath - Path to the file
* @returns Array of file edits, ordered by timestamp (most recent first)
*/
  getFileEdits(filePath: string): FileEdit[] {
    const cacheKey = `edits:${filePath}`;
    // Check cache first
    if (this.cache) {
      const cached = this.cache.get<FileEdit[]>(cacheKey);
      // undefined = miss; an empty array is a valid cached result.
      if (cached !== undefined) {
        return cached;
      }
    }
    // Shape of each joined row: aliased back to external ids for the public API.
    interface FileEditRow {
      id: string;
      conversation_id: string;
      file_path: string;
      message_id: string;
      backup_version?: number;
      backup_time?: number;
      snapshot_timestamp: number;
      metadata: string; // JSON string from database
    }
    // Joins translate numeric FK ids back into external conversation/message ids.
    const rows = this.db
      .prepare(
        `SELECT
          fe.external_id as id,
          c.external_id as conversation_id,
          fe.file_path,
          m.external_id as message_id,
          fe.backup_version,
          fe.backup_time,
          fe.snapshot_timestamp,
          fe.metadata
        FROM file_edits fe
        JOIN conversations c ON fe.conversation_id = c.id
        JOIN messages m ON fe.message_id = m.id
        WHERE fe.file_path = ?
        ORDER BY fe.snapshot_timestamp DESC`
      )
      .all(filePath) as FileEditRow[];
    // Parse metadata JSON for each row
    const result: FileEdit[] = rows.map(row => ({
      ...row,
      metadata: safeJsonParse<Record<string, unknown>>(row.metadata, {}),
    }));
    // Cache the result
    this.cache?.set(cacheKey, result);
    return result;
  }
// ==================== Thinking Blocks ====================
/**
* Store thinking blocks in the database.
*
* Thinking blocks contain Claude's internal reasoning. They can be large and
* are optionally indexed based on the includeThinking flag.
*
* @param blocks - Array of thinking block objects
* @returns Promise that resolves when stored
*/
  async storeThinkingBlocks(
    blocks: ThinkingBlock[],
    messageIdMap: Map<string, number>
  ): Promise<void> {
    if (blocks.length === 0) {
      return;
    }
    // UPSERT keyed on (message_id, external_id) so re-indexing is idempotent.
    const stmt = this.db.prepare(`
      INSERT INTO thinking_blocks
        (external_id, message_id, thinking_content, signature, timestamp)
      VALUES (?, ?, ?, ?, ?)
      ON CONFLICT(message_id, external_id) DO UPDATE SET
        thinking_content = excluded.thinking_content,
        signature = excluded.signature,
        timestamp = excluded.timestamp
    `);
    let skipped = 0;
    this.db.transaction(() => {
      for (const block of blocks) {
        // Skip blocks whose owning message was not stored.
        const messageId = messageIdMap.get(block.message_id);
        if (!messageId) {
          skipped += 1;
          continue;
        }
        stmt.run(
          block.id,
          messageId,
          block.thinking_content,
          block.signature || null,
          block.timestamp
        );
      }
    });
    if (skipped > 0) {
      console.error(`⚠️ Skipping ${skipped} thinking block(s) with missing messages`);
    }
    console.error(`✓ Stored ${blocks.length - skipped} thinking blocks`);
  }
// ==================== Decisions ====================
/**
* Store extracted decisions in the database.
*
* Decisions include architectural choices, technical decisions, and their rationale.
*
* @param decisions - Array of decision objects
* @param skipFtsRebuild - Skip FTS rebuild (for batch operations, call rebuildAllFts() at end)
* @returns Promise that resolves when stored
*/
  async storeDecisions(
    decisions: Decision[],
    options: {
      skipFtsRebuild?: boolean;
      conversationIdMap: Map<string, number>;
      messageIdMap: Map<string, number>;
    }
  ): Promise<Map<string, number>> {
    if (decisions.length === 0) {
      return new Map();
    }
    const { skipFtsRebuild = false, conversationIdMap, messageIdMap } = options;
    // UPSERT keyed on (conversation_id, external_id); list/map fields are
    // stored as JSON text columns.
    const stmt = this.db.prepare(`
      INSERT INTO decisions
        (external_id, conversation_id, message_id, decision_text, rationale,
         alternatives_considered, rejected_reasons, context, related_files,
         related_commits, timestamp)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(conversation_id, external_id) DO UPDATE SET
        message_id = excluded.message_id,
        decision_text = excluded.decision_text,
        rationale = excluded.rationale,
        alternatives_considered = excluded.alternatives_considered,
        rejected_reasons = excluded.rejected_reasons,
        context = excluded.context,
        related_files = excluded.related_files,
        related_commits = excluded.related_commits,
        timestamp = excluded.timestamp
    `);
    // Re-select to obtain the numeric row id for the returned map.
    const selectStmt = this.db.prepare(
      "SELECT id FROM decisions WHERE conversation_id = ? AND external_id = ?"
    );
    const decisionIdMap = new Map<string, number>();
    let skipped = 0;
    this.db.transaction(() => {
      for (const decision of decisions) {
        // Both the conversation and the message must have been stored already.
        const conversationId = conversationIdMap.get(decision.conversation_id);
        const messageId = messageIdMap.get(decision.message_id);
        if (!conversationId || !messageId) {
          skipped += 1;
          continue;
        }
        stmt.run(
          decision.id,
          conversationId,
          messageId,
          decision.decision_text,
          decision.rationale || null,
          JSON.stringify(decision.alternatives_considered || []),
          JSON.stringify(decision.rejected_reasons || {}),
          decision.context || null,
          JSON.stringify(decision.related_files || []),
          JSON.stringify(decision.related_commits || []),
          decision.timestamp
        );
        const row = selectStmt.get(conversationId, decision.id) as { id: number };
        decisionIdMap.set(decision.id, row.id);
        // Targeted invalidation: drop cached per-file decision lists and
        // timelines for every file this decision references.
        if (this.cache && decision.related_files) {
          for (const filePath of decision.related_files) {
            this.cache.delete(`decisions:${filePath}`);
            this.cache.delete(`timeline:${filePath}`);
          }
        }
      }
    });
    // Batch callers pass skipFtsRebuild=true and call rebuildAllFts() once at the end.
    if (!skipFtsRebuild) {
      this.rebuildDecisionsFts();
    }
    if (skipped > 0) {
      console.error(`⚠️ Skipping ${skipped} decision(s) with missing refs`);
    }
    console.error(`✓ Stored ${decisions.length - skipped} decisions`);
    return decisionIdMap;
  }
/**
* Rebuild the decisions FTS index.
* Required for FTS5 external content tables after inserting data.
* Call this after batch operations that used skipFtsRebuild=true.
*/
rebuildDecisionsFts(): void {
try {
this.db.getDatabase().exec("INSERT INTO decisions_fts(decisions_fts) VALUES('rebuild')");
} catch (error) {
// FTS rebuild may fail if table doesn't exist or schema mismatch
// Log but don't throw - FTS is optional fallback
console.error("FTS decisions rebuild warning:", (error as Error).message);
}
}
/**
* Rebuild all FTS indexes.
* Call this once after batch operations that used skipFtsRebuild=true.
*/
rebuildAllFts(): void {
this.rebuildMessagesFts();
this.rebuildDecisionsFts();
}
/**
* Retrieve all decisions related to a specific file.
*
* @param filePath - Path to the file
* @returns Array of decisions that reference this file
* @internal
*/
  getDecisionsForFile(filePath: string): Decision[] {
    const cacheKey = `decisions:${filePath}`;
    // Check cache first
    if (this.cache) {
      const cached = this.cache.get<Decision[]>(cacheKey);
      // undefined = miss; an empty array is a valid cached result.
      if (cached !== undefined) {
        return cached;
      }
    }
    // Escape LIKE wildcards in the path before building the pattern below.
    const sanitized = sanitizeForLike(filePath);
    const rows = this.db
      .prepare(
        `SELECT
          d.external_id as decision_external_id,
          d.decision_text,
          d.rationale,
          d.alternatives_considered,
          d.rejected_reasons,
          d.context,
          d.related_files,
          d.related_commits,
          d.timestamp,
          c.external_id as conv_external_id,
          m.external_id as message_external_id
        FROM decisions d
        JOIN conversations c ON d.conversation_id = c.id
        LEFT JOIN messages m ON d.message_id = m.id
        WHERE d.related_files LIKE ? ESCAPE '\\'
        ORDER BY d.timestamp DESC`
      )
      // Matches the quoted path inside the related_files JSON-array text,
      // e.g. %"src/index.ts"% — a substring match against the JSON column.
      .all(`%"${sanitized}"%`) as Array<{
        decision_external_id: string;
        decision_text: string;
        rationale?: string | null;
        alternatives_considered: string;
        rejected_reasons: string;
        context?: string | null;
        related_files: string;
        related_commits: string;
        timestamp: number;
        conv_external_id: string;
        message_external_id: string | null;
      }>;
    const result: Decision[] = [];
    for (const row of rows) {
      // Decision requires a message id; rows whose message join failed are dropped.
      if (!row.message_external_id) {
        continue;
      }
      result.push({
        id: row.decision_external_id,
        conversation_id: row.conv_external_id,
        message_id: row.message_external_id,
        decision_text: row.decision_text,
        rationale: row.rationale || undefined,
        alternatives_considered: safeJsonParse<string[]>(row.alternatives_considered, []),
        rejected_reasons: safeJsonParse<Record<string, string>>(row.rejected_reasons, {}),
        context: row.context || undefined,
        related_files: safeJsonParse<string[]>(row.related_files, []),
        related_commits: safeJsonParse<string[]>(row.related_commits, []),
        timestamp: row.timestamp,
      });
    }
    // Cache the result
    this.cache?.set(cacheKey, result);
    return result;
  }
// ==================== Git Commits ====================
/**
* Store git commit records linked to conversations.
*
* Links git commits to the conversations where they were made or discussed.
*
* @param commits - Array of git commit objects
* @returns Promise that resolves when stored
*/
  async storeGitCommits(
    commits: GitCommit[],
    projectId: number,
    conversationIdMap: Map<string, number>,
    messageIdMap: Map<string, number>
  ): Promise<void> {
    if (commits.length === 0) {
      return;
    }
    // UPSERT keyed on (project_id, hash) so re-indexing the same commits is idempotent.
    const stmt = this.db.prepare(`
      INSERT INTO git_commits
        (project_id, hash, message, author, timestamp, branch, files_changed,
         conversation_id, related_message_id, metadata)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(project_id, hash) DO UPDATE SET
        message = excluded.message,
        author = excluded.author,
        timestamp = excluded.timestamp,
        branch = excluded.branch,
        files_changed = excluded.files_changed,
        conversation_id = excluded.conversation_id,
        related_message_id = excluded.related_message_id,
        metadata = excluded.metadata
    `);
    this.db.transaction(() => {
      for (const commit of commits) {
        // Conversation/message links are optional: a commit with no resolved
        // link (or no link at all) is stored with NULL foreign keys rather
        // than being skipped.
        const conversationId = commit.conversation_id
          ? conversationIdMap.get(commit.conversation_id) ?? null
          : null;
        const messageId = commit.related_message_id
          ? messageIdMap.get(commit.related_message_id) ?? null
          : null;
        stmt.run(
          projectId,
          commit.hash,
          commit.message,
          commit.author || null,
          commit.timestamp,
          commit.branch || null,
          JSON.stringify(commit.files_changed),
          conversationId,
          messageId,
          JSON.stringify(commit.metadata)
        );
        // Targeted invalidation: drop cached per-file commit lists and
        // timelines for every file this commit touched.
        if (this.cache && commit.files_changed) {
          for (const filePath of commit.files_changed) {
            this.cache.delete(`commits:${filePath}`);
            this.cache.delete(`timeline:${filePath}`);
          }
        }
      }
    });
    console.error(`✓ Stored ${commits.length} git commits`);
  }
  /**
   * Retrieve all git commits that changed a specific file.
   *
   * Matches the file path against the JSON-encoded files_changed column.
   *
   * @param filePath - Path to the file
   * @returns Array of commits, ordered by timestamp (most recent first)
   */
  getCommitsForFile(filePath: string): GitCommit[] {
    const cacheKey = `commits:${filePath}`;
    // Check cache first
    if (this.cache) {
      const cached = this.cache.get<GitCommit[]>(cacheKey);
      // undefined = miss; an empty array is a valid cached result.
      if (cached !== undefined) {
        return cached;
      }
    }
    // Escape LIKE wildcards in the path before building the pattern below.
    const sanitized = sanitizeForLike(filePath);
    const rows = this.db
      .prepare(
        `SELECT
          gc.id,
          gc.project_id,
          gc.hash,
          gc.message,
          gc.author,
          gc.timestamp,
          gc.branch,
          gc.files_changed,
          gc.metadata,
          c.external_id as conversation_external_id,
          m.external_id as message_external_id
        FROM git_commits gc
        LEFT JOIN conversations c ON gc.conversation_id = c.id
        LEFT JOIN messages m ON gc.related_message_id = m.id
        WHERE gc.files_changed LIKE ? ESCAPE '\\'
        ORDER BY gc.timestamp DESC`
      )
      // Matches the quoted path inside the files_changed JSON-array text.
      .all(`%"${sanitized}"%`) as Array<GitCommitRow & { conversation_external_id?: string | null; message_external_id?: string | null }>;
    // Map rows to the public GitCommit shape (external ids, parsed JSON fields).
    const result = rows.map((row) => ({
      hash: row.hash,
      message: row.message,
      author: row.author,
      timestamp: row.timestamp,
      branch: row.branch,
      files_changed: safeJsonParse<string[]>(row.files_changed, []),
      conversation_id: row.conversation_external_id || undefined,
      related_message_id: row.message_external_id || undefined,
      metadata: safeJsonParse<Record<string, unknown>>(row.metadata, {}),
    }));
    // Cache the result
    this.cache?.set(cacheKey, result);
    return result;
  }
// ==================== Mistakes ====================
/**
* Store extracted mistakes in the database.
*
* Mistakes include errors, bugs, and wrong approaches that were later corrected.
*
* @param mistakes - Array of mistake objects
* @returns Promise that resolves when stored
*/
  async storeMistakes(
    mistakes: Mistake[],
    conversationIdMap: Map<string, number>,
    messageIdMap: Map<string, number>
  ): Promise<Map<string, number>> {
    if (mistakes.length === 0) {
      return new Map();
    }
    // UPSERT keyed on (conversation_id, external_id) so re-indexing is idempotent.
    const stmt = this.db.prepare(`
      INSERT INTO mistakes
        (external_id, conversation_id, message_id, mistake_type, what_went_wrong,
         correction, user_correction_message, files_affected, timestamp)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(conversation_id, external_id) DO UPDATE SET
        message_id = excluded.message_id,
        mistake_type = excluded.mistake_type,
        what_went_wrong = excluded.what_went_wrong,
        correction = excluded.correction,
        user_correction_message = excluded.user_correction_message,
        files_affected = excluded.files_affected,
        timestamp = excluded.timestamp
    `);
    // Re-select to obtain the numeric row id for the returned map.
    const selectStmt = this.db.prepare(
      "SELECT id FROM mistakes WHERE conversation_id = ? AND external_id = ?"
    );
    const mistakeIdMap = new Map<string, number>();
    let skipped = 0;
    this.db.transaction(() => {
      for (const mistake of mistakes) {
        // Both the conversation and the message must have been stored already.
        const conversationId = conversationIdMap.get(mistake.conversation_id);
        const messageId = messageIdMap.get(mistake.message_id);
        if (!conversationId || !messageId) {
          skipped += 1;
          continue;
        }
        stmt.run(
          mistake.id,
          conversationId,
          messageId,
          mistake.mistake_type,
          mistake.what_went_wrong,
          mistake.correction || null,
          mistake.user_correction_message || null,
          JSON.stringify(mistake.files_affected),
          mistake.timestamp
        );
        const row = selectStmt.get(conversationId, mistake.id) as { id: number };
        mistakeIdMap.set(mistake.id, row.id);
      }
    });
    if (skipped > 0) {
      console.error(`⚠️ Skipping ${skipped} mistake(s) with missing refs`);
    }
    console.error(`✓ Stored ${mistakes.length - skipped} mistakes`);
    return mistakeIdMap;
  }
// ==================== Requirements ====================
/**
* Store extracted requirements in the database.
*
* Requirements include dependencies, constraints, and specifications for components.
*
* @param requirements - Array of requirement objects
* @returns Promise that resolves when stored
*/
  async storeRequirements(
    requirements: Requirement[],
    conversationIdMap: Map<string, number>,
    messageIdMap: Map<string, number>
  ): Promise<void> {
    if (requirements.length === 0) {
      return;
    }
    // UPSERT keyed on (conversation_id, external_id) so re-indexing is idempotent.
    const stmt = this.db.prepare(`
      INSERT INTO requirements
        (external_id, type, description, rationale, affects_components,
         conversation_id, message_id, timestamp)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(conversation_id, external_id) DO UPDATE SET
        type = excluded.type,
        description = excluded.description,
        rationale = excluded.rationale,
        affects_components = excluded.affects_components,
        message_id = excluded.message_id,
        timestamp = excluded.timestamp
    `);
    let skipped = 0;
    this.db.transaction(() => {
      for (const req of requirements) {
        // Both the conversation and the message must have been stored already.
        const conversationId = conversationIdMap.get(req.conversation_id);
        const messageId = messageIdMap.get(req.message_id);
        if (!conversationId || !messageId) {
          skipped += 1;
          continue;
        }
        stmt.run(
          req.id,
          req.type,
          req.description,
          req.rationale || null,
          JSON.stringify(req.affects_components),
          conversationId,
          messageId,
          req.timestamp
        );
      }
    });
    if (skipped > 0) {
      console.error(`⚠️ Skipping ${skipped} requirement(s) with missing refs`);
    }
    console.error(`✓ Stored ${requirements.length - skipped} requirements`);
  }
// ==================== Validations ====================
/**
* Store validation records in the database.
*
* Validations capture test results and performance data from conversations.
*
* @param validations - Array of validation objects
* @returns Promise that resolves when stored
*/
  async storeValidations(
    validations: Validation[],
    conversationIdMap: Map<string, number>
  ): Promise<void> {
    if (validations.length === 0) {
      return;
    }
    // UPSERT keyed on (conversation_id, external_id); note validations link
    // only to a conversation, not to an individual message.
    const stmt = this.db.prepare(`
      INSERT INTO validations
        (external_id, conversation_id, what_was_tested, test_command, result,
         performance_data, files_tested, timestamp)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(conversation_id, external_id) DO UPDATE SET
        what_was_tested = excluded.what_was_tested,
        test_command = excluded.test_command,
        result = excluded.result,
        performance_data = excluded.performance_data,
        files_tested = excluded.files_tested,
        timestamp = excluded.timestamp
    `);
    let skipped = 0;
    this.db.transaction(() => {
      for (const val of validations) {
        // Skip validations whose conversation was not stored.
        const conversationId = conversationIdMap.get(val.conversation_id);
        if (!conversationId) {
          skipped += 1;
          continue;
        }
        stmt.run(
          val.id,
          conversationId,
          val.what_was_tested,
          val.test_command || null,
          val.result,
          // performance_data is optional; stored as JSON text when present.
          val.performance_data ? JSON.stringify(val.performance_data) : null,
          JSON.stringify(val.files_tested),
          val.timestamp
        );
      }
    });
    if (skipped > 0) {
      console.error(`⚠️ Skipping ${skipped} validation(s) with missing conversations`);
    }
    console.error(`✓ Stored ${validations.length - skipped} validations`);
  }
// ==================== Queries ====================
/**
* Get the complete timeline of changes to a file.
*
* Combines file edits, git commits, and related decisions into a single timeline.
* This is a key method used by tools like checkBeforeModify and getFileEvolution.
*
* @param filePath - Path to the file
* @returns Object containing:
* - `file_path`: The file path queried
* - `edits`: All file edit records
* - `commits`: All git commits affecting this file
* - `decisions`: All decisions related to this file
*
* @example
* ```typescript
* const timeline = storage.getFileTimeline('src/index.ts');
* console.error(`${timeline.edits.length} edits`);
* console.error(`${timeline.commits.length} commits`);
* console.error(`${timeline.decisions.length} decisions`);
* ```
*/
getFileTimeline(filePath: string): {
file_path: string;
edits: FileEdit[];
commits: GitCommit[];
decisions: Decision[];
} {
const cacheKey = `timeline:${filePath}`;
// Check cache first
if (this.cache) {
const cached = this.cache.get<{
file_path: string;
edits: FileEdit[];
commits: GitCommit[];
decisions: Decision[];
}>(cacheKey);
if (cached !== undefined) {
return cached;
}
}
// Combine file edits, commits, and decisions
const edits = this.getFileEdits(filePath);
const commits = this.getCommitsForFile(filePath);
const decisions = this.getDecisionsForFile(filePath);
const result = {
file_path: filePath,
edits,
commits,
decisions,
};
// Cache the result
this.cache?.set(cacheKey, result);
return result;
}
/**
* Get statistics about the indexed conversation data.
*
* Returns counts of all major entity types stored in the database.
* Used for displaying indexing results and system health checks.
*
* @returns Object containing counts for:
* - `conversations`: Total conversations indexed
* - `messages`: Total messages stored
* - `decisions`: Total decisions extracted
* - `mistakes`: Total mistakes documented
* - `git_commits`: Total git commits linked
*
* @example
* ```typescript
* const stats = storage.getStats();
* console.error(`Indexed ${stats.conversations.count} conversations`);
* console.error(`Extracted ${stats.decisions.count} decisions`);
* console.error(`Linked ${stats.git_commits.count} commits`);
* ```
*/
getStats(): {
conversations: { count: number };
messages: { count: number };
decisions: { count: number };
mistakes: { count: number };
git_commits: { count: number };
} {
const stats = {
conversations: this.db
.prepare("SELECT COUNT(*) as count FROM conversations")
.get() as { count: number },
messages: this.db
.prepare("SELECT COUNT(*) as count FROM messages")
.get() as { count: number },
decisions: this.db
.prepare("SELECT COUNT(*) as count FROM decisions")
.get() as { count: number },
mistakes: this.db
.prepare("SELECT COUNT(*) as count FROM mistakes")
.get() as { count: number },
git_commits: this.db
.prepare("SELECT COUNT(*) as count FROM git_commits")
.get() as { count: number },
};
return stats;
}
getStatsForProject(
projectPath: string,
sourceType: "claude-code" | "codex"
): {
conversations: { count: number };
messages: { count: number };
decisions: { count: number };
mistakes: { count: number };
git_commits: { count: number };
} {
const canonicalPath = getCanonicalProjectPath(projectPath).canonicalPath;
const projectRow = this.db
.prepare("SELECT id FROM projects WHERE canonical_path = ?")
.get(canonicalPath) as { id: number } | undefined;
if (!projectRow) {
return {
conversations: { count: 0 },
messages: { count: 0 },
decisions: { count: 0 },
mistakes: { count: 0 },
git_commits: { count: 0 },
};
}
const stats = {
conversations: this.db
.prepare("SELECT COUNT(*) as count FROM conversations WHERE project_path = ? AND source_type = ?")
.get(canonicalPath, sourceType) as { count: number },
messages: this.db
.prepare(
`
SELECT COUNT(*) as count
FROM messages m
JOIN conversations c ON m.conversation_id = c.id
WHERE c.project_path = ? AND c.source_type = ?
`
)
.get(canonicalPath, sourceType) as { count: number },
decisions: this.db
.prepare(
`
SELECT COUNT(*) as count
FROM decisions d
JOIN conversations c ON d.conversation_id = c.id
WHERE c.project_path = ? AND c.source_type = ?
`
)
.get(canonicalPath, sourceType) as { count: number },
mistakes: this.db
.prepare(
`
SELECT COUNT(*) as count
FROM mistakes m
JOIN conversations c ON m.conversation_id = c.id
WHERE c.project_path = ? AND c.source_type = ?
`
)
.get(canonicalPath, sourceType) as { count: number },
git_commits: this.db
.prepare("SELECT COUNT(*) as count FROM git_commits WHERE project_id = ?")
.get(projectRow.id) as { count: number },
};
return stats;
}
// ==================== Methodologies ====================
/**
* Store extracted methodologies in the database.
*
* Methodologies track how AI solved problems (approach, steps, tools).
*
* @param methodologies - Array of methodology objects
* @param conversationIdMap - Map of external to internal conversation IDs
* @param messageIdMap - Map of external to internal message IDs
* @returns Promise with map of external to internal methodology IDs
*/
async storeMethodologies(
methodologies: Methodology[],
conversationIdMap: Map<string, number>,
messageIdMap: Map<string, number>
): Promise<Map<string, string>> {
if (methodologies.length === 0) {
return new Map();
}
const stmt = this.db.prepare(`
INSERT INTO methodologies
(id, conversation_id, start_message_id, end_message_id, problem_statement,
approach, steps_taken, tools_used, files_involved, outcome,
what_worked, what_didnt_work, started_at, ended_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
problem_statement = excluded.problem_statement,
approach = excluded.approach,
steps_taken = excluded.steps_taken,
tools_used = excluded.tools_used,
files_involved = excluded.files_involved,
outcome = excluded.outcome,
what_worked = excluded.what_worked,
what_didnt_work = excluded.what_didnt_work,
ended_at = excluded.ended_at
`);
const ftsStmt = this.db.prepare(`
INSERT INTO methodologies_fts (id, problem_statement, what_worked, what_didnt_work)
VALUES (?, ?, ?, ?)
`);
const methodologyIdMap = new Map<string, string>();
let skipped = 0;
this.db.transaction(() => {
for (const methodology of methodologies) {
const conversationId = conversationIdMap.get(methodology.conversation_id);
const startMessageId = messageIdMap.get(methodology.start_message_id);
const endMessageId = messageIdMap.get(methodology.end_message_id);
if (!conversationId || !startMessageId || !endMessageId) {
skipped += 1;
continue;
}
stmt.run(
methodology.id,
conversationId,
startMessageId,
endMessageId,
methodology.problem_statement,
methodology.approach,
JSON.stringify(methodology.steps_taken),
JSON.stringify(methodology.tools_used),
JSON.stringify(methodology.files_involved),
methodology.outcome,
methodology.what_worked || null,
methodology.what_didnt_work || null,
methodology.started_at,
methodology.ended_at
);
// Index in FTS
try {
ftsStmt.run(
methodology.id,
methodology.problem_statement,
methodology.what_worked || "",
methodology.what_didnt_work || ""
);
} catch {
// FTS entry may already exist, ignore
}
methodologyIdMap.set(methodology.id, methodology.id);
}
});
if (skipped > 0) {
console.error(`⚠️ Skipping ${skipped} methodology(ies) with missing refs`);
}
console.error(`✓ Stored ${methodologies.length - skipped} methodologies`);
return methodologyIdMap;
}
// ==================== Research Findings ====================
/**
* Store extracted research findings in the database.
*
* Research findings track discoveries made during exploration/research.
*
* @param findings - Array of research finding objects
* @param conversationIdMap - Map of external to internal conversation IDs
* @param messageIdMap - Map of external to internal message IDs
* @returns Promise with map of external to internal finding IDs
*/
async storeResearchFindings(
findings: ResearchFinding[],
conversationIdMap: Map<string, number>,
messageIdMap: Map<string, number>
): Promise<Map<string, string>> {
if (findings.length === 0) {
return new Map();
}
const stmt = this.db.prepare(`
INSERT INTO research_findings
(id, conversation_id, message_id, topic, discovery, source_type,
source_reference, relevance, confidence, related_to, timestamp)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
topic = excluded.topic,
discovery = excluded.discovery,
source_type = excluded.source_type,
source_reference = excluded.source_reference,
relevance = excluded.relevance,
confidence = excluded.confidence,
related_to = excluded.related_to
`);
const ftsStmt = this.db.prepare(`
INSERT INTO research_fts (id, topic, discovery, source_reference)
VALUES (?, ?, ?, ?)
`);
const findingIdMap = new Map<string, string>();
let skipped = 0;
this.db.transaction(() => {
for (const finding of findings) {
const conversationId = conversationIdMap.get(finding.conversation_id);
const messageId = messageIdMap.get(finding.message_id);
if (!conversationId || !messageId) {
skipped += 1;
continue;
}
stmt.run(
finding.id,
conversationId,
messageId,
finding.topic,
finding.discovery,
finding.source_type,
finding.source_reference || null,
finding.relevance,
finding.confidence,
JSON.stringify(finding.related_to),
finding.timestamp
);
// Index in FTS
try {
ftsStmt.run(
finding.id,
finding.topic,
finding.discovery,
finding.source_reference || ""
);
} catch {
// FTS entry may already exist, ignore
}
findingIdMap.set(finding.id, finding.id);
}
});
if (skipped > 0) {
console.error(`⚠️ Skipping ${skipped} research finding(s) with missing refs`);
}
console.error(`✓ Stored ${findings.length - skipped} research findings`);
return findingIdMap;
}
// ==================== Solution Patterns ====================
/**
* Store extracted solution patterns in the database.
*
* Solution patterns track reusable solutions for common problems.
*
* @param patterns - Array of solution pattern objects
* @param conversationIdMap - Map of external to internal conversation IDs
* @param messageIdMap - Map of external to internal message IDs
* @returns Promise with map of external to internal pattern IDs
*/
async storeSolutionPatterns(
patterns: SolutionPattern[],
conversationIdMap: Map<string, number>,
messageIdMap: Map<string, number>
): Promise<Map<string, string>> {
if (patterns.length === 0) {
return new Map();
}
const stmt = this.db.prepare(`
INSERT INTO solution_patterns
(id, conversation_id, message_id, problem_category, problem_description,
solution_summary, solution_steps, code_pattern, technology, prerequisites,
applies_when, avoid_when, applied_to_files, effectiveness, timestamp)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
problem_category = excluded.problem_category,
problem_description = excluded.problem_description,
solution_summary = excluded.solution_summary,
solution_steps = excluded.solution_steps,
code_pattern = excluded.code_pattern,
technology = excluded.technology,
prerequisites = excluded.prerequisites,
applies_when = excluded.applies_when,
avoid_when = excluded.avoid_when,
applied_to_files = excluded.applied_to_files,
effectiveness = excluded.effectiveness
`);
const ftsStmt = this.db.prepare(`
INSERT INTO patterns_fts (id, problem_description, solution_summary, applies_when)
VALUES (?, ?, ?, ?)
`);
const patternIdMap = new Map<string, string>();
let skipped = 0;
this.db.transaction(() => {
for (const pattern of patterns) {
const conversationId = conversationIdMap.get(pattern.conversation_id);
const messageId = messageIdMap.get(pattern.message_id);
if (!conversationId || !messageId) {
skipped += 1;
continue;
}
stmt.run(
pattern.id,
conversationId,
messageId,
pattern.problem_category,
pattern.problem_description,
pattern.solution_summary,
JSON.stringify(pattern.solution_steps),
pattern.code_pattern || null,
JSON.stringify(pattern.technology),
JSON.stringify(pattern.prerequisites),
pattern.applies_when,
pattern.avoid_when || null,
JSON.stringify(pattern.applied_to_files),
pattern.effectiveness,
pattern.timestamp
);
// Index in FTS
try {
ftsStmt.run(
pattern.id,
pattern.problem_description,
pattern.solution_summary,
pattern.applies_when
);
} catch {
// FTS entry may already exist, ignore
}
patternIdMap.set(pattern.id, pattern.id);
}
});
if (skipped > 0) {
console.error(`⚠️ Skipping ${skipped} solution pattern(s) with missing refs`);
}
console.error(`✓ Stored ${patterns.length - skipped} solution patterns`);
return patternIdMap;
}
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/storage/DeletionService.ts | TypeScript | /**
* DeletionService - Handle selective deletion of conversations by topic/keyword
* Uses semantic + FTS5 search to find matching conversations
*/
import type Database from "better-sqlite3";
import { BackupManager, type BackupMetadata } from "./BackupManager.js";
import type { ConversationStorage } from "./ConversationStorage.js";
import type { SemanticSearch } from "../search/SemanticSearch.js";
/**
 * Deletion preview — shows what WOULD be deleted, without touching the DB.
 */
export interface DeletionPreview {
  // Internal conversation ids matched by the topic/keyword search
  conversationIds: string[];
  // Summary row per matched conversation (session_id mirrors id — see query)
  conversations: Array<{
    id: string;
    session_id: string;
    created_at: number;
    message_count: number;
  }>;
  totalMessages: number;
  totalDecisions: number;
  totalMistakes: number;
  // One-line human-readable description of the match
  summary: string;
}
/**
 * Deletion result — what was actually deleted, plus where the backup went.
 */
export interface DeletionResult {
  // Metadata of the backup created immediately before deletion
  backup: BackupMetadata;
  // Row counts removed per entity type (captured before the delete ran)
  deleted: {
    conversations: number;
    messages: number;
    decisions: number;
    mistakes: number;
    toolUses: number;
    fileEdits: number;
  };
  // One-line human-readable description of the outcome
  summary: string;
}
/**
 * DeletionService class
 *
 * Orchestrates selective deletion of conversations matched by topic/keyword.
 * A backup is always created before anything is deleted, and the deletes run
 * inside a single transaction so the database is never left half-cleaned.
 */
export class DeletionService {
  private db: Database.Database;
  private backupManager: BackupManager;
  private storage: ConversationStorage;
  // Optional; when null, topic matching falls back to FTS5 search only.
  private semanticSearch: SemanticSearch | null;
  constructor(
    db: Database.Database,
    storage: ConversationStorage,
    semanticSearch: SemanticSearch | null = null
  ) {
    this.db = db;
    this.backupManager = new BackupManager(db);
    this.storage = storage;
    this.semanticSearch = semanticSearch;
  }
  /**
   * Preview what would be deleted for given keywords/topics.
   * Read-only: runs the same matching as forgetByTopic but deletes nothing.
   */
  async previewDeletionByTopic(
    keywords: string[],
    projectPath: string
  ): Promise<DeletionPreview> {
    // Find matching conversations using search
    const conversationIds = await this.findConversationsByTopic(keywords, projectPath);
    if (conversationIds.length === 0) {
      return {
        conversationIds: [],
        conversations: [],
        totalMessages: 0,
        totalDecisions: 0,
        totalMistakes: 0,
        summary: `No conversations found matching: ${keywords.join(", ")}`,
      };
    }
    // Get conversation details (one "?" placeholder per matched id)
    const placeholders = conversationIds.map(() => "?").join(",");
    const conversations = this.db
      .prepare(
        `SELECT id, id as session_id, created_at, message_count
         FROM conversations
         WHERE id IN (${placeholders})
         ORDER BY created_at DESC`
      )
      .all(...conversationIds) as Array<{
      id: string;
      session_id: string;
      created_at: number;
      message_count: number;
    }>;
    // Count related records
    const totalMessages = this.countMessages(conversationIds);
    const totalDecisions = this.countDecisions(conversationIds);
    const totalMistakes = this.countMistakes(conversationIds);
    const summary = `Found ${conversations.length} conversation${conversations.length !== 1 ? "s" : ""} (${totalMessages} messages, ${totalDecisions} decisions, ${totalMistakes} mistakes) matching: ${keywords.join(", ")}`;
    return {
      conversationIds,
      conversations,
      totalMessages,
      totalDecisions,
      totalMistakes,
      summary,
    };
  }
  /**
   * Delete conversations by topic/keywords with automatic backup.
   *
   * Order matters here: preview → backup → capture counts → transactional
   * delete → cache invalidation. FTS rows and vector embeddings are removed
   * explicitly because they are not covered by foreign-key CASCADE.
   *
   * @throws Error when no conversations match the keywords
   */
  async forgetByTopic(
    keywords: string[],
    projectPath: string
  ): Promise<DeletionResult> {
    // First, preview what we're going to delete
    const preview = await this.previewDeletionByTopic(keywords, projectPath);
    if (preview.conversationIds.length === 0) {
      throw new Error(`No conversations found matching: ${keywords.join(", ")}`);
    }
    // Create backup before deletion
    const description = `Forget conversations about: ${keywords.join(", ")}`;
    const backup = this.backupManager.createBackupForConversations(
      preview.conversationIds,
      description,
      projectPath
    );
    // Count records before deletion for reporting (rows are gone afterwards)
    const beforeCounts = {
      conversations: preview.conversationIds.length,
      messages: preview.totalMessages,
      decisions: preview.totalDecisions,
      mistakes: preview.totalMistakes,
      toolUses: this.countToolUses(preview.conversationIds),
      fileEdits: this.countFileEdits(preview.conversationIds),
    };
    // Perform deletion (CASCADE will handle related records)
    const placeholders = preview.conversationIds.map(() => "?").join(",");
    // Use transaction for atomic deletion
    // NOTE: transaction() returns a function; the trailing () invokes it.
    this.db.transaction(() => {
      // Delete FTS entries first (no CASCADE); FTS rowids are looked up via
      // the corresponding content-table rowids while those rows still exist.
      this.db
        .prepare(
          `DELETE FROM messages_fts
           WHERE rowid IN (
             SELECT rowid FROM messages
             WHERE conversation_id IN (${placeholders})
           )`
        )
        .run(...preview.conversationIds);
      this.db
        .prepare(
          `DELETE FROM decisions_fts
           WHERE rowid IN (
             SELECT rowid FROM decisions
             WHERE conversation_id IN (${placeholders})
           )`
        )
        .run(...preview.conversationIds);
      // Delete vector embeddings (no CASCADE from sqlite-vec tables)
      // Get message IDs first
      const messageIds = this.db
        .prepare(`SELECT id FROM messages WHERE conversation_id IN (${placeholders})`)
        .all(...preview.conversationIds) as Array<{ id: string }>;
      if (messageIds.length > 0) {
        const msgPlaceholders = messageIds.map(() => "?").join(",");
        // The vec table keys are "msg_"-prefixed message ids
        const msgEmbedIds = messageIds.map(m => `msg_${m.id}`);
        // Delete from BLOB fallback table
        try {
          this.db
            .prepare(`DELETE FROM message_embeddings WHERE message_id IN (${msgPlaceholders})`)
            .run(...messageIds.map(m => m.id));
        } catch (_e) {
          // Table might not exist
        }
        // Delete from sqlite-vec virtual table
        try {
          this.db
            .prepare(`DELETE FROM vec_message_embeddings WHERE id IN (${msgPlaceholders})`)
            .run(...msgEmbedIds);
        } catch (_e) {
          // Vec table might not exist
        }
      }
      // Delete decision embeddings (same pattern, "dec_"-prefixed vec keys)
      const decisionIds = this.db
        .prepare(`SELECT id FROM decisions WHERE conversation_id IN (${placeholders})`)
        .all(...preview.conversationIds) as Array<{ id: string }>;
      if (decisionIds.length > 0) {
        const decPlaceholders = decisionIds.map(() => "?").join(",");
        const decEmbedIds = decisionIds.map(d => `dec_${d.id}`);
        // Delete from BLOB fallback table
        try {
          this.db
            .prepare(`DELETE FROM decision_embeddings WHERE decision_id IN (${decPlaceholders})`)
            .run(...decisionIds.map(d => d.id));
        } catch (_e) {
          // Table might not exist
        }
        // Delete from sqlite-vec virtual table
        try {
          this.db
            .prepare(`DELETE FROM vec_decision_embeddings WHERE id IN (${decPlaceholders})`)
            .run(...decEmbedIds);
        } catch (_e) {
          // Vec table might not exist
        }
      }
      // Delete conversations last (CASCADE handles the rest)
      this.db
        .prepare(`DELETE FROM conversations WHERE id IN (${placeholders})`)
        .run(...preview.conversationIds);
    })();
    // Clear cache since data was deleted
    this.storage.clearCache();
    const summary = `✓ Deleted ${beforeCounts.conversations} conversations (${beforeCounts.messages} messages, ${beforeCounts.decisions} decisions, ${beforeCounts.mistakes} mistakes)\n✓ Backup saved: ${backup.backupPath}`;
    return {
      backup,
      deleted: beforeCounts,
      summary,
    };
  }
  /**
   * Find conversations matching keywords/topics using search.
   * Combines semantic search (when configured) with FTS5; results are
   * deduplicated via a Set. Each search path is best-effort — a failure in
   * one is logged and the other still contributes matches.
   */
  private async findConversationsByTopic(
    keywords: string[],
    projectPath: string
  ): Promise<string[]> {
    const conversationIds = new Set<string>();
    // Build search query from keywords
    const searchQuery = keywords.join(" ");
    // Try semantic search if available
    if (this.semanticSearch) {
      try {
        const results = await this.semanticSearch.searchConversations(
          searchQuery,
          100 // Cast wide net
        );
        // Filter by project path (semantic search is not project-scoped)
        for (const result of results) {
          if (result.conversation.project_path === projectPath) {
            conversationIds.add(result.conversation.id);
          }
        }
      } catch (error) {
        console.error("Semantic search failed, falling back to FTS:", (error as Error).message);
      }
    }
    // Also try FTS5 search for exact matches
    try {
      // Escape internal double quotes in keywords to prevent FTS syntax errors
      const ftsQuery = keywords.map((k) => `"${k.replace(/"/g, '""')}"`).join(" OR ");
      const messages = this.db
        .prepare(
          `SELECT DISTINCT m.conversation_id
           FROM messages_fts mf
           JOIN messages m ON m.rowid = mf.rowid
           JOIN conversations c ON c.id = m.conversation_id
           WHERE messages_fts MATCH ?
           AND c.project_path = ?
           LIMIT 100`
        )
        .all(ftsQuery, projectPath) as Array<{ conversation_id: string }>;
      for (const msg of messages) {
        conversationIds.add(msg.conversation_id);
      }
    } catch (error) {
      console.error("FTS search failed:", (error as Error).message);
    }
    return Array.from(conversationIds);
  }
  /**
   * Count helpers.
   * NOTE(review): callers must pass a non-empty id array — an empty list
   * would build invalid `IN ()` SQL. All current call sites guard for this.
   */
  private countMessages(conversationIds: string[]): number {
    const placeholders = conversationIds.map(() => "?").join(",");
    const result = this.db
      .prepare(
        `SELECT COUNT(*) as count FROM messages WHERE conversation_id IN (${placeholders})`
      )
      .get(...conversationIds) as { count: number };
    return result.count;
  }
  private countDecisions(conversationIds: string[]): number {
    const placeholders = conversationIds.map(() => "?").join(",");
    const result = this.db
      .prepare(
        `SELECT COUNT(*) as count FROM decisions WHERE conversation_id IN (${placeholders})`
      )
      .get(...conversationIds) as { count: number };
    return result.count;
  }
  private countMistakes(conversationIds: string[]): number {
    const placeholders = conversationIds.map(() => "?").join(",");
    const result = this.db
      .prepare(
        `SELECT COUNT(*) as count FROM mistakes WHERE conversation_id IN (${placeholders})`
      )
      .get(...conversationIds) as { count: number };
    return result.count;
  }
  private countToolUses(conversationIds: string[]): number {
    const placeholders = conversationIds.map(() => "?").join(",");
    const result = this.db
      .prepare(
        `SELECT COUNT(*) as count
         FROM tool_uses
         WHERE message_id IN (
           SELECT id FROM messages WHERE conversation_id IN (${placeholders})
         )`
      )
      .get(...conversationIds) as { count: number };
    return result.count;
  }
  private countFileEdits(conversationIds: string[]): number {
    const placeholders = conversationIds.map(() => "?").join(",");
    const result = this.db
      .prepare(
        `SELECT COUNT(*) as count FROM file_edits WHERE conversation_id IN (${placeholders})`
      )
      .get(...conversationIds) as { count: number };
    return result.count;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/storage/GlobalIndex.ts | TypeScript | /**
* Global Index for Cross-Project Search (Single DB)
*
* Stores project registry inside the main database.
*/
import { getSQLiteManager, SQLiteManager } from "./SQLiteManager.js";
import { getCanonicalProjectPath } from "../utils/worktree.js";
import { safeJsonParse } from "../utils/safeJson.js";
/**
 * A registered project source (one row of project_sources joined to projects).
 */
export interface ProjectMetadata {
  id: number; // project_sources.id
  project_id: number;
  // Canonical project path (projects.canonical_path)
  project_path: string;
  source_type: "claude-code" | "codex";
  source_root: string | null;
  // Epoch millis of the most recent indexing run
  last_indexed: number;
  message_count: number;
  conversation_count: number;
  decision_count: number;
  mistake_count: number;
  // Parsed from the JSON metadata column
  metadata: Record<string, unknown>;
  created_at: number;
  updated_at: number;
}
/**
 * Input for GlobalIndex.registerProject. Counts default to 0 and metadata
 * to {} when omitted.
 */
export interface RegisterProjectOptions {
  project_path: string;
  source_type: "claude-code" | "codex";
  source_root?: string;
  message_count?: number;
  conversation_count?: number;
  decision_count?: number;
  mistake_count?: number;
  metadata?: Record<string, unknown>;
}
/**
 * Registry of indexed projects stored inside the main database
 * (projects / project_sources / project_aliases tables).
 */
export class GlobalIndex {
  private sqliteManager: SQLiteManager;
  private db: ReturnType<SQLiteManager["getDatabase"]>;
  // Always false today (see constructor), so close() is currently a no-op.
  private ownsManager: boolean;
  constructor(sqliteManager?: SQLiteManager) {
    this.sqliteManager = sqliteManager ?? getSQLiteManager();
    this.db = this.sqliteManager.getDatabase();
    this.ownsManager = false;
  }
  /**
   * Resolve a project path to its internal projects.id.
   * Canonicalizes the path, then checks projects.canonical_path and,
   * failing that, project_aliases. Returns null when unknown.
   */
  private resolveProjectId(projectPath: string): number | null {
    const canonical = getCanonicalProjectPath(projectPath).canonicalPath;
    const projectRow = this.db
      .prepare("SELECT id FROM projects WHERE canonical_path = ?")
      .get(canonical) as { id: number } | undefined;
    if (projectRow) {
      return projectRow.id;
    }
    const aliasRow = this.db
      .prepare("SELECT project_id FROM project_aliases WHERE alias_path = ?")
      .get(canonical) as { project_id: number } | undefined;
    return aliasRow?.project_id ?? null;
  }
  /**
   * Register (or refresh) a project source and return its stored metadata.
   * Upserts the projects row, then upserts the project_sources row keyed on
   * (project_id, source_type), then reads the result back.
   * NOTE(review): the two writes are not wrapped in a transaction — presumably
   * acceptable for this single-writer tool, but verify if concurrency grows.
   */
  registerProject(options: RegisterProjectOptions): ProjectMetadata {
    const now = Date.now();
    const canonical = getCanonicalProjectPath(options.project_path).canonicalPath;
    const existingProject = this.db
      .prepare("SELECT id, created_at FROM projects WHERE canonical_path = ?")
      .get(canonical) as { id: number; created_at: number } | undefined;
    let projectId: number;
    if (existingProject) {
      projectId = existingProject.id;
      this.db
        .prepare("UPDATE projects SET updated_at = ?, display_path = ? WHERE id = ?")
        .run(now, canonical, projectId);
    } else {
      const result = this.db
        .prepare(
          "INSERT INTO projects (canonical_path, display_path, created_at, updated_at) VALUES (?, ?, ?, ?)"
        )
        .run(canonical, canonical, now, now);
      projectId = Number(result.lastInsertRowid);
    }
    this.db
      .prepare(
        `
      INSERT INTO project_sources (
        project_id, source_type, source_root, last_indexed,
        message_count, conversation_count, decision_count, mistake_count,
        metadata, created_at, updated_at
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(project_id, source_type) DO UPDATE SET
        source_root = excluded.source_root,
        last_indexed = excluded.last_indexed,
        message_count = excluded.message_count,
        conversation_count = excluded.conversation_count,
        decision_count = excluded.decision_count,
        mistake_count = excluded.mistake_count,
        metadata = excluded.metadata,
        updated_at = excluded.updated_at
    `
      )
      .run(
        projectId,
        options.source_type,
        options.source_root ?? null,
        now,
        options.message_count ?? 0,
        options.conversation_count ?? 0,
        options.decision_count ?? 0,
        options.mistake_count ?? 0,
        JSON.stringify(options.metadata ?? {}),
        now,
        now
      );
    // Read back the row we just wrote so callers get canonical values.
    const row = this.db
      .prepare(
        `
      SELECT
        ps.id,
        ps.project_id,
        p.canonical_path as project_path,
        ps.source_type,
        ps.source_root,
        ps.last_indexed,
        ps.message_count,
        ps.conversation_count,
        ps.decision_count,
        ps.mistake_count,
        ps.metadata,
        ps.created_at,
        ps.updated_at
      FROM project_sources ps
      JOIN projects p ON p.id = ps.project_id
      WHERE ps.project_id = ? AND ps.source_type = ?
    `
      )
      .get(projectId, options.source_type) as {
      id: number;
      project_id: number;
      project_path: string;
      source_type: "claude-code" | "codex";
      source_root: string | null;
      last_indexed: number;
      message_count: number;
      conversation_count: number;
      decision_count: number;
      mistake_count: number;
      metadata: string;
      created_at: number;
      updated_at: number;
    };
    return {
      id: row.id,
      project_id: row.project_id,
      project_path: row.project_path,
      source_type: row.source_type,
      source_root: row.source_root,
      last_indexed: row.last_indexed,
      message_count: row.message_count,
      conversation_count: row.conversation_count,
      decision_count: row.decision_count,
      mistake_count: row.mistake_count,
      metadata: safeJsonParse(row.metadata, {}),
      created_at: row.created_at,
      updated_at: row.updated_at,
    };
  }
  /**
   * List all registered project sources, newest-indexed first,
   * optionally filtered by source type.
   */
  getAllProjects(sourceType?: "claude-code" | "codex"): ProjectMetadata[] {
    let sql = `
      SELECT
        ps.id,
        ps.project_id,
        p.canonical_path as project_path,
        ps.source_type,
        ps.source_root,
        ps.last_indexed,
        ps.message_count,
        ps.conversation_count,
        ps.decision_count,
        ps.mistake_count,
        ps.metadata,
        ps.created_at,
        ps.updated_at
      FROM project_sources ps
      JOIN projects p ON p.id = ps.project_id
    `;
    const params: string[] = [];
    if (sourceType) {
      sql += " WHERE ps.source_type = ?";
      params.push(sourceType);
    }
    sql += " ORDER BY ps.last_indexed DESC";
    const rows = this.db.prepare(sql).all(...params) as Array<{
      id: number;
      project_id: number;
      project_path: string;
      source_type: "claude-code" | "codex";
      source_root: string | null;
      last_indexed: number;
      message_count: number;
      conversation_count: number;
      decision_count: number;
      mistake_count: number;
      metadata: string;
      created_at: number;
      updated_at: number;
    }>;
    // Spread the raw row, then replace the JSON string with the parsed object.
    return rows.map((row) => ({
      ...row,
      metadata: safeJsonParse<Record<string, unknown>>(row.metadata, {}),
    }));
  }
  /**
   * Look up a single project source. Without a sourceType, returns the
   * most recently indexed source for the project. Null when unknown.
   */
  getProject(projectPath: string, sourceType?: "claude-code" | "codex"): ProjectMetadata | null {
    const projectId = this.resolveProjectId(projectPath);
    if (!projectId) {
      return null;
    }
    let sql = `
      SELECT
        ps.id,
        ps.project_id,
        p.canonical_path as project_path,
        ps.source_type,
        ps.source_root,
        ps.last_indexed,
        ps.message_count,
        ps.conversation_count,
        ps.decision_count,
        ps.mistake_count,
        ps.metadata,
        ps.created_at,
        ps.updated_at
      FROM project_sources ps
      JOIN projects p ON p.id = ps.project_id
      WHERE ps.project_id = ?
    `;
    const params: (number | string)[] = [projectId];
    if (sourceType) {
      sql += " AND ps.source_type = ?";
      params.push(sourceType);
    } else {
      sql += " ORDER BY ps.last_indexed DESC LIMIT 1";
    }
    const row = this.db.prepare(sql).get(...params) as {
      id: number;
      project_id: number;
      project_path: string;
      source_type: "claude-code" | "codex";
      source_root: string | null;
      last_indexed: number;
      message_count: number;
      conversation_count: number;
      decision_count: number;
      mistake_count: number;
      metadata: string;
      created_at: number;
      updated_at: number;
    } | undefined;
    if (!row) {
      return null;
    }
    return {
      ...row,
      metadata: safeJsonParse<Record<string, unknown>>(row.metadata, {}),
    };
  }
  /**
   * Remove a project source (or all sources when sourceType is omitted).
   * When the last source of a project is removed, the projects row and its
   * aliases are removed too. Returns true if any source row was deleted.
   */
  removeProject(projectPath: string, sourceType?: "claude-code" | "codex"): boolean {
    const projectId = this.resolveProjectId(projectPath);
    if (!projectId) {
      return false;
    }
    if (sourceType) {
      const result = this.db
        .prepare("DELETE FROM project_sources WHERE project_id = ? AND source_type = ?")
        .run(projectId, sourceType);
      // Garbage-collect the project record once no sources remain.
      const remaining = this.db
        .prepare("SELECT COUNT(*) as count FROM project_sources WHERE project_id = ?")
        .get(projectId) as { count: number };
      if (remaining.count === 0) {
        this.db.prepare("DELETE FROM projects WHERE id = ?").run(projectId);
        this.db.prepare("DELETE FROM project_aliases WHERE project_id = ?").run(projectId);
      }
      return result.changes > 0;
    }
    const result = this.db
      .prepare("DELETE FROM project_sources WHERE project_id = ?")
      .run(projectId);
    this.db.prepare("DELETE FROM projects WHERE id = ?").run(projectId);
    this.db.prepare("DELETE FROM project_aliases WHERE project_id = ?").run(projectId);
    return result.changes > 0;
  }
  /**
   * Aggregate counts across all registered project sources.
   * Counts are the cached values from project_sources, not live row counts.
   */
  getGlobalStats(): {
    total_projects: number;
    claude_code_projects: number;
    codex_projects: number;
    total_messages: number;
    total_conversations: number;
    total_decisions: number;
    total_mistakes: number;
  } {
    const stats = this.db
      .prepare(
        `
      SELECT
        COUNT(DISTINCT project_id) as total_projects,
        COALESCE(SUM(CASE WHEN source_type = 'claude-code' THEN 1 ELSE 0 END), 0) as claude_code_projects,
        COALESCE(SUM(CASE WHEN source_type = 'codex' THEN 1 ELSE 0 END), 0) as codex_projects,
        COALESCE(SUM(message_count), 0) as total_messages,
        COALESCE(SUM(conversation_count), 0) as total_conversations,
        COALESCE(SUM(decision_count), 0) as total_decisions,
        COALESCE(SUM(mistake_count), 0) as total_mistakes
      FROM project_sources
    `
      )
      .get() as {
      total_projects: number;
      claude_code_projects: number;
      codex_projects: number;
      total_messages: number;
      total_conversations: number;
      total_decisions: number;
      total_mistakes: number;
    };
    return stats;
  }
  // Closes the underlying manager only when owned; currently ownsManager is
  // never set to true, so this is effectively a no-op (shared manager is
  // closed by its owner elsewhere).
  close(): void {
    if (this.ownsManager) {
      this.sqliteManager.close();
    }
  }
  // Absolute path of the backing database file.
  getDbPath(): string {
    return this.sqliteManager.getDbPath();
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/storage/SQLiteManager.ts | TypeScript | /**
* SQLite Manager with optimized settings for local indexing workloads
*/
import Database from "better-sqlite3";
import { readFileSync, mkdirSync, existsSync, openSync, closeSync, renameSync } from "fs";
import { join, dirname, basename, resolve } from "path";
import { homedir } from "os";
import { fileURLToPath } from "url";
import { pathToProjectFolderName, escapeTableName } from "../utils/sanitization.js";
import { getCanonicalProjectPath } from "../utils/worktree.js";
import * as sqliteVec from "sqlite-vec";
import { MigrationManager, migrations } from "./migrations.js";
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Performance constants
const DEFAULT_CACHE_SIZE_KB = 64000; // 64MB cache
const DEFAULT_MMAP_SIZE = 1000000000; // 1GB memory-mapped I/O (safe default)
const PAGE_SIZE = 4096; // 4KB page size
const WAL_AUTOCHECKPOINT = 1000; // Checkpoint WAL after 1000 pages
const NEW_DB_FILE_NAME = ".cccmemory.db";
const LEGACY_DB_FILE_NAMES = [".claude-conversations-memory.db", ".codex-conversations-memory.db"];
/**
 * Configuration for opening the SQLite database. All fields optional;
 * unset fields fall back to the defaults resolved in resolveDbPath and the
 * performance constants above.
 */
export interface SQLiteConfig {
  // Explicit database file path; overrides path resolution when set
  dbPath?: string;
  // Project directory used to derive the per-project DB location (default: cwd)
  projectPath?: string;
  readOnly?: boolean;
  verbose?: boolean;
  // "single" = one DB in $HOME, "per-project" = DB per project folder
  dbMode?: "single" | "per-project";
  /** Memory-mapped I/O size in bytes (default: 1GB). Set to 0 to disable. */
  mmapSize?: number;
  /** Cache size in KB (default: 64MB) */
  cacheSizeKb?: number;
}
/**
 * Resolve the on-disk database path for the given config.
 *
 * Resolution order:
 *   1. Explicit path: config.dbPath or the CCCMEMORY_DB_PATH env var.
 *   2. "single" mode (the default): $HOME/.cccmemory.db.
 *   3. "per-project" mode: ~/.claude/projects/<folder>/.cccmemory.db, with a
 *      project-local fallback (<project>/.cccmemory/.cccmemory.db) used only
 *      when it already exists.
 *
 * Along the way, databases with legacy file names are renamed to the new
 * name, or used in place when renaming is impossible. Throws with actionable
 * messages when no readable/writable location can be found.
 */
function resolveDbPath(config: SQLiteConfig = {}): string {
  const projectPath = config.projectPath || process.cwd();
  // Worktrees resolve to their canonical repo path so all worktrees of a
  // project share one database.
  const canonicalPath = getCanonicalProjectPath(projectPath).canonicalPath;
  const projectFolderName = pathToProjectFolderName(canonicalPath);
  // Prefer $HOME (resolved to absolute) over os.homedir() when it is set.
  const homeDir = process.env.HOME ? resolve(process.env.HOME) : homedir();
  // Per-project location under ~/.claude/projects/<sanitized-name>/.
  const defaultPath = join(
    homeDir,
    ".claude",
    "projects",
    projectFolderName,
    NEW_DB_FILE_NAME
  );
  // Project-local fallback; never created here, only reused if present.
  const fallbackPath = join(canonicalPath, ".cccmemory", NEW_DB_FILE_NAME);
  const singleDbPath = join(homeDir, NEW_DB_FILE_NAME);
  // NOTE(review): env value is cast unchecked; any value other than "single"
  // falls through to the per-project branch — confirm this is intended.
  const dbMode = config.dbMode || (process.env.CCCMEMORY_DB_MODE as "single" | "per-project" | undefined) || "single";
  // If the caller asked for a legacy-named file, redirect to the new file
  // name in the same directory (migration below handles the rename).
  const normalizeRequestedPath = (requestedPath: string): string => {
    const requestedBase = basename(requestedPath);
    if (LEGACY_DB_FILE_NAMES.includes(requestedBase)) {
      return join(dirname(requestedPath), NEW_DB_FILE_NAME);
    }
    return requestedPath;
  };
  // Legacy-named siblings of targetPath (excluding targetPath itself).
  const getLegacyCandidates = (dir: string, targetPath: string): string[] => {
    return LEGACY_DB_FILE_NAMES
      .map((name) => join(dir, name))
      .filter((legacyPath) => legacyPath !== targetPath);
  };
  // Probe writability by opening in append mode. Side effect: creates the
  // parent directory and an empty file if missing — intentional, since the
  // probed path is the one returned and opened. Permission-style errors mean
  // "not writable"; anything else is unexpected and rethrown.
  const canCreateDbFile = (dbPath: string): boolean => {
    try {
      mkdirSync(dirname(dbPath), { recursive: true });
      const fd = openSync(dbPath, "a");
      closeSync(fd);
      return true;
    } catch (error) {
      const err = error as { code?: string };
      if (err.code === "EACCES" || err.code === "EPERM" || err.code === "EROFS") {
        return false;
      }
      throw error;
    }
  };
  // Probe read-write access to an EXISTING file ("r+" never creates it).
  const canWriteExistingDbFile = (dbPath: string): boolean => {
    try {
      const fd = openSync(dbPath, "r+");
      closeSync(fd);
      return true;
    } catch (error) {
      const err = error as { code?: string };
      if (
        err.code === "EACCES" ||
        err.code === "EPERM" ||
        err.code === "EROFS" ||
        err.code === "ENOENT"
      ) {
        return false;
      }
      throw error;
    }
  };
  // If targetPath is absent but a legacy-named DB exists beside it, rename
  // the legacy file into place. Returns the path that should actually be
  // opened: targetPath on success (or when nothing needed migrating), or the
  // legacy path when it must be used in place (read-only mode, or rename
  // failed but the legacy file is still writable).
  const maybeMigrateLegacyDb = (targetPath: string, readOnly: boolean): string => {
    if (existsSync(targetPath)) {
      return targetPath;
    }
    const legacyCandidates = getLegacyCandidates(dirname(targetPath), targetPath);
    const legacyPath = legacyCandidates.find((candidate) => existsSync(candidate));
    if (!legacyPath) {
      return targetPath;
    }
    if (readOnly) {
      // Read-only callers must not mutate the filesystem; open in place.
      console.error(
        `⚠️ Found legacy database at ${legacyPath}. Using legacy file in read-only mode.`
      );
      return legacyPath;
    }
    try {
      renameSync(legacyPath, targetPath);
      console.error(`✓ Migrated legacy database to ${targetPath}`);
      return targetPath;
    } catch (error) {
      const err = error as { code?: string };
      // EXDEV: cross-device rename; the others are permission/read-only FS.
      if (err.code === "EACCES" || err.code === "EPERM" || err.code === "EXDEV" || err.code === "EROFS") {
        const canWriteLegacy = canWriteExistingDbFile(legacyPath);
        if (canWriteLegacy) {
          console.error(
            `⚠️ Failed to rename legacy database (${legacyPath} → ${targetPath}). Using legacy file instead.`
          );
          return legacyPath;
        }
        throw new Error(
          `Legacy database found at ${legacyPath} but cannot be migrated or written.\n` +
          `Fix permissions for ${dirname(legacyPath)} or set CCCMEMORY_DB_PATH to a writable file path.`
        );
      }
      throw error;
    }
  };
  // --- 1. Explicit path requested ---
  const requestedPath = config.dbPath || process.env.CCCMEMORY_DB_PATH;
  if (requestedPath) {
    const normalizedPath = normalizeRequestedPath(requestedPath);
    if (config.readOnly) {
      const migratedPath = maybeMigrateLegacyDb(normalizedPath, true);
      if (existsSync(migratedPath)) {
        return migratedPath;
      }
      throw new Error(
        `Database file not found at ${migratedPath} (read-only mode).\n` +
        `Provide a valid path via CCCMEMORY_DB_PATH or config.dbPath.`
      );
    }
    const migratedPath = maybeMigrateLegacyDb(normalizedPath, false);
    if (migratedPath !== normalizedPath) {
      // Rename failed; the (writable) legacy file is the one to open.
      return migratedPath;
    }
    if (canCreateDbFile(normalizedPath)) {
      return normalizedPath;
    }
    throw new Error(
      `Database path is not writable: ${normalizedPath}\n` +
      "Fix permissions or set CCCMEMORY_DB_PATH to a writable file path.\n" +
      "If you're running under Codex or Claude with a locked home dir (~/.claude or ~/.codex), " +
      "you must set CCCMEMORY_DB_PATH explicitly."
    );
  }
  // --- 2. Single-database mode (default): one DB in the home directory ---
  if (dbMode === "single") {
    if (config.readOnly) {
      const migratedSingle = maybeMigrateLegacyDb(singleDbPath, true);
      if (existsSync(migratedSingle)) {
        return migratedSingle;
      }
      throw new Error(
        `Database file not found at ${singleDbPath} (read-only mode).\n` +
        "Create the database in write mode, or set CCCMEMORY_DB_PATH to an existing file."
      );
    }
    const migratedSingle = maybeMigrateLegacyDb(singleDbPath, false);
    if (migratedSingle !== singleDbPath) {
      return migratedSingle;
    }
    if (canCreateDbFile(singleDbPath)) {
      return singleDbPath;
    }
    throw new Error(
      `Unable to create database at ${singleDbPath}.\n` +
      "Fix permissions or set CCCMEMORY_DB_PATH to a writable file path.\n" +
      "If you're running under Codex or Claude with a locked home dir (~/.claude or ~/.codex), " +
      "you must set CCCMEMORY_DB_PATH explicitly."
    );
  }
  // --- 3. Per-project mode: ~/.claude/projects/<name>/, then fallback ---
  if (config.readOnly) {
    const migratedDefault = maybeMigrateLegacyDb(defaultPath, true);
    if (existsSync(migratedDefault)) {
      return migratedDefault;
    }
    if (existsSync(fallbackPath)) {
      console.error(
        `⚠️ Using existing project-local database at ${fallbackPath}. ` +
        "No new files are created there automatically. " +
        "Set CCCMEMORY_DB_PATH to make this explicit."
      );
      return fallbackPath;
    }
    throw new Error(
      `Database file not found at ${defaultPath} (read-only mode).\n` +
      "Create the database in write mode, or set CCCMEMORY_DB_PATH to an existing file."
    );
  }
  const migratedDefault = maybeMigrateLegacyDb(defaultPath, false);
  if (migratedDefault !== defaultPath) {
    return migratedDefault;
  }
  if (canCreateDbFile(defaultPath)) {
    return defaultPath;
  }
  // Home dir not writable: fall back to an already-existing project-local DB.
  if (existsSync(fallbackPath) && canWriteExistingDbFile(fallbackPath)) {
    console.error(
      `⚠️ Using existing project-local database at ${fallbackPath}. ` +
      "No new files are created there automatically. " +
      "Set CCCMEMORY_DB_PATH to make this explicit."
    );
    return fallbackPath;
  }
  throw new Error(
    `Unable to create database in ${dirname(defaultPath)}.\n` +
    `Fix permissions for ${dirname(defaultPath)} or set CCCMEMORY_DB_PATH to a writable file path.\n` +
    "If you're running under Codex or Claude with a locked home dir (~/.claude or ~/.codex), " +
    "you must set CCCMEMORY_DB_PATH explicitly."
  );
}
/**
 * Owns the better-sqlite3 connection for one database file: resolves the
 * path, loads the sqlite-vec extension, applies performance PRAGMAs, and
 * (in write mode) initializes or migrates the schema.
 */
export class SQLiteManager {
  private db: Database.Database;
  private dbPath: string;
  private isReadOnly: boolean;
  private mmapSize: number;
  private cacheSizeKb: number;
  constructor(config: SQLiteConfig = {}) {
    this.mmapSize = config.mmapSize ?? DEFAULT_MMAP_SIZE;
    this.cacheSizeKb = config.cacheSizeKb ?? DEFAULT_CACHE_SIZE_KB;
    // Determine database location
    this.dbPath = resolveDbPath(config);
    this.isReadOnly = config.readOnly || false;
    // Ensure directory exists (only in write mode)
    if (!this.isReadOnly) {
      this.ensureDirectoryExists();
    } else {
      // In read-only mode, verify the database file exists
      if (!existsSync(this.dbPath)) {
        throw new Error(`Database file not found: ${this.dbPath}`);
      }
    }
    // Initialize database
    this.db = new Database(this.dbPath, {
      readonly: this.isReadOnly,
      verbose: config.verbose ? console.log : undefined,
    });
    // Load sqlite-vec extension
    this.loadVectorExtension();
    // Apply optimized PRAGMAs
    this.optimizeDatabase();
    // Initialize schema if needed
    if (!this.isReadOnly) {
      this.initializeSchema();
    }
  }
  /** Create the DB file's parent directory if it does not exist yet. */
  private ensureDirectoryExists(): void {
    const dir = dirname(this.dbPath);
    if (!existsSync(dir)) {
      mkdirSync(dir, { recursive: true });
    }
  }
  /**
   * Load sqlite-vec extension for vector search.
   * Failure is non-fatal: vector search degrades to BLOB-based fallback.
   */
  private loadVectorExtension(): void {
    try {
      sqliteVec.load(this.db);
      console.error("✓ sqlite-vec extension loaded");
      // Note: Vec tables will be created when embedding dimensions are known
    } catch (error) {
      console.error("⚠️ Failed to load sqlite-vec extension:", (error as Error).message);
      console.error(" Vector search will use BLOB fallback");
    }
  }
  /**
   * Create sqlite-vec virtual tables for vector search with specified dimensions
   * Public method called when embedding provider dimensions are known.
   * Errors are swallowed (logged) so callers can continue with BLOB storage.
   */
  createVecTablesWithDimensions(dimensions: number): void {
    // SECURITY: Validate dimensions to prevent SQL injection
    if (!Number.isInteger(dimensions) || dimensions <= 0 || dimensions > 10000) {
      throw new Error(`Invalid dimensions: must be a positive integer <= 10000, got ${typeof dimensions === 'number' ? dimensions : 'non-number'}`);
    }
    try {
      // Check if ALL vec tables exist - only skip if all three exist
      let allTablesExist = true;
      const vecTables = ['vec_message_embeddings', 'vec_decision_embeddings', 'vec_mistake_embeddings'];
      for (const table of vecTables) {
        try {
          // Probing with a SELECT (not sqlite_master) also verifies the
          // virtual table is usable with the currently loaded extension.
          this.db.prepare(`SELECT 1 FROM ${table} LIMIT 1`).get();
        } catch {
          allTablesExist = false;
          break;
        }
      }
      if (allTablesExist) {
        console.error(`✓ sqlite-vec virtual tables already exist`);
        return;
      }
      // Create message embeddings virtual table
      // dimensions is validated above to be a safe integer
      this.db.exec(`
        CREATE VIRTUAL TABLE IF NOT EXISTS vec_message_embeddings
        USING vec0(
          id TEXT PRIMARY KEY,
          embedding float[${dimensions}]
        )
      `);
      // Create decision embeddings virtual table
      this.db.exec(`
        CREATE VIRTUAL TABLE IF NOT EXISTS vec_decision_embeddings
        USING vec0(
          id TEXT PRIMARY KEY,
          embedding float[${dimensions}]
        )
      `);
      // Create mistake embeddings virtual table
      this.db.exec(`
        CREATE VIRTUAL TABLE IF NOT EXISTS vec_mistake_embeddings
        USING vec0(
          id TEXT PRIMARY KEY,
          embedding float[${dimensions}]
        )
      `);
      console.error(`✓ sqlite-vec virtual tables created (${dimensions} dimensions)`);
    } catch (error) {
      console.error("⚠️ Failed to create vec virtual tables:", (error as Error).message);
      console.error(" Will fall back to BLOB storage");
    }
  }
  /**
   * Apply performance optimizations (PRAGMAs).
   */
  private optimizeDatabase(): void {
    // Skip write-related PRAGMAs in read-only mode
    if (!this.isReadOnly) {
      // WAL mode for concurrent reads during writes
      // If WAL cannot be enabled (e.g., sandboxed filesystem), fall back to MEMORY
      try {
        this.db.pragma("journal_mode = WAL");
      } catch (error) {
        console.error("⚠️ Failed to enable WAL mode, falling back to MEMORY journal:", (error as Error).message);
        this.db.pragma("journal_mode = MEMORY");
      }
      // NORMAL synchronous for balance between safety and speed
      this.db.pragma("synchronous = NORMAL");
      // 4KB page size (optimal for most systems)
      // NOTE(review): page_size only takes effect on a brand-new or vacuumed
      // DB; on an existing database this is a no-op — confirm intent.
      this.db.pragma(`page_size = ${PAGE_SIZE}`);
      // Auto-checkpoint WAL after 1000 pages
      this.db.pragma(`wal_autocheckpoint = ${WAL_AUTOCHECKPOINT}`);
      // Analysis for query optimization
      this.db.pragma("optimize");
    }
    // These PRAGMAs are safe in read-only mode
    // Configurable cache for better performance (default 64MB)
    // Negative cache_size means "size in KiB" rather than pages.
    this.db.pragma(`cache_size = -${this.cacheSizeKb}`);
    // Store temp tables in memory
    this.db.pragma("temp_store = MEMORY");
    // Configurable memory-mapped I/O (default 1GB, safe for most systems)
    if (this.mmapSize > 0) {
      this.db.pragma(`mmap_size = ${this.mmapSize}`);
    }
    // Enable foreign key constraints
    this.db.pragma("foreign_keys = ON");
  }
  /**
   * Initialize database schema from schema.sql.
   *
   * Three cases:
   *  - No schema_version table: fresh DB (or legacy DB with incompatible
   *    tables, which is dropped first), then schema.sql is executed and the
   *    latest migration version is recorded.
   *  - schema_version exists but core columns are missing (isLegacySchema):
   *    drop everything and rebuild from schema.sql.
   *  - Otherwise: apply any pending migrations.
   */
  private initializeSchema(): void {
    try {
      // Check if schema is already initialized
      const schemaVersionExists = this.db
        .prepare(
          "SELECT name FROM sqlite_master WHERE type='table' AND name='schema_version'"
        )
        .all();
      if (schemaVersionExists.length === 0) {
        // Check if this is a legacy database with incompatible schema
        const conversationsExists = this.db
          .prepare(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='conversations'"
          )
          .all();
        if (conversationsExists.length > 0) {
          // Check if conversations table has expected columns
          const conversationColumns = this.db
            .prepare("PRAGMA table_info(conversations)")
            .all() as Array<{ name: string }>;
          const hasSourceType = conversationColumns.some(
            (col) => col.name === "source_type"
          );
          const hasMessageCount = conversationColumns.some(
            (col) => col.name === "message_count"
          );
          const hasProjectId = conversationColumns.some(
            (col) => col.name === "project_id"
          );
          const hasExternalId = conversationColumns.some(
            (col) => col.name === "external_id"
          );
          const messagesExists = this.db
            .prepare(
              "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
            )
            .all();
          const messageColumns = messagesExists.length > 0
            ? (this.db.prepare("PRAGMA table_info(messages)").all() as Array<{ name: string }>)
            : [];
          const messageHasExternalId = messageColumns.some(
            (col) => col.name === "external_id"
          );
          if (!hasSourceType || !hasMessageCount || !hasProjectId || !hasExternalId || !messageHasExternalId) {
            // Legacy database with incompatible schema - drop and recreate
            console.error(
              "⚠️ Legacy database detected with incompatible schema. Recreating..."
            );
            this.dropAllTables();
            console.error("Legacy tables dropped");
          }
        }
        console.error("Initializing database schema...");
        // Read and execute schema.sql
        const schemaPath = join(__dirname, "schema.sql");
        const schema = readFileSync(schemaPath, "utf-8");
        // Execute the entire schema at once
        // SQLite can handle multiple statements in a single exec() call
        this.db.exec(schema);
        const latestVersion = migrations[migrations.length - 1]?.version ?? 1;
        const latestDescription = migrations[migrations.length - 1]?.description ?? "Initial schema";
        // Record schema version (schema.sql already includes latest tables)
        this.db
          .prepare(
            "INSERT INTO schema_version (version, applied_at, description) VALUES (?, ?, ?)"
          )
          .run(latestVersion, Date.now(), latestDescription);
        console.error("Database schema initialized successfully");
      } else {
        // If schema_version exists, verify core columns match expected schema
        if (this.isLegacySchema()) {
          console.error(
            "⚠️ Legacy database detected with incompatible schema. Recreating..."
          );
          this.dropAllTables();
          console.error("Legacy tables dropped");
          console.error("Initializing database schema...");
          const schemaPath = join(__dirname, "schema.sql");
          const schema = readFileSync(schemaPath, "utf-8");
          this.db.exec(schema);
          const latestVersion = migrations[migrations.length - 1]?.version ?? 1;
          const latestDescription = migrations[migrations.length - 1]?.description ?? "Initial schema";
          this.db
            .prepare(
              "INSERT INTO schema_version (version, applied_at, description) VALUES (?, ?, ?)"
            )
            .run(latestVersion, Date.now(), latestDescription);
          console.error("Database schema initialized successfully");
        } else {
          // Apply migrations if needed
          this.applyMigrations();
        }
      }
    } catch (error) {
      console.error("Error initializing schema:", error);
      throw error;
    }
  }
  /**
   * True when a conversations table exists but is missing columns the current
   * schema requires (project_id / external_id on conversations, external_id
   * on messages). A DB without a conversations table is NOT legacy.
   */
  private isLegacySchema(): boolean {
    const conversationsExists = this.db
      .prepare(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='conversations'"
      )
      .all();
    if (conversationsExists.length === 0) {
      return false;
    }
    const conversationColumns = this.db
      .prepare("PRAGMA table_info(conversations)")
      .all() as Array<{ name: string }>;
    const hasProjectId = conversationColumns.some((col) => col.name === "project_id");
    const hasExternalId = conversationColumns.some((col) => col.name === "external_id");
    const messagesExists = this.db
      .prepare(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
      )
      .all();
    const messageColumns = messagesExists.length > 0
      ? (this.db.prepare("PRAGMA table_info(messages)").all() as Array<{ name: string }>)
      : [];
    const messageHasExternalId = messageColumns.some((col) => col.name === "external_id");
    return !hasProjectId || !hasExternalId || !messageHasExternalId;
  }
  /** Drop every user table (best-effort; SQLite-internal tables are kept). */
  private dropAllTables(): void {
    const allTables = this.db
      .prepare(
        "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'"
      )
      .all() as Array<{ name: string }>;
    for (const table of allTables) {
      try {
        const safeName = escapeTableName(table.name);
        this.db.exec(`DROP TABLE IF EXISTS "${safeName}"`);
      } catch (_e) {
        // Ignore errors when dropping (virtual tables may have dependencies)
      }
    }
  }
  /**
   * Apply database migrations for existing databases
   */
  private applyMigrations(): void {
    // Ensure schema_version table exists (legacy DBs may not have it)
    this.db.exec(`
      CREATE TABLE IF NOT EXISTS schema_version (
        version INTEGER PRIMARY KEY,
        applied_at INTEGER NOT NULL,
        description TEXT,
        checksum TEXT
      )
    `);
    const manager = new MigrationManager(this);
    manager.applyPendingMigrations();
  }
  /**
   * Get the underlying database instance
   */
  getDatabase(): Database.Database {
    return this.db;
  }
  /**
   * Execute a transaction. Commits when fn returns; rolls back if it throws.
   */
  transaction<T>(fn: () => T): T {
    const tx = this.db.transaction(fn);
    return tx();
  }
  /**
   * Prepare a statement
   */
  prepare<T extends unknown[] = unknown[]>(sql: string): Database.Statement<T> {
    return this.db.prepare<T>(sql);
  }
  /**
   * Execute SQL directly
   */
  exec(sql: string): void {
    this.db.exec(sql);
  }
  /**
   * Close the database connection (idempotent: no-op if already closed).
   */
  close(): void {
    if (this.db.open) {
      this.db.close();
    }
  }
  /**
   * Get database statistics.
   * fileSize is computed as page_count * page_size (main DB only, excludes
   * the WAL file). wal.size is the WAL frame count reported by a PASSIVE
   * checkpoint, or null when unavailable.
   */
  getStats(): {
    dbPath: string;
    fileSize: number;
    pageCount: number;
    pageSize: number;
    wal: { enabled: boolean; size: number | null };
  } {
    const pageCount = this.db.pragma("page_count", { simple: true }) as number;
    const pageSize = this.db.pragma("page_size", { simple: true }) as number;
    const journalMode = this.db.pragma("journal_mode", {
      simple: true,
    }) as string;
    let walSize: number | null = null;
    if (journalMode === "wal") {
      try {
        const walStat = this.db
          .prepare("SELECT * FROM pragma_wal_checkpoint('PASSIVE')")
          .get() as { log?: number } | undefined;
        walSize = walStat?.log ?? null;
      } catch (_e) {
        // WAL not available
      }
    }
    return {
      dbPath: this.dbPath,
      fileSize: pageCount * pageSize,
      pageCount,
      pageSize,
      wal: {
        enabled: journalMode === "wal",
        size: walSize,
      },
    };
  }
  /**
   * Get database file path
   */
  getDbPath(): string {
    return this.dbPath;
  }
  /**
   * Vacuum the database to reclaim space
   */
  vacuum(): void {
    if (this.isReadOnly) {
      throw new Error("Cannot vacuum database in read-only mode");
    }
    this.db.exec("VACUUM");
  }
  /**
   * Analyze the database for query optimization
   */
  analyze(): void {
    if (this.isReadOnly) {
      throw new Error("Cannot analyze database in read-only mode");
    }
    this.db.exec("ANALYZE");
  }
  /**
   * Checkpoint the WAL file (TRUNCATE: flush and reset the WAL to zero bytes).
   */
  checkpoint(): void {
    if (this.isReadOnly) {
      throw new Error("Cannot checkpoint database in read-only mode");
    }
    this.db.pragma("wal_checkpoint(TRUNCATE)");
  }
  /**
   * Get current schema version (0 when the schema_version table is missing
   * or unreadable).
   */
  getSchemaVersion(): number {
    try {
      const result = this.db
        .prepare("SELECT MAX(version) as version FROM schema_version")
        .get() as { version: number } | undefined;
      return result?.version || 0;
    } catch (_error) {
      return 0;
    }
  }
}
// Instance cache keyed by dbPath to support multiple databases.
// Module-level singleton map; entries are closed and evicted by the
// resetSQLiteManager* helpers below.
const instances = new Map<string, SQLiteManager>();
/**
 * Get a SQLiteManager instance for the given config.
 * Instances are cached by dbPath to avoid re-opening the same database.
 *
 * @param config Optional connection/path configuration.
 * @returns A cached manager when one exists for the resolved path, otherwise
 *          a newly constructed (and cached) one.
 */
export function getSQLiteManager(config?: SQLiteConfig): SQLiteManager {
  const resolvedPath = resolveDbPath(config);
  // Check if we already have an instance for this path
  const existing = instances.get(resolvedPath);
  if (existing) {
    return existing;
  }
  // Create new instance and cache it under the key we look up by, so the
  // next call with the same config is guaranteed a cache hit.
  const instance = new SQLiteManager({ ...config, dbPath: resolvedPath });
  instances.set(resolvedPath, instance);
  // The constructor re-resolves the path and may settle on a different file
  // (e.g. a legacy-named DB that could not be renamed). Previously the
  // instance was cached ONLY under getDbPath(), so when the paths diverged
  // every call missed the cache and opened a duplicate handle. Keep the
  // actual path as an alias so resetSQLiteManagerByPath(getDbPath()) still
  // finds the instance.
  const actualPath = instance.getDbPath();
  if (actualPath !== resolvedPath) {
    instances.set(actualPath, instance);
  }
  return instance;
}
/**
 * Reset all cached SQLiteManager instances.
 * Closes every cached connection and empties the cache.
 * Useful for testing or when switching projects.
 */
export function resetSQLiteManager(): void {
  instances.forEach((manager) => manager.close());
  instances.clear();
}
/**
* Reset a specific SQLiteManager instance by path.
*/
export function resetSQLiteManagerByPath(dbPath: string): void {
const instance = instances.get(dbPath);
if (instance) {
instance.close();
instances.delete(dbPath);
}
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/storage/migrations.ts | TypeScript | /**
* Database migration system
* Versioned schema updates for a single database
*/
import { SQLiteManager } from "./SQLiteManager.js";
import { createHash } from "crypto";
/** A single versioned schema migration applied by MigrationManager. */
export interface Migration {
  /** Target schema version; must be unique and strictly increasing. */
  version: number;
  /** Human-readable summary recorded in the schema_version table. */
  description: string;
  up: string; // SQL to apply migration
  down?: string; // SQL to rollback migration (optional)
  checksum?: string; // Verify migration integrity
}
/**
 * Ordered list of schema migrations. MigrationManager applies, in ascending
 * version order, every entry whose version exceeds the DB's current
 * schema_version. Fresh databases get the full schema from schema.sql and
 * are stamped with the latest version here, so `up` bodies only need to
 * cover the delta for pre-existing databases.
 */
export const migrations: Migration[] = [
  {
    version: 1,
    description: "Single-DB schema (projects + sources + scoped entities)",
    up: `
      -- Schema is created by schema.sql during initialization
    `,
  },
  {
    // v2: tags + tag associations, memory quality columns, maintenance tables.
    version: 2,
    description: "Phase 1: Tag Management, Memory Confidence, Cleanup/Maintenance",
    up: `
      -- ==================================================
      -- TAG MANAGEMENT TABLES
      -- ==================================================
      -- Centralized tag registry
      CREATE TABLE IF NOT EXISTS tags (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        project_path TEXT, -- NULL for global tags
        description TEXT, -- Optional tag description
        color TEXT, -- Optional UI color hint
        created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000),
        updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000),
        UNIQUE(name, project_path)
      );
      CREATE INDEX IF NOT EXISTS idx_tags_name ON tags(name);
      CREATE INDEX IF NOT EXISTS idx_tags_project ON tags(project_path);
      -- Polymorphic tag associations
      CREATE TABLE IF NOT EXISTS item_tags (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        tag_id INTEGER NOT NULL,
        item_type TEXT NOT NULL, -- 'memory', 'decision', 'pattern', 'session', 'mistake'
        item_id INTEGER NOT NULL,
        created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000),
        FOREIGN KEY (tag_id) REFERENCES tags(id) ON DELETE CASCADE,
        UNIQUE(tag_id, item_type, item_id)
      );
      CREATE INDEX IF NOT EXISTS idx_item_tags_tag ON item_tags(tag_id);
      CREATE INDEX IF NOT EXISTS idx_item_tags_item ON item_tags(item_type, item_id);
      -- View: Tag usage statistics
      CREATE VIEW IF NOT EXISTS tag_stats AS
      SELECT
        t.id,
        t.name,
        t.project_path,
        t.description,
        t.color,
        t.created_at,
        t.updated_at,
        COUNT(it.id) as usage_count,
        MAX(it.created_at) as last_used_at,
        GROUP_CONCAT(DISTINCT it.item_type) as used_in_types
      FROM tags t
      LEFT JOIN item_tags it ON t.id = it.tag_id
      GROUP BY t.id;
      -- ==================================================
      -- MEMORY CONFIDENCE/QUALITY COLUMNS
      -- ==================================================
      -- Add confidence level to working_memory
      ALTER TABLE working_memory ADD COLUMN confidence TEXT DEFAULT 'likely';
      -- Values: uncertain, likely, confirmed, verified
      -- Add importance level to working_memory
      ALTER TABLE working_memory ADD COLUMN importance TEXT DEFAULT 'normal';
      -- Values: low, normal, high, critical
      -- Add pinned flag to working_memory
      ALTER TABLE working_memory ADD COLUMN pinned INTEGER DEFAULT 0;
      -- Add archived flag to working_memory
      ALTER TABLE working_memory ADD COLUMN archived INTEGER DEFAULT 0;
      -- Add archive reason to working_memory
      ALTER TABLE working_memory ADD COLUMN archive_reason TEXT;
      -- Add source attribution to working_memory
      ALTER TABLE working_memory ADD COLUMN source TEXT;
      -- Free text: "user stated", "extracted from session X", "confirmed in production"
      -- Add source session link to working_memory
      ALTER TABLE working_memory ADD COLUMN source_session_id TEXT;
      -- Add verification timestamp to working_memory
      ALTER TABLE working_memory ADD COLUMN verified_at INTEGER;
      -- Add verifier info to working_memory
      ALTER TABLE working_memory ADD COLUMN verified_by TEXT;
      -- Indexes for new working_memory fields
      CREATE INDEX IF NOT EXISTS idx_working_memory_confidence ON working_memory(confidence);
      CREATE INDEX IF NOT EXISTS idx_working_memory_importance ON working_memory(importance);
      CREATE INDEX IF NOT EXISTS idx_working_memory_pinned ON working_memory(pinned);
      CREATE INDEX IF NOT EXISTS idx_working_memory_archived ON working_memory(archived);
      -- ==================================================
      -- CLEANUP/MAINTENANCE TABLES
      -- ==================================================
      -- Maintenance operation log
      CREATE TABLE IF NOT EXISTS maintenance_log (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        task_type TEXT NOT NULL,
        started_at INTEGER NOT NULL,
        completed_at INTEGER,
        status TEXT NOT NULL DEFAULT 'running', -- running, completed, failed
        items_processed INTEGER DEFAULT 0,
        items_affected INTEGER DEFAULT 0,
        details TEXT, -- JSON with task-specific details
        error_message TEXT
      );
      CREATE INDEX IF NOT EXISTS idx_maintenance_log_type ON maintenance_log(task_type);
      CREATE INDEX IF NOT EXISTS idx_maintenance_log_time ON maintenance_log(started_at);
      CREATE INDEX IF NOT EXISTS idx_maintenance_log_status ON maintenance_log(status);
      -- Scheduled maintenance tasks
      CREATE TABLE IF NOT EXISTS scheduled_maintenance (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        task_type TEXT NOT NULL,
        schedule TEXT NOT NULL, -- cron expression or 'daily', 'weekly', 'monthly'
        options TEXT, -- JSON task options
        enabled INTEGER DEFAULT 1,
        last_run_at INTEGER,
        next_run_at INTEGER,
        created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000)
      );
      CREATE INDEX IF NOT EXISTS idx_scheduled_maintenance_type ON scheduled_maintenance(task_type);
      CREATE INDEX IF NOT EXISTS idx_scheduled_maintenance_enabled ON scheduled_maintenance(enabled);
      CREATE INDEX IF NOT EXISTS idx_scheduled_maintenance_next_run ON scheduled_maintenance(next_run_at);
    `,
    down: `
      -- Rollback Phase 1 changes
      -- Drop tag management tables
      DROP VIEW IF EXISTS tag_stats;
      DROP TABLE IF EXISTS item_tags;
      DROP TABLE IF EXISTS tags;
      -- Drop maintenance tables
      DROP TABLE IF EXISTS scheduled_maintenance;
      DROP TABLE IF EXISTS maintenance_log;
      -- Note: SQLite doesn't support DROP COLUMN, so working_memory columns
      -- will remain but be unused after rollback. A full schema rebuild
      -- would be required to remove them completely.
    `,
  },
  {
    // v3: chunk-level embeddings for long messages + tunable search config.
    version: 3,
    description: "Search Quality: Chunk embeddings for long messages",
    up: `
      -- ==================================================
      -- CHUNK EMBEDDINGS TABLE
      -- Stores embeddings for text chunks from long messages
      -- ==================================================
      CREATE TABLE IF NOT EXISTS chunk_embeddings (
        id TEXT PRIMARY KEY, -- Format: chunk_<msg_id>_<index>
        message_id INTEGER NOT NULL,
        chunk_index INTEGER NOT NULL,
        total_chunks INTEGER NOT NULL,
        content TEXT NOT NULL,
        start_offset INTEGER NOT NULL,
        end_offset INTEGER NOT NULL,
        embedding BLOB NOT NULL,
        strategy TEXT NOT NULL, -- 'sentence', 'sliding_window', etc.
        model_name TEXT,
        estimated_tokens INTEGER,
        created_at INTEGER NOT NULL,
        FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
      );
      CREATE INDEX IF NOT EXISTS idx_chunk_embed_msg ON chunk_embeddings(message_id);
      CREATE INDEX IF NOT EXISTS idx_chunk_embed_strategy ON chunk_embeddings(strategy);
      CREATE INDEX IF NOT EXISTS idx_chunk_embed_created ON chunk_embeddings(created_at);
      -- Search configuration table for tuning parameters
      CREATE TABLE IF NOT EXISTS search_config (
        key TEXT PRIMARY KEY,
        value TEXT NOT NULL,
        description TEXT,
        updated_at INTEGER NOT NULL
      );
      -- Insert default search configuration
      INSERT OR IGNORE INTO search_config (key, value, description, updated_at) VALUES
        ('chunking_enabled', 'true', 'Enable text chunking for long messages', strftime('%s', 'now') * 1000),
        ('chunk_size', '450', 'Target chunk size in tokens', strftime('%s', 'now') * 1000),
        ('chunk_overlap', '0.1', 'Overlap between chunks as fraction', strftime('%s', 'now') * 1000),
        ('min_similarity', '0.30', 'Minimum similarity threshold for results', strftime('%s', 'now') * 1000),
        ('rerank_enabled', 'true', 'Enable hybrid re-ranking with FTS', strftime('%s', 'now') * 1000),
        ('rerank_weight', '0.7', 'Weight for vector search in re-ranking (0-1)', strftime('%s', 'now') * 1000);
    `,
    down: `
      DROP TABLE IF EXISTS search_config;
      DROP TABLE IF EXISTS chunk_embeddings;
    `,
  },
  {
    // v4: methodology/research/pattern tracking with embeddings + FTS.
    version: 4,
    description: "Phase 9: Methodology and Research Tracking",
    up: `
      -- ==================================================
      -- METHODOLOGIES TABLE
      -- Tracks how AI solved problems (approach, steps, tools)
      -- ==================================================
      CREATE TABLE IF NOT EXISTS methodologies (
        id TEXT PRIMARY KEY,
        conversation_id INTEGER NOT NULL,
        start_message_id INTEGER NOT NULL,
        end_message_id INTEGER NOT NULL,
        problem_statement TEXT NOT NULL,
        approach TEXT NOT NULL, -- exploration, research, implementation, debugging, refactoring, testing
        steps_taken TEXT NOT NULL, -- JSON array of MethodologyStep
        tools_used TEXT NOT NULL, -- JSON array of tool names
        files_involved TEXT NOT NULL, -- JSON array of file paths
        outcome TEXT NOT NULL, -- success, partial, failed, ongoing
        what_worked TEXT,
        what_didnt_work TEXT,
        started_at INTEGER NOT NULL,
        ended_at INTEGER NOT NULL,
        created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000),
        FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
        FOREIGN KEY (start_message_id) REFERENCES messages(id) ON DELETE CASCADE,
        FOREIGN KEY (end_message_id) REFERENCES messages(id) ON DELETE CASCADE
      );
      CREATE INDEX IF NOT EXISTS idx_methodology_conv ON methodologies(conversation_id);
      CREATE INDEX IF NOT EXISTS idx_methodology_approach ON methodologies(approach);
      CREATE INDEX IF NOT EXISTS idx_methodology_outcome ON methodologies(outcome);
      CREATE INDEX IF NOT EXISTS idx_methodology_started ON methodologies(started_at);
      -- ==================================================
      -- RESEARCH FINDINGS TABLE
      -- Tracks discoveries made during exploration/research
      -- ==================================================
      CREATE TABLE IF NOT EXISTS research_findings (
        id TEXT PRIMARY KEY,
        conversation_id INTEGER NOT NULL,
        message_id INTEGER NOT NULL,
        topic TEXT NOT NULL,
        discovery TEXT NOT NULL,
        source_type TEXT NOT NULL, -- code, documentation, web, experimentation, user_input
        source_reference TEXT, -- file path, URL, etc.
        relevance TEXT NOT NULL, -- high, medium, low
        confidence TEXT NOT NULL, -- verified, likely, uncertain
        related_to TEXT NOT NULL, -- JSON array of related files/components
        timestamp INTEGER NOT NULL,
        created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000),
        FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
        FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
      );
      CREATE INDEX IF NOT EXISTS idx_research_conv ON research_findings(conversation_id);
      CREATE INDEX IF NOT EXISTS idx_research_topic ON research_findings(topic);
      CREATE INDEX IF NOT EXISTS idx_research_source ON research_findings(source_type);
      CREATE INDEX IF NOT EXISTS idx_research_relevance ON research_findings(relevance);
      CREATE INDEX IF NOT EXISTS idx_research_timestamp ON research_findings(timestamp);
      -- ==================================================
      -- SOLUTION PATTERNS TABLE
      -- Tracks reusable solution patterns
      -- ==================================================
      CREATE TABLE IF NOT EXISTS solution_patterns (
        id TEXT PRIMARY KEY,
        conversation_id INTEGER NOT NULL,
        message_id INTEGER NOT NULL,
        problem_category TEXT NOT NULL, -- error-handling, performance, auth, etc.
        problem_description TEXT NOT NULL,
        solution_summary TEXT NOT NULL,
        solution_steps TEXT NOT NULL, -- JSON array of step strings
        code_pattern TEXT, -- Code snippet if applicable
        technology TEXT NOT NULL, -- JSON array of technologies used
        prerequisites TEXT NOT NULL, -- JSON array of prerequisites
        applies_when TEXT NOT NULL,
        avoid_when TEXT,
        applied_to_files TEXT NOT NULL, -- JSON array of file paths
        effectiveness TEXT NOT NULL, -- excellent, good, moderate, poor
        timestamp INTEGER NOT NULL,
        created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000),
        FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
        FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
      );
      CREATE INDEX IF NOT EXISTS idx_pattern_conv ON solution_patterns(conversation_id);
      CREATE INDEX IF NOT EXISTS idx_pattern_category ON solution_patterns(problem_category);
      CREATE INDEX IF NOT EXISTS idx_pattern_effectiveness ON solution_patterns(effectiveness);
      CREATE INDEX IF NOT EXISTS idx_pattern_timestamp ON solution_patterns(timestamp);
      -- ==================================================
      -- EMBEDDING TABLES FOR SEMANTIC SEARCH
      -- ==================================================
      CREATE TABLE IF NOT EXISTS methodology_embeddings (
        id TEXT PRIMARY KEY,
        methodology_id TEXT NOT NULL,
        embedding BLOB NOT NULL,
        created_at INTEGER NOT NULL,
        FOREIGN KEY (methodology_id) REFERENCES methodologies(id) ON DELETE CASCADE
      );
      CREATE INDEX IF NOT EXISTS idx_methodology_embed ON methodology_embeddings(methodology_id);
      CREATE TABLE IF NOT EXISTS research_embeddings (
        id TEXT PRIMARY KEY,
        research_id TEXT NOT NULL,
        embedding BLOB NOT NULL,
        created_at INTEGER NOT NULL,
        FOREIGN KEY (research_id) REFERENCES research_findings(id) ON DELETE CASCADE
      );
      CREATE INDEX IF NOT EXISTS idx_research_embed ON research_embeddings(research_id);
      CREATE TABLE IF NOT EXISTS pattern_embeddings (
        id TEXT PRIMARY KEY,
        pattern_id TEXT NOT NULL,
        embedding BLOB NOT NULL,
        created_at INTEGER NOT NULL,
        FOREIGN KEY (pattern_id) REFERENCES solution_patterns(id) ON DELETE CASCADE
      );
      CREATE INDEX IF NOT EXISTS idx_pattern_embed ON pattern_embeddings(pattern_id);
      -- ==================================================
      -- FTS TABLES FOR KEYWORD SEARCH
      -- ==================================================
      CREATE VIRTUAL TABLE IF NOT EXISTS methodologies_fts USING fts5(
        id UNINDEXED,
        problem_statement,
        what_worked,
        what_didnt_work
      );
      CREATE VIRTUAL TABLE IF NOT EXISTS research_fts USING fts5(
        id UNINDEXED,
        topic,
        discovery,
        source_reference
      );
      CREATE VIRTUAL TABLE IF NOT EXISTS patterns_fts USING fts5(
        id UNINDEXED,
        problem_description,
        solution_summary,
        applies_when
      );
    `,
    down: `
      DROP TABLE IF EXISTS patterns_fts;
      DROP TABLE IF EXISTS research_fts;
      DROP TABLE IF EXISTS methodologies_fts;
      DROP TABLE IF EXISTS pattern_embeddings;
      DROP TABLE IF EXISTS research_embeddings;
      DROP TABLE IF EXISTS methodology_embeddings;
      DROP TABLE IF EXISTS solution_patterns;
      DROP TABLE IF EXISTS research_findings;
      DROP TABLE IF EXISTS methodologies;
    `,
  },
];
/**
 * Applies, rolls back, and verifies schema migrations.
 *
 * Concurrency: applyMigration() takes a SQLite write lock (BEGIN IMMEDIATE)
 * before re-checking the schema_version table, so two processes racing to
 * migrate the same database cannot both apply the same migration.
 * Integrity: each applied migration is recorded with a SHA-256 checksum so
 * verifyMigrations() can detect drift between the database and the code.
 */
export class MigrationManager {
  constructor(private readonly db: SQLiteManager) {}

  /** Current schema version as recorded in the database. */
  getCurrentVersion(): number {
    return this.db.getSchemaVersion();
  }

  /** All migrations newer than the current version, sorted by version ascending. */
  getPendingMigrations(): Migration[] {
    const currentVersion = this.getCurrentVersion();
    return migrations
      .filter((m) => m.version > currentVersion)
      .sort((a, b) => a.version - b.version);
  }

  /**
   * Apply a single migration with locking to prevent concurrent execution.
   * Uses BEGIN IMMEDIATE to acquire a write lock before checking/applying.
   *
   * @param migration The migration to apply.
   * @throws Re-throws any error from executing the migration SQL; the
   *   transaction is rolled back first (best effort).
   */
  applyMigration(migration: Migration): void {
    console.error(
      `Applying migration v${migration.version}: ${migration.description}`
    );
    // Checksum is stored alongside the record so verifyMigrations() can
    // later detect a mismatch between the database and the code.
    const checksum = this.calculateChecksum(migration);
    // BEGIN IMMEDIATE acquires the write lock up front, so a concurrent
    // process cannot slip past the "already applied" check below.
    this.db.exec("BEGIN IMMEDIATE");
    try {
      // Re-check inside the lock: a concurrent process may have applied this
      // migration between getPendingMigrations() and acquiring the lock.
      const alreadyApplied = this.db
        .prepare("SELECT 1 FROM schema_version WHERE version = ?")
        .get(migration.version);
      if (alreadyApplied) {
        this.db.exec("ROLLBACK");
        console.error(`Migration v${migration.version} already applied (concurrent execution)`);
        return;
      }
      // Execute the migration SQL using db.exec() directly.
      // SQLite handles multiple statements and comments correctly.
      if (migration.up && migration.up.trim()) {
        this.db.exec(migration.up);
      }
      // Record the migration inside the same transaction.
      this.db
        .prepare(
          "INSERT INTO schema_version (version, applied_at, description, checksum) VALUES (?, ?, ?, ?)"
        )
        .run(migration.version, Date.now(), migration.description, checksum);
      this.db.exec("COMMIT");
      console.error(`Migration v${migration.version} applied successfully`);
    } catch (error) {
      // FIX: ROLLBACK can itself throw (e.g. the failure happened during
      // COMMIT, leaving no open transaction); never let that mask the
      // original migration error.
      try {
        this.db.exec("ROLLBACK");
      } catch {
        // Ignore rollback failure; the original error is the actionable one.
      }
      throw error;
    }
  }

  /** Apply every pending migration in ascending version order. */
  applyPendingMigrations(): void {
    const pending = this.getPendingMigrations();
    if (pending.length === 0) {
      console.error("No pending migrations");
      return;
    }
    console.error(`Found ${pending.length} pending migrations`);
    for (const migration of pending) {
      this.applyMigration(migration);
    }
    console.error("All migrations applied successfully");
  }

  /**
   * Roll back all migrations above targetVersion, newest first.
   *
   * Each rollback runs in its own transaction, so a failure partway through
   * leaves the already-rolled-back (higher) versions committed.
   *
   * @throws Error if an affected migration provides no `down` script.
   */
  rollbackTo(targetVersion: number): void {
    const currentVersion = this.getCurrentVersion();
    if (targetVersion >= currentVersion) {
      console.error("Nothing to rollback");
      return;
    }
    // Migrations in (targetVersion, currentVersion], applied in reverse order.
    const toRollback = migrations
      .filter((m) => m.version > targetVersion && m.version <= currentVersion)
      .sort((a, b) => b.version - a.version);
    for (const migration of toRollback) {
      const downSql = migration.down;
      if (!downSql) {
        throw new Error(
          `Migration v${migration.version} does not support rollback`
        );
      }
      console.error(`Rolling back migration v${migration.version}`);
      this.db.transaction(() => {
        // Execute rollback SQL, then remove the bookkeeping record atomically.
        this.db.exec(downSql);
        this.db
          .prepare("DELETE FROM schema_version WHERE version = ?")
          .run(migration.version);
      });
      console.error(`Migration v${migration.version} rolled back`);
    }
  }

  /**
   * SHA-256 over version, description, and up-SQL.
   * Note: editing a migration's description or SQL after it has shipped will
   * change this value and fail verification — by design.
   */
  private calculateChecksum(migration: Migration): string {
    const content = `${migration.version}:${migration.description}:${migration.up}`;
    return createHash("sha256").update(content).digest("hex");
  }

  /**
   * Verify that every applied migration still matches the code.
   * NULL checksums (rows applied before checksums existed) are backfilled.
   *
   * @returns false if an applied version is missing from the code or its
   *   checksum no longer matches; true otherwise.
   */
  verifyMigrations(): boolean {
    const applied = this.db
      .prepare(
        "SELECT version, checksum FROM schema_version WHERE version > 0 ORDER BY version"
      )
      .all() as Array<{ version: number; checksum: string | null }>;
    for (const record of applied) {
      const migration = migrations.find((m) => m.version === record.version);
      if (!migration) {
        console.error(`Migration v${record.version} not found in code`);
        return false;
      }
      const expectedChecksum = this.calculateChecksum(migration);
      if (record.checksum === null) {
        // NULL checksum indicates migration was applied before checksums were
        // added. Backfill the checksum for future verification.
        console.error(`Migration v${record.version} has no checksum - backfilling`);
        this.db
          .prepare("UPDATE schema_version SET checksum = ? WHERE version = ?")
          .run(expectedChecksum, record.version);
      } else if (record.checksum !== expectedChecksum) {
        console.error(
          `Migration v${record.version} checksum mismatch - database may be corrupted`
        );
        return false;
      }
    }
    return true;
  }

  /** Applied-migration history, oldest first. */
  getHistory(): Array<{
    version: number;
    description: string;
    applied_at: number;
  }> {
    return this.db
      .prepare(
        "SELECT version, description, applied_at FROM schema_version ORDER BY version"
      )
      .all() as Array<{
      version: number;
      description: string;
      applied_at: number;
    }>;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/storage/schema.sql | SQL | -- CCCMemory Database Schema
-- Single-DB layout (projects + sources + scoped entities)
-- Optimized for SQLite + sqlite-vec
-- ==================================================
-- PROJECT REGISTRY
-- ==================================================
-- One row per indexed project; canonical_path is the dedupe key.
CREATE TABLE IF NOT EXISTS projects (
id INTEGER PRIMARY KEY,
canonical_path TEXT NOT NULL UNIQUE,
display_path TEXT,
git_root TEXT,
metadata TEXT,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_projects_path ON projects(canonical_path);
-- Per-source indexing state: one row per (project, source tool) pair,
-- with cached counts updated at index time.
CREATE TABLE IF NOT EXISTS project_sources (
id INTEGER PRIMARY KEY,
project_id INTEGER NOT NULL,
source_type TEXT NOT NULL, -- 'claude-code' or 'codex'
source_root TEXT, -- ~/.claude/projects/... or ~/.codex/...
last_indexed INTEGER NOT NULL,
message_count INTEGER DEFAULT 0,
conversation_count INTEGER DEFAULT 0,
decision_count INTEGER DEFAULT 0,
mistake_count INTEGER DEFAULT 0,
metadata TEXT,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
UNIQUE(project_id, source_type),
FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_proj_source_type ON project_sources(source_type);
CREATE INDEX IF NOT EXISTS idx_proj_source_last_indexed ON project_sources(last_indexed);
-- Additional filesystem paths that resolve to an existing project.
CREATE TABLE IF NOT EXISTS project_aliases (
alias_path TEXT PRIMARY KEY,
project_id INTEGER NOT NULL,
created_at INTEGER NOT NULL,
FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE
);
-- ==================================================
-- CORE TABLES
-- ==================================================
-- One row per imported session; external_id is the source tool's session id,
-- unique within (project, source).
CREATE TABLE IF NOT EXISTS conversations (
id INTEGER PRIMARY KEY,
project_id INTEGER NOT NULL,
project_path TEXT NOT NULL, -- Denormalized for fast access
source_type TEXT NOT NULL,
external_id TEXT NOT NULL, -- sessionId from JSONL
first_message_at INTEGER NOT NULL,
last_message_at INTEGER NOT NULL,
message_count INTEGER DEFAULT 0,
git_branch TEXT,
claude_version TEXT,
metadata TEXT,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE,
UNIQUE(project_id, source_type, external_id)
);
CREATE INDEX IF NOT EXISTS idx_conv_project ON conversations(project_id);
CREATE INDEX IF NOT EXISTS idx_conv_project_path ON conversations(project_path);
CREATE INDEX IF NOT EXISTS idx_conv_source ON conversations(source_type);
CREATE INDEX IF NOT EXISTS idx_conv_time ON conversations(last_message_at);
CREATE INDEX IF NOT EXISTS idx_conv_branch ON conversations(git_branch);
CREATE INDEX IF NOT EXISTS idx_conv_created ON conversations(created_at);
-- Individual messages; parent linkage is stored both as an internal FK and
-- as the raw external id (resolved to the FK during import).
CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY,
conversation_id INTEGER NOT NULL,
external_id TEXT NOT NULL, -- uuid from JSONL
parent_message_id INTEGER, -- internal parent id
parent_external_id TEXT, -- external parent id (for import)
message_type TEXT NOT NULL,
role TEXT,
content TEXT,
timestamp INTEGER NOT NULL,
is_sidechain INTEGER DEFAULT 0,
agent_id TEXT,
request_id TEXT,
git_branch TEXT,
cwd TEXT,
metadata TEXT,
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
FOREIGN KEY (parent_message_id) REFERENCES messages(id) ON DELETE SET NULL,
UNIQUE(conversation_id, external_id)
);
CREATE INDEX IF NOT EXISTS idx_msg_conv ON messages(conversation_id);
CREATE INDEX IF NOT EXISTS idx_msg_parent ON messages(parent_message_id);
CREATE INDEX IF NOT EXISTS idx_msg_type ON messages(message_type);
CREATE INDEX IF NOT EXISTS idx_msg_time ON messages(timestamp);
CREATE INDEX IF NOT EXISTS idx_msg_conv_time ON messages(conversation_id, timestamp);
CREATE INDEX IF NOT EXISTS idx_msg_role ON messages(role);
-- Tool invocations made from a message.
CREATE TABLE IF NOT EXISTS tool_uses (
id INTEGER PRIMARY KEY,
message_id INTEGER NOT NULL,
external_id TEXT NOT NULL,
tool_name TEXT NOT NULL,
tool_input TEXT NOT NULL,
timestamp INTEGER NOT NULL,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
UNIQUE(message_id, external_id)
);
CREATE INDEX IF NOT EXISTS idx_tool_msg ON tool_uses(message_id);
CREATE INDEX IF NOT EXISTS idx_tool_name ON tool_uses(tool_name);
CREATE INDEX IF NOT EXISTS idx_tool_time ON tool_uses(timestamp);
CREATE INDEX IF NOT EXISTS idx_tool_name_time ON tool_uses(tool_name, timestamp);
-- Output produced by a tool invocation (linked to both the invocation and
-- the message that carried the result).
CREATE TABLE IF NOT EXISTS tool_results (
id INTEGER PRIMARY KEY,
tool_use_id INTEGER NOT NULL,
message_id INTEGER NOT NULL,
external_id TEXT,
content TEXT,
is_error INTEGER DEFAULT 0,
stdout TEXT,
stderr TEXT,
is_image INTEGER DEFAULT 0,
timestamp INTEGER NOT NULL,
FOREIGN KEY (tool_use_id) REFERENCES tool_uses(id) ON DELETE CASCADE,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
UNIQUE(tool_use_id, external_id)
);
CREATE INDEX IF NOT EXISTS idx_result_tool ON tool_results(tool_use_id);
CREATE INDEX IF NOT EXISTS idx_result_msg ON tool_results(message_id);
CREATE INDEX IF NOT EXISTS idx_result_error ON tool_results(is_error);
-- File-edit snapshots recorded during a conversation.
CREATE TABLE IF NOT EXISTS file_edits (
id INTEGER PRIMARY KEY,
external_id TEXT NOT NULL,
conversation_id INTEGER NOT NULL,
file_path TEXT NOT NULL,
message_id INTEGER NOT NULL,
backup_version INTEGER,
backup_time INTEGER,
snapshot_timestamp INTEGER NOT NULL,
metadata TEXT,
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
UNIQUE(conversation_id, external_id)
);
CREATE INDEX IF NOT EXISTS idx_edit_file ON file_edits(file_path);
CREATE INDEX IF NOT EXISTS idx_edit_conv ON file_edits(conversation_id);
CREATE INDEX IF NOT EXISTS idx_edit_time ON file_edits(snapshot_timestamp);
CREATE INDEX IF NOT EXISTS idx_edit_file_time ON file_edits(file_path, snapshot_timestamp);
-- Extended-thinking blocks attached to a message.
CREATE TABLE IF NOT EXISTS thinking_blocks (
id INTEGER PRIMARY KEY,
external_id TEXT NOT NULL,
message_id INTEGER NOT NULL,
thinking_content TEXT NOT NULL,
signature TEXT,
timestamp INTEGER NOT NULL,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
UNIQUE(message_id, external_id)
);
CREATE INDEX IF NOT EXISTS idx_think_msg ON thinking_blocks(message_id);
-- ==================================================
-- ENHANCED MEMORY TABLES
-- ==================================================
-- Decisions extracted from conversations, with rationale, alternatives,
-- and related files/commits.
CREATE TABLE IF NOT EXISTS decisions (
id INTEGER PRIMARY KEY,
external_id TEXT NOT NULL,
conversation_id INTEGER NOT NULL,
message_id INTEGER NOT NULL,
decision_text TEXT NOT NULL,
rationale TEXT,
alternatives_considered TEXT,
rejected_reasons TEXT,
context TEXT,
related_files TEXT,
related_commits TEXT,
timestamp INTEGER NOT NULL,
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
UNIQUE(conversation_id, external_id)
);
CREATE INDEX IF NOT EXISTS idx_decision_conv ON decisions(conversation_id);
CREATE INDEX IF NOT EXISTS idx_decision_time ON decisions(timestamp);
CREATE INDEX IF NOT EXISTS idx_decision_context ON decisions(context);
-- Git commits, optionally linked to the conversation/message where they were
-- made or discussed. Links survive conversation deletion (ON DELETE SET NULL).
CREATE TABLE IF NOT EXISTS git_commits (
id INTEGER PRIMARY KEY,
project_id INTEGER NOT NULL,
hash TEXT NOT NULL,
message TEXT NOT NULL,
author TEXT,
timestamp INTEGER NOT NULL,
branch TEXT,
files_changed TEXT,
conversation_id INTEGER,
related_message_id INTEGER,
metadata TEXT,
FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE,
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE SET NULL,
FOREIGN KEY (related_message_id) REFERENCES messages(id) ON DELETE SET NULL,
UNIQUE(project_id, hash)
);
CREATE INDEX IF NOT EXISTS idx_commit_project ON git_commits(project_id);
CREATE INDEX IF NOT EXISTS idx_commit_conv ON git_commits(conversation_id);
CREATE INDEX IF NOT EXISTS idx_commit_time ON git_commits(timestamp);
CREATE INDEX IF NOT EXISTS idx_commit_branch ON git_commits(branch);
-- Mistakes and their corrections, for "avoid repeating" lookups.
CREATE TABLE IF NOT EXISTS mistakes (
id INTEGER PRIMARY KEY,
external_id TEXT NOT NULL,
conversation_id INTEGER NOT NULL,
message_id INTEGER NOT NULL,
mistake_type TEXT NOT NULL,
what_went_wrong TEXT NOT NULL,
correction TEXT,
user_correction_message TEXT,
files_affected TEXT,
timestamp INTEGER NOT NULL,
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
UNIQUE(conversation_id, external_id)
);
CREATE INDEX IF NOT EXISTS idx_mistake_conv ON mistakes(conversation_id);
CREATE INDEX IF NOT EXISTS idx_mistake_type ON mistakes(mistake_type);
CREATE INDEX IF NOT EXISTS idx_mistake_time ON mistakes(timestamp);
-- Per-file change timeline across conversations, decisions, and commits.
CREATE TABLE IF NOT EXISTS file_evolution (
id INTEGER PRIMARY KEY,
file_path TEXT NOT NULL,
conversation_id INTEGER NOT NULL,
change_summary TEXT,
decision_ids TEXT,
commit_hash TEXT,
fixes_mistake_id INTEGER,
timestamp INTEGER NOT NULL,
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
FOREIGN KEY (fixes_mistake_id) REFERENCES mistakes(id) ON DELETE SET NULL
);
CREATE INDEX IF NOT EXISTS idx_evolution_file ON file_evolution(file_path);
CREATE INDEX IF NOT EXISTS idx_evolution_time ON file_evolution(timestamp);
CREATE INDEX IF NOT EXISTS idx_evolution_file_time ON file_evolution(file_path, timestamp);
-- Requirements and constraints captured from conversations.
CREATE TABLE IF NOT EXISTS requirements (
id INTEGER PRIMARY KEY,
external_id TEXT NOT NULL,
type TEXT NOT NULL,
description TEXT NOT NULL,
rationale TEXT,
affects_components TEXT,
conversation_id INTEGER NOT NULL,
message_id INTEGER NOT NULL,
timestamp INTEGER NOT NULL,
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE,
UNIQUE(conversation_id, external_id)
);
CREATE INDEX IF NOT EXISTS idx_req_type ON requirements(type);
CREATE INDEX IF NOT EXISTS idx_req_conv ON requirements(conversation_id);
-- Test/validation runs and their outcomes.
CREATE TABLE IF NOT EXISTS validations (
id INTEGER PRIMARY KEY,
external_id TEXT NOT NULL,
conversation_id INTEGER NOT NULL,
what_was_tested TEXT NOT NULL,
test_command TEXT,
result TEXT NOT NULL,
performance_data TEXT,
files_tested TEXT,
timestamp INTEGER NOT NULL,
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
UNIQUE(conversation_id, external_id)
);
CREATE INDEX IF NOT EXISTS idx_valid_conv ON validations(conversation_id);
CREATE INDEX IF NOT EXISTS idx_valid_result ON validations(result);
-- User preferences keyed by a TEXT id; not tied to any conversation.
CREATE TABLE IF NOT EXISTS user_preferences (
id TEXT PRIMARY KEY,
category TEXT NOT NULL,
preference TEXT NOT NULL,
rationale TEXT,
examples TEXT,
established_date INTEGER NOT NULL,
updated_at INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_pref_category ON user_preferences(category);
-- ==================================================
-- METHODOLOGY & RESEARCH TRACKING TABLES
-- ==================================================
-- Problem-solving approaches spanning a message range within a conversation.
-- created_at defaults to current time in epoch millis.
CREATE TABLE IF NOT EXISTS methodologies (
id TEXT PRIMARY KEY,
conversation_id INTEGER NOT NULL,
start_message_id INTEGER NOT NULL,
end_message_id INTEGER NOT NULL,
problem_statement TEXT NOT NULL,
approach TEXT NOT NULL,
steps_taken TEXT NOT NULL,
tools_used TEXT NOT NULL,
files_involved TEXT NOT NULL,
outcome TEXT NOT NULL,
what_worked TEXT,
what_didnt_work TEXT,
started_at INTEGER NOT NULL,
ended_at INTEGER NOT NULL,
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000),
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
FOREIGN KEY (start_message_id) REFERENCES messages(id) ON DELETE CASCADE,
FOREIGN KEY (end_message_id) REFERENCES messages(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_methodology_conv ON methodologies(conversation_id);
CREATE INDEX IF NOT EXISTS idx_methodology_approach ON methodologies(approach);
CREATE INDEX IF NOT EXISTS idx_methodology_outcome ON methodologies(outcome);
CREATE INDEX IF NOT EXISTS idx_methodology_started ON methodologies(started_at);
-- Research discoveries tied to a specific message.
CREATE TABLE IF NOT EXISTS research_findings (
id TEXT PRIMARY KEY,
conversation_id INTEGER NOT NULL,
message_id INTEGER NOT NULL,
topic TEXT NOT NULL,
discovery TEXT NOT NULL,
source_type TEXT NOT NULL,
source_reference TEXT,
relevance TEXT NOT NULL,
confidence TEXT NOT NULL,
related_to TEXT NOT NULL,
timestamp INTEGER NOT NULL,
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000),
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_research_conv ON research_findings(conversation_id);
CREATE INDEX IF NOT EXISTS idx_research_topic ON research_findings(topic);
CREATE INDEX IF NOT EXISTS idx_research_source ON research_findings(source_type);
CREATE INDEX IF NOT EXISTS idx_research_relevance ON research_findings(relevance);
CREATE INDEX IF NOT EXISTS idx_research_timestamp ON research_findings(timestamp);
-- Reusable solution patterns with applicability conditions.
CREATE TABLE IF NOT EXISTS solution_patterns (
id TEXT PRIMARY KEY,
conversation_id INTEGER NOT NULL,
message_id INTEGER NOT NULL,
problem_category TEXT NOT NULL,
problem_description TEXT NOT NULL,
solution_summary TEXT NOT NULL,
solution_steps TEXT NOT NULL,
code_pattern TEXT,
technology TEXT NOT NULL,
prerequisites TEXT NOT NULL,
applies_when TEXT NOT NULL,
avoid_when TEXT,
applied_to_files TEXT NOT NULL,
effectiveness TEXT NOT NULL,
timestamp INTEGER NOT NULL,
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now') * 1000),
FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_pattern_conv ON solution_patterns(conversation_id);
CREATE INDEX IF NOT EXISTS idx_pattern_category ON solution_patterns(problem_category);
CREATE INDEX IF NOT EXISTS idx_pattern_effectiveness ON solution_patterns(effectiveness);
CREATE INDEX IF NOT EXISTS idx_pattern_timestamp ON solution_patterns(timestamp);
-- ==================================================
-- VECTOR & SEARCH TABLES
-- ==================================================
-- Embedding sidecar tables: one BLOB vector per embedded source row, each
-- cascading on delete of its source row.
CREATE TABLE IF NOT EXISTS message_embeddings (
id TEXT PRIMARY KEY,
message_id INTEGER NOT NULL,
content TEXT NOT NULL,
embedding BLOB NOT NULL,
model_name TEXT DEFAULT 'all-MiniLM-L6-v2',
created_at INTEGER NOT NULL,
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_embed_msg ON message_embeddings(message_id);
CREATE TABLE IF NOT EXISTS decision_embeddings (
id TEXT PRIMARY KEY,
decision_id INTEGER NOT NULL,
embedding BLOB NOT NULL,
created_at INTEGER NOT NULL,
FOREIGN KEY (decision_id) REFERENCES decisions(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_dec_embed ON decision_embeddings(decision_id);
CREATE TABLE IF NOT EXISTS mistake_embeddings (
id TEXT PRIMARY KEY,
mistake_id INTEGER NOT NULL,
embedding BLOB NOT NULL,
created_at INTEGER NOT NULL,
FOREIGN KEY (mistake_id) REFERENCES mistakes(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_mistake_embed ON mistake_embeddings(mistake_id);
CREATE TABLE IF NOT EXISTS methodology_embeddings (
id TEXT PRIMARY KEY,
methodology_id TEXT NOT NULL,
embedding BLOB NOT NULL,
created_at INTEGER NOT NULL,
FOREIGN KEY (methodology_id) REFERENCES methodologies(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_methodology_embed ON methodology_embeddings(methodology_id);
CREATE TABLE IF NOT EXISTS research_embeddings (
id TEXT PRIMARY KEY,
research_id TEXT NOT NULL,
embedding BLOB NOT NULL,
created_at INTEGER NOT NULL,
FOREIGN KEY (research_id) REFERENCES research_findings(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_research_embed ON research_embeddings(research_id);
CREATE TABLE IF NOT EXISTS pattern_embeddings (
id TEXT PRIMARY KEY,
pattern_id TEXT NOT NULL,
embedding BLOB NOT NULL,
created_at INTEGER NOT NULL,
FOREIGN KEY (pattern_id) REFERENCES solution_patterns(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_pattern_embed ON pattern_embeddings(pattern_id);
-- FTS5 keyword-search tables. messages_fts and decisions_fts are
-- external-content tables (content= / content_rowid=).
-- NOTE(review): external-content FTS requires insert/update/delete sync
-- triggers, which are not in this file - confirm they exist elsewhere.
CREATE VIRTUAL TABLE IF NOT EXISTS messages_fts USING fts5(
id UNINDEXED,
content,
metadata,
content=messages,
content_rowid=rowid
);
CREATE VIRTUAL TABLE IF NOT EXISTS decisions_fts USING fts5(
id UNINDEXED,
decision_text,
rationale,
context,
content=decisions,
content_rowid=rowid
);
-- NOTE(review): unlike every sibling FTS table, 'id' here is a searchable
-- column (no UNINDEXED) - confirm whether that is intentional.
CREATE VIRTUAL TABLE IF NOT EXISTS mistakes_fts USING fts5(
id,
what_went_wrong,
correction,
mistake_type
);
CREATE VIRTUAL TABLE IF NOT EXISTS methodologies_fts USING fts5(
id UNINDEXED,
problem_statement,
what_worked,
what_didnt_work
);
CREATE VIRTUAL TABLE IF NOT EXISTS research_fts USING fts5(
id UNINDEXED,
topic,
discovery,
source_reference
);
CREATE VIRTUAL TABLE IF NOT EXISTS patterns_fts USING fts5(
id UNINDEXED,
problem_description,
solution_summary,
applies_when
);
-- ==================================================
-- PERFORMANCE & CACHING
-- ==================================================
-- Expiring query-result cache with hit statistics.
CREATE TABLE IF NOT EXISTS query_cache (
cache_key TEXT PRIMARY KEY,
result TEXT NOT NULL,
created_at INTEGER NOT NULL,
expires_at INTEGER NOT NULL,
hit_count INTEGER DEFAULT 0,
last_accessed INTEGER
);
CREATE INDEX IF NOT EXISTS idx_cache_expires ON query_cache(expires_at);
-- ==================================================
-- METADATA TABLE
-- ==================================================
-- Applied-migration bookkeeping; checksum enables drift verification.
CREATE TABLE IF NOT EXISTS schema_version (
version INTEGER PRIMARY KEY,
applied_at INTEGER NOT NULL,
description TEXT,
checksum TEXT
);
-- ==================================================
-- LIVE CONTEXT LAYER TABLES
-- ==================================================
-- Key/value working memory, unique per (project_path, key), with optional
-- expiry (expires_at) and optional embedding for semantic recall.
CREATE TABLE IF NOT EXISTS working_memory (
id TEXT PRIMARY KEY,
key TEXT NOT NULL,
value TEXT NOT NULL,
context TEXT,
tags TEXT,
session_id TEXT,
project_path TEXT NOT NULL,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
expires_at INTEGER,
embedding BLOB,
UNIQUE(project_path, key)
);
CREATE INDEX IF NOT EXISTS idx_wm_session ON working_memory(session_id);
CREATE INDEX IF NOT EXISTS idx_wm_project ON working_memory(project_path);
CREATE INDEX IF NOT EXISTS idx_wm_expires ON working_memory(expires_at);
CREATE INDEX IF NOT EXISTS idx_wm_key ON working_memory(key);
CREATE INDEX IF NOT EXISTS idx_wm_project_key ON working_memory(project_path, key);
-- Handoff documents for transferring context to a later session; resumed_*
-- columns record which session picked the handoff up.
CREATE TABLE IF NOT EXISTS session_handoffs (
id TEXT PRIMARY KEY,
from_session_id TEXT NOT NULL,
project_path TEXT NOT NULL,
created_at INTEGER NOT NULL,
handoff_data TEXT NOT NULL,
resumed_by_session_id TEXT,
resumed_at INTEGER
);
CREATE INDEX IF NOT EXISTS idx_handoff_session ON session_handoffs(from_session_id);
CREATE INDEX IF NOT EXISTS idx_handoff_project ON session_handoffs(project_path);
CREATE INDEX IF NOT EXISTS idx_handoff_created ON session_handoffs(created_at);
CREATE INDEX IF NOT EXISTS idx_handoff_resumed ON session_handoffs(resumed_by_session_id);
-- Numbered mid-session checkpoints, unique per (session, number).
CREATE TABLE IF NOT EXISTS session_checkpoints (
id TEXT PRIMARY KEY,
session_id TEXT NOT NULL,
project_path TEXT NOT NULL,
checkpoint_number INTEGER NOT NULL,
created_at INTEGER NOT NULL,
decisions TEXT,
active_files TEXT,
task_state TEXT,
context_summary TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_checkpoint_session ON session_checkpoints(session_id);
CREATE INDEX IF NOT EXISTS idx_checkpoint_project ON session_checkpoints(project_path);
CREATE INDEX IF NOT EXISTS idx_checkpoint_created ON session_checkpoints(created_at);
CREATE UNIQUE INDEX IF NOT EXISTS idx_checkpoint_session_num ON session_checkpoints(session_id, checkpoint_number);
-- External-content FTS over working_memory (see NOTE above re: sync triggers).
CREATE VIRTUAL TABLE IF NOT EXISTS working_memory_fts USING fts5(
id UNINDEXED,
key,
value,
context,
content=working_memory,
content_rowid=rowid
);
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/tools/ToolDefinitions.ts | TypeScript | /**
* MCP Tool Definitions
*/
export const TOOLS = {
index_conversations: {
name: "index_conversations",
description: "Index conversation history for the current project. This parses conversation files, extracts decisions, mistakes, and links to git commits. Can index all sessions or a specific session.",
inputSchema: {
type: "object",
properties: {
project_path: {
type: "string",
description: "Path to the project (defaults to current working directory)",
},
session_id: {
type: "string",
description: "Optional: specific session ID to index. Use the external session id (JSONL filename / Codex rollout id), e.g. 'a1172af3-ca62-41be-9b90-701cef39daae'. Internal DB ids are accepted but prefer list_recent_sessions.session_id.",
},
include_thinking: {
type: "boolean",
description: "Include thinking blocks in indexing (default: false, can be large)",
default: false,
},
enable_git: {
type: "boolean",
description: "Enable git integration to link commits to conversations (default: true)",
default: true,
},
exclude_mcp_conversations: {
type: ["boolean", "string"],
description: "Exclude MCP tool conversations from indexing. Options: 'self-only' (exclude only cccmemory MCP to prevent self-referential loops, DEFAULT), false (index all MCP conversations), 'all-mcp' or true (exclude all MCP tool conversations)",
default: "self-only",
},
exclude_mcp_servers: {
type: "array",
description: "List of specific MCP server names to exclude (e.g., ['cccmemory', 'filesystem']). More granular than exclude_mcp_conversations.",
items: { type: "string" },
},
},
},
},
search_conversations: {
name: "search_conversations",
description: "Search conversation history using natural language queries. Returns relevant messages with context. Supports pagination and scope filtering (current session, all sessions in project, or global across projects).",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Natural language search query",
},
limit: {
type: "number",
description: "Maximum number of results (default: 10)",
default: 10,
},
offset: {
type: "number",
description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
default: 0,
},
date_range: {
type: "array",
description: "Optional date range filter [start_timestamp, end_timestamp]",
items: { type: "number" },
},
scope: {
type: "string",
enum: ["current", "all", "global"],
description: "Search scope: 'current' (current session only), 'all' (all sessions in current project), 'global' (all indexed projects including Codex). Default: 'all'",
default: "all",
},
conversation_id: {
type: "string",
description: "Required when scope='current': internal conversation id from list_recent_sessions.id",
},
},
required: ["query"],
if: {
properties: { scope: { const: "current" } },
},
then: {
required: ["conversation_id"],
},
},
},
search_project_conversations: {
name: "search_project_conversations",
description: "Search conversations scoped to a project path, optionally including both Claude Code and Codex sessions that match the same project root.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Natural language search query",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
limit: {
type: "number",
description: "Maximum number of results (default: 10)",
default: 10,
},
offset: {
type: "number",
description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
default: 0,
},
date_range: {
type: "array",
description: "Optional date range filter [start_timestamp, end_timestamp]",
items: { type: "number" },
},
include_claude_code: {
type: "boolean",
description: "Include Claude Code conversations (default: true)",
default: true,
},
include_codex: {
type: "boolean",
description: "Include Codex conversations (default: true)",
default: true,
},
},
required: ["query"],
},
},
get_decisions: {
name: "get_decisions",
description: "Find decisions made about a specific topic, file, or component. Shows rationale, alternatives considered, and rejected approaches. Supports pagination and scope filtering.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Topic or keyword to search for (e.g., 'authentication', 'database')",
},
file_path: {
type: "string",
description: "Optional: filter decisions related to a specific file",
},
limit: {
type: "number",
description: "Maximum number of decisions to return (default: 10)",
default: 10,
},
offset: {
type: "number",
description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
default: 0,
},
scope: {
type: "string",
enum: ["current", "all", "global"],
description: "Search scope: 'current' (current session only), 'all' (all sessions in current project), 'global' (all indexed projects including Codex). Default: 'all'",
default: "all",
},
conversation_id: {
type: "string",
description: "Required when scope='current': internal conversation id from list_recent_sessions.id",
},
},
required: ["query"],
if: {
properties: { scope: { const: "current" } },
},
then: {
required: ["conversation_id"],
},
},
},
check_before_modify: {
name: "check_before_modify",
description: "Check important context before modifying a file. Shows recent changes, related decisions, commits, and past mistakes to avoid.",
inputSchema: {
type: "object",
properties: {
file_path: {
type: "string",
description: "Path to the file you want to modify",
},
},
required: ["file_path"],
},
},
get_file_evolution: {
name: "get_file_evolution",
description: "Show complete timeline of changes to a file across conversations and commits. Supports pagination for files with long history.",
inputSchema: {
type: "object",
properties: {
file_path: {
type: "string",
description: "Path to the file",
},
include_decisions: {
type: "boolean",
description: "Include related decisions (default: true)",
default: true,
},
include_commits: {
type: "boolean",
description: "Include git commits (default: true)",
default: true,
},
limit: {
type: "number",
description: "Maximum number of timeline events to return (default: 50)",
default: 50,
},
offset: {
type: "number",
description: "Skip N events for pagination (default: 0). Use with limit to fetch subsequent pages.",
default: 0,
},
},
required: ["file_path"],
},
},
link_commits_to_conversations: {
name: "link_commits_to_conversations",
description: "Link git commits to the conversation sessions where they were made or discussed. Creates associations between code changes and their conversation context, enabling you to see WHY changes were made. Supports pagination and scope filtering.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Search query for commits",
},
conversation_id: {
type: "string",
description: "Optional: filter by specific conversation ID",
},
limit: {
type: "number",
description: "Maximum number of commits (default: 20)",
default: 20,
},
offset: {
type: "number",
description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
default: 0,
},
scope: {
type: "string",
enum: ["current", "all", "global"],
description: "Search scope: 'current' (current session only), 'all' (all sessions in current project), 'global' (all indexed projects including Codex). Default: 'all'",
default: "all",
},
},
},
},
search_mistakes: {
name: "search_mistakes",
description: "Find past mistakes to avoid repeating them. Shows what went wrong and how it was corrected. Supports pagination and scope filtering.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Search query for mistakes",
},
mistake_type: {
type: "string",
description: "Optional: filter by type (logic_error, wrong_approach, misunderstanding, tool_error, syntax_error)",
enum: ["logic_error", "wrong_approach", "misunderstanding", "tool_error", "syntax_error"],
},
limit: {
type: "number",
description: "Maximum number of results (default: 10)",
default: 10,
},
offset: {
type: "number",
description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
default: 0,
},
scope: {
type: "string",
enum: ["current", "all", "global"],
description: "Search scope: 'current' (current session only), 'all' (all sessions in current project), 'global' (all indexed projects including Codex). Default: 'all'",
default: "all",
},
conversation_id: {
type: "string",
description: "Required when scope='current': internal conversation id from list_recent_sessions.id",
},
},
required: ["query"],
if: {
properties: { scope: { const: "current" } },
},
then: {
required: ["conversation_id"],
},
},
},
get_requirements: {
name: "get_requirements",
description: "Look up requirements and constraints for a component or feature.",
inputSchema: {
type: "object",
properties: {
component: {
type: "string",
description: "Component or feature name",
},
type: {
type: "string",
description: "Optional: filter by requirement type",
enum: ["dependency", "performance", "compatibility", "business"],
},
},
required: ["component"],
},
},
// Paginated audit-trail query over recorded tool uses (Bash, Edit, Write, Read).
// Content fields are excluded by default and truncated when included, to keep
// responses within token limits.
get_tool_history: {
  name: "get_tool_history",
  description: "Query history of tool uses (bash commands, file edits, reads, etc.) with pagination, filtering, and content control. Returns metadata about tool uses with optional content truncation to stay within token limits. Use include_content=false for quick overview of many tools, or enable with max_content_length to control response size.",
  inputSchema: {
    type: "object",
    properties: {
      tool_name: {
        type: "string",
        description: "Optional: filter by tool name (Bash, Edit, Write, Read)",
      },
      file_path: {
        type: "string",
        description: "Optional: filter by file path (searches in tool parameters)",
      },
      limit: {
        type: "number",
        description: "Maximum number of results per page (default: 20)",
        default: 20,
      },
      offset: {
        type: "number",
        description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
        default: 0,
      },
      include_content: {
        type: "boolean",
        // Fixed description: the original parenthetical listed the metadata
        // fields ("tool names, timestamps, success/failure status") as if they
        // were the content returned when true — they are what remains when false.
        description: "Include tool result content, stdout, stderr (default: false for security). When false, only metadata is returned (tool names, timestamps, success/failure status).",
        default: false,
      },
      max_content_length: {
        type: "number",
        description: "Maximum characters per content field before truncation (default: 500). Truncated fields are marked with content_truncated flag.",
        default: 500,
      },
      date_range: {
        type: "array",
        description: "Optional: filter by timestamp range [start_timestamp, end_timestamp]. Use Date.now() for current time.",
        items: {
          type: "number",
        },
        // Exactly a [start, end] pair.
        minItems: 2,
        maxItems: 2,
      },
      conversation_id: {
        type: "string",
        description: "Optional: filter by internal conversation id (list_recent_sessions.id)",
      },
      errors_only: {
        type: "boolean",
        description: "Optional: show only tool uses that resulted in errors (default: false)",
        default: false,
      },
    },
  },
},
find_similar_sessions: {
name: "find_similar_sessions",
description: "Find conversations that dealt with similar topics or problems. Supports pagination and scope filtering.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Description of the topic or problem",
},
limit: {
type: "number",
description: "Maximum number of sessions (default: 5)",
default: 5,
},
offset: {
type: "number",
description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
default: 0,
},
scope: {
type: "string",
enum: ["current", "all", "global"],
description: "Search scope: 'current' (current session only), 'all' (all sessions in current project), 'global' (all indexed projects including Codex). Default: 'all'",
default: "all",
},
conversation_id: {
type: "string",
description: "Required when scope='current': internal conversation id from list_recent_sessions.id",
},
},
required: ["query"],
if: {
properties: { scope: { const: "current" } },
},
then: {
required: ["conversation_id"],
},
},
},
// "Remember when we did X, now do Y": gathers multiple context types around a
// query and formats them for transfer into the current task.
recall_and_apply: {
  name: "recall_and_apply",
  description: "Recall relevant past context (conversations, decisions, mistakes, file changes) and format it for applying to current work. Use this when you need to 'remember when we did X' and 'now do Y based on that'. Returns structured context optimized for context transfer workflows. Supports pagination and scope filtering.",
  inputSchema: {
    type: "object",
    properties: {
      query: {
        type: "string",
        description: "What to recall (e.g., 'how we implemented authentication', 'the bug we fixed in parser', 'decisions about database schema')",
      },
      context_types: {
        type: "array",
        description: "Types of context to include: 'conversations', 'decisions', 'mistakes', 'file_changes', 'commits'. Default: all types",
        items: {
          type: "string",
          enum: ["conversations", "decisions", "mistakes", "file_changes", "commits"],
        },
        default: ["conversations", "decisions", "mistakes", "file_changes", "commits"],
      },
      file_path: {
        type: "string",
        description: "Optional: focus on a specific file",
      },
      date_range: {
        type: "array",
        description: "Optional: limit to time range [start_timestamp, end_timestamp]",
        items: { type: "number" },
        // Consistency fix: enforce the documented [start, end] pair shape,
        // matching the date_range schema used by get_tool_history.
        minItems: 2,
        maxItems: 2,
      },
      limit: {
        type: "number",
        description: "Maximum results per context type (default: 5)",
        default: 5,
      },
      offset: {
        type: "number",
        description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
        default: 0,
      },
      scope: {
        type: "string",
        enum: ["current", "all", "global"],
        description: "Search scope: 'current' (current session only), 'all' (all sessions in current project), 'global' (all indexed projects including Codex). Default: 'all'",
        default: "all",
      },
      conversation_id: {
        type: "string",
        description: "Required when scope='current': internal conversation id from list_recent_sessions.id",
      },
    },
    required: ["query"],
    // conversation_id becomes mandatory only when scope='current'.
    if: {
      properties: { scope: { const: "current" } },
    },
    then: {
      required: ["conversation_id"],
    },
  },
},
generate_documentation: {
name: "generate_documentation",
description: "Generate comprehensive project documentation by combining local codebase analysis with conversation history. Shows WHAT exists in code and WHY it was built that way.",
inputSchema: {
type: "object",
properties: {
project_path: {
type: "string",
description: "Path to the project (defaults to current working directory)",
},
session_id: {
type: "string",
description: "Optional: internal conversation id (list_recent_sessions.id) to include. If not provided, includes all sessions.",
},
scope: {
type: "string",
enum: ["full", "architecture", "decisions", "quality"],
description: "Documentation scope: full (everything), architecture (modules), decisions (decision log), quality (code quality insights)",
default: "full",
},
module_filter: {
type: "string",
description: "Optional: filter to specific module path (e.g., 'src/auth')",
},
},
},
},
discover_old_conversations: {
name: "discover_old_conversations",
description: "Discover old conversation folders when project directories are renamed or moved. Scans ~/.claude/projects to find folders that match the current project based on database contents and folder similarity.",
inputSchema: {
type: "object",
properties: {
current_project_path: {
type: "string",
description: "Current project path (defaults to current working directory). Used to find matching old folders.",
},
},
},
},
migrate_project: {
name: "migrate_project",
description: "Migrate or merge conversation history from different project folders. Use 'migrate' mode (default) to replace target folder when renaming projects. Use 'merge' mode to combine conversations from different projects into one folder. Creates backups automatically.",
inputSchema: {
type: "object",
properties: {
source_folder: {
type: "string",
description: "Path to the source conversation folder (e.g., /Users/name/.claude/projects/-old-project)",
},
old_project_path: {
type: "string",
description: "Old project path stored in database (e.g., /Users/name/old-project)",
},
new_project_path: {
type: "string",
description: "New project path to update to (e.g., /Users/name/new-project)",
},
dry_run: {
type: "boolean",
description: "If true, shows what would be migrated without making changes (default: false)",
default: false,
},
mode: {
type: "string",
enum: ["migrate", "merge"],
description: "Operation mode: 'migrate' (default) replaces target folder, 'merge' combines conversations from source into existing target. In merge mode, duplicate conversation IDs are skipped (target kept). Use 'merge' to combine history from different projects.",
default: "migrate",
},
},
required: ["source_folder", "old_project_path", "new_project_path"],
},
},
forget_by_topic: {
name: "forget_by_topic",
description: "Forget conversations about specific topics or keywords. Searches for matching conversations and optionally deletes them with automatic backup. Use confirm=false to preview what would be deleted, then set confirm=true to actually delete.",
inputSchema: {
type: "object",
properties: {
keywords: {
type: "array",
description: "Topics or keywords to search for (e.g., ['authentication', 'redesign'])",
items: { type: "string" },
minItems: 1,
},
project_path: {
type: "string",
description: "Path to the project (defaults to current working directory)",
},
confirm: {
type: "boolean",
description: "Set to true to actually delete conversations. If false (default), only shows preview of what would be deleted",
default: false,
},
},
required: ["keywords"],
},
},
// ==================== High-Value Utility Tools ====================
search_by_file: {
name: "search_by_file",
description: "Find all conversation context related to a specific file: discussions, decisions, mistakes, and changes. Essential for understanding file history before modifications.",
inputSchema: {
type: "object",
properties: {
file_path: {
type: "string",
description: "Path to the file (can be relative or absolute)",
},
limit: {
type: "number",
description: "Maximum results per category (default: 5)",
default: 5,
},
},
required: ["file_path"],
},
},
list_recent_sessions: {
name: "list_recent_sessions",
description: "List recent conversation sessions with summary info (date, message count, topics). Returns both internal id and external session_id. Useful for understanding conversation history at a glance.",
inputSchema: {
type: "object",
properties: {
limit: {
type: "number",
description: "Maximum sessions to return (default: 10)",
default: 10,
},
offset: {
type: "number",
description: "Skip N sessions for pagination (default: 0)",
default: 0,
},
project_path: {
type: "string",
description: "Optional: filter to specific project path",
},
},
},
},
get_latest_session_summary: {
name: "get_latest_session_summary",
description: "Summarize the latest session for a project: what the agent is trying to solve, recent actions, and current errors.",
inputSchema: {
type: "object",
properties: {
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
source_type: {
type: "string",
enum: ["claude-code", "codex", "all"],
description: "Filter by source type (default: all)",
default: "all",
},
limit_messages: {
type: "number",
description: "How many recent messages to consider (default: 20)",
default: 20,
},
include_tools: {
type: "boolean",
description: "Include recent tool actions (default: true)",
default: true,
},
include_errors: {
type: "boolean",
description: "Include recent tool errors (default: true)",
default: true,
},
},
},
},
// ==================== Global Cross-Project Tools ====================
index_all_projects: {
name: "index_all_projects",
description: "Index all projects from both Claude Code and Codex. Discovers and indexes conversations from all sources, registering them in a global index for cross-project search. This enables searching across all your work globally.",
inputSchema: {
type: "object",
properties: {
include_codex: {
type: "boolean",
description: "Include Codex conversations (default: true)",
default: true,
},
include_claude_code: {
type: "boolean",
description: "Include Claude Code conversations (default: true)",
default: true,
},
codex_path: {
type: "string",
description: "Path to Codex home directory (default: ~/.codex)",
},
claude_projects_path: {
type: "string",
description: "Path to Claude Code projects directory (default: ~/.claude/projects)",
},
incremental: {
type: "boolean",
description: "Perform incremental indexing - only index files modified since last indexing (default: true). Set to false for full re-indexing.",
default: true,
},
},
},
},
// Global cross-project search across all indexed sources (Claude Code + Codex).
search_all_conversations: {
  name: "search_all_conversations",
  description: "Search conversations across all indexed projects (Claude Code + Codex). Returns results from all projects with source type and project path for context. Supports full pagination.",
  inputSchema: {
    type: "object",
    properties: {
      query: {
        type: "string",
        description: "Natural language search query",
      },
      limit: {
        type: "number",
        description: "Maximum number of results (default: 20)",
        default: 20,
      },
      offset: {
        type: "number",
        description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
        default: 0,
      },
      date_range: {
        type: "array",
        description: "Optional date range filter [start_timestamp, end_timestamp]",
        items: { type: "number" },
        // Consistency fix: the description promises a [start, end] pair;
        // enforce it like get_tool_history's date_range does.
        minItems: 2,
        maxItems: 2,
      },
      source_type: {
        type: "string",
        description: "Filter by source: 'claude-code', 'codex', or 'all' (default: 'all')",
        enum: ["claude-code", "codex", "all"],
        default: "all",
      },
    },
    required: ["query"],
  },
},
get_all_decisions: {
name: "get_all_decisions",
description: "Find decisions made across all indexed projects. Shows rationale, alternatives, and rejected approaches from all your work globally. Supports full pagination.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Topic or keyword to search for (e.g., 'authentication', 'database')",
},
file_path: {
type: "string",
description: "Optional: filter decisions related to a specific file",
},
limit: {
type: "number",
description: "Maximum number of decisions to return (default: 20)",
default: 20,
},
offset: {
type: "number",
description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
default: 0,
},
source_type: {
type: "string",
description: "Filter by source: 'claude-code', 'codex', or 'all' (default: 'all')",
enum: ["claude-code", "codex", "all"],
default: "all",
},
},
required: ["query"],
},
},
// Global variant of search_mistakes: searches mistakes across every indexed project.
search_all_mistakes: {
  name: "search_all_mistakes",
  description: "Find past mistakes across all indexed projects to avoid repeating them. Shows what went wrong and how it was corrected across all your work. Supports full pagination.",
  inputSchema: {
    type: "object",
    properties: {
      query: {
        type: "string",
        description: "Search query for mistakes",
      },
      mistake_type: {
        type: "string",
        description: "Optional: filter by type (logic_error, wrong_approach, misunderstanding, tool_error, syntax_error)",
        // Consistency fix: the per-project search_mistakes tool constrains this
        // field with an enum; without it, typos in mistake_type pass validation
        // and silently match nothing. Values mirror the description exactly.
        enum: ["logic_error", "wrong_approach", "misunderstanding", "tool_error", "syntax_error"],
      },
      limit: {
        type: "number",
        description: "Maximum number of results (default: 20)",
        default: 20,
      },
      offset: {
        type: "number",
        description: "Skip N results for pagination (default: 0). Use with limit to fetch subsequent pages.",
        default: 0,
      },
      source_type: {
        type: "string",
        description: "Filter by source: 'claude-code', 'codex', or 'all' (default: 'all')",
        enum: ["claude-code", "codex", "all"],
        default: "all",
      },
    },
    required: ["query"],
  },
},
// ==================== Live Context Layer Tools ====================
remember: {
name: "remember",
description:
"Store a fact, decision, or piece of context in working memory. Use this to remember important information that should persist across conversation boundaries. Items are stored per-project and can be recalled by key or searched semantically. Supports confidence levels, importance ratings, and source attribution.",
inputSchema: {
type: "object",
properties: {
key: {
type: "string",
description:
"A unique key to identify this memory (e.g., 'storage_decision', 'auth_approach', 'current_task')",
},
value: {
type: "string",
description:
"The value to remember (e.g., 'Using SQLite for simplicity and portability')",
},
context: {
type: "string",
description:
"Optional additional context or rationale for this memory",
},
tags: {
type: "array",
items: { type: "string" },
description:
"Optional tags for categorization (e.g., ['architecture', 'decision'])",
},
ttl: {
type: "number",
description:
"Optional time-to-live in seconds. Memory will auto-expire after this time.",
},
confidence: {
type: "string",
enum: ["uncertain", "likely", "confirmed", "verified"],
description:
"Confidence level: uncertain (hypothesis), likely (probably correct, default), confirmed (tested), verified (proven in production)",
default: "likely",
},
importance: {
type: "string",
enum: ["low", "normal", "high", "critical"],
description:
"Importance level: low (nice to know), normal (default), high (important), critical (must not forget)",
default: "normal",
},
source: {
type: "string",
description:
"Where this information came from (e.g., 'user stated', 'extracted from docs', 'confirmed in testing')",
},
pinned: {
type: "boolean",
description: "Pin this memory to prevent accidental deletion (default: false)",
default: false,
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["key", "value"],
},
},
recall: {
name: "recall",
description:
"Retrieve a specific memory item by its key. Use this when you need to recall a specific fact or decision that was previously stored.",
inputSchema: {
type: "object",
properties: {
key: {
type: "string",
description: "The key of the memory to recall",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["key"],
},
},
recall_relevant: {
name: "recall_relevant",
description:
"Search working memory semantically to find relevant memories based on a query. Use this when you need to find memories related to a topic but don't know the exact key.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description:
"Natural language query to search for (e.g., 'database decisions', 'authentication setup')",
},
limit: {
type: "number",
description: "Maximum number of results (default: 10)",
default: 10,
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["query"],
},
},
list_memory: {
name: "list_memory",
description:
"List all items in working memory for the current project. Optionally filter by tags.",
inputSchema: {
type: "object",
properties: {
tags: {
type: "array",
items: { type: "string" },
description: "Optional: filter by tags (returns items matching any tag)",
},
limit: {
type: "number",
description: "Maximum number of items to return (default: 100)",
default: 100,
},
offset: {
type: "number",
description: "Skip N items for pagination (default: 0)",
default: 0,
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
},
},
forget: {
name: "forget",
description:
"Remove a memory item by its key. Use this to clean up memories that are no longer relevant.",
inputSchema: {
type: "object",
properties: {
key: {
type: "string",
description: "The key of the memory to forget",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["key"],
},
},
// ==================== Session Handoff Tools ====================
prepare_handoff: {
name: "prepare_handoff",
description:
"Prepare a handoff document for transitioning to a new conversation. Extracts key decisions, active files, pending tasks, and working memory to enable seamless continuation in a new session.",
inputSchema: {
type: "object",
properties: {
session_id: {
type: "string",
description:
"Internal conversation id (list_recent_sessions.id) to prepare handoff for (defaults to most recent session)",
},
include: {
type: "array",
items: {
type: "string",
enum: ["decisions", "files", "tasks", "memory"],
},
description:
"What to include in handoff (default: all). Options: decisions, files, tasks, memory",
},
context_summary: {
type: "string",
description:
"Optional summary of current context/task to include in handoff",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
},
},
resume_from_handoff: {
name: "resume_from_handoff",
description:
"Resume work from a previous handoff document. Loads the context from the handoff and provides a summary of what was being worked on.",
inputSchema: {
type: "object",
properties: {
handoff_id: {
type: "string",
description:
"ID of the handoff to resume from (defaults to most recent)",
},
inject_context: {
type: "boolean",
description:
"Whether to inject the handoff context into the response (default: true)",
default: true,
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
},
},
list_handoffs: {
name: "list_handoffs",
description:
"List available handoff documents for the current project. Shows when each was created and whether it has been resumed.",
inputSchema: {
type: "object",
properties: {
limit: {
type: "number",
description: "Maximum number of handoffs to return (default: 10)",
default: 10,
},
include_resumed: {
type: "boolean",
description: "Include handoffs that have already been resumed (default: true)",
default: true,
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
},
},
// ==================== Context Injection Tools ====================
get_startup_context: {
name: "get_startup_context",
description:
"Get relevant context to inject at the start of a new conversation. Combines recent handoffs, decisions, working memory, and file history based on the query or task description.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description:
"Description of what you're about to work on (e.g., 'authentication system', 'database optimization')",
},
max_tokens: {
type: "number",
description:
"Maximum tokens for context response (default: 2000). Helps stay within context limits.",
default: 2000,
},
sources: {
type: "array",
items: {
type: "string",
enum: ["history", "decisions", "memory", "handoffs"],
},
description:
"Which sources to include (default: all). Options: history, decisions, memory, handoffs",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
},
},
inject_relevant_context: {
name: "inject_relevant_context",
description:
"Analyze a message and automatically inject relevant historical context. Use at the start of a conversation to bring in context from past sessions.",
inputSchema: {
type: "object",
properties: {
message: {
type: "string",
description:
"The user's first message or task description to analyze for context injection",
},
max_tokens: {
type: "number",
description: "Maximum tokens for injected context (default: 1500)",
default: 1500,
},
sources: {
type: "array",
items: {
type: "string",
enum: ["history", "decisions", "memory", "handoffs"],
},
description: "Which sources to search (default: all)",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["message"],
},
},
// ==================== Phase 1: Tag Management Tools ====================
list_tags: {
name: "list_tags",
description:
"List all tags with usage statistics. Shows how many items use each tag and what types of items they're applied to.",
inputSchema: {
type: "object",
properties: {
project_path: {
type: "string",
description: "Filter to specific project (defaults to current)",
},
scope: {
type: "string",
enum: ["project", "global", "all"],
description: "Tag scope filter: project (current project only), global (project-independent tags), all (both)",
default: "all",
},
sort_by: {
type: "string",
enum: ["name", "usage_count", "last_used", "created"],
description: "Sort order for tags",
default: "usage_count",
},
include_unused: {
type: "boolean",
description: "Include tags with zero usage (default: false)",
default: false,
},
limit: {
type: "number",
description: "Maximum number of tags to return (default: 50)",
default: 50,
},
offset: {
type: "number",
description: "Skip N tags for pagination (default: 0)",
default: 0,
},
},
},
},
search_by_tags: {
name: "search_by_tags",
description:
"Find items across all entity types (memories, decisions, patterns, sessions, mistakes) by tag. Supports AND/OR matching modes.",
inputSchema: {
type: "object",
properties: {
tags: {
type: "array",
items: { type: "string" },
minItems: 1,
description: "Tags to search for",
},
match_mode: {
type: "string",
enum: ["all", "any"],
description: "Match mode: 'all' (AND - item must have all tags), 'any' (OR - item has at least one tag)",
default: "any",
},
item_types: {
type: "array",
items: {
type: "string",
enum: ["memory", "decision", "pattern", "session", "mistake"],
},
description: "Filter to specific item types (default: all types)",
},
scope: {
type: "string",
enum: ["project", "global", "all"],
description: "Search scope",
default: "all",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
limit: {
type: "number",
description: "Maximum number of results (default: 20)",
default: 20,
},
offset: {
type: "number",
description: "Skip N results for pagination (default: 0)",
default: 0,
},
},
required: ["tags"],
},
},
rename_tag: {
name: "rename_tag",
description:
"Rename a tag across all usages. If the new name already exists, items will be merged into the existing tag.",
inputSchema: {
type: "object",
properties: {
old_name: {
type: "string",
description: "Current tag name",
},
new_name: {
type: "string",
description: "New tag name",
},
scope: {
type: "string",
enum: ["project", "global"],
description: "Tag scope to rename within",
default: "project",
},
project_path: {
type: "string",
description: "Project path (required for project scope)",
},
},
required: ["old_name", "new_name"],
},
},
merge_tags: {
name: "merge_tags",
description:
"Combine multiple tags into one. Source tags will be deleted and all their items will be retagged with the target tag.",
inputSchema: {
type: "object",
properties: {
source_tags: {
type: "array",
items: { type: "string" },
minItems: 1,
description: "Tags to merge from (will be deleted)",
},
target_tag: {
type: "string",
description: "Tag to merge into (will be kept or created)",
},
scope: {
type: "string",
enum: ["project", "global"],
description: "Tag scope",
default: "project",
},
project_path: {
type: "string",
description: "Project path (required for project scope)",
},
},
required: ["source_tags", "target_tag"],
},
},
delete_tag: {
name: "delete_tag",
description:
"Remove a tag entirely. By default, refuses to delete tags with usages unless force=true.",
inputSchema: {
type: "object",
properties: {
name: {
type: "string",
description: "Tag name to delete",
},
scope: {
type: "string",
enum: ["project", "global"],
description: "Tag scope",
default: "project",
},
project_path: {
type: "string",
description: "Project path (required for project scope)",
},
force: {
type: "boolean",
description: "Delete even if tag has usages (default: false)",
default: false,
},
},
required: ["name"],
},
},
tag_item: {
name: "tag_item",
description:
"Add tags to any item type (memory, decision, pattern, session, mistake). Creates tags if they don't exist.",
inputSchema: {
type: "object",
properties: {
item_type: {
type: "string",
enum: ["memory", "decision", "pattern", "session", "mistake"],
description: "Type of item to tag",
},
item_id: {
type: ["number", "string"],
description: "Item ID (number) or key (string for memory)",
},
tags: {
type: "array",
items: { type: "string" },
minItems: 1,
description: "Tags to add",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["item_type", "item_id", "tags"],
},
},
untag_item: {
name: "untag_item",
description:
"Remove tags from an item. If no tags specified, removes all tags from the item.",
inputSchema: {
type: "object",
properties: {
item_type: {
type: "string",
enum: ["memory", "decision", "pattern", "session", "mistake"],
description: "Type of item to untag",
},
item_id: {
type: ["number", "string"],
description: "Item ID (number) or key (string for memory)",
},
tags: {
type: "array",
items: { type: "string" },
description: "Tags to remove (omit to remove all tags)",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["item_type", "item_id"],
},
},
// ==================== Phase 1: Memory Confidence Tools ====================
set_memory_confidence: {
name: "set_memory_confidence",
description:
"Update the confidence level of a memory. Use this when you've validated or invalidated information.",
inputSchema: {
type: "object",
properties: {
key: {
type: "string",
description: "Memory key to update",
},
confidence: {
type: "string",
enum: ["uncertain", "likely", "confirmed", "verified"],
description: "New confidence level",
},
evidence: {
type: "string",
description: "Why this confidence level (e.g., 'tested in production', 'user confirmed')",
},
verified_by: {
type: "string",
description: "Who/what verified (for confirmed/verified levels)",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["key", "confidence"],
},
},
set_memory_importance: {
name: "set_memory_importance",
description:
"Update the importance level of a memory. Critical memories are exempt from auto-cleanup.",
inputSchema: {
type: "object",
properties: {
key: {
type: "string",
description: "Memory key to update",
},
importance: {
type: "string",
enum: ["low", "normal", "high", "critical"],
description: "New importance level",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["key", "importance"],
},
},
pin_memory: {
name: "pin_memory",
description:
"Pin or unpin a memory. Pinned memories are protected from accidental deletion.",
inputSchema: {
type: "object",
properties: {
key: {
type: "string",
description: "Memory key to pin/unpin",
},
pinned: {
type: "boolean",
description: "Whether to pin (true) or unpin (false)",
default: true,
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["key"],
},
},
archive_memory: {
name: "archive_memory",
description:
"Archive a memory. Archived memories are hidden from normal searches but can be restored.",
inputSchema: {
type: "object",
properties: {
key: {
type: "string",
description: "Memory key to archive",
},
reason: {
type: "string",
description: "Why archiving this memory (e.g., 'outdated', 'no longer relevant')",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["key"],
},
},
unarchive_memory: {
name: "unarchive_memory",
description: "Restore an archived memory back to active status.",
inputSchema: {
type: "object",
properties: {
key: {
type: "string",
description: "Memory key to unarchive",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
required: ["key"],
},
},
search_memory_by_quality: {
name: "search_memory_by_quality",
description:
"Find memories filtered by confidence level, importance, pinned status, and archive status. Useful for finding high-confidence facts or reviewing low-confidence items.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Optional text search within filtered memories",
},
confidence: {
type: "array",
items: {
type: "string",
enum: ["uncertain", "likely", "confirmed", "verified"],
},
description: "Filter by confidence levels (returns memories matching any level)",
},
importance: {
type: "array",
items: {
type: "string",
enum: ["low", "normal", "high", "critical"],
},
description: "Filter by importance levels (returns memories matching any level)",
},
pinned_only: {
type: "boolean",
description: "Only return pinned memories (default: false)",
default: false,
},
include_archived: {
type: "boolean",
description: "Include archived memories (default: false)",
default: false,
},
scope: {
type: "string",
enum: ["project", "global"],
description: "Search scope",
default: "project",
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
sort_by: {
type: "string",
enum: ["relevance", "importance", "confidence", "recent"],
description: "Sort order",
default: "importance",
},
limit: {
type: "number",
description: "Maximum number of results (default: 20)",
default: 20,
},
offset: {
type: "number",
description: "Skip N results for pagination (default: 0)",
default: 0,
},
},
},
},
get_memory_stats: {
name: "get_memory_stats",
description:
"Get statistics about memories: counts by confidence level, importance, archived/pinned status, and tag distribution.",
inputSchema: {
type: "object",
properties: {
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
scope: {
type: "string",
enum: ["project", "global"],
description: "Stats scope",
default: "project",
},
},
},
},
// ==================== Phase 1: Cleanup/Maintenance Tools ====================
get_storage_stats: {
name: "get_storage_stats",
description:
"Get storage statistics: database size, record counts by type, fragmentation level, and recommendations.",
inputSchema: {
type: "object",
properties: {
project_path: {
type: "string",
description: "Specific project to analyze (omit for all projects)",
},
detailed: {
type: "boolean",
description: "Include per-table size breakdown (default: false)",
default: false,
},
},
},
},
find_stale_items: {
name: "find_stale_items",
description:
"Find items that haven't been accessed or updated recently. Useful for cleanup planning.",
inputSchema: {
type: "object",
properties: {
item_types: {
type: "array",
items: {
type: "string",
enum: ["memory", "decision", "pattern", "session"],
},
description: "Types of items to check (default: memory, decision, pattern)",
default: ["memory", "decision", "pattern"],
},
stale_threshold_days: {
type: "number",
description: "Days since last access/update to consider stale (default: 90)",
default: 90,
},
exclude_pinned: {
type: "boolean",
description: "Exclude pinned items (default: true)",
default: true,
},
exclude_important: {
type: "boolean",
description: "Exclude high/critical importance items (default: true)",
default: true,
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
limit: {
type: "number",
description: "Maximum items to return (default: 50)",
default: 50,
},
},
},
},
find_duplicates: {
name: "find_duplicates",
description:
"Find similar or duplicate items using semantic similarity. Returns groups of duplicates with recommendations on which to keep.",
inputSchema: {
type: "object",
properties: {
item_types: {
type: "array",
items: {
type: "string",
enum: ["memory", "decision", "pattern"],
},
description: "Types of items to check (default: memory, decision)",
default: ["memory", "decision"],
},
similarity_threshold: {
type: "number",
minimum: 0.5,
maximum: 1.0,
description: "Semantic similarity threshold (0.5-1.0, default: 0.85)",
default: 0.85,
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
limit: {
type: "number",
description: "Maximum duplicate groups to return (default: 20)",
default: 20,
},
},
},
},
merge_duplicates: {
name: "merge_duplicates",
description:
"Merge duplicate items into one. Optionally combines content and tags from merged items.",
inputSchema: {
type: "object",
properties: {
item_type: {
type: "string",
enum: ["memory", "decision", "pattern"],
description: "Type of items to merge",
},
keep_id: {
type: "number",
description: "ID of the item to keep",
},
merge_ids: {
type: "array",
items: { type: "number" },
description: "IDs of items to merge into keep_id (will be deleted)",
},
merge_strategy: {
type: "string",
enum: ["keep_content", "combine_content", "keep_newest"],
description: "How to handle content: keep_content (keep keep_id content), combine_content (merge all), keep_newest (use most recent)",
default: "keep_content",
},
merge_tags: {
type: "boolean",
description: "Combine tags from all items (default: true)",
default: true,
},
},
required: ["item_type", "keep_id", "merge_ids"],
},
},
cleanup_stale: {
name: "cleanup_stale",
description:
"Remove or archive stale items. Use preview mode first to see what would be affected.",
inputSchema: {
type: "object",
properties: {
item_types: {
type: "array",
items: { type: "string" },
description: "Types of items to clean up",
},
stale_threshold_days: {
type: "number",
description: "Days threshold (default: 90)",
default: 90,
},
action: {
type: "string",
enum: ["archive", "delete", "preview"],
description: "Action to take: preview (show what would happen), archive (soft remove), delete (permanent)",
default: "preview",
},
exclude_pinned: {
type: "boolean",
description: "Exclude pinned items (default: true)",
default: true,
},
exclude_important: {
type: "boolean",
description: "Exclude high/critical importance items (default: true)",
default: true,
},
max_items: {
type: "number",
description: "Safety limit on items to process (default: 100)",
default: 100,
},
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
},
},
vacuum_database: {
name: "vacuum_database",
description:
"Reclaim disk space and optimize the database. Run after bulk deletions.",
inputSchema: {
type: "object",
properties: {
analyze: {
type: "boolean",
description: "Run ANALYZE after VACUUM to update query planner statistics (default: true)",
default: true,
},
reindex: {
type: "boolean",
description: "Rebuild all indexes (default: false)",
default: false,
},
},
},
},
cleanup_orphans: {
name: "cleanup_orphans",
description:
"Find and optionally remove orphaned records (tags without items, embeddings without sources, etc.).",
inputSchema: {
type: "object",
properties: {
preview: {
type: "boolean",
description: "Only show what would be cleaned (default: true)",
default: true,
},
},
},
},
get_health_report: {
name: "get_health_report",
description:
"Run comprehensive health checks on the database and memory system. Returns overall health score and recommendations.",
inputSchema: {
type: "object",
properties: {
project_path: {
type: "string",
description: "Project path (defaults to current working directory)",
},
},
},
},
run_maintenance: {
name: "run_maintenance",
description:
"Run one or more maintenance tasks. Use preview mode to see what would happen.",
inputSchema: {
type: "object",
properties: {
tasks: {
type: "array",
items: {
type: "string",
enum: ["cleanup_stale", "cleanup_orphans", "vacuum", "find_duplicates", "health_report", "cleanup_expired"],
},
description: "Tasks to run",
},
options: {
type: "object",
description: "Task-specific options (e.g., stale_threshold_days for cleanup_stale)",
},
preview: {
type: "boolean",
description: "Preview mode - show what would happen without making changes (default: true)",
default: true,
},
},
required: ["tasks"],
},
},
get_maintenance_history: {
name: "get_maintenance_history",
description: "View history of past maintenance operations.",
inputSchema: {
type: "object",
properties: {
since: {
type: "number",
description: "Only show operations since this timestamp",
},
task_type: {
type: "string",
description: "Filter by task type",
},
limit: {
type: "number",
description: "Maximum records to return (default: 20)",
default: 20,
},
},
},
},
// ==================== Phase 9: Methodology & Research Tracking ====================
get_methodologies: {
name: "get_methodologies",
description: "Search for problem-solving methodologies used in past conversations. Shows how problems were approached, what steps were taken, tools used, and outcomes.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Search query for problem statements or approaches",
},
approach: {
type: "string",
enum: ["exploration", "research", "implementation", "debugging", "refactoring", "testing"],
description: "Filter by approach type",
},
outcome: {
type: "string",
enum: ["success", "partial", "failed", "ongoing"],
description: "Filter by outcome",
},
limit: {
type: "number",
description: "Maximum results (default: 10)",
default: 10,
},
},
required: ["query"],
},
},
get_research_findings: {
name: "get_research_findings",
description: "Search for discoveries and findings made during code exploration and research. Shows what was discovered, source of the finding, and relevance.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Search query for topics or discoveries",
},
source_type: {
type: "string",
enum: ["code", "documentation", "web", "experimentation", "user_input"],
description: "Filter by source type",
},
relevance: {
type: "string",
enum: ["high", "medium", "low"],
description: "Filter by relevance level",
},
confidence: {
type: "string",
enum: ["verified", "likely", "uncertain"],
description: "Filter by confidence level",
},
limit: {
type: "number",
description: "Maximum results (default: 10)",
default: 10,
},
},
required: ["query"],
},
},
get_solution_patterns: {
name: "get_solution_patterns",
description: "Search for reusable solution patterns from past problem-solving. Shows problem categories, solutions, and when to apply them.",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Search query for problems or solutions",
},
problem_category: {
type: "string",
enum: ["error-handling", "performance", "authentication", "database", "api-design", "testing", "refactoring", "configuration", "file-operations", "async-patterns", "general"],
description: "Filter by problem category",
},
effectiveness: {
type: "string",
enum: ["excellent", "good", "moderate", "poor"],
description: "Filter by effectiveness level",
},
technology: {
type: "string",
description: "Filter by technology/framework used",
},
limit: {
type: "number",
description: "Maximum results (default: 10)",
default: 10,
},
},
required: ["query"],
},
},
};
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
/**
* MCP Tool Handlers - Implementation of all 22 tools for the cccmemory MCP server.
*
* This class provides the implementation for all MCP (Model Context Protocol) tools
* that allow Claude to interact with conversation history and memory.
*
* Tools are organized into categories:
* - Indexing: index_conversations
* - Search: search_conversations, searchDecisions, search_mistakes
* - File Context: check_before_modify, get_file_evolution
* - History: get_tool_history, link_commits_to_conversations
* - Discovery: find_similar_sessions, get_requirements
* - Recall: recall_and_apply
* - Documentation: generate_documentation
* - Migration: discover_old_conversations, migrate_project
*
* @example
* ```typescript
* const handlers = new ToolHandlers(memory, db, '/path/to/projects');
* const result = await handlers.indexConversations({
* project_path: '/Users/me/my-project'
* });
* ```
*/
import { ConversationMemory } from "../ConversationMemory.js";
import type { SQLiteManager } from "../storage/SQLiteManager.js";
import { sanitizeForLike } from "../utils/sanitization.js";
import { getCanonicalProjectPath, getWorktreeInfo } from "../utils/worktree.js";
import type * as Types from "../types/ToolTypes.js";
import { DocumentationGenerator } from "../documentation/DocumentationGenerator.js";
import { ProjectMigration } from "../utils/ProjectMigration.js";
import { pathToProjectFolderName } from "../utils/sanitization.js";
import { DeletionService } from "../storage/DeletionService.js";
import { readdirSync } from "fs";
import { join, resolve } from "path";
import { safeJsonParse } from "../utils/safeJson.js";
/**
* Pagination Patterns:
*
* This codebase uses two different pagination patterns based on data source:
*
* 1. SQL-based pagination (fetch+1):
* - Fetch limit+1 records from database
* - hasMore = results.length > limit
* - Slice to limit if hasMore is true
* - Use case: Single-database SQL queries (searchMistakes, linkCommitsToConversations)
* - Advantage: Efficient, minimal data transfer
*
* 2. In-memory pagination (slice):
* - Fetch all needed results (or limit+offset)
* - Slice to get paginated subset: results.slice(offset, offset + limit)
* - has_more = offset + limit < results.length
* - Use case: Semantic search, cross-project aggregation
* - Advantage: Allows sorting/filtering before pagination
*
* Both patterns are correct and optimized for their respective use cases.
*/
/**
 * Parameter bag for generateEmbeddingsForIndexing.
 *
 * Items carry external (string) IDs; the *IdMap fields translate them to
 * the internal numeric database IDs used by the embedding index.
 */
interface EmbeddingIndexParams {
  // Parsed messages; entries without content are skipped during indexing.
  messages: Array<{ id: string; content?: string }>;
  decisions: Array<{
    id: string;
    decision_text: string;
    rationale?: string;
    context?: string | null;
  }>;
  mistakes: Array<{
    id: string;
    what_went_wrong: string;
    correction?: string | null;
    mistake_type: string;
  }>;
  // External id -> internal DB id lookups.
  messageIdMap: Map<string, number>;
  decisionIdMap: Map<string, number>;
  mistakeIdMap: Map<string, number>;
  // Structural subset of the SemanticSearch surface this helper needs.
  semanticSearch: {
    indexMessages: (msgs: Array<{ id: number; content: string }>, incremental: boolean) => Promise<void>;
    indexDecisions: (decs: Array<{ id: number; decision_text: string; rationale?: string; context?: string | null }>, incremental: boolean) => Promise<void>;
    indexMistakes: (msts: Array<{ id: number; what_went_wrong: string; correction?: string | null; mistake_type: string }>, incremental: boolean) => Promise<void>;
    indexMissingDecisionEmbeddings: () => Promise<number>;
    indexMissingMistakeEmbeddings: () => Promise<number>;
  };
  incremental: boolean;
  logLabel: string;
}

/**
 * Generate embeddings for messages, decisions, and mistakes.
 * Shared helper to avoid code duplication in indexAllProjects.
 *
 * Items whose external ID has no internal mapping are skipped, as are
 * messages without content. Failures are logged and swallowed so the
 * caller can fall back to full-text search.
 */
async function generateEmbeddingsForIndexing(params: EmbeddingIndexParams): Promise<void> {
  const { semanticSearch, incremental, logLabel } = params;
  try {
    // Messages: translate external -> internal IDs, dropping unmapped or
    // content-less entries.
    const messagesForEmbedding: Array<{ id: number; content: string }> = [];
    for (const message of params.messages) {
      const internalId = params.messageIdMap.get(message.id);
      if (internalId && message.content) {
        messagesForEmbedding.push({ id: internalId, content: message.content });
      }
    }
    // Decisions: same translation, carrying rationale/context along
    // (context is normalized to null when absent).
    const decisionsForEmbedding = params.decisions.flatMap((decision) => {
      const internalId = params.decisionIdMap.get(decision.id);
      if (!internalId) {
        return [];
      }
      return [{
        id: internalId,
        decision_text: decision.decision_text,
        rationale: decision.rationale,
        context: decision.context ?? null,
      }];
    });
    // Mistakes: same translation.
    const mistakesForEmbedding = params.mistakes.flatMap((mistake) => {
      const internalId = params.mistakeIdMap.get(mistake.id);
      if (!internalId) {
        return [];
      }
      return [{
        id: internalId,
        what_went_wrong: mistake.what_went_wrong,
        correction: mistake.correction ?? null,
        mistake_type: mistake.mistake_type,
      }];
    });
    // Index in a fixed order, then backfill any still-missing embeddings.
    await semanticSearch.indexMessages(messagesForEmbedding, incremental);
    await semanticSearch.indexDecisions(decisionsForEmbedding, incremental);
    await semanticSearch.indexMistakes(mistakesForEmbedding, incremental);
    await semanticSearch.indexMissingDecisionEmbeddings();
    await semanticSearch.indexMissingMistakeEmbeddings();
    console.error(`✓ Generated embeddings for ${logLabel}`);
  } catch (embedError) {
    console.error(`⚠️ Embedding generation failed for ${logLabel}:`, (embedError as Error).message);
    console.error("   FTS fallback will be used for search");
  }
}
/**
* Tool handlers for the cccmemory MCP server.
*
* Provides methods for indexing, searching, and managing conversation history.
*/
export class ToolHandlers {
  // Helper for discovering/migrating conversations from legacy project folders.
  private migration: ProjectMigration;
  // Epoch ms of the last auto-index trigger; 0 means it has never run.
  private lastAutoIndex: number = 0;
  // In-flight auto-index run, kept so concurrent triggers can coalesce on it.
  private autoIndexPromise: Promise<void> | null = null;
  // Minimum delay between automatic incremental index runs.
  private readonly AUTO_INDEX_COOLDOWN = 60000; // 1 minute
  /**
   * Create a new ToolHandlers instance.
   *
   * @param memory - ConversationMemory instance for core operations
   * @param db - SQLiteManager for database access
   * @param projectsDir - Optional directory for storing project data
   */
  constructor(private memory: ConversationMemory, private db: SQLiteManager, projectsDir?: string) {
    this.migration = new ProjectMigration(db, projectsDir);
  }
private resolveProjectPath(input?: string): string {
const rawPath = input || process.cwd();
return getCanonicalProjectPath(rawPath).canonicalPath;
}
private resolveOptionalProjectPath(input?: string): string | undefined {
if (!input) {
return undefined;
}
return this.resolveProjectPath(input);
}
private inferProjectPathFromMessages(messages: Array<{ cwd?: string }>): string | null {
const counts = new Map<string, number>();
for (const message of messages) {
const cwd = message.cwd;
if (!cwd || typeof cwd !== "string") {
continue;
}
const trimmed = cwd.trim();
if (!trimmed) {
continue;
}
counts.set(trimmed, (counts.get(trimmed) || 0) + 1);
}
let bestPath: string | null = null;
let bestCount = 0;
for (const [path, count] of counts) {
if (count > bestCount) {
bestCount = count;
bestPath = path;
}
}
return bestPath;
}
  /**
   * Automatically run incremental indexing if cooldown has expired.
   * Uses a mutex (autoIndexPromise) to coalesce concurrent calls and prevent stampede.
   * This ensures search results include recent conversations without
   * requiring manual indexing.
   *
   * Never throws: indexing failures are logged and swallowed so search
   * still works against the existing index.
   */
  private async maybeAutoIndex(): Promise<void> {
    // Escape hatches: disabled under tests and via an explicit env flag.
    if (process.env.NODE_ENV === 'test' || process.env.CCCMEMORY_DISABLE_AUTO_INDEX === '1') {
      return;
    }
    const now = Date.now();
    // If indexing is already in progress, wait for it
    // (callers coalesce onto the same run instead of starting another).
    if (this.autoIndexPromise) {
      await this.autoIndexPromise;
      return;
    }
    // Check cooldown
    if (now - this.lastAutoIndex <= this.AUTO_INDEX_COOLDOWN) {
      return;
    }
    // Update timestamp immediately to prevent concurrent triggers
    // (set before awaiting, so a failed run also respects the cooldown).
    this.lastAutoIndex = now;
    try {
      // Create the indexing promise and store it for coalescing
      this.autoIndexPromise = this.indexAllProjects({ incremental: true }).then(() => {});
      await this.autoIndexPromise;
    } catch (error) {
      // Log but don't fail - search should still work with existing index
      console.error('Auto-indexing failed:', error);
    } finally {
      // Clear the mutex regardless of outcome so later triggers can run.
      this.autoIndexPromise = null;
    }
  }
/**
* Index conversation history for a project.
*
* Parses conversation files from Claude Code's conversation history, extracts
* decisions, mistakes, and requirements, links git commits, and generates
* semantic embeddings for search.
*
* @param args - Indexing arguments:
* - `project_path`: Path to the project (defaults to cwd)
* - `session_id`: Optional specific session to index
* - `include_thinking`: Include thinking blocks (default: false)
* - `enable_git`: Enable git integration (default: true)
* - `exclude_mcp_conversations`: Exclude MCP tool conversations (default: 'self-only')
* - `exclude_mcp_servers`: List of specific MCP servers to exclude
*
* @returns Result containing:
* - `success`: Whether indexing succeeded
* - `stats`: Counts of conversations, messages, decisions, etc.
* - `indexed_folders`: List of folders that were indexed
* - `database_path`: Path to the SQLite database
* - `embeddings_generated`: Whether embeddings were created
* - `embedding_error`: Error message if embeddings failed
* - `message`: Human-readable status message
*
* @example
* ```typescript
* const result = await handlers.indexConversations({
* project_path: '/Users/me/my-project',
* enable_git: true,
* exclude_mcp_conversations: 'self-only'
* });
* console.error(result.message); // "Indexed 5 conversation(s) with 245 messages..."
* ```
*/
  async indexConversations(args: Record<string, unknown>): Promise<Types.IndexConversationsResponse> {
    const typedArgs = args as Types.IndexConversationsArgs;
    // Resolve worktree paths to the canonical project path so all git
    // worktrees of a repository share a single index.
    const rawProjectPath = typedArgs.project_path || process.cwd();
    const { canonicalPath } = getWorktreeInfo(rawProjectPath);
    const projectPath = canonicalPath;
    const sessionId = typedArgs.session_id;
    const includeThinking = typedArgs.include_thinking ?? false;
    const enableGit = typedArgs.enable_git ?? true;
    const excludeMcpConversations = typedArgs.exclude_mcp_conversations ?? 'self-only';
    const excludeMcpServers = typedArgs.exclude_mcp_servers;
    const { GlobalIndex } = await import("../storage/GlobalIndex.js");
    const globalIndex = new GlobalIndex(this.db);
    try {
      // Callers may pass either an external session id or an internal numeric
      // conversation id; normalize to the external id when a match exists.
      let resolvedSessionId = sessionId;
      if (sessionId) {
        const numericId = Number(sessionId);
        const row = this.db.prepare(`
          SELECT external_id
          FROM conversations
          WHERE project_path = ?
            AND source_type = 'claude-code'
            AND (external_id = ? OR id = ?)
          LIMIT 1
        `).get(projectPath, sessionId, Number.isFinite(numericId) ? numericId : -1) as
          | { external_id: string }
          | undefined;
        if (row?.external_id) {
          resolvedSessionId = row.external_id;
        }
      }
      // Only apply the incremental cutoff when indexing ALL sessions; a
      // specifically requested session is always (re)indexed.
      let lastIndexedMs: number | undefined;
      if (!resolvedSessionId) {
        const existingProject = globalIndex.getProject(projectPath, "claude-code");
        if (existingProject) {
          lastIndexedMs = existingProject.last_indexed;
        }
      }
      const indexResult = await this.memory.indexConversations({
        projectPath,
        sessionId: resolvedSessionId,
        includeThinking,
        enableGitIntegration: enableGit,
        excludeMcpConversations,
        excludeMcpServers,
        lastIndexedMs,
      });
      // Refresh post-index stats and register them in the global index so
      // cross-project tools see up-to-date counts.
      const { ConversationStorage } = await import("../storage/ConversationStorage.js");
      const storage = new ConversationStorage(this.db);
      const stats = storage.getStatsForProject(projectPath, "claude-code");
      globalIndex.registerProject({
        project_path: projectPath,
        source_type: "claude-code",
        message_count: stats.messages.count,
        conversation_count: stats.conversations.count,
        decision_count: stats.decisions.count,
        mistake_count: stats.mistakes.count,
        metadata: {
          indexed_folders: indexResult.indexed_folders || [],
        },
      });
      // Show "input -> resolved" when a numeric id was normalized above.
      const sessionLabel =
        sessionId && resolvedSessionId && sessionId !== resolvedSessionId
          ? `${sessionId} -> ${resolvedSessionId}`
          : sessionId;
      const sessionInfo = sessionLabel ? ` (session: ${sessionLabel})` : ' (all sessions)';
      let message = `Indexed ${stats.conversations.count} conversation(s) with ${stats.messages.count} messages${sessionInfo}`;
      // Add indexed folders info
      if (indexResult.indexed_folders && indexResult.indexed_folders.length > 0) {
        message += `\n📁 Indexed from: ${indexResult.indexed_folders.join(', ')}`;
      }
      // Add database location info
      if (indexResult.database_path) {
        message += `\n💾 Database: ${indexResult.database_path}`;
      }
      // Add embedding status to message
      if (indexResult.embeddings_generated) {
        message += '\n✅ Semantic search enabled (embeddings generated)';
      } else if (indexResult.embedding_error) {
        message += `\n⚠️ Semantic search unavailable: ${indexResult.embedding_error}`;
        message += '\n   Falling back to full-text search';
      }
      return {
        success: true,
        project_path: projectPath,
        indexed_folders: indexResult.indexed_folders,
        database_path: indexResult.database_path,
        stats,
        embeddings_generated: indexResult.embeddings_generated,
        embedding_error: indexResult.embedding_error,
        message,
      };
    } finally {
      // Always release the global index handle, even on indexing failure.
      globalIndex.close();
    }
  }
/**
* Search conversation history using natural language queries.
*
* Uses semantic search with embeddings if available, otherwise falls back
* to full-text search. Returns relevant messages with context and similarity scores.
*
* @param args - Search arguments:
* - `query`: Natural language search query (required)
* - `limit`: Maximum number of results (default: 10)
* - `date_range`: Optional [start_timestamp, end_timestamp] filter
*
* @returns Search results containing:
* - `query`: The search query used
* - `results`: Array of matching messages with:
* - `conversation_id`: Conversation containing the message
* - `message_id`: Message identifier
* - `timestamp`: When the message was created
* - `similarity`: Relevance score (0-1)
* - `snippet`: Text excerpt from the message
* - `git_branch`: Git branch at the time
* - `message_type`: Type of message
* - `role`: Message role (user/assistant)
* - `total_found`: Number of results returned
*
* @example
* ```typescript
* const result = await handlers.searchConversations({
* query: 'authentication bug fix',
* limit: 5
* });
* result.results.forEach(r => {
* console.error(`${r.similarity.toFixed(2)}: ${r.snippet}`);
* });
* ```
*/
  async searchConversations(args: Record<string, unknown>): Promise<Types.SearchConversationsResponse> {
    // Opportunistically refresh the index (cooldown-limited, never throws).
    await this.maybeAutoIndex();
    const typedArgs = args as unknown as Types.SearchConversationsArgs;
    const { query, limit = 10, offset = 0, date_range, scope = 'all', conversation_id } = typedArgs;
    // Handle global scope by delegating to searchAllConversations
    if (scope === 'global') {
      const globalResponse = await this.searchAllConversations({
        query,
        limit,
        offset,
        date_range,
        source_type: "all",
      });
      // Project the global result shape down to the per-project SearchResult shape.
      const results: Types.SearchResult[] = globalResponse.results.map((result) => ({
        conversation_id: result.conversation_id,
        message_id: result.message_id,
        timestamp: result.timestamp,
        similarity: result.similarity,
        snippet: result.snippet,
        git_branch: result.git_branch,
        message_type: result.message_type,
        role: result.role,
      }));
      return {
        query,
        results,
        total_found: globalResponse.total_found,
        has_more: globalResponse.has_more,
        offset: globalResponse.offset,
        scope: 'global',
      };
    }
    // Handle current session scope
    if (scope === 'current') {
      if (!conversation_id) {
        throw new Error("conversation_id is required when scope='current'");
      }
      // Look up external_id from internal conversation_id for consistent filtering
      // conversation_id is documented as "internal conversation id from list_recent_sessions.id"
      const convRow = this.db.prepare(
        "SELECT external_id FROM conversations WHERE id = ?"
      ).get(conversation_id) as { external_id: string } | undefined;
      if (!convRow) {
        throw new Error(`Conversation with id '${conversation_id}' not found`);
      }
      const targetExternalId = convRow.external_id;
      // Overfetch to account for post-query filtering (conversation_id, date_range)
      // Use 4x multiplier to ensure we have enough results after filtering
      const overfetchMultiplier = 4;
      const fetchLimit = (limit + offset) * overfetchMultiplier;
      const results = await this.memory.search(query, fetchLimit);
      const filteredResults = results.filter(r => r.conversation.id === targetExternalId);
      const dateFilteredResults = date_range
        ? filteredResults.filter(r => {
            const timestamp = r.message.timestamp;
            return timestamp >= date_range[0] && timestamp <= date_range[1];
          })
        : filteredResults;
      const paginatedResults = dateFilteredResults.slice(offset, offset + limit);
      return {
        query,
        results: paginatedResults.map((r) => ({
          conversation_id: r.conversation.id,
          message_id: r.message.id,
          timestamp: new Date(r.message.timestamp).toISOString(),
          similarity: r.similarity,
          snippet: r.snippet,
          git_branch: r.conversation.git_branch,
          message_type: r.message.message_type,
          role: r.message.role,
        })),
        // total_found is the size of the returned page, not the overall match
        // count (consistent with the documented contract above).
        total_found: paginatedResults.length,
        has_more: offset + limit < dateFilteredResults.length,
        offset,
        scope: 'current',
      };
    }
    // Handle 'all' scope (default) - all sessions in current project
    // NOTE(review): unlike 'current' scope there is no overfetch here, so a
    // date_range filter can under-fill the page — confirm this is intended.
    const results = await this.memory.search(query, limit + offset);
    const filteredResults = date_range
      ? results.filter(r => {
          const timestamp = r.message.timestamp;
          return timestamp >= date_range[0] && timestamp <= date_range[1];
        })
      : results;
    const paginatedResults = filteredResults.slice(offset, offset + limit);
    return {
      query,
      results: paginatedResults.map((r) => ({
        conversation_id: r.conversation.id,
        message_id: r.message.id,
        timestamp: new Date(r.message.timestamp).toISOString(),
        similarity: r.similarity,
        snippet: r.snippet,
        git_branch: r.conversation.git_branch,
        message_type: r.message.message_type,
        role: r.message.role,
      })),
      total_found: paginatedResults.length,
      has_more: offset + limit < filteredResults.length,
      offset,
      scope: 'all',
    };
  }
  /**
   * Search conversations scoped to a project, optionally including Codex sessions.
   *
   * Results are fetched once via semantic search (with FTS fallback when no
   * embedder is available), then filtered in memory by date range, source
   * type, and project path (including any git worktrees of the project).
   *
   * @param args - Expects SearchProjectConversationsArgs: query, optional
   * project_path (defaults to cwd), limit/offset, date_range, and
   * include_claude_code / include_codex source toggles.
   * @returns Paginated, similarity-sorted results; total_found is the size
   * of the returned page.
   */
  async searchProjectConversations(
    args: Record<string, unknown>
  ): Promise<Types.SearchProjectConversationsResponse> {
    // Opportunistically refresh the index (cooldown-limited, never throws).
    await this.maybeAutoIndex();
    const { SemanticSearch } = await import("../search/SemanticSearch.js");
    const { getEmbeddingGenerator } = await import("../embeddings/EmbeddingGenerator.js");
    const typedArgs = args as unknown as Types.SearchProjectConversationsArgs;
    const {
      query,
      project_path,
      limit = 10,
      offset = 0,
      date_range,
      include_claude_code = true,
      include_codex = true,
    } = typedArgs;
    const rawProjectPath = project_path || process.cwd();
    // Accept hits recorded under the canonical path or any of its worktrees.
    const { canonicalPath, worktreePaths } = getWorktreeInfo(rawProjectPath);
    const allowedPaths = new Set<string>([canonicalPath, ...worktreePaths]);
    // Memoize canonicalization of result paths — it may hit the filesystem.
    const canonicalCache = new Map<string, string>();
    const matchesProjectPath = (path?: string): boolean => {
      if (!path) {
        return false;
      }
      if (allowedPaths.has(path)) {
        return true;
      }
      const cached = canonicalCache.get(path);
      if (cached) {
        return allowedPaths.has(cached);
      }
      const { canonicalPath: resolved } = getCanonicalProjectPath(path);
      canonicalCache.set(path, resolved);
      return allowedPaths.has(resolved);
    };
    // Pre-compute embedding once
    let queryEmbedding: Float32Array | undefined;
    try {
      const embedder = await getEmbeddingGenerator();
      if (embedder.isAvailable()) {
        queryEmbedding = await embedder.embed(query);
      }
    } catch (_embeddingError) {
      // Fall back to FTS
    }
    const allowedSources = new Set<string>();
    if (include_claude_code) {
      allowedSources.add("claude-code");
    }
    if (include_codex) {
      allowedSources.add("codex");
    }
    const semanticSearch = new SemanticSearch(this.db);
    // Overfetch (+50) to leave headroom for the post-query filters below.
    const localResults = await semanticSearch.searchConversations(
      query,
      limit + offset + 50,
      undefined,
      queryEmbedding
    );
    const filteredResults = localResults.filter((r) => {
      if (date_range) {
        const timestamp = r.message.timestamp;
        if (timestamp < date_range[0] || timestamp > date_range[1]) {
          return false;
        }
      }
      // Rows without a source_type are legacy claude-code entries.
      const sourceType = r.conversation.source_type || "claude-code";
      if (!allowedSources.has(sourceType)) {
        return false;
      }
      return matchesProjectPath(r.conversation.project_path);
    });
    const results: Types.SearchProjectResult[] = filteredResults.map((result) => ({
      conversation_id: result.conversation.id,
      message_id: result.message.id,
      timestamp: new Date(result.message.timestamp).toISOString(),
      similarity: result.similarity,
      snippet: result.snippet,
      git_branch: result.conversation.git_branch,
      message_type: result.message.message_type,
      role: result.message.role,
      project_path: result.conversation.project_path,
      source_type: (result.conversation.source_type || "claude-code") as "claude-code" | "codex",
    }));
    // Sort by similarity before paginating (in-memory pagination pattern).
    results.sort((a, b) => b.similarity - a.similarity);
    const paginatedResults = results.slice(offset, offset + limit);
    return {
      query,
      project_path: canonicalPath,
      results: paginatedResults,
      total_found: paginatedResults.length,
      has_more: offset + limit < results.length,
      offset,
      include_claude_code,
      include_codex,
    };
  }
/**
 * Find decisions made about a specific topic, file, or component.
 *
 * Searches through extracted decisions to find relevant architectural choices,
 * technical decisions, and their rationale. Shows alternatives considered and
 * rejected approaches.
 *
 * @param args - Decision search arguments:
 *   - `query`: Topic or keyword to search for (required)
 *   - `file_path`: Optional filter for decisions related to a specific file
 *   - `limit`: Maximum number of results (default: 10)
 *   - `offset`: Skip N results for pagination (default: 0)
 *   - `scope`: 'all' (default), 'global' (cross-project via getAllDecisions),
 *     or 'current' (restrict to one conversation)
 *   - `conversation_id`: Required when `scope` is 'current'
 *
 * @returns Decision search results containing:
 *   - `query`: The search query used
 *   - `file_path`: File filter if applied
 *   - `decisions`: Array of matching decisions with:
 *     - `decision_id`: Decision identifier
 *     - `decision_text`: The decision that was made
 *     - `rationale`: Why this decision was made
 *     - `alternatives_considered`: Other options that were considered
 *     - `rejected_reasons`: Why alternatives were rejected
 *     - `context`: Context in which the decision was made
 *     - `related_files`: Files affected by this decision
 *     - `related_commits`: Git commits implementing this decision
 *     - `timestamp`: When the decision was made
 *     - `similarity`: Relevance score
 *   - `total_found`: Number of decisions returned in this page
 *   - `has_more`: Whether more results exist beyond this page
 *   - `offset`: Current offset position
 *   - `scope`: Scope that was applied
 *
 * @throws Error when `scope` is 'current' and `conversation_id` is missing,
 *   or refers to a conversation that does not exist.
 *
 * @example
 * ```typescript
 * const result = await handlers.getDecisions({
 *   query: 'database',
 *   file_path: 'src/storage/SQLiteManager.ts',
 *   limit: 5
 * });
 * result.decisions.forEach(d => {
 *   console.error(`Decision: ${d.decision_text}`);
 *   console.error(`Rationale: ${d.rationale}`);
 * });
 * ```
 */
async getDecisions(args: Record<string, unknown>): Promise<Types.GetDecisionsResponse> {
await this.maybeAutoIndex();
const typedArgs = args as unknown as Types.GetDecisionsArgs;
const { query, file_path, limit = 10, offset = 0, scope = 'all', conversation_id } = typedArgs;
// Handle global scope by delegating to the cross-project search.
// NOTE(review): this branch passes `d.timestamp` through unchanged, while the
// local branch below converts epoch ms to ISO — presumably getAllDecisions
// already returns formatted timestamps; confirm.
if (scope === 'global') {
const globalResponse = await this.getAllDecisions({ query, file_path, limit, offset, source_type: 'all' });
return {
query,
file_path,
decisions: globalResponse.decisions.map(d => ({
decision_id: d.decision_id,
decision_text: d.decision_text,
rationale: d.rationale,
alternatives_considered: d.alternatives_considered,
rejected_reasons: d.rejected_reasons,
context: d.context,
related_files: d.related_files,
related_commits: d.related_commits,
timestamp: d.timestamp,
similarity: d.similarity,
})),
total_found: globalResponse.total_found,
has_more: globalResponse.has_more,
offset: globalResponse.offset,
scope: 'global',
};
}
// Overfetch to account for post-query filtering (file_path, conversation_id)
// Use 4x multiplier to ensure we have enough results after filtering
const overfetchMultiplier = (file_path || scope === 'current') ? 4 : 1;
const fetchLimit = (limit + offset) * overfetchMultiplier;
const results = await this.memory.searchDecisions(query, fetchLimit);
// Filter by file if specified (exact membership in the decision's related_files)
let filteredResults = results;
if (file_path) {
filteredResults = results.filter((r) =>
r.decision.related_files.includes(file_path)
);
}
// Filter by conversation_id if scope is 'current'
if (scope === 'current') {
if (!conversation_id) {
throw new Error("conversation_id is required when scope='current'");
}
// Look up external_id from internal conversation_id for consistent filtering
// (decisions store the conversation's external_id, not the internal row id)
const convRow = this.db.prepare(
"SELECT external_id FROM conversations WHERE id = ?"
).get(conversation_id) as { external_id: string } | undefined;
if (!convRow) {
throw new Error(`Conversation with id '${conversation_id}' not found`);
}
const targetExternalId = convRow.external_id;
filteredResults = filteredResults.filter((r) => r.decision.conversation_id === targetExternalId);
}
const paginatedResults = filteredResults.slice(offset, offset + limit);
// NOTE(review): has_more is computed against the overfetched candidate set
// (at most fetchLimit rows), so it can read false even when more matches
// exist in the underlying store — confirm this is acceptable pagination UX.
return {
query,
file_path,
decisions: paginatedResults.map((r) => ({
decision_id: r.decision.id,
decision_text: r.decision.decision_text,
rationale: r.decision.rationale,
alternatives_considered: r.decision.alternatives_considered,
rejected_reasons: r.decision.rejected_reasons,
context: r.decision.context,
related_files: r.decision.related_files,
related_commits: r.decision.related_commits,
timestamp: new Date(r.decision.timestamp).toISOString(),
similarity: r.similarity,
})),
total_found: paginatedResults.length,
has_more: offset + limit < filteredResults.length,
offset,
scope,
};
}
/**
* Check important context before modifying a file.
*
* Shows recent changes, related decisions, commits, and past mistakes to avoid
* when working on a file. Use this before making significant changes to understand
* the file's history and context.
*
* @param args - Check arguments:
* - `file_path`: Path to the file you want to modify (required)
*
* @returns Context information containing:
* - `file_path`: The file being checked
* - `warning`: Warning message if important context found
* - `recent_changes`: Recent edits and commits to this file
* - `edits`: Recent file edits with timestamps and conversation IDs
* - `commits`: Recent git commits affecting this file
* - `related_decisions`: Decisions that affect this file
* - `mistakes_to_avoid`: Past mistakes related to this file
*
* @example
* ```typescript
* const context = await handlers.checkBeforeModify({
* file_path: 'src/storage/SQLiteManager.ts'
* });
* console.error(context.warning);
* console.error(`${context.related_decisions.length} decisions affect this file`);
* console.error(`${context.mistakes_to_avoid.length} mistakes to avoid`);
* ```
*/
async checkBeforeModify(args: Record<string, unknown>): Promise<Types.CheckBeforeModifyResponse> {
const typedArgs = args as unknown as Types.CheckBeforeModifyArgs;
const { file_path } = typedArgs;
// Validate required parameter
if (!file_path || typeof file_path !== 'string' || file_path.trim() === '') {
throw new Error("file_path is required and must be a non-empty string");
}
const timeline = this.memory.getFileTimeline(file_path);
// Get recent mistakes affecting this file
const sanitized = sanitizeForLike(file_path);
const mistakes = this.db
.prepare(
"SELECT * FROM mistakes WHERE files_affected LIKE ? ESCAPE '\\' ORDER BY timestamp DESC LIMIT 5"
)
.all(`%"${sanitized}"%`) as Types.MistakeRow[];
return {
file_path,
warning: timeline.edits.length > 0 || timeline.decisions.length > 0
? "⚠️ Important context found for this file"
: "No significant history found",
recent_changes: {
edits: timeline.edits.slice(0, 5).map((e: { snapshot_timestamp: number; conversation_id: string }) => ({
timestamp: new Date(e.snapshot_timestamp).toISOString(),
conversation_id: e.conversation_id,
})),
commits: timeline.commits.slice(0, 5).map((c: { hash: string; message: string; timestamp: number }) => ({
hash: c.hash.substring(0, 7),
message: c.message,
timestamp: new Date(c.timestamp).toISOString(),
})),
},
related_decisions: timeline.decisions.slice(0, 3).map((d: { decision_text: string; rationale?: string; timestamp: number }) => ({
decision_text: d.decision_text,
rationale: d.rationale,
timestamp: new Date(d.timestamp).toISOString(),
})),
mistakes_to_avoid: mistakes.map((m) => ({
what_went_wrong: m.what_went_wrong,
correction: m.correction,
mistake_type: m.mistake_type,
})),
};
}
/**
 * Show complete timeline of changes to a file.
 *
 * Merges file edits, git commits, and related decisions into one
 * chronological stream (most recent first) with offset/limit pagination.
 *
 * @param args - Evolution arguments:
 *   - `file_path`: Path to the file (required)
 *   - `include_decisions`: Include related decisions (default: true)
 *   - `include_commits`: Include git commits (default: true)
 *   - `limit`: Maximum events per page (default: 50)
 *   - `offset`: Skip N events for pagination (default: 0)
 *
 * @returns `file_path`, `total_edits` (all edits, unpaginated), the paginated
 *   `timeline` of events (`type` is 'edit' | 'commit' | 'decision' with
 *   type-specific `data`), and `has_more`.
 */
async getFileEvolution(args: Record<string, unknown>): Promise<Types.GetFileEvolutionResponse> {
  const { file_path, include_decisions = true, include_commits = true, limit = 50, offset = 0 } =
    args as unknown as Types.GetFileEvolutionArgs;

  const timeline = this.memory.getFileTimeline(file_path);

  // Edits always contribute events.
  const editEvents: Types.TimelineEvent[] = timeline.edits.map(
    (edit: { snapshot_timestamp: number; conversation_id: string; backup_version?: number }) => ({
      type: "edit",
      timestamp: new Date(edit.snapshot_timestamp).toISOString(),
      data: {
        conversation_id: edit.conversation_id,
        backup_version: edit.backup_version,
      },
    })
  );

  // Commits and decisions are opt-out via the include_* flags.
  const commitEvents: Types.TimelineEvent[] = include_commits
    ? timeline.commits.map(
        (commit: { timestamp: number; hash: string; message: string; author?: string }) => ({
          type: "commit",
          timestamp: new Date(commit.timestamp).toISOString(),
          data: {
            hash: commit.hash.substring(0, 7),
            message: commit.message,
            author: commit.author,
          },
        })
      )
    : [];

  const decisionEvents: Types.TimelineEvent[] = include_decisions
    ? timeline.decisions.map(
        (decision: { timestamp: number; decision_text: string; rationale?: string }) => ({
          type: "decision",
          timestamp: new Date(decision.timestamp).toISOString(),
          data: {
            decision_text: decision.decision_text,
            rationale: decision.rationale,
          },
        })
      )
    : [];

  // Most recent first; the ISO strings parse back to the same instant they encode.
  const events = [...editEvents, ...commitEvents, ...decisionEvents].sort(
    (a, b) => new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime()
  );

  return {
    file_path,
    total_edits: timeline.edits.length,
    timeline: events.slice(offset, offset + limit),
    has_more: offset + limit < events.length,
  };
}
/**
 * Link git commits to the conversations where they were made or discussed.
 *
 * Finds git commits associated with specific conversations, showing which
 * code changes were made during which conversations. Helps answer "WHY was
 * this code changed?"
 *
 * @param args - Link arguments:
 *   - `query`: Optional search query for commit messages
 *   - `conversation_id`: Optional filter for a specific conversation
 *   - `limit`: Maximum number of commits (default: 20)
 *   - `offset`: Skip N commits for pagination (default: 0)
 *   - `scope`: 'all' (default) or 'current'; 'global' is rejected because git
 *     commits are project-specific
 *
 * @returns Commit links containing `query`, `conversation_id`, `commits`
 *   (short + full hash, message, author, ISO timestamp, branch,
 *   files_changed, owning conversation's external id), `total_found`,
 *   `has_more`, `offset`, and `scope`.
 *
 * @throws Error for scope='global', for scope='current' without a
 *   `conversation_id`, or when the conversation cannot be found.
 */
async linkCommitsToConversations(args: Record<string, unknown>): Promise<Types.LinkCommitsToConversationsResponse> {
  const typedArgs = args as Types.LinkCommitsToConversationsArgs;
  const { query, conversation_id, limit = 20, offset = 0, scope = 'all' } = typedArgs;

  // Global scope not supported for git commits (project-specific)
  if (scope === 'global') {
    throw new Error("Global scope is not supported for linkCommitsToConversations (git commits are project-specific)");
  }

  let sql = `
    SELECT gc.*, c.external_id as conversation_external_id
    FROM git_commits gc
    LEFT JOIN conversations c ON gc.conversation_id = c.id
    WHERE 1=1
  `;
  const params: (string | number)[] = [];

  if (conversation_id || scope === 'current') {
    // scope='current' demands an explicit conversation to filter by.
    if (!conversation_id) {
      throw new Error("conversation_id is required when scope='current'");
    }
    // Look up external_id from internal conversation_id for consistent filtering
    const convRow = this.db.prepare(
      "SELECT external_id FROM conversations WHERE id = ?"
    ).get(conversation_id) as { external_id: string } | undefined;
    if (!convRow) {
      throw new Error(`Conversation with id '${conversation_id}' not found`);
    }
    sql += " AND c.external_id = ?";
    params.push(convRow.external_id);
  }

  if (query) {
    // Qualify the column: with two joined tables, bare column names are
    // fragile if either schema gains a `message` column later.
    sql += " AND gc.message LIKE ? ESCAPE '\\'";
    params.push(`%${sanitizeForLike(query)}%`);
  }

  sql += ` ORDER BY gc.timestamp DESC LIMIT ? OFFSET ?`;
  params.push(limit + 1); // Fetch one extra row to determine has_more
  params.push(offset);

  const commits = this.db
    .prepare(sql)
    .all(...params) as Array<Types.GitCommitRow & { conversation_external_id?: string | null }>;
  const hasMore = commits.length > limit;
  const results = hasMore ? commits.slice(0, limit) : commits;

  return {
    query,
    conversation_id,
    commits: results.map((c) => ({
      hash: c.hash.substring(0, 7),
      full_hash: c.hash,
      message: c.message,
      author: c.author,
      timestamp: new Date(c.timestamp).toISOString(),
      branch: c.branch,
      files_changed: safeJsonParse<string[]>(c.files_changed, []),
      conversation_id: c.conversation_external_id ?? undefined,
    })),
    total_found: results.length,
    has_more: hasMore,
    offset,
    scope,
  };
}
/**
 * Find past mistakes to avoid repeating them.
 *
 * Tries semantic (embedding) search first for better relevance; if that fails
 * or returns nothing, falls back to a LIKE substring match on
 * `what_went_wrong`.
 *
 * @param args - Mistake search arguments:
 *   - `query`: Search query for mistakes (required)
 *   - `mistake_type`: Optional filter by type (logic_error, wrong_approach, misunderstanding, tool_error, syntax_error)
 *   - `limit`: Maximum number of results (default: 10)
 *   - `offset`: Skip N results for pagination (default: 0)
 *   - `scope`: 'all' (default), 'global' (cross-project), or 'current' (one conversation)
 *   - `conversation_id`: Required when `scope` is 'current'
 *
 * @returns Mistake search results containing:
 *   - `query`: Search query used
 *   - `mistake_type`: Type filter if applied
 *   - `mistakes`: Array of matching mistakes with:
 *     - `mistake_id`: Mistake identifier
 *     - `mistake_type`: Type of mistake
 *     - `what_went_wrong`: Description of the mistake
 *     - `correction`: How it was fixed
 *     - `user_correction_message`: User's correction message if available
 *     - `files_affected`: List of files involved
 *     - `timestamp`: When the mistake occurred
 *   - `total_found`: Number of mistakes returned in this page
 *   - `has_more`: Whether more results exist beyond this page
 *   - `offset`: Current offset position
 *   - `scope`: Scope that was applied
 *
 * @throws Error when `scope` is 'current' and `conversation_id` is missing,
 *   or refers to a conversation that does not exist.
 *
 * @example
 * ```typescript
 * const mistakes = await handlers.searchMistakes({
 *   query: 'database transaction',
 *   mistake_type: 'logic_error',
 *   limit: 5
 * });
 * mistakes.mistakes.forEach(m => {
 *   console.error(`${m.mistake_type}: ${m.what_went_wrong}`);
 *   console.error(`Fix: ${m.correction}`);
 * });
 * ```
 */
async searchMistakes(args: Record<string, unknown>): Promise<Types.SearchMistakesResponse> {
  await this.maybeAutoIndex();
  const typedArgs = args as unknown as Types.SearchMistakesArgs;
  const { query, mistake_type, limit = 10, offset = 0, scope = 'all', conversation_id } = typedArgs;

  // Handle global scope by delegating to the cross-project search.
  if (scope === 'global') {
    const globalResponse = await this.searchAllMistakes({ query, mistake_type, limit, offset, source_type: 'all' });
    return {
      query,
      mistake_type,
      mistakes: globalResponse.mistakes.map(m => ({
        mistake_id: m.mistake_id,
        mistake_type: m.mistake_type,
        what_went_wrong: m.what_went_wrong,
        correction: m.correction,
        user_correction_message: m.user_correction_message,
        files_affected: m.files_affected,
        timestamp: m.timestamp,
      })),
      total_found: globalResponse.total_found,
      has_more: globalResponse.has_more,
      offset: globalResponse.offset,
      scope: 'global',
    };
  }

  // Resolve scope='current' to the conversation's external_id ONCE, before the
  // semantic-search try block. Doing this inside the try (as before) meant the
  // deliberate validation throws were swallowed by the fallback catch and the
  // same DB lookup ran a second time in the LIKE path.
  let targetExternalId: string | undefined;
  if (scope === 'current') {
    if (!conversation_id) {
      throw new Error("conversation_id is required when scope='current'");
    }
    const convRow = this.db.prepare(
      "SELECT external_id FROM conversations WHERE id = ?"
    ).get(conversation_id) as { external_id: string } | undefined;
    if (!convRow) {
      throw new Error(`Conversation with id '${conversation_id}' not found`);
    }
    targetExternalId = convRow.external_id;
  }

  // Try semantic search first for better results
  try {
    const { SemanticSearch } = await import("../search/SemanticSearch.js");
    const semanticSearch = new SemanticSearch(this.db);
    // Fetch more than needed to allow for filtering and pagination
    const semanticResults = await semanticSearch.searchMistakes(query, limit + offset + 10);

    // Apply additional filters
    let filtered = semanticResults;
    if (mistake_type) {
      filtered = filtered.filter(r => r.mistake.mistake_type === mistake_type);
    }
    if (targetExternalId !== undefined) {
      filtered = filtered.filter(r => r.mistake.conversation_id === targetExternalId);
    }

    // Apply pagination; the one extra element reveals whether another page exists.
    const paginated = filtered.slice(offset, offset + limit + 1);
    const hasMore = paginated.length > limit;
    const results = hasMore ? paginated.slice(0, limit) : paginated;
    if (results.length > 0) {
      return {
        query,
        mistake_type,
        mistakes: results.map(r => ({
          mistake_id: r.mistake.id,
          mistake_type: r.mistake.mistake_type,
          what_went_wrong: r.mistake.what_went_wrong,
          correction: r.mistake.correction,
          user_correction_message: r.mistake.user_correction_message,
          files_affected: r.mistake.files_affected,
          timestamp: new Date(r.mistake.timestamp).toISOString(),
        })),
        total_found: results.length,
        has_more: hasMore,
        offset,
        scope,
      };
    }
    // Fall through to LIKE search if semantic returned no results
  } catch (_e) {
    // Semantic search failed, fall back to LIKE search
    console.error("Semantic mistake search failed, using LIKE fallback");
  }

  // Fallback to LIKE search
  const sanitized = sanitizeForLike(query);
  let sql = `
    SELECT m.*, m.external_id as mistake_external_id, c.external_id as conversation_external_id
    FROM mistakes m
    JOIN conversations c ON m.conversation_id = c.id
    WHERE m.what_went_wrong LIKE ? ESCAPE '\\'
  `;
  const params: (string | number)[] = [`%${sanitized}%`];
  if (mistake_type) {
    sql += " AND mistake_type = ?";
    params.push(mistake_type);
  }
  if (targetExternalId !== undefined) {
    // Reuse the external id resolved above for scope='current'.
    sql += " AND c.external_id = ?";
    params.push(targetExternalId);
  }
  sql += ` ORDER BY timestamp DESC LIMIT ? OFFSET ?`;
  params.push(limit + 1); // Fetch one extra to determine has_more
  params.push(offset);

  const mistakes = this.db
    .prepare(sql)
    .all(...params) as Array<Types.MistakeRow & { mistake_external_id: string }>;
  const hasMore = mistakes.length > limit;
  const results = hasMore ? mistakes.slice(0, limit) : mistakes;
  return {
    query,
    mistake_type,
    mistakes: results.map((m) => ({
      mistake_id: m.mistake_external_id,
      mistake_type: m.mistake_type,
      what_went_wrong: m.what_went_wrong,
      correction: m.correction,
      user_correction_message: m.user_correction_message,
      files_affected: safeJsonParse<string[]>(m.files_affected, []),
      timestamp: new Date(m.timestamp).toISOString(),
    })),
    total_found: results.length,
    has_more: hasMore,
    offset,
    scope,
  };
}
/**
 * Look up requirements and constraints for a component or feature.
 *
 * Matches the component name as a substring against both the requirement
 * description and its affected-components list, optionally narrowed by
 * requirement type.
 *
 * @param args - Requirements search arguments:
 *   - `component`: Component or feature name (required)
 *   - `type`: Optional filter by requirement type (dependency, performance, compatibility, business)
 *
 * @returns `component`, `type`, the matching `requirements` (newest first)
 *   with id, type, description, rationale, affected components and ISO
 *   timestamp, plus `total_found`.
 */
async getRequirements(args: Record<string, unknown>): Promise<Types.GetRequirementsResponse> {
  const { component, type } = args as unknown as Types.GetRequirementsArgs;

  const pattern = `%${sanitizeForLike(component)}%`;
  // The OR group is parenthesized so the optional type filter ANDs with both branches.
  const clauses = ["(description LIKE ? ESCAPE '\\' OR affects_components LIKE ? ESCAPE '\\')"];
  const params: (string | number)[] = [pattern, pattern];
  if (type) {
    clauses.push("type = ?");
    params.push(type);
  }
  const sql = `SELECT * FROM requirements WHERE ${clauses.join(" AND ")} ORDER BY timestamp DESC`;
  const rows = this.db.prepare(sql).all(...params) as Types.RequirementRow[];

  return {
    component,
    type,
    requirements: rows.map((row) => ({
      requirement_id: row.id,
      type: row.type,
      description: row.description,
      rationale: row.rationale,
      affects_components: safeJsonParse<string[]>(row.affects_components, []),
      timestamp: new Date(row.timestamp).toISOString(),
    })),
    total_found: rows.length,
  };
}
/**
 * Query history of tool uses (bash commands, file edits, reads, etc.) with
 * pagination and filtering.
 *
 * Shows which tools ran during conversations and how they turned out. Output
 * content (result text, stdout, stderr) is omitted unless `include_content`
 * is set, and is clipped to `max_content_length` characters when included.
 *
 * @param args - Tool history arguments:
 *   - `tool_name`: Optional filter by tool name (Bash, Edit, Write, Read)
 *   - `file_path`: Optional substring filter against the tool input
 *   - `limit`: Maximum number of results (default: 20)
 *   - `offset`: Skip N results for pagination (default: 0)
 *   - `include_content`: Include tool content in the response (default: false)
 *   - `max_content_length`: Maximum characters per content field (default: 500)
 *   - `date_range`: Filter by timestamp range [start, end]
 *   - `conversation_id`: Filter by a specific conversation
 *   - `errors_only`: Only failed tool uses (default: false)
 *
 * @returns `tool_name`, `file_path`, `tool_uses` (possibly with truncated
 *   content), `total_found` (this page), `total_in_database` (all matches),
 *   `has_more`, and `offset`.
 */
async getToolHistory(args: Record<string, unknown>): Promise<Types.GetToolHistoryResponse> {
  const {
    tool_name,
    file_path,
    limit = 20,
    offset = 0,
    include_content = false,
    max_content_length = 500,
    date_range,
    conversation_id,
    errors_only = false,
  } = args as Types.GetToolHistoryArgs;

  // Clip a content field to `max` characters, flagging whether anything was cut.
  const clip = (text: string | null | undefined, max: number): { value?: string; truncated: boolean } => {
    if (!text) {
      return { value: undefined, truncated: false };
    }
    return text.length <= max
      ? { value: text, truncated: false }
      : { value: text.substring(0, max) + '... (truncated)', truncated: true };
  };

  // Accumulate a WHERE fragment per active filter.
  let whereClause = "WHERE 1=1";
  const params: (string | number)[] = [];
  if (tool_name) {
    whereClause += " AND tu.tool_name = ?";
    params.push(tool_name);
  }
  if (file_path) {
    whereClause += " AND tu.tool_input LIKE ? ESCAPE '\\'";
    params.push(`%${sanitizeForLike(file_path)}%`);
  }
  if (date_range && date_range.length === 2) {
    whereClause += " AND tu.timestamp BETWEEN ? AND ?";
    params.push(date_range[0], date_range[1]);
  }
  if (conversation_id) {
    whereClause += " AND tu.message_id IN (SELECT id FROM messages WHERE conversation_id = ?)";
    params.push(conversation_id);
  }
  if (errors_only) {
    whereClause += " AND tr.is_error = 1";
  }

  // Total matching rows, independent of pagination.
  const countSql = `
    SELECT COUNT(*) as total
    FROM tool_uses tu
    LEFT JOIN tool_results tr ON tu.id = tr.tool_use_id
    ${whereClause}
  `;
  const { total: totalInDatabase } = this.db.prepare(countSql).get(...params) as { total: number };

  // One page of rows, newest first.
  const pageSql = `
    SELECT tu.*, tr.content as result_content, tr.is_error, tr.stdout, tr.stderr
    FROM tool_uses tu
    LEFT JOIN tool_results tr ON tu.id = tr.tool_use_id
    ${whereClause}
    ORDER BY tu.timestamp DESC
    LIMIT ? OFFSET ?
  `;
  const rows = this.db.prepare(pageSql).all(...params, limit, offset) as Types.ToolUseRow[];

  return {
    tool_name,
    file_path,
    tool_uses: rows.map((row) => {
      const result: Types.ToolUseResult['result'] = {
        is_error: Boolean(row.is_error),
      };
      // Content is opt-in; when included, each field is clipped independently
      // and a *_truncated flag marks anything that was shortened.
      if (include_content) {
        const content = clip(row.result_content, max_content_length);
        const stdout = clip(row.stdout, max_content_length);
        const stderr = clip(row.stderr, max_content_length);
        if (content.value !== undefined) {
          result.content = content.value;
          if (content.truncated) {
            result.content_truncated = true;
          }
        }
        if (stdout.value !== undefined) {
          result.stdout = stdout.value;
          if (stdout.truncated) {
            result.stdout_truncated = true;
          }
        }
        if (stderr.value !== undefined) {
          result.stderr = stderr.value;
          if (stderr.truncated) {
            result.stderr_truncated = true;
          }
        }
      }
      return {
        tool_use_id: row.id,
        tool_name: row.tool_name,
        tool_input: safeJsonParse<Record<string, unknown>>(row.tool_input, {}),
        result,
        timestamp: new Date(row.timestamp).toISOString(),
      };
    }),
    total_found: rows.length,
    total_in_database: totalInDatabase,
    has_more: offset + rows.length < totalInDatabase,
    offset,
  };
}
/**
 * Find conversations that dealt with similar topics or problems.
 *
 * Runs a semantic message search, then groups the hits by conversation so
 * each similar session surfaces once with its most relevant messages.
 *
 * @param args - Similarity search arguments:
 *   - `query`: Description of the topic or problem (required)
 *   - `limit`: Maximum number of sessions (default: 5)
 *   - `offset`: Skip N sessions for pagination (default: 0)
 *   - `scope`: 'all' (default) or 'global'; 'current' is rejected because this
 *     tool ranks whole sessions, not messages within one session
 *
 * @returns `query`, the relevance-ranked `sessions` page, `total_found`,
 *   `has_more`, `offset`, and the applied `scope`.
 *
 * @throws Error when scope='current' is requested.
 */
async findSimilarSessions(args: Record<string, unknown>): Promise<Types.FindSimilarSessionsResponse> {
  await this.maybeAutoIndex();
  const { query, limit = 5, offset = 0, scope = 'all' } = args as unknown as Types.FindSimilarSessionsArgs;

  if (scope === 'current') {
    throw new Error("scope='current' is not supported for findSimilarSessions (it finds sessions, not messages within a session)");
  }

  // Over-fetch message hits so grouping by conversation still fills the page.
  const hits = await this.memory.search(query, (limit + offset) * 3);

  // Group hits per conversation. The first hit encountered for a conversation
  // seeds its relevance score (hits presumably arrive similarity-sorted —
  // confirm against this.memory.search ordering).
  const byConversation = new Map<string, Types.SessionResult>();
  for (const hit of hits) {
    const convId = hit.conversation.id;
    if (!convId) {
      continue;
    }
    let session = byConversation.get(convId);
    if (!session) {
      session = {
        conversation_id: convId,
        project_path: hit.conversation.project_path,
        first_message_at: new Date(hit.conversation.first_message_at).toISOString(),
        message_count: hit.conversation.message_count,
        git_branch: hit.conversation.git_branch,
        relevance_score: hit.similarity,
        relevant_messages: [],
      };
      byConversation.set(convId, session);
    }
    session.relevant_messages.push({
      message_id: hit.message.id,
      snippet: hit.snippet,
      similarity: hit.similarity,
    });
  }

  const ranked = [...byConversation.values()].sort((a, b) => b.relevance_score - a.relevance_score);
  const page = ranked.slice(offset, offset + limit);

  return {
    query,
    sessions: page,
    total_found: page.length,
    has_more: offset + limit < ranked.length,
    offset,
    scope,
  };
}
/**
* Recall relevant context and format for application to current work.
*
* This is a comprehensive context retrieval tool that searches across multiple
* data sources (conversations, decisions, mistakes, file changes, commits) and
* returns actionable suggestions for applying historical context to current work.
*
* @param args - Recall arguments:
* - `query`: What you're working on or need context for (required)
* - `context_types`: Types to recall (default: all types)
* - Options: "conversations", "decisions", "mistakes", "file_changes", "commits"
* - `file_path`: Optional filter for file-specific context
* - `date_range`: Optional [start_timestamp, end_timestamp] filter
* - `limit`: Maximum items per context type (default: 5)
*
* @returns Recalled context containing:
* - `query`: Search query used
* - `context_summary`: High-level summary of what was found
* - `recalled_context`: Structured context data:
* - `conversations`: Relevant past conversations
* - `decisions`: Related decisions with rationale
* - `mistakes`: Past mistakes to avoid
* - `file_changes`: File modification history
* - `commits`: Related git commits
* - `application_suggestions`: Actionable suggestions for applying this context
* - `total_items_found`: Total number of context items found
*
* @example
* ```typescript
* const context = await handlers.recallAndApply({
* query: 'refactoring database connection pooling',
* context_types: ['decisions', 'mistakes', 'commits'],
* file_path: 'src/database/pool.ts',
* limit: 5
* });
* console.error(context.context_summary);
* context.application_suggestions.forEach(s => console.error(`- ${s}`));
* ```
*/
async recallAndApply(args: Record<string, unknown>): Promise<Types.RecallAndApplyResponse> {
await this.maybeAutoIndex();
const typedArgs = args as unknown as Types.RecallAndApplyArgs;
const { query, context_types = ["conversations", "decisions", "mistakes", "file_changes", "commits"], file_path, date_range, limit = 5, offset = 0, scope = 'all', conversation_id } = typedArgs;
const recalled: Types.RecalledContext = {};
let totalItems = 0;
const suggestions: string[] = [];
// 1. Recall conversations if requested
if (context_types.includes("conversations")) {
// Use searchConversations with scope support
const convResponse = await this.searchConversations({
query,
limit,
offset,
date_range,
scope,
conversation_id,
});
recalled.conversations = convResponse.results.map(result => ({
session_id: result.conversation_id,
timestamp: result.timestamp,
snippet: result.snippet,
relevance_score: result.similarity,
}));
totalItems += recalled.conversations.length;
if (recalled.conversations.length > 0) {
suggestions.push(`Review ${recalled.conversations.length} past conversation(s) about similar topics`);
}
}
// 2. Recall decisions if requested
if (context_types.includes("decisions")) {
// Use getDecisions with scope support
const decisionsResponse = await this.getDecisions({
query,
file_path,
limit,
offset,
scope,
conversation_id,
});
recalled.decisions = decisionsResponse.decisions.map(d => ({
decision_id: d.decision_id,
type: d.context || 'unknown',
description: d.decision_text,
rationale: d.rationale || undefined,
alternatives: d.alternatives_considered,
rejected_approaches: Object.values(d.rejected_reasons ?? {}),
affects_components: d.related_files,
timestamp: d.timestamp,
}));
totalItems += recalled.decisions.length;
if (recalled.decisions.length > 0) {
suggestions.push(`Apply learnings from ${recalled.decisions.length} past decision(s) with documented rationale`);
}
}
// 3. Recall mistakes if requested
if (context_types.includes("mistakes")) {
// Use searchMistakes with scope support
const mistakesResponse = await this.searchMistakes({
query,
limit,
offset,
scope,
conversation_id,
});
recalled.mistakes = mistakesResponse.mistakes.map(m => ({
mistake_id: m.mistake_id,
type: m.mistake_type,
description: m.what_went_wrong,
what_happened: m.what_went_wrong,
how_fixed: m.correction || undefined,
lesson_learned: m.user_correction_message || undefined,
files_affected: m.files_affected,
timestamp: m.timestamp,
}));
totalItems += recalled.mistakes.length;
if (recalled.mistakes.length > 0) {
suggestions.push(`Avoid repeating ${recalled.mistakes.length} documented mistake(s) from the past`);
}
}
// 4. Recall file changes if requested
if (context_types.includes("file_changes") && file_path) {
// Query file_edits table (not messages) - file_path is stored in file_edits
const fileChanges = this.db.getDatabase()
.prepare(`
SELECT
file_path,
COUNT(DISTINCT conversation_id) as change_count,
MAX(snapshot_timestamp) as last_modified,
GROUP_CONCAT(DISTINCT conversation_id) as conversation_ids
FROM file_edits
WHERE file_path LIKE ? ESCAPE '\\'
${date_range ? 'AND snapshot_timestamp BETWEEN ? AND ?' : ''}
GROUP BY file_path
ORDER BY last_modified DESC
LIMIT ?
`)
.all(
`%${sanitizeForLike(file_path)}%`,
...(date_range ? [date_range[0], date_range[1]] : []),
limit
) as Array<{
file_path: string;
change_count: number;
last_modified: number;
conversation_ids: string;
}>;
recalled.file_changes = fileChanges.map(fc => ({
file_path: fc.file_path,
change_count: fc.change_count,
last_modified: new Date(fc.last_modified).toISOString(),
related_conversations: fc.conversation_ids ? fc.conversation_ids.split(',') : [],
}));
totalItems += recalled.file_changes.length;
if (recalled.file_changes.length > 0) {
suggestions.push(`Consider ${recalled.file_changes.length} file(s) with relevant history before making changes`);
}
}
// 5. Recall commits if requested
if (context_types.includes("commits")) {
const commits = this.db.getDatabase()
.prepare(`
SELECT hash, message, timestamp, files_changed
FROM git_commits
WHERE message LIKE ? ESCAPE '\\' ${file_path ? "AND files_changed LIKE ? ESCAPE '\\'" : ''}
${date_range ? 'AND timestamp BETWEEN ? AND ?' : ''}
ORDER BY timestamp DESC
LIMIT ?
`)
.all(
`%${sanitizeForLike(query)}%`,
...(file_path ? [`%${sanitizeForLike(file_path)}%`] : []),
...(date_range ? [date_range[0], date_range[1]] : []),
limit
) as Array<{
hash: string;
message: string;
timestamp: number;
files_changed: string;
}>;
recalled.commits = commits.map(c => ({
commit_hash: c.hash,
message: c.message,
timestamp: new Date(c.timestamp).toISOString(),
files_affected: safeJsonParse<string[]>(c.files_changed, []),
}));
totalItems += recalled.commits.length;
if (recalled.commits.length > 0) {
suggestions.push(`Reference ${recalled.commits.length} related git commit(s) for implementation patterns`);
}
}
// Generate context summary
const summaryParts: string[] = [];
if (recalled.conversations && recalled.conversations.length > 0) {
summaryParts.push(`${recalled.conversations.length} relevant conversation(s)`);
}
if (recalled.decisions && recalled.decisions.length > 0) {
summaryParts.push(`${recalled.decisions.length} decision(s)`);
}
if (recalled.mistakes && recalled.mistakes.length > 0) {
summaryParts.push(`${recalled.mistakes.length} past mistake(s)`);
}
if (recalled.file_changes && recalled.file_changes.length > 0) {
summaryParts.push(`${recalled.file_changes.length} file change(s)`);
}
if (recalled.commits && recalled.commits.length > 0) {
summaryParts.push(`${recalled.commits.length} commit(s)`);
}
const contextSummary = summaryParts.length > 0
? `Recalled: ${summaryParts.join(', ')}`
: 'No relevant context found';
// Add general suggestion if we found context
if (totalItems > 0) {
suggestions.push(`Use this historical context to inform your current implementation`);
} else {
suggestions.push(`No historical context found - you may be working on something new`);
}
return {
query,
context_summary: contextSummary,
recalled_context: recalled,
application_suggestions: suggestions,
total_items_found: totalItems,
};
}
/**
* Generate comprehensive project documentation by combining codebase analysis
* with conversation history.
*
* Creates documentation that shows WHAT exists in the code (via local code scanning)
* and WHY it was built that way (via conversation history).
*
* @param args - Documentation generation arguments:
* - `project_path`: Path to the project (defaults to cwd)
* - `session_id`: Optional specific session to include
* - `scope`: Documentation scope (default: 'full')
* - 'full': Everything (architecture, decisions, quality)
* - 'architecture': Module structure and dependencies
* - 'decisions': Decision log with rationale
* - 'quality': Code quality insights
* - `module_filter`: Optional filter for specific module path (e.g., 'src/auth')
*
* @returns Documentation result containing:
* - `success`: Whether generation succeeded
* - `project_path`: Project that was documented
* - `scope`: Scope of documentation generated
* - `documentation`: Generated markdown documentation
* - `statistics`: Summary statistics:
* - `modules`: Number of modules documented
* - `decisions`: Number of decisions included
* - `mistakes`: Number of mistakes documented
* - `commits`: Number of commits referenced
*
* @example
* ```typescript
* const doc = await handlers.generateDocumentation({
* project_path: '/Users/me/my-project',
* scope: 'full',
* module_filter: 'src/auth'
* });
* console.error(doc.documentation); // Markdown documentation
* console.error(`Documented ${doc.statistics.modules} modules`);
* ```
*/
async generateDocumentation(args: Record<string, unknown>): Promise<Types.GenerateDocumentationResponse> {
  const opts = args as unknown as Types.GenerateDocumentationArgs;
  const resolvedPath = this.resolveProjectPath(opts.project_path);
  const docScope = opts.scope || 'full';

  console.error('\n📚 Starting documentation generation...');

  // Delegate the heavy lifting to the generator; it combines codebase
  // analysis with conversation history into one markdown document.
  const generator = new DocumentationGenerator(this.db);
  const markdown = await generator.generate(
    {
      projectPath: resolvedPath,
      sessionId: opts.session_id,
      scope: docScope,
      moduleFilter: opts.module_filter
    }
  );

  // Statistics are recovered from the rendered markdown itself: locate the
  // first line carrying each known stat marker and pull its first integer.
  const docLines = markdown.split('\n');
  const firstNumberIn = (marker: string): number => {
    const hit = docLines.find(line => line.includes(marker));
    if (!hit) {return 0;}
    const digits = hit.match(/\d+/);
    return digits ? parseInt(digits[0], 10) : 0;
  };

  return {
    success: true,
    project_path: resolvedPath,
    scope: docScope,
    documentation: markdown,
    statistics: {
      modules: firstNumberIn('**Modules**:'),
      decisions: firstNumberIn('| Decisions |'),
      mistakes: firstNumberIn('| Mistakes |'),
      commits: firstNumberIn('| Git Commits |')
    }
  };
}
/**
* Discover old conversation folders that might contain conversation history
* for the current project.
*
* Searches through stored conversation folders to find potential matches for
* the current project path. Useful when project paths have changed (e.g., after
* moving or renaming a project directory).
*
* @param args - Discovery arguments:
* - `current_project_path`: Current project path (defaults to cwd)
*
* @returns Discovery results containing:
* - `success`: Whether discovery succeeded
* - `current_project_path`: Current project path searched for
* - `candidates`: Array of potential matches sorted by score:
* - `folder_name`: Name of the conversation folder
* - `folder_path`: Full path to the folder
* - `stored_project_path`: Original project path stored in conversations
* - `score`: Match score (higher is better match)
* - `stats`: Folder statistics:
* - `conversations`: Number of conversations in folder
* - `messages`: Number of messages in folder
* - `files`: Number of .jsonl files
* - `last_activity`: Timestamp of last activity
* - `message`: Human-readable status message
*
* @example
* ```typescript
* const discovery = await handlers.discoverOldConversations({
* current_project_path: '/Users/me/projects/my-app'
* });
* console.error(discovery.message);
* discovery.candidates.forEach(c => {
* console.error(`Score ${c.score}: ${c.folder_name}`);
* console.error(` Original path: ${c.stored_project_path}`);
* console.error(` Stats: ${c.stats.conversations} conversations, ${c.stats.files} files`);
* });
* ```
*/
async discoverOldConversations(args: Record<string, unknown>): Promise<Types.DiscoverOldConversationsResponse> {
  const typed = args as Types.DiscoverOldConversationsArgs;
  const projectPath = this.resolveProjectPath(typed.current_project_path);

  // Candidates arrive pre-scored and sorted by the migration helper.
  const matches = await this.migration.discoverOldFolders(projectPath);

  // Shape each candidate for the response, counting its .jsonl transcript
  // files inline. An unreadable folder simply reports zero files rather
  // than failing the whole discovery.
  const shaped = matches.map(m => {
    let jsonlCount = 0;
    try {
      jsonlCount = readdirSync(m.folderPath)
        .filter((f: string) => f.endsWith('.jsonl'))
        .length;
    } catch (_error) {
      jsonlCount = 0;
    }
    return {
      folder_name: m.folderName,
      folder_path: m.folderPath,
      stored_project_path: m.storedProjectPath,
      score: Math.round(m.score * 10) / 10, // Round to 1 decimal
      stats: {
        conversations: m.stats.conversations,
        messages: m.stats.messages,
        files: jsonlCount,
        last_activity: m.stats.lastActivity
      }
    };
  });

  const message = matches.length > 0
    ? `Found ${matches.length} potential old conversation folder(s). Top match has ${shaped[0].stats.conversations} conversations and ${shaped[0].stats.files} files (score: ${shaped[0].score}).`
    : `No old conversation folders found for project path: ${projectPath}`;

  return {
    success: true,
    current_project_path: projectPath,
    candidates: shaped,
    message
  };
}
/**
* Migrate or merge conversation history from an old project path to a new one.
*
* Use this when a project has been moved or renamed to bring the conversation
* history along. Supports two modes: 'migrate' (move all files) or 'merge'
* (combine with existing files).
*
* @param args - Migration arguments:
* - `source_folder`: Source folder containing old conversations (required)
* - `old_project_path`: Original project path in the conversations (required)
* - `new_project_path`: New project path to update to (required)
* - `dry_run`: Preview changes without applying them (default: false)
* - `mode`: Migration mode (default: 'migrate')
* - 'migrate': Move all files from source to target
* - 'merge': Combine source files with existing target files
*
* @returns Migration result containing:
* - `success`: Whether migration succeeded
* - `source_folder`: Source folder path
* - `target_folder`: Target folder path (where files were copied)
* - `files_copied`: Number of files copied/migrated
* - `database_updated`: Whether database was updated with new paths
* - `backup_created`: Whether a backup was created (true only when a non-dry-run migration updated the database)
* - `message`: Human-readable status message
*
* @example
* ```typescript
* // First, preview with dry run
* const preview = await handlers.migrateProject({
* source_folder: '/path/to/old/conversations',
* old_project_path: '/old/path/to/project',
* new_project_path: '/new/path/to/project',
* dry_run: true
* });
* console.error(preview.message); // "Dry run: Would migrate X files..."
*
* // Then, execute the migration
* const result = await handlers.migrateProject({
* source_folder: '/path/to/old/conversations',
* old_project_path: '/old/path/to/project',
* new_project_path: '/new/path/to/project',
* dry_run: false,
* mode: 'migrate'
* });
* console.error(`Migrated ${result.files_copied} files`);
* ```
*/
async migrateProject(args: Record<string, unknown>): Promise<Types.MigrateProjectResponse> {
  const typedArgs = args as unknown as Types.MigrateProjectArgs;
  const sourceFolder = typedArgs.source_folder;

  // Validate all required parameters before touching the filesystem.
  if (!sourceFolder || typeof sourceFolder !== 'string' || sourceFolder.trim() === '') {
    throw new Error("source_folder is required and must be a non-empty string");
  }
  if (!typedArgs.old_project_path || !typedArgs.new_project_path) {
    throw new Error("old_project_path and new_project_path are required");
  }

  const oldProjectPath = getCanonicalProjectPath(typedArgs.old_project_path).canonicalPath;
  const newProjectPath = getCanonicalProjectPath(typedArgs.new_project_path).canonicalPath;
  const dryRun = typedArgs.dry_run ?? false;
  const mode = typedArgs.mode ?? "migrate";

  // SECURITY: resolve both paths and require the source to live under the
  // projects directory, preventing path traversal attacks
  // (e.g. /projects/../../../etc/passwd).
  // Both "/" and "\\" are accepted as the boundary separator so the
  // containment check also holds on Windows, where resolve() produces
  // backslash-delimited paths (the POSIX-only check rejected all valid
  // sources there).
  const projectsDir = resolve(this.migration.getProjectsDir());
  const resolvedSource = resolve(sourceFolder);
  const isUnderProjectsDir =
    resolvedSource === projectsDir ||
    resolvedSource.startsWith(projectsDir + "/") ||
    resolvedSource.startsWith(projectsDir + "\\");
  if (!isUnderProjectsDir) {
    throw new Error(`Source folder must be under ${projectsDir}`);
  }

  // Target folder derives deterministically from the new project path.
  const targetFolderName = pathToProjectFolderName(newProjectPath);
  const targetFolder = join(this.migration.getProjectsDir(), targetFolderName);

  // Execute migration or merge (dry runs report without copying).
  const result = await this.migration.executeMigration(
    sourceFolder,
    targetFolder,
    oldProjectPath,
    newProjectPath,
    dryRun,
    mode
  );

  let message: string;
  if (dryRun) {
    message =
      mode === "merge"
        ? `Dry run: Would merge ${result.filesCopied} new conversation files into ${targetFolder}`
        : `Dry run: Would migrate ${result.filesCopied} conversation files from ${sourceFolder} to ${targetFolder}`;
  } else {
    message =
      mode === "merge"
        ? `Successfully merged ${result.filesCopied} new conversation files into ${targetFolder}. Original files preserved in ${sourceFolder}.`
        : `Successfully migrated ${result.filesCopied} conversation files to ${targetFolder}. Original files preserved in ${sourceFolder}.`;
  }

  return {
    success: result.success,
    source_folder: sourceFolder,
    target_folder: targetFolder,
    files_copied: result.filesCopied,
    database_updated: result.databaseUpdated,
    // A backup only exists when a real (non-dry-run) migration touched the DB.
    backup_created: !dryRun && result.databaseUpdated,
    message
  };
}
/**
* Forget conversations by topic/keywords.
*
* Searches for conversations matching the provided keywords and optionally deletes them.
* Creates automatic backup before deletion.
*
* @param args - Arguments:
* - `keywords`: Array of keywords/topics to search for
* - `project_path`: Path to the project (defaults to cwd)
* - `confirm`: Must be true to actually delete (default: false for preview)
*
* @returns Result containing:
* - `success`: Whether operation succeeded
* - `preview_mode`: Whether this was a preview (confirm=false)
* - `conversations_found`: Number of conversations matching keywords
* - `conversations_deleted`: Number of conversations actually deleted
* - `messages_deleted`: Number of messages deleted
* - `decisions_deleted`: Number of decisions deleted
* - `mistakes_deleted`: Number of mistakes deleted
* - `backup_path`: Path to backup file (if deletion occurred)
* - `conversation_summaries`: List of conversations with basic info
* - `message`: Human-readable status message
*
* @example
* ```typescript
* // Preview what would be deleted
* const preview = await handlers.forgetByTopic({
* keywords: ['authentication', 'redesign'],
* confirm: false
* });
*
* // Actually delete after reviewing preview
* const result = await handlers.forgetByTopic({
* keywords: ['authentication', 'redesign'],
* confirm: true
* });
* ```
*/
async forgetByTopic(args: unknown): Promise<Types.ForgetByTopicResponse> {
  const typedArgs = args as Types.ForgetByTopicArgs;

  // Normalize keywords defensively: args arrives as untyped JSON from the
  // tool caller, so a non-array payload or non-string entries must not
  // throw on .trim() (which would bypass the structured error response
  // below — the try/catch starts later). Keep only non-empty strings.
  const keywords = (Array.isArray(typedArgs.keywords) ? typedArgs.keywords : [])
    .filter((k): k is string => typeof k === "string")
    .map(k => k.trim())
    .filter(k => k.length > 0);
  const projectPath = this.resolveProjectPath(typedArgs.project_path);
  // SECURITY: Require strict boolean true to prevent truthy string coercion
  const confirm = typedArgs.confirm === true;

  if (keywords.length === 0) {
    return {
      success: false,
      preview_mode: true,
      conversations_found: 0,
      conversations_deleted: 0,
      messages_deleted: 0,
      decisions_deleted: 0,
      mistakes_deleted: 0,
      backup_path: null,
      conversation_summaries: [],
      message: "No keywords provided. Please specify keywords/topics to search for."
    };
  }

  try {
    // Wire the deletion service to shared storage and the semantic index
    // so deletions also remove the associated embeddings.
    const storage = this.memory.getStorage();
    const semanticSearch = this.memory.getSemanticSearch();
    const deletionService = new DeletionService(
      this.db.getDatabase(),
      storage,
      semanticSearch
    );

    // Always compute a preview first — it drives both the dry-run response
    // and the summaries returned after a real deletion.
    const preview = await deletionService.previewDeletionByTopic(keywords, projectPath);
    if (preview.conversationIds.length === 0) {
      return {
        success: true,
        preview_mode: true,
        conversations_found: 0,
        conversations_deleted: 0,
        messages_deleted: 0,
        decisions_deleted: 0,
        mistakes_deleted: 0,
        backup_path: null,
        conversation_summaries: [],
        message: preview.summary
      };
    }

    // Format conversation summaries for response
    const conversationSummaries = preview.conversations.map(conv => ({
      id: conv.id,
      session_id: conv.session_id,
      created_at: new Date(conv.created_at).toISOString(),
      message_count: conv.message_count
    }));

    // If not confirmed, return the preview without deleting anything.
    if (!confirm) {
      return {
        success: true,
        preview_mode: true,
        conversations_found: preview.conversationIds.length,
        conversations_deleted: 0,
        messages_deleted: 0,
        decisions_deleted: 0,
        mistakes_deleted: 0,
        backup_path: null,
        conversation_summaries: conversationSummaries,
        message: `${preview.summary}\n\nSet confirm=true to delete these conversations.`
      };
    }

    // Actually delete; the service creates an automatic backup first.
    const result = await deletionService.forgetByTopic(keywords, projectPath);
    return {
      success: true,
      preview_mode: false,
      conversations_found: result.deleted.conversations,
      conversations_deleted: result.deleted.conversations,
      messages_deleted: result.deleted.messages,
      decisions_deleted: result.deleted.decisions,
      mistakes_deleted: result.deleted.mistakes,
      backup_path: result.backup.backupPath,
      conversation_summaries: conversationSummaries,
      message: result.summary
    };
  } catch (error) {
    return {
      success: false,
      preview_mode: !confirm,
      conversations_found: 0,
      conversations_deleted: 0,
      messages_deleted: 0,
      decisions_deleted: 0,
      mistakes_deleted: 0,
      backup_path: null,
      conversation_summaries: [],
      message: `Error: ${(error as Error).message}`
    };
  }
}
// ==================== High-Value Utility Tools ====================
/**
* Search for all context related to a specific file.
*
* Combines discussions, decisions, and mistakes related to a file
* in one convenient query.
*
* @param args - Search arguments with file_path
* @returns Combined file context from all sources
*/
async searchByFile(args: Record<string, unknown>): Promise<Types.SearchByFileResponse> {
  const typed = args as unknown as Types.SearchByFileArgs;
  const filePath = typed.file_path;
  const maxRows = typed.limit || 5;

  if (!filePath) {
    return {
      file_path: "",
      discussions: [],
      decisions: [],
      mistakes: [],
      total_mentions: 0,
      message: "Error: file_path is required",
    };
  }

  // Strip a leading "./" so relative and absolute mentions both match,
  // then escape LIKE metacharacters. Two patterns are used: the bare
  // path anywhere, and the path preceded by a slash.
  const escapedPath = sanitizeForLike(filePath.replace(/^\.\//, ""));
  const barePattern = `%${escapedPath}%`;
  const slashPattern = `%/${escapedPath}%`;

  try {
    // Row shapes for the three tables queried below.
    interface MessageRow {
      id: string;
      conversation_id: string;
      content: string;
      timestamp: number;
      role: string;
    }
    interface DecisionRow {
      id: string;
      decision_text: string;
      rationale: string | null;
      context: string | null;
      timestamp: number;
    }
    interface MistakeRow {
      id: string;
      mistake_type: string;
      what_went_wrong: string;
      correction: string | null;
      timestamp: number;
    }

    // Messages whose content mentions this file.
    const discussionRows = this.db
      .prepare(`
SELECT id, conversation_id, content, timestamp, role
FROM messages
WHERE content LIKE ? ESCAPE '\\' OR content LIKE ? ESCAPE '\\'
ORDER BY timestamp DESC
LIMIT ?
`)
      .all(barePattern, slashPattern, maxRows) as MessageRow[];

    // Decisions referencing the file in related_files or the decision text.
    const decisionRows = this.db
      .prepare(`
SELECT d.id, d.decision_text, d.rationale, d.context, d.timestamp
FROM decisions d
WHERE d.related_files LIKE ? ESCAPE '\\'
OR d.related_files LIKE ? ESCAPE '\\'
OR d.decision_text LIKE ? ESCAPE '\\'
ORDER BY d.timestamp DESC
LIMIT ?
`)
      .all(barePattern, slashPattern, barePattern, maxRows) as DecisionRow[];

    // Mistakes referencing the file in files_affected or the description.
    const mistakeRows = this.db
      .prepare(`
SELECT m.id, m.mistake_type, m.what_went_wrong, m.correction, m.timestamp
FROM mistakes m
WHERE m.files_affected LIKE ? ESCAPE '\\'
OR m.files_affected LIKE ? ESCAPE '\\'
OR m.what_went_wrong LIKE ? ESCAPE '\\'
ORDER BY m.timestamp DESC
LIMIT ?
`)
      .all(barePattern, slashPattern, barePattern, maxRows) as MistakeRow[];

    const totalMentions = discussionRows.length + decisionRows.length + mistakeRows.length;

    return {
      file_path: filePath,
      discussions: discussionRows.map((row) => ({
        id: row.id,
        conversation_id: row.conversation_id,
        content: row.content.substring(0, 500), // cap snippet size
        timestamp: row.timestamp,
        role: row.role,
      })),
      decisions: decisionRows.map((row) => ({
        id: row.id,
        decision_text: row.decision_text,
        rationale: row.rationale || undefined,
        context: row.context || undefined,
        timestamp: row.timestamp,
      })),
      mistakes: mistakeRows.map((row) => ({
        id: row.id,
        mistake_type: row.mistake_type,
        what_went_wrong: row.what_went_wrong,
        correction: row.correction || undefined,
        timestamp: row.timestamp,
      })),
      total_mentions: totalMentions,
      message:
        totalMentions > 0
          ? `Found ${totalMentions} mentions: ${discussionRows.length} discussions, ${decisionRows.length} decisions, ${mistakeRows.length} mistakes`
          : `No mentions found for file: ${filePath}`,
    };
  } catch (error) {
    return {
      file_path: filePath,
      discussions: [],
      decisions: [],
      mistakes: [],
      total_mentions: 0,
      message: `Error searching for file: ${(error as Error).message}`,
    };
  }
}
/**
* List recent conversation sessions.
*
* Provides an overview of recent sessions with basic stats.
*
* @param args - Query arguments with limit/offset
* @returns List of recent sessions with summaries
*/
async listRecentSessions(args: Record<string, unknown>): Promise<Types.ListRecentSessionsResponse> {
  const typed = args as unknown as Types.ListRecentSessionsArgs;
  const pageSize = typed.limit || 10;
  const pageOffset = typed.offset || 0;
  const projectPath = this.resolveOptionalProjectPath(typed.project_path);

  try {
    interface SessionRow {
      id: string;
      external_id: string;
      project_path: string;
      created_at: number;
      message_count: number;
      first_message_preview: string | null;
    }

    // Pick the scoped or unscoped variant of the listing query; one extra
    // row beyond pageSize is requested to detect whether more pages exist.
    const query = projectPath
      ? `
SELECT
c.id,
c.external_id,
c.project_path,
c.created_at,
(SELECT COUNT(*) FROM messages WHERE conversation_id = c.id) as message_count,
(SELECT content FROM messages WHERE conversation_id = c.id ORDER BY timestamp ASC LIMIT 1) as first_message_preview
FROM conversations c
WHERE c.project_path = ?
ORDER BY c.created_at DESC
LIMIT ? OFFSET ?
`
      : `
SELECT
c.id,
c.external_id,
c.project_path,
c.created_at,
(SELECT COUNT(*) FROM messages WHERE conversation_id = c.id) as message_count,
(SELECT content FROM messages WHERE conversation_id = c.id ORDER BY timestamp ASC LIMIT 1) as first_message_preview
FROM conversations c
ORDER BY c.created_at DESC
LIMIT ? OFFSET ?
`;
    const params: (string | number)[] = projectPath
      ? [projectPath, pageSize + 1, pageOffset]
      : [pageSize + 1, pageOffset];

    const rows = this.db.prepare(query).all(...params) as SessionRow[];
    const hasMore = rows.length > pageSize;
    const page = hasMore ? rows.slice(0, pageSize) : rows;

    // Total session count honours the same optional project filter.
    interface CountRow {
      total: number;
    }
    const countRow = (projectPath
      ? this.db.prepare("SELECT COUNT(*) as total FROM conversations WHERE project_path = ?").get(projectPath)
      : this.db.prepare("SELECT COUNT(*) as total FROM conversations").get()) as CountRow;
    const totalSessions = countRow?.total || 0;

    return {
      sessions: page.map((row) => ({
        id: row.id,
        session_id: row.external_id,
        project_path: row.project_path,
        created_at: row.created_at,
        message_count: row.message_count,
        first_message_preview: row.first_message_preview
          ? row.first_message_preview.substring(0, 200)
          : undefined,
      })),
      total_sessions: totalSessions,
      has_more: hasMore,
      message: `Found ${totalSessions} sessions${projectPath ? ` for ${projectPath}` : ""}`,
    };
  } catch (error) {
    return {
      sessions: [],
      total_sessions: 0,
      has_more: false,
      message: `Error listing sessions: ${(error as Error).message}`,
    };
  }
}
/**
* Summarize the latest session for a project.
*
* Returns the most recent conversation and a lightweight summary of
* what is being worked on, recent actions, and errors.
*/
async getLatestSessionSummary(args: Record<string, unknown>): Promise<Types.GetLatestSessionSummaryResponse> {
const typedArgs = args as unknown as Types.GetLatestSessionSummaryArgs;
const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
// "all" disables source filtering; any other value restricts to that source.
const sourceType = typedArgs.source_type ?? "all";
// Clamp the requested message window to [1, 200]; default 20.
const limitMessages = Math.max(1, Math.min(typedArgs.limit_messages ?? 20, 200));
// Both toggles default to true; only an explicit false disables them.
const includeTools = typedArgs.include_tools !== false;
const includeErrors = typedArgs.include_errors !== false;
// Collapse runs of whitespace and cap the string at maxLength,
// replacing the last character with an ellipsis when truncated.
const truncate = (input: string, maxLength: number): string => {
const trimmed = input.trim().replace(/\s+/g, " ");
if (trimmed.length <= maxLength) {
return trimmed;
}
return `${trimmed.slice(0, maxLength - 1)}…`;
};
try {
// Build the session-selection query, appending WHERE clauses only for
// the filters that are actually active.
let query = `
SELECT
c.id,
c.external_id,
c.project_path,
c.source_type,
c.created_at,
c.last_message_at,
c.message_count
FROM conversations c
`;
const params: Array<string | number> = [];
const clauses: string[] = [];
if (projectPath) {
clauses.push("c.project_path = ?");
params.push(projectPath);
}
if (sourceType !== "all") {
clauses.push("c.source_type = ?");
params.push(sourceType);
}
if (clauses.length > 0) {
query += ` WHERE ${clauses.join(" AND ")}`;
}
// Most recently active conversation wins.
query += " ORDER BY c.last_message_at DESC LIMIT 1";
const sessionRow = this.db.prepare(query).get(...params) as
| {
id: number;
external_id: string;
project_path: string;
source_type: "claude-code" | "codex";
created_at: number;
last_message_at: number;
message_count: number;
}
| undefined;
if (!sessionRow) {
// No conversation matched the filters — a successful "not found".
return {
success: true,
found: false,
message: "No sessions found",
};
}
// Fetch the newest `limitMessages` messages of that conversation,
// newest first, for the summary scan below.
const messageRows = this.db.prepare(`
SELECT
m.id,
m.message_type,
m.role,
m.content,
m.timestamp
FROM messages m
WHERE m.conversation_id = ?
ORDER BY m.timestamp DESC
LIMIT ?
`).all(sessionRow.id, limitMessages) as Array<{
id: number;
message_type: string;
role: string | null;
content: string | null;
timestamp: number;
}>;
// Collect up to 3 recent snippets per role; rows are newest-first, so
// index 0 of each list is the most recent message of that role.
const recentUserMessages: Array<{ timestamp: number; content: string }> = [];
const recentAssistantMessages: Array<{ timestamp: number; content: string }> = [];
for (const row of messageRows) {
if (!row.content) {
continue;
}
const snippet = truncate(row.content, 280);
// Role may live in either `role` or `message_type` depending on source.
if (row.role === "user" || row.message_type === "user") {
if (recentUserMessages.length < 3) {
recentUserMessages.push({ timestamp: row.timestamp, content: snippet });
}
} else if (row.role === "assistant" || row.message_type === "assistant") {
if (recentAssistantMessages.length < 3) {
recentAssistantMessages.push({ timestamp: row.timestamp, content: snippet });
}
}
}
// Problem statement: newest user message, falling back to the newest
// assistant message; undefined when neither exists.
const problemStatementSource = recentUserMessages[0] ?? recentAssistantMessages[0];
const problemStatement = problemStatementSource?.content;
const recentActions: Array<{
tool_name: string;
timestamp: number;
tool_input: Record<string, unknown>;
}> = [];
if (includeTools) {
// Last 5 tool invocations across the whole conversation.
const toolRows = this.db.prepare(`
SELECT
tu.tool_name,
tu.tool_input,
tu.timestamp
FROM tool_uses tu
JOIN messages m ON m.id = tu.message_id
WHERE m.conversation_id = ?
ORDER BY tu.timestamp DESC
LIMIT 5
`).all(sessionRow.id) as Array<{ tool_name: string; tool_input: string; timestamp: number }>;
for (const row of toolRows) {
recentActions.push({
tool_name: row.tool_name,
timestamp: row.timestamp,
// tool_input is stored as JSON text; fall back to {} on parse failure.
tool_input: safeJsonParse(row.tool_input, {}),
});
}
}
const errors: Array<{ tool_name: string; timestamp: number; message: string }> = [];
if (includeErrors) {
// Last 5 failed tool results (is_error = 1), joined back to the tool name.
const errorRows = this.db.prepare(`
SELECT
tu.tool_name,
tr.timestamp,
tr.content,
tr.stderr
FROM tool_results tr
JOIN tool_uses tu ON tu.id = tr.tool_use_id
JOIN messages m ON m.id = tr.message_id
WHERE m.conversation_id = ? AND tr.is_error = 1
ORDER BY tr.timestamp DESC
LIMIT 5
`).all(sessionRow.id) as Array<{
tool_name: string;
timestamp: number;
content: string | null;
stderr: string | null;
}>;
for (const row of errorRows) {
// Prefer stderr; fall back to result content, then a generic message.
const message = row.stderr || row.content || "Unknown error";
errors.push({
tool_name: row.tool_name,
timestamp: row.timestamp,
message: truncate(message, 280),
});
}
}
return {
success: true,
found: true,
session: {
id: String(sessionRow.id),
session_id: sessionRow.external_id,
project_path: sessionRow.project_path,
source_type: sessionRow.source_type,
created_at: sessionRow.created_at,
last_message_at: sessionRow.last_message_at,
message_count: sessionRow.message_count,
},
summary: {
problem_statement: problemStatement,
recent_user_messages: recentUserMessages,
recent_assistant_messages: recentAssistantMessages,
// Sections disabled by the toggles are omitted (undefined), not empty.
recent_actions: includeTools ? recentActions : undefined,
errors: includeErrors ? errors : undefined,
},
message: "Latest session summary generated",
};
} catch (error) {
return {
success: false,
found: false,
message: `Error generating latest session summary: ${(error as Error).message}`,
};
}
}
// ==================== Global Cross-Project Tools ====================
/**
* Index all projects (Claude Code + Codex).
*
* Discovers and indexes all projects from both Claude Code and Codex,
* registering them in a global index for cross-project search.
*
* @param args - Indexing arguments
* @returns Summary of all indexed projects
*/
async indexAllProjects(args: Record<string, unknown>): Promise<Types.IndexAllProjectsResponse> {
const { GlobalIndex } = await import("../storage/GlobalIndex.js");
const { homedir } = await import("os");
const { join } = await import("path");
const { existsSync, readdirSync } = await import("fs");
const typedArgs = args as Types.IndexAllProjectsArgs;
const {
include_codex = true,
include_claude_code = true,
codex_path = join(homedir(), ".codex"),
claude_projects_path = join(homedir(), ".claude", "projects"),
incremental = true,
} = typedArgs;
const globalIndex = new GlobalIndex(this.db);
try {
const projects: Array<{
project_path: string;
source_type: "claude-code" | "codex";
message_count: number;
conversation_count: number;
}> = [];
const errors: Array<{ project_path: string; error: string }> = [];
const claudeProjectsByPath = new Map<string, {
project_path: string;
source_type: "claude-code";
message_count: number;
conversation_count: number;
decision_count: number;
mistake_count: number;
indexed_folders: Set<string>;
}>();
let totalMessages = 0;
let totalConversations = 0;
let totalDecisions = 0;
let totalMistakes = 0;
let shouldRebuildFts = false;
const { ConversationStorage } = await import("../storage/ConversationStorage.js");
const { SemanticSearch } = await import("../search/SemanticSearch.js");
const { DecisionExtractor } = await import("../parsers/DecisionExtractor.js");
const { MistakeExtractor } = await import("../parsers/MistakeExtractor.js");
const { RequirementsExtractor } = await import("../parsers/RequirementsExtractor.js");
const storage = new ConversationStorage(this.db);
const semanticSearch = new SemanticSearch(this.db);
const decisionExtractor = new DecisionExtractor();
const mistakeExtractor = new MistakeExtractor();
const requirementsExtractor = new RequirementsExtractor();
// Index Codex if requested
if (include_codex && existsSync(codex_path)) {
try {
const { CodexConversationParser } = await import("../parsers/CodexConversationParser.js");
// Get last indexed time for incremental mode (across all codex projects)
let codexLastIndexedMs: number | undefined;
if (incremental) {
const existingCodexProjects = globalIndex.getAllProjects("codex");
const maxIndexed = existingCodexProjects.reduce(
(max, project) => Math.max(max, project.last_indexed),
0
);
if (maxIndexed > 0) {
codexLastIndexedMs = maxIndexed;
}
}
// Parse Codex sessions
const parser = new CodexConversationParser();
const parseResult = parser.parseSession(codex_path, undefined, codexLastIndexedMs);
if (parseResult.messages.length > 0) {
shouldRebuildFts = true;
const conversationIdMap = await storage.storeConversations(parseResult.conversations);
const messageIdMap = await storage.storeMessages(parseResult.messages, {
skipFtsRebuild: true,
conversationIdMap,
});
const toolUseIdMap = await storage.storeToolUses(parseResult.tool_uses, messageIdMap);
await storage.storeToolResults(parseResult.tool_results, messageIdMap, toolUseIdMap);
await storage.storeFileEdits(parseResult.file_edits, conversationIdMap, messageIdMap);
await storage.storeThinkingBlocks(parseResult.thinking_blocks, messageIdMap);
const decisions = decisionExtractor.extractDecisions(
parseResult.messages,
parseResult.thinking_blocks
);
const decisionIdMap = await storage.storeDecisions(decisions, {
skipFtsRebuild: true,
conversationIdMap,
messageIdMap,
});
const mistakes = mistakeExtractor.extractMistakes(
parseResult.messages,
parseResult.tool_results
);
const mistakeIdMap = await storage.storeMistakes(mistakes, conversationIdMap, messageIdMap);
const requirements = requirementsExtractor.extractRequirements(parseResult.messages);
await storage.storeRequirements(requirements, conversationIdMap, messageIdMap);
const validations = requirementsExtractor.extractValidations(
parseResult.tool_uses,
parseResult.tool_results,
parseResult.messages
);
await storage.storeValidations(validations, conversationIdMap);
// Generate embeddings for semantic search
await generateEmbeddingsForIndexing({
messages: parseResult.messages,
decisions,
mistakes,
messageIdMap,
decisionIdMap,
mistakeIdMap,
semanticSearch,
incremental,
logLabel: "Codex sessions",
});
}
const codexProjectPaths = new Set(parseResult.conversations.map((conv) => conv.project_path));
for (const projectPath of codexProjectPaths) {
const stats = storage.getStatsForProject(projectPath, "codex");
globalIndex.registerProject({
project_path: projectPath,
source_type: "codex",
source_root: codex_path,
message_count: stats.messages.count,
conversation_count: stats.conversations.count,
decision_count: stats.decisions.count,
mistake_count: stats.mistakes.count,
metadata: {
indexed_folders: parseResult.indexed_folders || [],
},
});
projects.push({
project_path: projectPath,
source_type: "codex",
message_count: stats.messages.count,
conversation_count: stats.conversations.count,
});
totalMessages += stats.messages.count;
totalConversations += stats.conversations.count;
totalDecisions += stats.decisions.count;
totalMistakes += stats.mistakes.count;
}
} catch (error) {
errors.push({
project_path: codex_path,
error: (error as Error).message,
});
}
}
// Index Claude Code projects if requested
if (include_claude_code && existsSync(claude_projects_path)) {
try {
const { ConversationParser } = await import("../parsers/ConversationParser.js");
const { statSync } = await import("fs");
const projectFolders = readdirSync(claude_projects_path);
const indexedFolderLastIndexed = new Map<string, number>();
if (incremental) {
const existingProjects = globalIndex.getAllProjects("claude-code");
for (const project of existingProjects) {
const folders = project.metadata?.indexed_folders;
if (!Array.isArray(folders)) {
continue;
}
for (const folder of folders) {
if (typeof folder === "string") {
indexedFolderLastIndexed.set(folder, project.last_indexed);
}
}
}
}
for (const folder of projectFolders) {
const folderPath = join(claude_projects_path, folder);
try {
// Skip if not a directory
if (!statSync(folderPath).isDirectory()) {
continue;
}
// Get last indexed time for incremental mode
let lastIndexedMs: number | undefined;
if (incremental) {
const metadataIndexed = indexedFolderLastIndexed.get(folderPath);
if (metadataIndexed) {
lastIndexedMs = metadataIndexed;
} else {
const existingProject = globalIndex.getProject(folderPath, "claude-code");
if (existingProject) {
lastIndexedMs = existingProject.last_indexed;
}
}
}
// Parse Claude Code conversations directly from this folder
const parser = new ConversationParser();
const parseResult = parser.parseFromFolder(folderPath, undefined, lastIndexedMs);
// Skip empty projects
if (parseResult.messages.length === 0) {
continue;
}
const inferredPath = this.inferProjectPathFromMessages(parseResult.messages);
const canonicalProjectPath = inferredPath
? getCanonicalProjectPath(inferredPath).canonicalPath
: folderPath;
if (canonicalProjectPath !== folderPath) {
for (const conversation of parseResult.conversations) {
conversation.project_path = canonicalProjectPath;
}
}
shouldRebuildFts = true;
const conversationIdMap = await storage.storeConversations(parseResult.conversations);
const messageIdMap = await storage.storeMessages(parseResult.messages, {
skipFtsRebuild: true,
conversationIdMap,
});
const toolUseIdMap = await storage.storeToolUses(parseResult.tool_uses, messageIdMap);
await storage.storeToolResults(parseResult.tool_results, messageIdMap, toolUseIdMap);
await storage.storeFileEdits(parseResult.file_edits, conversationIdMap, messageIdMap);
await storage.storeThinkingBlocks(parseResult.thinking_blocks, messageIdMap);
const decisions = decisionExtractor.extractDecisions(
parseResult.messages,
parseResult.thinking_blocks
);
const decisionIdMap = await storage.storeDecisions(decisions, {
skipFtsRebuild: true,
conversationIdMap,
messageIdMap,
});
const mistakes = mistakeExtractor.extractMistakes(
parseResult.messages,
parseResult.tool_results
);
const mistakeIdMap = await storage.storeMistakes(mistakes, conversationIdMap, messageIdMap);
const requirements = requirementsExtractor.extractRequirements(parseResult.messages);
await storage.storeRequirements(requirements, conversationIdMap, messageIdMap);
const validations = requirementsExtractor.extractValidations(
parseResult.tool_uses,
parseResult.tool_results,
parseResult.messages
);
await storage.storeValidations(validations, conversationIdMap);
// Generate embeddings for semantic search
await generateEmbeddingsForIndexing({
messages: parseResult.messages,
decisions,
mistakes,
messageIdMap,
decisionIdMap,
mistakeIdMap,
semanticSearch,
incremental,
logLabel: `project: ${canonicalProjectPath}`,
});
const existingAggregate = claudeProjectsByPath.get(canonicalProjectPath);
const indexedFolders = existingAggregate
? existingAggregate.indexed_folders
: new Set<string>();
indexedFolders.add(folderPath);
const stats = storage.getStatsForProject(canonicalProjectPath, "claude-code");
// Register in global index with the canonical project path
globalIndex.registerProject({
project_path: canonicalProjectPath,
source_type: "claude-code",
source_root: claude_projects_path,
message_count: stats.messages.count,
conversation_count: stats.conversations.count,
decision_count: stats.decisions.count,
mistake_count: stats.mistakes.count,
metadata: {
indexed_folders: Array.from(indexedFolders),
},
});
claudeProjectsByPath.set(canonicalProjectPath, {
project_path: canonicalProjectPath,
source_type: "claude-code",
message_count: stats.messages.count,
conversation_count: stats.conversations.count,
decision_count: stats.decisions.count,
mistake_count: stats.mistakes.count,
indexed_folders: indexedFolders,
});
} catch (error) {
errors.push({
project_path: folder,
error: (error as Error).message,
});
}
}
} catch (error) {
errors.push({
project_path: claude_projects_path,
error: (error as Error).message,
});
}
}
for (const project of claudeProjectsByPath.values()) {
projects.push({
project_path: project.project_path,
source_type: "claude-code",
message_count: project.message_count,
conversation_count: project.conversation_count,
});
totalMessages += project.message_count;
totalConversations += project.conversation_count;
totalDecisions += project.decision_count;
totalMistakes += project.mistake_count;
}
if (shouldRebuildFts) {
storage.rebuildAllFts();
}
const stats = globalIndex.getGlobalStats();
return {
success: true,
global_index_path: globalIndex.getDbPath(),
projects_indexed: projects.length,
claude_code_projects: stats.claude_code_projects,
codex_projects: stats.codex_projects,
total_messages: totalMessages,
total_conversations: totalConversations,
total_decisions: totalDecisions,
total_mistakes: totalMistakes,
projects,
errors,
message: `Indexed ${projects.length} project(s): ${stats.claude_code_projects} Claude Code + ${stats.codex_projects} Codex`,
};
} finally {
// Ensure GlobalIndex is always closed
globalIndex.close();
}
}
/**
 * Search across all indexed projects.
 *
 * Uses semantic (embedding) similarity when an embedding model is available,
 * otherwise falls back to FTS matching. Results are filtered by date range
 * and source type, ranked by similarity, then paginated.
 *
 * @param args - Search arguments (query, limit, offset, date_range, source_type)
 * @returns Search results from all projects plus per-source result counts
 */
async searchAllConversations(
  args: Record<string, unknown>
): Promise<Types.SearchAllConversationsResponse> {
  await this.maybeAutoIndex();
  // Lazy-load heavy modules only when this tool is actually invoked.
  const { GlobalIndex } = await import("../storage/GlobalIndex.js");
  const { SemanticSearch } = await import("../search/SemanticSearch.js");
  const { getEmbeddingGenerator } = await import("../embeddings/EmbeddingGenerator.js");
  const typedArgs = args as unknown as Types.SearchAllConversationsArgs;
  const { query, limit = 20, offset = 0, date_range, source_type = "all" } = typedArgs;
  const globalIndex = new GlobalIndex(this.db);
  try {
    const projects = globalIndex.getAllProjects(
      source_type === "all" ? undefined : source_type
    );
    // Try to embed the query; if the embedder is unavailable or throws,
    // queryEmbedding stays undefined and the search falls back to FTS.
    let queryEmbedding: Float32Array | undefined;
    try {
      const embedder = await getEmbeddingGenerator();
      if (embedder.isAvailable()) {
        queryEmbedding = await embedder.embed(query);
      }
    } catch (_embeddingError) {
      // Fall back to FTS
    }
    const semanticSearch = new SemanticSearch(this.db);
    // Over-fetch (requested window + 50) so the date/source filters below
    // still leave enough rows to fill the requested page.
    const localResults = await semanticSearch.searchConversations(
      query,
      limit + offset + 50,
      undefined,
      queryEmbedding
    );
    const filteredResults = localResults.filter((r) => {
      // date_range is an inclusive [start, end] pair in the same unit as
      // message.timestamp (epoch-based; fed to `new Date(...)` below).
      if (date_range) {
        const timestamp = r.message.timestamp;
        if (timestamp < date_range[0] || timestamp > date_range[1]) {
          return false;
        }
      }
      if (source_type !== "all") {
        // Rows indexed before source tracking existed default to "claude-code".
        const resultSource = r.conversation.source_type || "claude-code";
        return resultSource === source_type;
      }
      return true;
    });
    // Flatten to the wire format and tally per-source counts as we go.
    const allResults: Types.GlobalSearchResult[] = [];
    let claudeCodeResults = 0;
    let codexResults = 0;
    for (const result of filteredResults) {
      const source = (result.conversation.source_type || "claude-code") as "claude-code" | "codex";
      allResults.push({
        conversation_id: result.conversation.id,
        message_id: result.message.id,
        timestamp: new Date(result.message.timestamp).toISOString(),
        similarity: result.similarity,
        snippet: result.snippet,
        git_branch: result.conversation.git_branch,
        message_type: result.message.message_type,
        role: result.message.role,
        project_path: result.conversation.project_path,
        source_type: source,
      });
      if (source === "claude-code") {
        claudeCodeResults++;
      } else {
        codexResults++;
      }
    }
    // Rank by similarity, then apply offset/limit pagination.
    const sortedResults = allResults.sort((a, b) => b.similarity - a.similarity);
    const paginatedResults = sortedResults.slice(offset, offset + limit);
    const successfulProjects = projects.length;
    return {
      query,
      results: paginatedResults,
      // NOTE(review): total_found reports the page size, while has_more is
      // computed from the full match count. Siblings (getAllDecisions,
      // searchAllMistakes) do the same — confirm this is the intended contract.
      total_found: paginatedResults.length,
      has_more: offset + limit < sortedResults.length,
      offset,
      projects_searched: projects.length,
      projects_succeeded: successfulProjects,
      failed_projects: undefined,
      search_stats: {
        claude_code_results: claudeCodeResults,
        codex_results: codexResults,
      },
      message: `Found ${paginatedResults.length} result(s) across ${projects.length} project(s)`,
    };
  } finally {
    // Ensure GlobalIndex is always closed
    globalIndex.close();
  }
}
/**
* Get decisions from all indexed projects.
*
* @param args - Query arguments
* @returns Decisions from all projects
*/
async getAllDecisions(args: Record<string, unknown>): Promise<Types.GetAllDecisionsResponse> {
await this.maybeAutoIndex();
const { GlobalIndex } = await import("../storage/GlobalIndex.js");
const { SemanticSearch } = await import("../search/SemanticSearch.js");
const typedArgs = args as unknown as Types.GetAllDecisionsArgs;
const { query, file_path, limit = 20, offset = 0, source_type = 'all' } = typedArgs;
const globalIndex = new GlobalIndex(this.db);
try {
const projects = globalIndex.getAllProjects(
source_type === "all" ? undefined : source_type
);
const semanticSearch = new SemanticSearch(this.db);
const searchResults = await semanticSearch.searchDecisions(query, limit + offset + 50);
const filteredResults = searchResults.filter((r) => {
if (file_path && !r.decision.related_files.includes(file_path)) {
return false;
}
if (source_type !== "all") {
const convSource = r.conversation.source_type || "claude-code";
return convSource === source_type;
}
return true;
});
const allDecisions: Types.GlobalDecision[] = filteredResults.map((r) => ({
decision_id: r.decision.id,
decision_text: r.decision.decision_text,
rationale: r.decision.rationale,
alternatives_considered: r.decision.alternatives_considered,
rejected_reasons: r.decision.rejected_reasons,
context: r.decision.context,
related_files: r.decision.related_files,
related_commits: r.decision.related_commits,
timestamp: new Date(r.decision.timestamp).toISOString(),
similarity: r.similarity,
project_path: r.conversation.project_path,
source_type: (r.conversation.source_type || "claude-code") as "claude-code" | "codex",
}));
const sortedDecisions = allDecisions.sort((a, b) => b.similarity - a.similarity);
const paginatedDecisions = sortedDecisions.slice(offset, offset + limit);
return {
query,
decisions: paginatedDecisions,
total_found: paginatedDecisions.length,
has_more: offset + limit < sortedDecisions.length,
offset,
projects_searched: projects.length,
message: `Found ${paginatedDecisions.length} decision(s) across ${projects.length} project(s)`,
};
} finally {
globalIndex.close();
}
}
/**
 * Search mistakes across all indexed projects.
 *
 * Runs a semantic search over recorded mistakes, filters by mistake type
 * and source, ranks by similarity, and paginates.
 *
 * @param args - Search arguments
 * @returns Mistakes from all projects
 */
async searchAllMistakes(
  args: Record<string, unknown>
): Promise<Types.SearchAllMistakesResponse> {
  await this.maybeAutoIndex();
  const { GlobalIndex } = await import("../storage/GlobalIndex.js");
  const { SemanticSearch } = await import("../search/SemanticSearch.js");
  const typedArgs = args as unknown as Types.SearchAllMistakesArgs;
  const { query, mistake_type, limit = 20, offset = 0, source_type = 'all' } = typedArgs;
  const globalIndex = new GlobalIndex(this.db);
  try {
    const projects = globalIndex.getAllProjects(
      source_type === "all" ? undefined : source_type
    );
    // Internal shape: carries the similarity score for ranking; it is
    // stripped before returning since GlobalMistake has no similarity field.
    interface GlobalMistakeWithSimilarity extends Types.GlobalMistake {
      similarity: number;
    }
    const semanticSearch = new SemanticSearch(this.db);
    // Over-fetch (requested window + 50) so filtering can still fill the page.
    const searchResults = await semanticSearch.searchMistakes(query, limit + offset + 50);
    const filteredResults = searchResults.filter((r) => {
      if (mistake_type && r.mistake.mistake_type !== mistake_type) {
        return false;
      }
      if (source_type !== "all") {
        // Rows indexed before source tracking existed default to "claude-code".
        const convSource = r.conversation.source_type || "claude-code";
        return convSource === source_type;
      }
      return true;
    });
    const allMistakes: GlobalMistakeWithSimilarity[] = filteredResults.map((r) => ({
      mistake_id: r.mistake.id,
      mistake_type: r.mistake.mistake_type,
      what_went_wrong: r.mistake.what_went_wrong,
      correction: r.mistake.correction,
      user_correction_message: r.mistake.user_correction_message,
      files_affected: r.mistake.files_affected,
      timestamp: new Date(r.mistake.timestamp).toISOString(),
      project_path: r.conversation.project_path,
      source_type: (r.conversation.source_type || "claude-code") as "claude-code" | "codex",
      similarity: r.similarity,
    }));
    // Sort by similarity (semantic relevance) and paginate
    const sortedMistakes = allMistakes.sort((a, b) => b.similarity - a.similarity);
    const paginatedMistakes = sortedMistakes.slice(offset, offset + limit);
    // Remove similarity from results (not in GlobalMistake type)
    const results: Types.GlobalMistake[] = paginatedMistakes.map(({ similarity: _similarity, ...rest }) => rest);
    return {
      query,
      mistakes: results,
      total_found: results.length,
      has_more: offset + limit < sortedMistakes.length,
      offset,
      projects_searched: projects.length,
      message: `Found ${results.length} mistake(s) across ${projects.length} project(s)`,
    };
  } finally {
    globalIndex.close();
  }
}
// ==================== Live Context Layer Tools ====================
/**
 * Store a fact, decision, or context in working memory.
 *
 * @param args - Remember arguments with key, value, context, tags, ttl
 * @returns The stored memory item
 */
async remember(args: Record<string, unknown>): Promise<Types.RememberResponse> {
  const { WorkingMemoryStore } = await import("../memory/WorkingMemoryStore.js");
  const { key, value, context, tags, ttl, project_path } =
    args as unknown as Types.RememberArgs;
  const projectPath = this.resolveProjectPath(project_path);
  // Both key and value are mandatory; reject early with a clear message.
  if (!key || !value) {
    return { success: false, message: "key and value are required" };
  }
  try {
    const store = new WorkingMemoryStore(this.db.getDatabase());
    const stored = store.remember({ key, value, context, tags, ttl, projectPath });
    // Serialize timestamps as ISO-8601 strings for the wire format.
    return {
      success: true,
      item: {
        id: stored.id,
        key: stored.key,
        value: stored.value,
        context: stored.context,
        tags: stored.tags,
        created_at: new Date(stored.createdAt).toISOString(),
        updated_at: new Date(stored.updatedAt).toISOString(),
        expires_at: stored.expiresAt ? new Date(stored.expiresAt).toISOString() : undefined,
      },
      message: `Remembered "${key}" successfully`,
    };
  } catch (error) {
    return {
      success: false,
      message: `Error storing memory: ${(error as Error).message}`,
    };
  }
}
/**
 * Recall a specific memory item by key.
 *
 * @param args - Recall arguments with key
 * @returns The recalled memory item, or found=false when absent
 */
async recall(args: Record<string, unknown>): Promise<Types.RecallResponse> {
  const { WorkingMemoryStore } = await import("../memory/WorkingMemoryStore.js");
  const { key, project_path } = args as unknown as Types.RecallArgs;
  const projectPath = this.resolveProjectPath(project_path);
  if (!key) {
    return { success: false, found: false, message: "key is required" };
  }
  try {
    const store = new WorkingMemoryStore(this.db.getDatabase());
    const hit = store.recall(key, projectPath);
    // A miss is still a successful lookup — only found differs.
    if (!hit) {
      return {
        success: true,
        found: false,
        message: `No memory found for key "${key}"`,
      };
    }
    return {
      success: true,
      found: true,
      item: {
        id: hit.id,
        key: hit.key,
        value: hit.value,
        context: hit.context,
        tags: hit.tags,
        created_at: new Date(hit.createdAt).toISOString(),
        updated_at: new Date(hit.updatedAt).toISOString(),
        expires_at: hit.expiresAt ? new Date(hit.expiresAt).toISOString() : undefined,
      },
      message: `Found memory for "${key}"`,
    };
  } catch (error) {
    return {
      success: false,
      found: false,
      message: `Error recalling memory: ${(error as Error).message}`,
    };
  }
}
/**
 * Search working memory semantically.
 *
 * @param args - Search arguments with query
 * @returns Relevant memory items with similarity scores
 */
async recallRelevant(args: Record<string, unknown>): Promise<Types.RecallRelevantResponse> {
  const { WorkingMemoryStore } = await import("../memory/WorkingMemoryStore.js");
  const { query, limit = 10, project_path } = args as unknown as Types.RecallRelevantArgs;
  const projectPath = this.resolveProjectPath(project_path);
  if (!query) {
    return { success: false, items: [], message: "query is required" };
  }
  try {
    const store = new WorkingMemoryStore(this.db.getDatabase());
    const matches = store.recallRelevant({ query, projectPath, limit });
    // Map store items into the wire format (ISO timestamps, similarity kept).
    const items = matches.map((m) => ({
      id: m.id,
      key: m.key,
      value: m.value,
      context: m.context,
      tags: m.tags,
      similarity: m.similarity,
      created_at: new Date(m.createdAt).toISOString(),
      updated_at: new Date(m.updatedAt).toISOString(),
    }));
    return {
      success: true,
      items,
      total_found: items.length,
      message: items.length > 0
        ? `Found ${items.length} relevant memory item(s)`
        : "No relevant memories found",
    };
  } catch (error) {
    return {
      success: false,
      items: [],
      message: `Error searching memory: ${(error as Error).message}`,
    };
  }
}
/**
 * List all items in working memory.
 *
 * @param args - List arguments with optional tags filter and pagination
 * @returns Memory items for the project, with pagination metadata
 */
async listMemory(args: Record<string, unknown>): Promise<Types.ListMemoryResponse> {
  const { WorkingMemoryStore } = await import("../memory/WorkingMemoryStore.js");
  // Cast via unknown for consistency with every sibling handler in this class.
  const typedArgs = args as unknown as Types.ListMemoryArgs;
  const {
    tags,
    limit = 100,
    offset = 0,
    project_path,
  } = typedArgs;
  const projectPath = this.resolveProjectPath(project_path);
  try {
    const store = new WorkingMemoryStore(this.db.getDatabase());
    // Fetch limit+1 rows so has_more can be derived without a second query.
    const items = store.list(projectPath, { tags, limit: limit + 1, offset });
    const hasMore = items.length > limit;
    const results = hasMore ? items.slice(0, limit) : items;
    const totalCount = store.count(projectPath);
    return {
      success: true,
      items: results.map(item => ({
        id: item.id,
        key: item.key,
        value: item.value,
        context: item.context,
        tags: item.tags,
        created_at: new Date(item.createdAt).toISOString(),
        updated_at: new Date(item.updatedAt).toISOString(),
        expires_at: item.expiresAt ? new Date(item.expiresAt).toISOString() : undefined,
      })),
      total_count: totalCount,
      has_more: hasMore,
      offset,
      message: `Listed ${results.length} of ${totalCount} memory item(s)`,
    };
  } catch (error) {
    return {
      success: false,
      items: [],
      total_count: 0,
      has_more: false,
      // Echo the requested offset (was hard-coded to 0, which contradicted
      // the success path and could mislead paginating callers on error).
      offset,
      message: `Error listing memory: ${(error as Error).message}`,
    };
  }
}
/**
 * Remove a memory item by key.
 *
 * @param args - Forget arguments with key
 * @returns Success status (false when the key did not exist)
 */
async forget(args: Record<string, unknown>): Promise<Types.ForgetResponse> {
  const { WorkingMemoryStore } = await import("../memory/WorkingMemoryStore.js");
  const { key, project_path } = args as unknown as Types.ForgetArgs;
  const projectPath = this.resolveProjectPath(project_path);
  if (!key) {
    return { success: false, message: "key is required" };
  }
  try {
    const store = new WorkingMemoryStore(this.db.getDatabase());
    const wasDeleted = store.forget(key, projectPath);
    // success mirrors whether a row was actually removed.
    const message = wasDeleted
      ? `Forgot memory for "${key}"`
      : `No memory found for key "${key}"`;
    return { success: wasDeleted, message };
  } catch (error) {
    return {
      success: false,
      message: `Error forgetting memory: ${(error as Error).message}`,
    };
  }
}
// ============================================================
// SESSION HANDOFF TOOLS
// ============================================================
/**
 * Prepare a handoff document from the current session.
 * Captures decisions, active files, pending tasks, and working memory.
 *
 * @param args - Handoff preparation arguments
 * @returns The prepared handoff document summary
 */
async prepareHandoff(args: Record<string, unknown>): Promise<Types.PrepareHandoffResponse> {
  const { SessionHandoffStore } = await import("../handoff/SessionHandoffStore.js");
  const typedArgs = args as unknown as Types.PrepareHandoffArgs;
  const {
    session_id,
    include = ["decisions", "files", "tasks", "memory"],
    project_path,
  } = typedArgs;
  const projectPath = this.resolveProjectPath(project_path);
  try {
    const store = new SessionHandoffStore(this.db.getDatabase());
    const handoff = store.prepareHandoff({
      sessionId: session_id,
      projectPath,
      include: include as Array<"decisions" | "files" | "tasks" | "memory">,
    });
    // Pull the counts out once; they feed both the payload and the message.
    const decisionCount = handoff.decisions.length;
    const fileCount = handoff.activeFiles.length;
    const taskCount = handoff.pendingTasks.length;
    const memoryCount = handoff.workingMemory.length;
    return {
      success: true,
      handoff: {
        id: handoff.id,
        from_session_id: handoff.fromSessionId,
        project_path: handoff.projectPath,
        created_at: new Date(handoff.createdAt).toISOString(),
        summary: handoff.contextSummary,
        decisions_count: decisionCount,
        files_count: fileCount,
        tasks_count: taskCount,
        memory_count: memoryCount,
      },
      message: `Handoff prepared with ${decisionCount} decisions, ${fileCount} files, ${taskCount} tasks, ${memoryCount} memory items.`,
    };
  } catch (error) {
    return {
      success: false,
      message: `Error preparing handoff: ${(error as Error).message}`,
    };
  }
}
/**
 * Resume from a handoff in a new session.
 * Loads context from a previous session for continuity.
 *
 * @param args - Resume arguments; handoff_id is optional — when omitted the
 *   store selects an unresumed handoff for the project (presumably the most
 *   recent; see SessionHandoffStore.resumeFromHandoff to confirm)
 * @returns The resumed handoff context, or found=false when none is available
 */
async resumeFromHandoff(args: Record<string, unknown>): Promise<Types.ResumeFromHandoffResponse> {
  const { SessionHandoffStore } = await import("../handoff/SessionHandoffStore.js");
  const typedArgs = args as unknown as Types.ResumeFromHandoffArgs;
  const {
    handoff_id,
    new_session_id,
    inject_context = true,
    project_path,
  } = typedArgs;
  const projectPath = this.resolveProjectPath(project_path);
  try {
    const store = new SessionHandoffStore(this.db.getDatabase());
    const handoff = store.resumeFromHandoff({
      handoffId: handoff_id,
      projectPath,
      newSessionId: new_session_id,
      injectContext: inject_context,
    });
    // Null means there was nothing to resume — still a successful call.
    if (!handoff) {
      return {
        success: true,
        found: false,
        message: "No unresumed handoff found for this project.",
      };
    }
    // Flatten the stored handoff into the wire format: camelCase fields
    // become snake_case, timestamps become ISO-8601 strings.
    return {
      success: true,
      found: true,
      handoff: {
        id: handoff.id,
        from_session_id: handoff.fromSessionId,
        project_path: handoff.projectPath,
        created_at: new Date(handoff.createdAt).toISOString(),
        summary: handoff.contextSummary,
        decisions: handoff.decisions.map((d) => ({
          text: d.text,
          rationale: d.rationale,
          timestamp: new Date(d.timestamp).toISOString(),
        })),
        active_files: handoff.activeFiles.map((f) => ({
          path: f.path,
          last_action: f.lastAction,
        })),
        pending_tasks: handoff.pendingTasks.map((t) => ({
          description: t.description,
          status: t.status,
        })),
        memory_items: handoff.workingMemory.map((m) => ({
          key: m.key,
          value: m.value,
        })),
      },
      message: `Resumed from handoff: ${handoff.contextSummary}`,
    };
  } catch (error) {
    return {
      success: false,
      found: false,
      message: `Error resuming from handoff: ${(error as Error).message}`,
    };
  }
}
/**
 * List available handoffs for a project.
 *
 * @param args - List arguments (limit, include_resumed, project_path)
 * @returns List of available handoffs, newest-first per the store
 */
async listHandoffs(args: Record<string, unknown>): Promise<Types.ListHandoffsResponse> {
  const { SessionHandoffStore } = await import("../handoff/SessionHandoffStore.js");
  const { limit = 10, include_resumed = false, project_path } =
    args as unknown as Types.ListHandoffsArgs;
  const projectPath = this.resolveProjectPath(project_path);
  try {
    const store = new SessionHandoffStore(this.db.getDatabase());
    const found = store.listHandoffs(projectPath, {
      limit,
      includeResumed: include_resumed,
    });
    // Serialize to the wire format (snake_case, ISO-8601 timestamps).
    const handoffs = found.map((h) => ({
      id: h.id,
      from_session_id: h.fromSessionId,
      created_at: new Date(h.createdAt).toISOString(),
      resumed_by: h.resumedBy,
      resumed_at: h.resumedAt ? new Date(h.resumedAt).toISOString() : undefined,
      summary: h.summary,
    }));
    return {
      success: true,
      handoffs,
      total_count: handoffs.length,
      message: `Found ${handoffs.length} handoff(s)`,
    };
  } catch (error) {
    return {
      success: false,
      handoffs: [],
      total_count: 0,
      message: `Error listing handoffs: ${(error as Error).message}`,
    };
  }
}
// ============================================================
// CONTEXT INJECTION TOOLS
// ============================================================
/**
 * Get context to inject at the start of a new conversation.
 * Combines handoffs, decisions, working memory, and file history.
 *
 * @param args - Context injection arguments (query, max_tokens, sources)
 * @returns Structured context for injection plus a token-count estimate
 */
async getStartupContext(args: Record<string, unknown>): Promise<Types.GetStartupContextResponse> {
  const { ContextInjector } = await import("../context/ContextInjector.js");
  const typedArgs = args as unknown as Types.GetStartupContextArgs;
  const {
    query,
    max_tokens = 2000,
    sources = ["history", "decisions", "memory", "handoffs"],
    project_path,
  } = typedArgs;
  const projectPath = this.resolveProjectPath(project_path);
  try {
    const injector = new ContextInjector(this.db.getDatabase());
    const context = await injector.getRelevantContext({
      query,
      projectPath,
      maxTokens: max_tokens,
      sources: sources as Array<"history" | "decisions" | "memory" | "handoffs">,
    });
    // Flatten the injector's camelCase result into the snake_case wire
    // format. The handoff section is optional and omitted when absent.
    return {
      success: true,
      context: {
        handoff: context.handoff ? {
          id: context.handoff.id,
          from_session_id: context.handoff.fromSessionId,
          project_path: context.handoff.projectPath,
          created_at: new Date(context.handoff.createdAt).toISOString(),
          summary: context.handoff.contextSummary,
          decisions: context.handoff.decisions.map(d => ({
            text: d.text,
            rationale: d.rationale,
            timestamp: new Date(d.timestamp).toISOString(),
          })),
          active_files: context.handoff.activeFiles.map(f => ({
            path: f.path,
            last_action: f.lastAction,
          })),
          pending_tasks: context.handoff.pendingTasks.map(t => ({
            description: t.description,
            status: t.status,
          })),
          memory_items: context.handoff.workingMemory.map(m => ({
            key: m.key,
            value: m.value,
          })),
        } : undefined,
        decisions: context.decisions.map(d => ({
          id: d.id,
          text: d.text,
          rationale: d.rationale,
          timestamp: new Date(d.timestamp).toISOString(),
        })),
        memory: context.memory.map(m => ({
          id: m.id,
          key: m.key,
          value: m.value,
          context: m.context,
          tags: m.tags,
          created_at: new Date(m.createdAt).toISOString(),
          updated_at: new Date(m.updatedAt).toISOString(),
        })),
        recent_files: context.recentFiles.map(f => ({
          path: f.path,
          last_action: f.lastAction,
          timestamp: new Date(f.timestamp).toISOString(),
        })),
        summary: context.summary,
      },
      token_estimate: context.tokenEstimate,
      message: `Retrieved context: ${context.summary}`,
    };
  } catch (error) {
    // On failure return an empty-but-well-formed context so callers can
    // proceed without special-casing the error shape.
    return {
      success: false,
      context: {
        decisions: [],
        memory: [],
        recent_files: [],
        summary: "",
      },
      token_estimate: 0,
      message: `Error getting startup context: ${(error as Error).message}`,
    };
  }
}
/**
* Inject relevant context based on the first message in a new conversation.
* Returns formatted markdown context for direct use.
*
* @param args - Injection arguments
* @returns Formatted context string
*/
async injectRelevantContext(args: Record<string, unknown>): Promise<Types.InjectRelevantContextResponse> {
const { ContextInjector } = await import("../context/ContextInjector.js");
const typedArgs = args as unknown as Types.InjectRelevantContextArgs;
const {
message,
max_tokens = 2000,
sources = ["history", "decisions", "memory", "handoffs"],
project_path,
} = typedArgs;
const projectPath = this.resolveProjectPath(project_path);
if (!message) {
return {
success: false,
injected_context: "",
sources_used: [],
token_count: 0,
message: "message is required",
};
}
try {
const injector = new ContextInjector(this.db.getDatabase());
const context = await injector.getRelevantContext({
query: message,
projectPath,
maxTokens: max_tokens,
sources: sources as Array<"history" | "decisions" | "memory" | "handoffs">,
});
// Format for injection
const formattedContext = injector.formatForInjection(context);
// Track which sources were used
const sourcesUsed: string[] = [];
if (context.handoff) {
sourcesUsed.push("handoffs");
}
if (context.decisions.length > 0) {
sourcesUsed.push("decisions");
}
if (context.memory.length > 0) {
sourcesUsed.push("memory");
}
if (context.recentFiles.length > 0) {
sourcesUsed.push("history");
}
return {
success: true,
injected_context: formattedContext,
sources_used: sourcesUsed,
token_count: context.tokenEstimate,
message: `Injected context from ${sourcesUsed.length} source(s)`,
};
} catch (error) {
return {
success: false,
injected_context: "",
sources_used: [],
token_count: 0,
message: `Error injecting context: ${(error as Error).message}`,
};
}
}
// ==================== Phase 1: Tag Management Handlers ====================
/**
 * List all tags with usage statistics.
 *
 * Reads from the tag_stats view/table, supporting scope filtering
 * (project / global / all), optional inclusion of unused tags, sorting,
 * and fetch+1 pagination.
 *
 * @param args - List arguments (scope, sort_by, include_unused, limit, offset)
 * @returns Tags with usage stats, a total count, and a hasMore flag
 */
async listTags(args: Record<string, unknown>): Promise<Types.ListTagsResponse> {
  const typedArgs = args as Types.ListTagsArgs;
  const {
    scope = "all",
    sort_by = "usage_count",
    include_unused = false,
    limit = 50,
    offset = 0,
  } = typedArgs;
  const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
  try {
    // Build the WHERE clause once and share it between the page query and
    // the count query; previously the two were duplicated by hand, which
    // risked the filters drifting apart.
    const conditions: string[] = [];
    const filterParams: unknown[] = [];
    if (scope === "project" && projectPath) {
      conditions.push("project_path = ?");
      filterParams.push(projectPath);
    } else if (scope === "global") {
      conditions.push("project_path IS NULL");
    } else if (scope === "all" && projectPath) {
      // "all" means this project's tags plus global (project-less) tags.
      conditions.push("(project_path = ? OR project_path IS NULL)");
      filterParams.push(projectPath);
    }
    if (!include_unused) {
      conditions.push("usage_count > 0");
    }
    const whereClause = conditions.length > 0 ? ` AND ${conditions.join(" AND ")}` : "";
    // Whitelisted ORDER BY fragments — sort_by never reaches the SQL raw.
    const sortMap: Record<string, string> = {
      name: "name ASC",
      usage_count: "usage_count DESC",
      last_used: "last_used_at DESC NULLS LAST",
      created: "created_at DESC",
    };
    const query = `
      SELECT
        id, name, project_path, description, color,
        created_at, updated_at, usage_count, last_used_at, used_in_types
      FROM tag_stats
      WHERE 1=1${whereClause}
      ORDER BY ${sortMap[sort_by] || "usage_count DESC"}
      LIMIT ? OFFSET ?
    `;
    // Fetch limit+1 rows so hasMore can be derived without a second query.
    const rows = this.db.prepare(query).all(...filterParams, limit + 1, offset) as Array<{
      id: number;
      name: string;
      project_path: string | null;
      description: string | null;
      color: string | null;
      created_at: number;
      updated_at: number;
      usage_count: number;
      last_used_at: number | null;
      used_in_types: string | null;
    }>;
    const hasMore = rows.length > limit;
    const tags = rows.slice(0, limit).map((row) => ({
      id: row.id,
      name: row.name,
      project_path: row.project_path,
      description: row.description,
      color: row.color,
      usage_count: row.usage_count,
      last_used_at: row.last_used_at,
      // used_in_types is stored as a comma-separated list; empty → [].
      used_in_types: row.used_in_types ? row.used_in_types.split(",") : [],
      created_at: row.created_at,
      updated_at: row.updated_at,
    }));
    // Total matching count (unpaginated), using the same shared filters.
    const countQuery = `SELECT COUNT(*) as total FROM tag_stats WHERE 1=1${whereClause}`;
    const countResult = this.db.prepare(countQuery).get(...filterParams) as { total: number };
    return {
      success: true,
      tags,
      total: countResult.total,
      hasMore,
      message: `Found ${tags.length} tag(s)`,
    };
  } catch (error) {
    return {
      success: false,
      tags: [],
      total: 0,
      hasMore: false,
      message: `Error listing tags: ${(error as Error).message}`,
    };
  }
}
  /**
   * Search items by tags.
   *
   * Resolves the requested tag names to ids (honoring scope), collects every
   * item_tags association for those ids, groups the rows per item, then
   * applies match_mode: "any" keeps every item, "all" keeps only items whose
   * matched-tag count equals the number of requested tags. Filtering and
   * pagination happen in memory after the full fetch.
   */
  async searchByTags(args: Record<string, unknown>): Promise<Types.SearchByTagsResponse> {
    const typedArgs = args as unknown as Types.SearchByTagsArgs;
    const {
      tags,
      match_mode = "any",
      item_types,
      scope = "all",
      limit = 20,
      offset = 0,
    } = typedArgs;
    const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
    if (!tags || tags.length === 0) {
      return {
        success: false,
        items: [],
        total: 0,
        hasMore: false,
        tag_breakdown: {},
        message: "At least one tag is required",
      };
    }
    try {
      // Resolve tag names to ids within the requested scope.
      const tagPlaceholders = tags.map(() => "?").join(",");
      let tagQuery = `SELECT id, name FROM tags WHERE name IN (${tagPlaceholders})`;
      const tagParams: unknown[] = [...tags];
      if (scope === "project" && projectPath) {
        tagQuery += " AND project_path = ?";
        tagParams.push(projectPath);
      } else if (scope === "global") {
        tagQuery += " AND project_path IS NULL";
      } else if (scope === "all" && projectPath) {
        tagQuery += " AND (project_path = ? OR project_path IS NULL)";
        tagParams.push(projectPath);
      }
      const tagRows = this.db.prepare(tagQuery).all(...tagParams) as Array<{ id: number; name: string }>;
      const tagIds = tagRows.map((r) => r.id);
      const tagIdToName = new Map(tagRows.map((r) => [r.id, r.name]));
      if (tagIds.length === 0) {
        // None of the requested names exist in this scope — not an error.
        return {
          success: true,
          items: [],
          total: 0,
          hasMore: false,
          tag_breakdown: {},
          message: "No matching tags found",
        };
      }
      // Fetch every association for the resolved tag ids, optionally
      // restricted to specific item types.
      const tagIdPlaceholders = tagIds.map(() => "?").join(",");
      let itemQuery = `
        SELECT it.item_type, it.item_id, it.tag_id, it.created_at
        FROM item_tags it
        WHERE it.tag_id IN (${tagIdPlaceholders})
      `;
      const itemParams: unknown[] = [...tagIds];
      if (item_types && item_types.length > 0) {
        const typePlaceholders = item_types.map(() => "?").join(",");
        itemQuery += ` AND it.item_type IN (${typePlaceholders})`;
        itemParams.push(...item_types);
      }
      const itemRows = this.db.prepare(itemQuery).all(...itemParams) as Array<{
        item_type: string;
        item_id: number;
        tag_id: number;
        created_at: number;
      }>;
      // Group associations per item; the first row seen for an item supplies
      // its created_at. matched_tags is a set of names, so a project tag and
      // a global tag sharing a name collapse to one match.
      const itemMap = new Map<string, {
        item_type: string;
        item_id: number;
        matched_tags: Set<string>;
        created_at: number;
      }>();
      for (const row of itemRows) {
        const key = `${row.item_type}:${row.item_id}`;
        if (!itemMap.has(key)) {
          itemMap.set(key, {
            item_type: row.item_type,
            item_id: row.item_id,
            matched_tags: new Set(),
            created_at: row.created_at,
          });
        }
        const tagName = tagIdToName.get(row.tag_id);
        const item = itemMap.get(key);
        if (tagName && item) {
          item.matched_tags.add(tagName);
        }
      }
      // match_mode "all" demands one match per requested tag name.
      let filteredItems = Array.from(itemMap.values());
      if (match_mode === "all") {
        filteredItems = filteredItems.filter((item) => item.matched_tags.size === tags.length);
      }
      // Per-tag counts over the filtered result set.
      const tagBreakdown: Record<string, number> = {};
      for (const item of filteredItems) {
        for (const tag of item.matched_tags) {
          tagBreakdown[tag] = (tagBreakdown[tag] || 0) + 1;
        }
      }
      const total = filteredItems.length;
      const hasMore = offset + limit < total;
      const paginatedItems = filteredItems.slice(offset, offset + limit);
      // Get summaries for items (simplified - just use item_id for now);
      // all_tags mirrors matched_tags because the item's full tag list is
      // not fetched here.
      const items: Types.TaggedItem[] = paginatedItems.map((item) => ({
        item_type: item.item_type as Types.TagItemType,
        item_id: item.item_id,
        item_summary: `${item.item_type} #${item.item_id}`,
        matched_tags: Array.from(item.matched_tags),
        all_tags: Array.from(item.matched_tags),
        created_at: item.created_at,
      }));
      return {
        success: true,
        items,
        total,
        hasMore,
        tag_breakdown: tagBreakdown,
        message: `Found ${total} item(s) with matching tags`,
      };
    } catch (error) {
      return {
        success: false,
        items: [],
        total: 0,
        hasMore: false,
        tag_breakdown: {},
        message: `Error searching by tags: ${(error as Error).message}`,
      };
    }
  }
  /**
   * Rename a tag.
   *
   * If no tag named `new_name` exists in the requested scope, the tag row is
   * renamed in place (associations keep their tag_id and are untouched).
   * If one already exists, the operation becomes a merge: associations are
   * moved to the existing tag, colliding duplicates are dropped, and the old
   * tag row is deleted. Everything runs inside one transaction.
   */
  async renameTag(args: Record<string, unknown>): Promise<Types.RenameTagResponse> {
    const typedArgs = args as unknown as Types.RenameTagArgs;
    const { old_name, new_name, scope = "project" } = typedArgs;
    const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
    if (!old_name || !new_name) {
      return {
        success: false,
        old_name: old_name || "",
        new_name: new_name || "",
        items_affected: 0,
        merged: false,
        message: "Both old_name and new_name are required",
      };
    }
    try {
      return this.db.transaction(() => {
        // Find the old tag (any scope other than project/global — e.g. "all"
        // — appends no condition and matches any tag with this name).
        let findQuery = "SELECT id FROM tags WHERE name = ?";
        const findParams: unknown[] = [old_name];
        if (scope === "project" && projectPath) {
          findQuery += " AND project_path = ?";
          findParams.push(projectPath);
        } else if (scope === "global") {
          findQuery += " AND project_path IS NULL";
        }
        const oldTag = this.db.prepare(findQuery).get(...findParams) as { id: number } | undefined;
        if (!oldTag) {
          return {
            success: false,
            old_name,
            new_name,
            items_affected: 0,
            merged: false,
            message: `Tag '${old_name}' not found`,
          };
        }
        // Check if new name already exists (same scope rules as above).
        let existsQuery = "SELECT id FROM tags WHERE name = ?";
        const existsParams: unknown[] = [new_name];
        if (scope === "project" && projectPath) {
          existsQuery += " AND project_path = ?";
          existsParams.push(projectPath);
        } else if (scope === "global") {
          existsQuery += " AND project_path IS NULL";
        }
        const existingTag = this.db.prepare(existsQuery).get(...existsParams) as { id: number } | undefined;
        if (existingTag) {
          // Merge: move items from old tag to existing tag.
          // items_affected reports the association count on the old tag
          // before the merge, which includes any duplicates dropped below.
          const countResult = this.db.prepare(
            "SELECT COUNT(*) as count FROM item_tags WHERE tag_id = ?"
          ).get(oldTag.id) as { count: number };
          // Update item_tags, ignoring duplicates — rows that would collide
          // with an existing association keep their old tag_id.
          this.db.prepare(`
            UPDATE OR IGNORE item_tags SET tag_id = ? WHERE tag_id = ?
          `).run(existingTag.id, oldTag.id);
          // Delete items that couldn't be moved (duplicates)
          this.db.prepare("DELETE FROM item_tags WHERE tag_id = ?").run(oldTag.id);
          // Delete old tag
          this.db.prepare("DELETE FROM tags WHERE id = ?").run(oldTag.id);
          return {
            success: true,
            old_name,
            new_name,
            items_affected: countResult.count,
            merged: true,
            message: `Merged '${old_name}' into existing tag '${new_name}'`,
          };
        } else {
          // Simple rename: the tag keeps its id, so associations follow
          // automatically; items_affected is the current association count.
          const countResult = this.db.prepare(
            "SELECT COUNT(*) as count FROM item_tags WHERE tag_id = ?"
          ).get(oldTag.id) as { count: number };
          this.db.prepare("UPDATE tags SET name = ?, updated_at = ? WHERE id = ?")
            .run(new_name, Date.now(), oldTag.id);
          return {
            success: true,
            old_name,
            new_name,
            items_affected: countResult.count,
            merged: false,
            message: `Renamed tag '${old_name}' to '${new_name}'`,
          };
        }
      });
    } catch (error) {
      return {
        success: false,
        old_name,
        new_name,
        items_affected: 0,
        merged: false,
        message: `Error renaming tag: ${(error as Error).message}`,
      };
    }
  }
  /**
   * Merge multiple tags into one.
   *
   * Finds (or creates) the target tag, then for each source tag moves its
   * item associations to the target, counts and drops associations that
   * would be duplicates, and deletes the source tag. Runs inside one
   * transaction so a failure leaves nothing half-merged.
   */
  async mergeTags(args: Record<string, unknown>): Promise<Types.MergeTagsResponse> {
    const typedArgs = args as unknown as Types.MergeTagsArgs;
    const { source_tags, target_tag, scope = "project" } = typedArgs;
    const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
    if (!source_tags || source_tags.length === 0 || !target_tag) {
      return {
        success: false,
        merged_tags: [],
        target_tag: target_tag || "",
        items_retagged: 0,
        duplicates_removed: 0,
        message: "source_tags and target_tag are required",
      };
    }
    try {
      return this.db.transaction(() => {
        // Scope filter appended to tag lookups; scope "all" (or a missing
        // project path) applies no extra condition.
        const projectCondition = scope === "project" && projectPath
          ? "AND project_path = ?"
          : scope === "global"
          ? "AND project_path IS NULL"
          : "";
        const baseParams = scope === "project" && projectPath ? [projectPath] : [];
        // Find or create target tag
        let targetTagId: number;
        const existingTarget = this.db.prepare(
          `SELECT id FROM tags WHERE name = ? ${projectCondition}`
        ).get(target_tag, ...baseParams) as { id: number } | undefined;
        if (existingTarget) {
          targetTagId = existingTarget.id;
        } else {
          // Create target tag
          // NOTE(review): for non-global scopes projectPath may be undefined
          // here; confirm the driver accepts that binding (better-sqlite3
          // rejects undefined parameters).
          const result = this.db.prepare(
            "INSERT INTO tags (name, project_path, created_at, updated_at) VALUES (?, ?, ?, ?)"
          ).run(target_tag, scope === "global" ? null : projectPath, Date.now(), Date.now());
          targetTagId = Number(result.lastInsertRowid);
        }
        // Find source tags
        const sourcePlaceholders = source_tags.map(() => "?").join(",");
        const sourceTagRows = this.db.prepare(
          `SELECT id, name FROM tags WHERE name IN (${sourcePlaceholders}) ${projectCondition}`
        ).all(...source_tags, ...baseParams) as Array<{ id: number; name: string }>;
        const mergedTags: string[] = [];
        let itemsRetagged = 0;
        let duplicatesRemoved = 0;
        for (const sourceTag of sourceTagRows) {
          // Never merge the target into itself.
          if (sourceTag.id === targetTagId) {continue;}
          // Count items before
          const countBefore = this.db.prepare(
            "SELECT COUNT(*) as count FROM item_tags WHERE tag_id = ?"
          ).get(sourceTag.id) as { count: number };
          // Move items to target (ignore duplicates)
          this.db.prepare(
            "UPDATE OR IGNORE item_tags SET tag_id = ? WHERE tag_id = ?"
          ).run(targetTagId, sourceTag.id);
          // Rows still on the source tag are the ones that collided with an
          // existing target association (duplicates).
          const remaining = this.db.prepare(
            "SELECT COUNT(*) as count FROM item_tags WHERE tag_id = ?"
          ).get(sourceTag.id) as { count: number };
          // Delete remaining duplicates
          this.db.prepare("DELETE FROM item_tags WHERE tag_id = ?").run(sourceTag.id);
          // Delete source tag
          this.db.prepare("DELETE FROM tags WHERE id = ?").run(sourceTag.id);
          mergedTags.push(sourceTag.name);
          itemsRetagged += countBefore.count - remaining.count;
          duplicatesRemoved += remaining.count;
        }
        return {
          success: true,
          merged_tags: mergedTags,
          target_tag,
          items_retagged: itemsRetagged,
          duplicates_removed: duplicatesRemoved,
          message: `Merged ${mergedTags.length} tag(s) into '${target_tag}'`,
        };
      });
    } catch (error) {
      return {
        success: false,
        merged_tags: [],
        target_tag,
        items_retagged: 0,
        duplicates_removed: 0,
        message: `Error merging tags: ${(error as Error).message}`,
      };
    }
  }
/**
* Delete a tag
*/
async deleteTag(args: Record<string, unknown>): Promise<Types.DeleteTagResponse> {
const typedArgs = args as unknown as Types.DeleteTagArgs;
const { name, scope = "project", force = false } = typedArgs;
const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
if (!name) {
return {
success: false,
deleted: false,
items_untagged: 0,
message: "Tag name is required",
};
}
try {
return this.db.transaction(() => {
const projectCondition = scope === "project" && projectPath
? "AND project_path = ?"
: scope === "global"
? "AND project_path IS NULL"
: "";
const baseParams = scope === "project" && projectPath ? [projectPath] : [];
// Find tag
const tag = this.db.prepare(
`SELECT id FROM tags WHERE name = ? ${projectCondition}`
).get(name, ...baseParams) as { id: number } | undefined;
if (!tag) {
return {
success: false,
deleted: false,
items_untagged: 0,
message: `Tag '${name}' not found`,
};
}
// Check usage
const usageResult = this.db.prepare(
"SELECT COUNT(*) as count FROM item_tags WHERE tag_id = ?"
).get(tag.id) as { count: number };
if (usageResult.count > 0 && !force) {
return {
success: false,
deleted: false,
items_untagged: 0,
message: `Tag '${name}' has ${usageResult.count} usage(s). Use force=true to delete anyway.`,
};
}
// Delete item_tags (cascades from tags table, but explicit is safer)
this.db.prepare("DELETE FROM item_tags WHERE tag_id = ?").run(tag.id);
// Delete tag
this.db.prepare("DELETE FROM tags WHERE id = ?").run(tag.id);
return {
success: true,
deleted: true,
items_untagged: usageResult.count,
message: `Deleted tag '${name}'${usageResult.count > 0 ? ` (${usageResult.count} item(s) untagged)` : ""}`,
};
});
} catch (error) {
return {
success: false,
deleted: false,
items_untagged: 0,
message: `Error deleting tag: ${(error as Error).message}`,
};
}
}
/**
* Add tags to an item
*/
async tagItem(args: Record<string, unknown>): Promise<Types.TagItemResponse> {
const typedArgs = args as unknown as Types.TagItemArgs;
const { item_type, item_id, tags } = typedArgs;
const projectPath = this.resolveProjectPath(typedArgs.project_path);
if (!item_type || item_id === undefined || !tags || tags.length === 0) {
return {
success: false,
item_type: item_type || ("memory" as Types.TagItemType),
item_id: item_id || 0,
tags_added: [],
tags_existed: [],
message: "item_type, item_id, and tags are required",
};
}
try {
return this.db.transaction(() => {
const tagsAdded: string[] = [];
const tagsExisted: string[] = [];
for (const tagName of tags) {
// Find or create tag
let tag = this.db.prepare(
"SELECT id FROM tags WHERE name = ? AND (project_path = ? OR project_path IS NULL)"
).get(tagName, projectPath) as { id: number } | undefined;
if (!tag) {
const result = this.db.prepare(
"INSERT INTO tags (name, project_path, created_at, updated_at) VALUES (?, ?, ?, ?)"
).run(tagName, projectPath, Date.now(), Date.now());
tag = { id: Number(result.lastInsertRowid) };
}
// Try to add item_tag
const itemIdNum = typeof item_id === "string" ? 0 : item_id; // For memory, we need to resolve key to id
try {
this.db.prepare(
"INSERT INTO item_tags (tag_id, item_type, item_id, created_at) VALUES (?, ?, ?, ?)"
).run(tag.id, item_type, itemIdNum, Date.now());
tagsAdded.push(tagName);
} catch (_e) {
// Duplicate - tag already exists on item
tagsExisted.push(tagName);
}
}
return {
success: true,
item_type,
item_id,
tags_added: tagsAdded,
tags_existed: tagsExisted,
message: `Added ${tagsAdded.length} tag(s), ${tagsExisted.length} already existed`,
};
});
} catch (error) {
return {
success: false,
item_type,
item_id,
tags_added: [],
tags_existed: [],
message: `Error tagging item: ${(error as Error).message}`,
};
}
}
/**
* Remove tags from an item
*/
async untagItem(args: Record<string, unknown>): Promise<Types.UntagItemResponse> {
const typedArgs = args as unknown as Types.UntagItemArgs;
const { item_type, item_id, tags } = typedArgs;
const projectPath = this.resolveProjectPath(typedArgs.project_path);
if (!item_type || item_id === undefined) {
return {
success: false,
item_type: item_type || ("memory" as Types.TagItemType),
item_id: item_id || 0,
tags_removed: [],
message: "item_type and item_id are required",
};
}
try {
return this.db.transaction(() => {
const itemIdNum = typeof item_id === "string" ? 0 : item_id;
const tagsRemoved: string[] = [];
if (tags && tags.length > 0) {
// Remove specific tags
for (const tagName of tags) {
const tag = this.db.prepare(
"SELECT id FROM tags WHERE name = ? AND (project_path = ? OR project_path IS NULL)"
).get(tagName, projectPath) as { id: number } | undefined;
if (tag) {
const result = this.db.prepare(
"DELETE FROM item_tags WHERE tag_id = ? AND item_type = ? AND item_id = ?"
).run(tag.id, item_type, itemIdNum);
if (result.changes > 0) {
tagsRemoved.push(tagName);
}
}
}
} else {
// Remove all tags from item
const currentTags = this.db.prepare(`
SELECT t.name FROM tags t
JOIN item_tags it ON t.id = it.tag_id
WHERE it.item_type = ? AND it.item_id = ?
`).all(item_type, itemIdNum) as Array<{ name: string }>;
this.db.prepare(
"DELETE FROM item_tags WHERE item_type = ? AND item_id = ?"
).run(item_type, itemIdNum);
tagsRemoved.push(...currentTags.map((t) => t.name));
}
return {
success: true,
item_type,
item_id,
tags_removed: tagsRemoved,
message: `Removed ${tagsRemoved.length} tag(s)`,
};
});
} catch (error) {
return {
success: false,
item_type,
item_id,
tags_removed: [],
message: `Error untagging item: ${(error as Error).message}`,
};
}
}
// ==================== Phase 1: Memory Confidence Handlers ====================
  /**
   * Set memory confidence level.
   *
   * Updates the confidence column on the working-memory row matching the
   * key within the resolved project. The "confirmed" and "verified" levels
   * also stamp verified_at; optional evidence text is appended to the row's
   * context, and verified_by is recorded when provided.
   */
  async setMemoryConfidence(args: Record<string, unknown>): Promise<Types.SetMemoryConfidenceResponse> {
    const typedArgs = args as unknown as Types.SetMemoryConfidenceArgs;
    const { key, confidence, evidence, verified_by } = typedArgs;
    const projectPath = this.resolveProjectPath(typedArgs.project_path);
    if (!key || !confidence) {
      return {
        success: false,
        key: key || "",
        previous_confidence: null,
        new_confidence: confidence || "",
        verified_at: null,
        message: "key and confidence are required",
      };
    }
    try {
      // Get current memory (needed for previous_confidence in the response)
      const memory = this.db.prepare(
        "SELECT id, confidence FROM working_memory WHERE key = ? AND project_path = ?"
      ).get(key, projectPath) as { id: number; confidence: string | null } | undefined;
      if (!memory) {
        return {
          success: false,
          key,
          previous_confidence: null,
          new_confidence: confidence,
          verified_at: null,
          message: `Memory '${key}' not found`,
        };
      }
      const now = Date.now();
      // Only the two highest levels count as a verification event.
      const verifiedAt = (confidence === "confirmed" || confidence === "verified") ? now : null;
      // Build the UPDATE dynamically so only the supplied fields are written.
      let updateQuery = "UPDATE working_memory SET confidence = ?, updated_at = ?";
      const updateParams: unknown[] = [confidence, now];
      if (verifiedAt) {
        updateQuery += ", verified_at = ?";
        updateParams.push(verifiedAt);
      }
      if (verified_by) {
        updateQuery += ", verified_by = ?";
        updateParams.push(verified_by);
      }
      if (evidence) {
        // Append evidence to context in SQL; COALESCE handles NULL context.
        updateQuery += ", context = COALESCE(context, '') || ' | Evidence: ' || ?";
        updateParams.push(evidence);
      }
      updateQuery += " WHERE id = ?";
      updateParams.push(memory.id);
      this.db.prepare(updateQuery).run(...updateParams);
      return {
        success: true,
        key,
        previous_confidence: memory.confidence,
        new_confidence: confidence,
        verified_at: verifiedAt,
        message: `Updated confidence to '${confidence}'${verifiedAt ? " (verified)" : ""}`,
      };
    } catch (error) {
      return {
        success: false,
        key,
        previous_confidence: null,
        new_confidence: confidence,
        verified_at: null,
        message: `Error setting confidence: ${(error as Error).message}`,
      };
    }
  }
/**
* Set memory importance level
*/
async setMemoryImportance(args: Record<string, unknown>): Promise<Types.SetMemoryImportanceResponse> {
const typedArgs = args as unknown as Types.SetMemoryImportanceArgs;
const { key, importance } = typedArgs;
const projectPath = this.resolveProjectPath(typedArgs.project_path);
if (!key || !importance) {
return {
success: false,
key: key || "",
previous_importance: null,
new_importance: importance || "",
message: "key and importance are required",
};
}
try {
const memory = this.db.prepare(
"SELECT id, importance FROM working_memory WHERE key = ? AND project_path = ?"
).get(key, projectPath) as { id: number; importance: string | null } | undefined;
if (!memory) {
return {
success: false,
key,
previous_importance: null,
new_importance: importance,
message: `Memory '${key}' not found`,
};
}
this.db.prepare(
"UPDATE working_memory SET importance = ?, updated_at = ? WHERE id = ?"
).run(importance, Date.now(), memory.id);
return {
success: true,
key,
previous_importance: memory.importance,
new_importance: importance,
message: `Updated importance to '${importance}'`,
};
} catch (error) {
return {
success: false,
key,
previous_importance: null,
new_importance: importance,
message: `Error setting importance: ${(error as Error).message}`,
};
}
}
/**
* Pin/unpin a memory
*/
async pinMemory(args: Record<string, unknown>): Promise<Types.PinMemoryResponse> {
const typedArgs = args as unknown as Types.PinMemoryArgs;
const { key, pinned = true } = typedArgs;
const projectPath = this.resolveProjectPath(typedArgs.project_path);
if (!key) {
return {
success: false,
key: "",
pinned: false,
message: "key is required",
};
}
try {
const result = this.db.prepare(
"UPDATE working_memory SET pinned = ?, updated_at = ? WHERE key = ? AND project_path = ?"
).run(pinned ? 1 : 0, Date.now(), key, projectPath);
if (result.changes === 0) {
return {
success: false,
key,
pinned: false,
message: `Memory '${key}' not found`,
};
}
return {
success: true,
key,
pinned,
message: pinned ? `Pinned memory '${key}'` : `Unpinned memory '${key}'`,
};
} catch (error) {
return {
success: false,
key,
pinned: false,
message: `Error pinning memory: ${(error as Error).message}`,
};
}
}
/**
* Archive a memory
*/
async archiveMemory(args: Record<string, unknown>): Promise<Types.ArchiveMemoryResponse> {
const typedArgs = args as unknown as Types.ArchiveMemoryArgs;
const { key, reason } = typedArgs;
const projectPath = this.resolveProjectPath(typedArgs.project_path);
if (!key) {
return {
success: false,
key: "",
archived: false,
reason: null,
message: "key is required",
};
}
try {
const result = this.db.prepare(
"UPDATE working_memory SET archived = 1, archive_reason = ?, updated_at = ? WHERE key = ? AND project_path = ?"
).run(reason || null, Date.now(), key, projectPath);
if (result.changes === 0) {
return {
success: false,
key,
archived: false,
reason: null,
message: `Memory '${key}' not found`,
};
}
return {
success: true,
key,
archived: true,
reason: reason || null,
message: `Archived memory '${key}'${reason ? `: ${reason}` : ""}`,
};
} catch (error) {
return {
success: false,
key,
archived: false,
reason: null,
message: `Error archiving memory: ${(error as Error).message}`,
};
}
}
/**
* Unarchive a memory
*/
async unarchiveMemory(args: Record<string, unknown>): Promise<Types.UnarchiveMemoryResponse> {
const typedArgs = args as unknown as Types.UnarchiveMemoryArgs;
const { key } = typedArgs;
const projectPath = this.resolveProjectPath(typedArgs.project_path);
if (!key) {
return {
success: false,
key: "",
message: "key is required",
};
}
try {
const result = this.db.prepare(
"UPDATE working_memory SET archived = 0, archive_reason = NULL, updated_at = ? WHERE key = ? AND project_path = ?"
).run(Date.now(), key, projectPath);
if (result.changes === 0) {
return {
success: false,
key,
message: `Memory '${key}' not found`,
};
}
return {
success: true,
key,
message: `Unarchived memory '${key}'`,
};
} catch (error) {
return {
success: false,
key,
message: `Error unarchiving memory: ${(error as Error).message}`,
};
}
}
  /**
   * Search memories by quality filters.
   *
   * Filters working memory by confidence/importance levels, pinned and
   * archived flags, and an optional LIKE text match over key/value/context,
   * with configurable sorting and limit/offset pagination (fetches limit+1
   * rows to detect hasMore).
   */
  async searchMemoryByQuality(args: Record<string, unknown>): Promise<Types.SearchMemoryByQualityResponse> {
    const typedArgs = args as Types.SearchMemoryByQualityArgs;
    const {
      query,
      confidence,
      importance,
      pinned_only = false,
      include_archived = false,
      scope = "project",
      sort_by = "importance",
      limit = 20,
      offset = 0,
    } = typedArgs;
    const projectPath = this.resolveProjectPath(typedArgs.project_path);
    try {
      let sqlQuery = "SELECT * FROM working_memory WHERE 1=1";
      const params: unknown[] = [];
      // Project/scope filter (any non-"project" scope applies no path filter)
      if (scope === "project") {
        sqlQuery += " AND project_path = ?";
        params.push(projectPath);
      }
      // Archived filter (rows may have NULL archived)
      if (!include_archived) {
        sqlQuery += " AND (archived = 0 OR archived IS NULL)";
      }
      // Pinned filter
      if (pinned_only) {
        sqlQuery += " AND pinned = 1";
      }
      // Confidence filter
      if (confidence && confidence.length > 0) {
        const placeholders = confidence.map(() => "?").join(",");
        sqlQuery += ` AND confidence IN (${placeholders})`;
        params.push(...confidence);
      }
      // Importance filter
      if (importance && importance.length > 0) {
        const placeholders = importance.map(() => "?").join(",");
        sqlQuery += ` AND importance IN (${placeholders})`;
        params.push(...importance);
      }
      // Text search: substring LIKE match over key, value, and context
      if (query) {
        sqlQuery += " AND (key LIKE ? OR value LIKE ? OR context LIKE ?)";
        const searchTerm = `%${query}%`;
        params.push(searchTerm, searchTerm, searchTerm);
      }
      // Sorting: CASE expressions rank the enum-like levels explicitly,
      // with updated_at as the tiebreaker. "relevance" currently falls
      // back to recency.
      const sortMap: Record<string, string> = {
        relevance: "updated_at DESC",
        importance: "CASE importance WHEN 'critical' THEN 1 WHEN 'high' THEN 2 WHEN 'normal' THEN 3 WHEN 'low' THEN 4 ELSE 5 END, updated_at DESC",
        confidence: "CASE confidence WHEN 'verified' THEN 1 WHEN 'confirmed' THEN 2 WHEN 'likely' THEN 3 WHEN 'uncertain' THEN 4 ELSE 5 END, updated_at DESC",
        recent: "updated_at DESC",
      };
      sqlQuery += ` ORDER BY ${sortMap[sort_by] || "updated_at DESC"}`;
      // Pagination: fetch one extra row to detect another page
      sqlQuery += " LIMIT ? OFFSET ?";
      params.push(limit + 1, offset);
      const rows = this.db.prepare(sqlQuery).all(...params) as Array<{
        id: string;
        key: string;
        value: string;
        context: string | null;
        tags: string | null;
        created_at: number;
        updated_at: number;
        expires_at: number | null;
        confidence: string | null;
        importance: string | null;
        pinned: number | null;
        archived: number | null;
        archive_reason: string | null;
        source: string | null;
        source_session_id: string | null;
        verified_at: number | null;
        verified_by: string | null;
      }>;
      const hasMore = rows.length > limit;
      // Map DB rows to the API shape: epoch-ms timestamps become ISO
      // strings, NULLs become undefined, 0/1 flags become booleans.
      const items: Types.MemoryItem[] = rows.slice(0, limit).map((row) => ({
        id: row.id,
        key: row.key,
        value: row.value,
        context: row.context || undefined,
        tags: row.tags ? safeJsonParse(row.tags, []) : [],
        created_at: new Date(row.created_at).toISOString(),
        updated_at: new Date(row.updated_at).toISOString(),
        expires_at: row.expires_at ? new Date(row.expires_at).toISOString() : undefined,
        confidence: row.confidence || undefined,
        importance: row.importance || undefined,
        pinned: row.pinned === 1,
        archived: row.archived === 1,
        archive_reason: row.archive_reason || undefined,
        source: row.source || undefined,
        source_session_id: row.source_session_id || undefined,
        verified_at: row.verified_at ? new Date(row.verified_at).toISOString() : undefined,
        verified_by: row.verified_by || undefined,
      }));
      return {
        success: true,
        items,
        // NOTE(review): total is the page size, not the overall match count
        // (unlike listTags, which runs a separate COUNT query) — confirm
        // callers expect this.
        total: items.length,
        hasMore,
        message: `Found ${items.length} memor${items.length === 1 ? "y" : "ies"}`,
      };
    } catch (error) {
      return {
        success: false,
        items: [],
        total: 0,
        hasMore: false,
        message: `Error searching memories: ${(error as Error).message}`,
      };
    }
  }
  /**
   * Get memory statistics.
   *
   * Aggregates counts over working_memory for the project (or globally):
   * totals, active/archived/pinned splits, breakdowns by confidence and
   * importance (missing levels default to 'likely'/'normal'), plus expired
   * and soon-to-expire counts.
   */
  async getMemoryStats(args: Record<string, unknown>): Promise<Types.GetMemoryStatsResponse> {
    const typedArgs = args as Types.GetMemoryStatsArgs;
    const { scope = "project" } = typedArgs;
    const projectPath = this.resolveProjectPath(typedArgs.project_path);
    try {
      // projectCondition is a full "WHERE ..." prefix; queries below either
      // use it as-is, or splice it with " AND" / fall back to "WHERE" when
      // they add conditions of their own.
      const projectCondition = scope === "project" ? "WHERE project_path = ?" : "";
      const params = scope === "project" ? [projectPath] : [];
      // Total count
      const totalResult = this.db.prepare(
        `SELECT COUNT(*) as count FROM working_memory ${projectCondition}`
      ).get(...params) as { count: number };
      // Active (non-archived; NULL archived counts as active)
      const activeResult = this.db.prepare(
        `SELECT COUNT(*) as count FROM working_memory ${projectCondition ? projectCondition + " AND" : "WHERE"} (archived = 0 OR archived IS NULL)`
      ).get(...params) as { count: number };
      // Archived
      const archivedResult = this.db.prepare(
        `SELECT COUNT(*) as count FROM working_memory ${projectCondition ? projectCondition + " AND" : "WHERE"} archived = 1`
      ).get(...params) as { count: number };
      // Pinned
      const pinnedResult = this.db.prepare(
        `SELECT COUNT(*) as count FROM working_memory ${projectCondition ? projectCondition + " AND" : "WHERE"} pinned = 1`
      ).get(...params) as { count: number };
      // By confidence (rows with NULL confidence are bucketed as 'likely')
      const confidenceRows = this.db.prepare(`
        SELECT COALESCE(confidence, 'likely') as level, COUNT(*) as count
        FROM working_memory ${projectCondition}
        GROUP BY COALESCE(confidence, 'likely')
      `).all(...params) as Array<{ level: string; count: number }>;
      const byConfidence = { uncertain: 0, likely: 0, confirmed: 0, verified: 0 };
      for (const row of confidenceRows) {
        // Ignore any level outside the known set.
        if (row.level in byConfidence) {
          byConfidence[row.level as keyof typeof byConfidence] = row.count;
        }
      }
      // By importance (rows with NULL importance are bucketed as 'normal')
      const importanceRows = this.db.prepare(`
        SELECT COALESCE(importance, 'normal') as level, COUNT(*) as count
        FROM working_memory ${projectCondition}
        GROUP BY COALESCE(importance, 'normal')
      `).all(...params) as Array<{ level: string; count: number }>;
      const byImportance = { low: 0, normal: 0, high: 0, critical: 0 };
      for (const row of importanceRows) {
        if (row.level in byImportance) {
          byImportance[row.level as keyof typeof byImportance] = row.count;
        }
      }
      // Expired (expires_at strictly in the past)
      const now = Date.now();
      const expiredResult = this.db.prepare(
        `SELECT COUNT(*) as count FROM working_memory ${projectCondition ? projectCondition + " AND" : "WHERE"} expires_at IS NOT NULL AND expires_at < ?`
      ).get(...params, now) as { count: number };
      // Expiring soon (within 7 days)
      const sevenDays = 7 * 24 * 60 * 60 * 1000;
      const expiringSoonResult = this.db.prepare(
        `SELECT COUNT(*) as count FROM working_memory ${projectCondition ? projectCondition + " AND" : "WHERE"} expires_at IS NOT NULL AND expires_at >= ? AND expires_at < ?`
      ).get(...params, now, now + sevenDays) as { count: number };
      // Top tags — not computed yet; always returned empty.
      const topTags: Array<{ tag: string; count: number }> = [];
      return {
        success: true,
        total: totalResult.count,
        active: activeResult.count,
        archived: archivedResult.count,
        pinned: pinnedResult.count,
        by_confidence: byConfidence,
        by_importance: byImportance,
        expired: expiredResult.count,
        expiring_soon: expiringSoonResult.count,
        top_tags: topTags,
        message: `Memory stats for ${scope === "project" ? "project" : "global"}`,
      };
    } catch (error) {
      return {
        success: false,
        total: 0,
        active: 0,
        archived: 0,
        pinned: 0,
        by_confidence: { uncertain: 0, likely: 0, confirmed: 0, verified: 0 },
        by_importance: { low: 0, normal: 0, high: 0, critical: 0 },
        expired: 0,
        expiring_soon: 0,
        top_tags: [],
        message: `Error getting stats: ${(error as Error).message}`,
      };
    }
  }
// ==================== Phase 1: Cleanup/Maintenance Handlers ====================
  /**
   * Get storage statistics.
   *
   * Reports the database file size, per-table row counts (bucketed into the
   * API's item types), the conversation-table age range, and size-based
   * cleanup recommendations. Per-type size_bytes and fragmentation_percent
   * are currently placeholders (always 0).
   */
  async getStorageStats(args: Record<string, unknown>): Promise<Types.GetStorageStatsResponse> {
    const typedArgs = args as Types.GetStorageStatsArgs;
    // Reserved for future project-specific filtering
    void typedArgs.detailed;
    void typedArgs.project_path;
    try {
      const dbStats = this.db.getStats();
      // Get table counts. Table names come from this fixed allow-list, so
      // the string interpolation below is not an injection risk.
      const tables = [
        { name: "conversations", type: "conversations" },
        { name: "messages", type: "messages" },
        { name: "decisions", type: "decisions" },
        { name: "mistakes", type: "mistakes" },
        { name: "working_memory", type: "memories" },
        { name: "message_embeddings", type: "embeddings" },
      ];
      const byType: Record<string, Types.StorageTypeStats> = {};
      for (const table of tables) {
        try {
          const result = this.db.prepare(`SELECT COUNT(*) as count FROM ${table.name}`).get() as { count: number };
          byType[table.type] = { count: result.count, size_bytes: 0 };
        } catch {
          // Table may not exist in older schemas — report zero.
          byType[table.type] = { count: 0, size_bytes: 0 };
        }
      }
      // Fill in missing types so the response shape is always complete.
      const allTypes = ["conversations", "messages", "decisions", "mistakes", "patterns", "memories", "learnings", "embeddings", "history"];
      for (const type of allTypes) {
        if (!byType[type]) {
          byType[type] = { count: 0, size_bytes: 0 };
        }
      }
      // Get oldest and newest — derived from conversations only.
      let oldest = 0;
      let newest = 0;
      try {
        const oldestResult = this.db.prepare("SELECT MIN(created_at) as ts FROM conversations").get() as { ts: number | null };
        const newestResult = this.db.prepare("SELECT MAX(updated_at) as ts FROM conversations").get() as { ts: number | null };
        oldest = oldestResult.ts || 0;
        newest = newestResult.ts || 0;
      } catch {
        // Ignore
      }
      // Human-readable size (binary steps: KB/MB/GB at 1024 boundaries).
      const formatSize = (bytes: number): string => {
        if (bytes < 1024) {return `${bytes} B`;}
        if (bytes < 1024 * 1024) {return `${(bytes / 1024).toFixed(1)} KB`;}
        if (bytes < 1024 * 1024 * 1024) {return `${(bytes / 1024 / 1024).toFixed(1)} MB`;}
        return `${(bytes / 1024 / 1024 / 1024).toFixed(1)} GB`;
      };
      // Heuristic recommendations: >100 MB file, or >10k embedding rows.
      const recommendations: string[] = [];
      const sizeInMB = dbStats.fileSize / (1024 * 1024);
      if (sizeInMB > 100) {
        recommendations.push("Database is large. Consider running cleanup_stale to remove old items.");
      }
      if (byType.embeddings.count > 10000) {
        recommendations.push("Many embeddings stored. Consider pruning unused embeddings.");
      }
      return {
        success: true,
        database_path: dbStats.dbPath,
        total_size_bytes: dbStats.fileSize,
        total_size_human: formatSize(dbStats.fileSize),
        by_type: byType as Types.GetStorageStatsResponse["by_type"],
        oldest_item: oldest,
        newest_item: newest,
        fragmentation_percent: 0,
        recommendations,
        message: `Database size: ${formatSize(dbStats.fileSize)}`,
      };
    } catch (error) {
      return {
        success: false,
        database_path: "",
        total_size_bytes: 0,
        total_size_human: "0 B",
        by_type: {
          conversations: { count: 0, size_bytes: 0 },
          messages: { count: 0, size_bytes: 0 },
          decisions: { count: 0, size_bytes: 0 },
          mistakes: { count: 0, size_bytes: 0 },
          patterns: { count: 0, size_bytes: 0 },
          memories: { count: 0, size_bytes: 0 },
          learnings: { count: 0, size_bytes: 0 },
          embeddings: { count: 0, size_bytes: 0 },
          history: { count: 0, size_bytes: 0 },
        },
        oldest_item: 0,
        newest_item: 0,
        fragmentation_percent: 0,
        recommendations: [],
        message: `Error getting storage stats: ${(error as Error).message}`,
      };
    }
  }
/**
 * Find stale items.
 *
 * Scans working memory and decisions for rows older than
 * `stale_threshold_days`, optionally excluding pinned / high-importance
 * memories and restricting to one project. Returns up to `limit` items
 * per scanned type (the combined list is capped at `limit` in the response).
 */
async findStaleItems(args: Record<string, unknown>): Promise<Types.FindStaleItemsResponse> {
  const typedArgs = args as Types.FindStaleItemsArgs;
  const {
    item_types = ["memory", "decision", "pattern"],
    stale_threshold_days = 90,
    exclude_pinned = true,
    exclude_important = true,
    limit = 50,
  } = typedArgs;
  const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
  try {
    const staleItems: Types.StaleItem[] = [];
    const byType: Record<string, number> = {};
    const now = Date.now();
    const dayMs = 24 * 60 * 60 * 1000;
    const threshold = now - stale_threshold_days * dayMs;
    // --- working_memory ---
    if (item_types.includes("memory")) {
      const pieces = ["SELECT id, key, updated_at, importance FROM working_memory WHERE updated_at < ?"];
      const bind: unknown[] = [threshold];
      if (projectPath) {
        pieces.push(" AND project_path = ?");
        bind.push(projectPath);
      }
      if (exclude_pinned) {
        pieces.push(" AND (pinned = 0 OR pinned IS NULL)");
      }
      if (exclude_important) {
        pieces.push(" AND importance NOT IN ('high', 'critical')");
      }
      pieces.push(" ORDER BY updated_at ASC LIMIT ?");
      bind.push(limit);
      const rows = this.db.prepare(pieces.join("")).all(...bind) as Array<{
        id: number;
        key: string;
        updated_at: number;
        importance: string | null;
      }>;
      staleItems.push(...rows.map((row) => ({
        item_type: "memory" as const,
        item_id: row.id,
        identifier: row.key,
        last_accessed: row.updated_at,
        days_stale: Math.floor((now - row.updated_at) / dayMs),
        importance: row.importance || "normal",
        size_estimate: 100,
      })));
      byType.memory = rows.length;
    }
    // --- decisions (no project/pinned/importance filters available here) ---
    if (item_types.includes("decision")) {
      const rows = this.db.prepare(
        "SELECT id, decision_text, timestamp FROM decisions WHERE timestamp < ? ORDER BY timestamp ASC LIMIT ?"
      ).all(threshold, limit) as Array<{
        id: number;
        decision_text: string;
        timestamp: number;
      }>;
      staleItems.push(...rows.map((row) => ({
        item_type: "decision" as const,
        item_id: row.id,
        identifier: row.decision_text.slice(0, 50),
        last_accessed: row.timestamp,
        days_stale: Math.floor((now - row.timestamp) / dayMs),
        importance: "normal",
        size_estimate: 200,
      })));
      byType.decision = rows.length;
    }
    const totalSize = staleItems.reduce((sum, item) => sum + item.size_estimate, 0);
    return {
      success: true,
      stale_items: staleItems.slice(0, limit),
      total_stale: staleItems.length,
      total_size_bytes: totalSize,
      by_type: byType,
      message: `Found ${staleItems.length} stale item(s)`,
    };
  } catch (error) {
    return {
      success: false,
      stale_items: [],
      total_stale: 0,
      total_size_bytes: 0,
      by_type: {},
      message: `Error finding stale items: ${(error as Error).message}`,
    };
  }
}
/**
 * Find duplicates (simplified - uses text similarity).
 *
 * Current implementation only detects EXACT duplicates: memory values are
 * normalized (lowercased, trimmed) and bucketed; any bucket with more than
 * one row becomes a duplicate group. The `similarity_threshold` argument is
 * accepted for forward compatibility but not yet used.
 */
async findDuplicates(args: Record<string, unknown>): Promise<Types.FindDuplicatesResponse> {
  const typedArgs = args as Types.FindDuplicatesArgs;
  const {
    item_types = ["memory", "decision"],
    similarity_threshold: _similarity_threshold = 0.85,
    limit = 20,
  } = typedArgs;
  const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
  try {
    const duplicateGroups: Types.DuplicateGroup[] = [];
    if (item_types.includes("memory")) {
      type MemoryRow = {
        id: number;
        key: string;
        value: string;
        created_at: number;
        importance: string | null;
      };
      let sql = "SELECT id, key, value, created_at, importance FROM working_memory";
      const bind: unknown[] = [];
      if (projectPath) {
        sql += " WHERE project_path = ?";
        bind.push(projectPath);
      }
      const rows = this.db.prepare(sql).all(...bind) as MemoryRow[];
      // Bucket rows by normalized value text; identical text == exact duplicate.
      const buckets = new Map<string, MemoryRow[]>();
      for (const row of rows) {
        const normalized = row.value.toLowerCase().trim();
        const bucket = buckets.get(normalized);
        if (bucket) {
          bucket.push(row);
        } else {
          buckets.set(normalized, [row]);
        }
      }
      // Lower rank wins; ties broken by newest created_at first.
      const RANK: Record<string, number> = { critical: 1, high: 2, normal: 3, low: 4 };
      const preferKeep = (a: MemoryRow, b: MemoryRow): number => {
        const diff = (RANK[a.importance || "normal"] || 3) - (RANK[b.importance || "normal"] || 3);
        return diff !== 0 ? diff : b.created_at - a.created_at;
      };
      let nextGroupId = 1;
      for (const bucket of buckets.values()) {
        if (bucket.length < 2) {
          continue;
        }
        const ordered = [...bucket].sort(preferKeep);
        duplicateGroups.push({
          group_id: nextGroupId++,
          item_type: "memory",
          items: ordered.map((r) => ({
            id: r.id,
            identifier: r.key,
            content_preview: r.value.slice(0, 100),
            created_at: r.created_at,
            importance: r.importance || "normal",
          })),
          similarity_score: 1.0,
          recommended_keep: ordered[0].id,
          recommendation_reason: "Highest importance and most recent",
        });
      }
    }
    // One row per group survives a merge; the rest are potential savings.
    const potentialSavings = duplicateGroups.reduce(
      (sum, g) => sum + g.items.length - 1, 0
    );
    return {
      success: true,
      duplicate_groups: duplicateGroups.slice(0, limit),
      total_groups: duplicateGroups.length,
      potential_savings: potentialSavings,
      message: `Found ${duplicateGroups.length} duplicate group(s)`,
    };
  } catch (error) {
    return {
      success: false,
      duplicate_groups: [],
      total_groups: 0,
      potential_savings: 0,
      message: `Error finding duplicates: ${(error as Error).message}`,
    };
  }
}
/**
 * Merge duplicates.
 *
 * Collapses `merge_ids` into the surviving `keep_id` row. When `merge_tags`
 * is true, the duplicates' tag links are copied onto the kept item (and
 * their names reported in `tags_merged`) before the duplicate rows and
 * their tag links are deleted. Runs inside a single transaction.
 *
 * Bug fix vs previous version: tags were only *collected* for the response
 * and never re-attached to the kept item, so merging silently dropped them;
 * the merged items' item_tags rows were also left dangling.
 *
 * @param args.item_type - Currently only "memory" is acted upon
 * @param args.keep_id - The row that survives the merge
 * @param args.merge_ids - Rows to fold into keep_id and delete
 * @param args.merge_tags - Copy tag links to keep_id (default true)
 */
async mergeDuplicates(args: Record<string, unknown>): Promise<Types.MergeDuplicatesResponse> {
  const typedArgs = args as unknown as Types.MergeDuplicatesArgs;
  const { item_type, keep_id, merge_ids, merge_tags = true } = typedArgs;
  if (!item_type || !keep_id || !merge_ids || merge_ids.length === 0) {
    return {
      success: false,
      kept_id: keep_id || 0,
      merged_count: 0,
      tags_merged: [],
      references_updated: 0,
      message: "item_type, keep_id, and merge_ids are required",
    };
  }
  try {
    return this.db.transaction(() => {
      const tagsMerged: string[] = [];
      if (item_type === "memory") {
        if (merge_tags) {
          for (const mergeId of merge_ids) {
            // Report the duplicate's tag names in the response.
            const tags = this.db.prepare(
              "SELECT t.name FROM tags t JOIN item_tags it ON t.id = it.tag_id WHERE it.item_type = 'memory' AND it.item_id = ?"
            ).all(mergeId) as Array<{ name: string }>;
            for (const tag of tags) {
              if (!tagsMerged.includes(tag.name)) {
                tagsMerged.push(tag.name);
              }
            }
            // Re-attach the duplicate's tag links to the kept item.
            // OR IGNORE relies on item_tags having a uniqueness constraint on
            // (item_type, item_id, tag_id) — NOTE(review): confirm in schema.
            this.db.prepare(
              "INSERT OR IGNORE INTO item_tags (item_type, item_id, tag_id) " +
              "SELECT 'memory', ?, it.tag_id FROM item_tags it WHERE it.item_type = 'memory' AND it.item_id = ?"
            ).run(keep_id, mergeId);
          }
        }
        // Delete merged items and their (now redundant) tag links so no
        // orphaned item_tags rows are left behind.
        const placeholders = merge_ids.map(() => "?").join(",");
        this.db.prepare(`DELETE FROM item_tags WHERE item_type = 'memory' AND item_id IN (${placeholders})`).run(...merge_ids);
        this.db.prepare(`DELETE FROM working_memory WHERE id IN (${placeholders})`).run(...merge_ids);
      }
      return {
        success: true,
        kept_id: keep_id,
        merged_count: merge_ids.length,
        tags_merged: tagsMerged,
        references_updated: 0, // reference rewriting not implemented yet
        message: `Merged ${merge_ids.length} item(s) into #${keep_id}`,
      };
    });
  } catch (error) {
    return {
      success: false,
      kept_id: keep_id,
      merged_count: 0,
      tags_merged: [],
      references_updated: 0,
      message: `Error merging duplicates: ${(error as Error).message}`,
    };
  }
}
/**
 * Cleanup stale items.
 *
 * Delegates discovery to findStaleItems(), then either previews the result
 * (action='preview', the default), archives the memories, or deletes them.
 * Only memory-type items are acted upon; other types are reported but left
 * untouched. Destructive work runs in a transaction and is recorded in
 * maintenance_log.
 */
async cleanupStale(args: Record<string, unknown>): Promise<Types.CleanupStaleResponse> {
  const typedArgs = args as Types.CleanupStaleArgs;
  const {
    item_types,
    stale_threshold_days = 90,
    action = "preview",
    exclude_pinned = true,
    exclude_important = true,
    max_items = 100,
  } = typedArgs;
  const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
  try {
    // Discovery pass — shared with the standalone find_stale_items tool.
    const staleResult = await this.findStaleItems({
      item_types: item_types as Array<"memory" | "decision" | "pattern" | "session">,
      stale_threshold_days,
      exclude_pinned,
      exclude_important,
      project_path: projectPath,
      limit: max_items,
    });
    if (!staleResult.success || staleResult.stale_items.length === 0) {
      return {
        success: true,
        action,
        preview_only: action === "preview",
        items_affected: 0,
        by_type: {},
        space_freed_bytes: 0,
        items: [],
        message: "No stale items found",
      };
    }
    const affected = staleResult.stale_items.map(({ item_type, item_id, identifier }) => ({
      type: item_type,
      id: item_id,
      identifier,
    }));
    if (action === "preview") {
      return {
        success: true,
        action: "preview",
        preview_only: true,
        items_affected: affected.length,
        by_type: staleResult.by_type,
        space_freed_bytes: staleResult.total_size_bytes,
        items: affected,
        message: `Would affect ${affected.length} item(s). Use action='delete' or action='archive' to proceed.`,
      };
    }
    // Destructive pass — archive or delete, atomically.
    return this.db.transaction(() => {
      const archiveStmt = this.db.prepare(
        "UPDATE working_memory SET archived = 1, archive_reason = 'Stale cleanup', updated_at = ? WHERE id = ?"
      );
      const deleteStmt = this.db.prepare("DELETE FROM working_memory WHERE id = ?");
      for (const entry of affected) {
        if (entry.type !== "memory") {
          continue; // cleanup for other item types not implemented yet
        }
        if (action === "archive") {
          archiveStmt.run(Date.now(), entry.id);
        } else if (action === "delete") {
          deleteStmt.run(entry.id);
        }
      }
      // Audit trail.
      this.db.prepare(`
        INSERT INTO maintenance_log (task_type, started_at, completed_at, status, items_processed, items_affected, details)
        VALUES (?, ?, ?, 'completed', ?, ?, ?)
      `).run(
        "cleanup_stale",
        Date.now(),
        Date.now(),
        affected.length,
        affected.length,
        JSON.stringify({ action, threshold_days: stale_threshold_days })
      );
      return {
        success: true,
        action,
        preview_only: false,
        items_affected: affected.length,
        by_type: staleResult.by_type,
        space_freed_bytes: staleResult.total_size_bytes,
        items: affected,
        message: `${action === "archive" ? "Archived" : "Deleted"} ${affected.length} stale item(s)`,
      };
    });
  } catch (error) {
    return {
      success: false,
      action,
      preview_only: action === "preview",
      items_affected: 0,
      by_type: {},
      space_freed_bytes: 0,
      items: [],
      message: `Error cleaning up stale items: ${(error as Error).message}`,
    };
  }
}
/**
 * Vacuum database.
 *
 * Compacts the SQLite file with VACUUM, optionally refreshing the query
 * planner's statistics (ANALYZE, default on) and rebuilding indexes
 * (REINDEX, default off). The run is recorded in maintenance_log.
 *
 * Fix vs previous version: `space_freed` could be negative (the file can
 * legitimately grow slightly, e.g. after ANALYZE writes its stat tables)
 * while the message clamped at 0 — the field now agrees with the message.
 *
 * @param args.analyze - Run ANALYZE after VACUUM (default true)
 * @param args.reindex - Run REINDEX after VACUUM (default false)
 */
async vacuumDatabase(args: Record<string, unknown>): Promise<Types.VacuumDatabaseResponse> {
  const typedArgs = args as Types.VacuumDatabaseArgs;
  const { analyze = true, reindex = false } = typedArgs;
  try {
    const startTime = Date.now();
    const statsBefore = this.db.getStats();
    // VACUUM must run outside of a transaction
    this.db.exec("VACUUM");
    if (analyze) {
      this.db.exec("ANALYZE");
    }
    if (reindex) {
      this.db.exec("REINDEX");
    }
    const statsAfter = this.db.getStats();
    const duration = Date.now() - startTime;
    const sizeBefore = statsBefore.fileSize;
    const sizeAfter = statsAfter.fileSize;
    // Clamp: report 0 freed when the file did not shrink.
    const spaceFreed = Math.max(0, sizeBefore - sizeAfter);
    // Log maintenance (raw before/after sizes preserved in details).
    this.db.prepare(`
      INSERT INTO maintenance_log (task_type, started_at, completed_at, status, details)
      VALUES (?, ?, ?, 'completed', ?)
    `).run(
      "vacuum",
      startTime,
      Date.now(),
      JSON.stringify({ analyze, reindex, size_before: sizeBefore, size_after: sizeAfter })
    );
    return {
      success: true,
      size_before: sizeBefore,
      size_after: sizeAfter,
      space_freed: spaceFreed,
      duration_ms: duration,
      message: `Vacuum completed in ${duration}ms. Freed ${spaceFreed} bytes.`,
    };
  } catch (error) {
    return {
      success: false,
      size_before: 0,
      size_after: 0,
      space_freed: 0,
      duration_ms: 0,
      message: `Error vacuuming database: ${(error as Error).message}`,
    };
  }
}
/**
 * Cleanup orphaned records.
 *
 * Counts (and, when preview=false, deletes) tags with no item_tags links
 * and embeddings whose message no longer exists. The history/link orphan
 * categories are reported as 0 — detection for them is not implemented yet.
 */
async cleanupOrphans(args: Record<string, unknown>): Promise<Types.CleanupOrphansResponse> {
  const typedArgs = args as Types.CleanupOrphansArgs;
  const { preview = true } = typedArgs;
  try {
    // Count each orphan category before touching anything.
    const countOrphans = (sql: string): number =>
      (this.db.prepare(sql).get() as { count: number }).count;
    const orphansFound = {
      tags_without_items: countOrphans(`
        SELECT COUNT(*) as count FROM tags t
        WHERE NOT EXISTS (SELECT 1 FROM item_tags it WHERE it.tag_id = t.id)
      `),
      embeddings_without_items: countOrphans(`
        SELECT COUNT(*) as count FROM message_embeddings e
        WHERE NOT EXISTS (SELECT 1 FROM messages m WHERE m.id = e.message_id)
      `),
      history_without_items: 0,
      links_without_targets: 0,
    };
    const totalOrphans = Object.values(orphansFound).reduce((a, b) => a + b, 0);
    if (preview) {
      return {
        success: true,
        preview_only: true,
        orphans_found: orphansFound,
        total_orphans: totalOrphans,
        cleaned: 0,
        message: `Found ${totalOrphans} orphan(s). Use preview=false to clean.`,
      };
    }
    // Destructive pass — both deletes in one transaction.
    return this.db.transaction(() => {
      const tagChanges = this.db.prepare(`
        DELETE FROM tags WHERE NOT EXISTS (SELECT 1 FROM item_tags it WHERE it.tag_id = tags.id)
      `).run().changes;
      const embeddingChanges = this.db.prepare(`
        DELETE FROM message_embeddings WHERE NOT EXISTS (SELECT 1 FROM messages m WHERE m.id = message_embeddings.message_id)
      `).run().changes;
      const cleaned = tagChanges + embeddingChanges;
      // Audit trail.
      this.db.prepare(`
        INSERT INTO maintenance_log (task_type, started_at, completed_at, status, items_affected, details)
        VALUES (?, ?, ?, 'completed', ?, ?)
      `).run("cleanup_orphans", Date.now(), Date.now(), cleaned, JSON.stringify(orphansFound));
      return {
        success: true,
        preview_only: false,
        orphans_found: orphansFound,
        total_orphans: totalOrphans,
        cleaned,
        message: `Cleaned ${cleaned} orphan(s)`,
      };
    });
  } catch (error) {
    return {
      success: false,
      preview_only: preview,
      orphans_found: {
        tags_without_items: 0,
        embeddings_without_items: 0,
        history_without_items: 0,
        links_without_targets: 0,
      },
      total_orphans: 0,
      cleaned: 0,
      message: `Error cleaning orphans: ${(error as Error).message}`,
    };
  }
}
/**
 * Get health report.
 *
 * Runs three checks — database size, stale-item count, orphaned records —
 * each producing a pass/warn/fail entry, then folds them into an overall
 * health label and a 0-100 score. The scoring thresholds below are
 * heuristics chosen by this module, not external contracts.
 */
async getHealthReport(args: Record<string, unknown>): Promise<Types.GetHealthReportResponse> {
const typedArgs = args as Types.GetHealthReportArgs;
const projectPath = this.resolveOptionalProjectPath(typedArgs.project_path);
try {
const checks: Types.HealthCheck[] = [];
// Counters are incremented as each check is appended; they feed the
// overall score at the end.
let passed = 0;
let warnings = 0;
let failures = 0;
// Database size check: fail above 500MB, warn above 100MB.
// NOTE(review): if getStorageStats fails it returns total_size_bytes=0,
// which this check silently treats as healthy — confirm that's acceptable.
const stats = await this.getStorageStats({ detailed: false });
const sizeMB = stats.total_size_bytes / 1024 / 1024;
if (sizeMB > 500) {
checks.push({
name: "database_size",
status: "fail",
message: `Database size: ${stats.total_size_human}`,
details: "Database exceeds 500MB",
recommendation: "Run cleanup_stale and vacuum_database",
});
failures++;
} else if (sizeMB > 100) {
checks.push({
name: "database_size",
status: "warn",
message: `Database size: ${stats.total_size_human}`,
details: "Database exceeds 100MB",
recommendation: "Consider running cleanup_stale",
});
warnings++;
} else {
checks.push({
name: "database_size",
status: "pass",
message: `Database size: ${stats.total_size_human}`,
details: "Size is healthy",
recommendation: null,
});
passed++;
}
// Stale items check: warn above 50 (scan capped at 100 items).
const stale = await this.findStaleItems({ limit: 100, project_path: projectPath });
if (stale.total_stale > 50) {
checks.push({
name: "stale_items",
status: "warn",
message: `${stale.total_stale} stale items`,
details: "Many items haven't been accessed recently",
recommendation: "Review and cleanup stale items",
});
warnings++;
} else {
checks.push({
name: "stale_items",
status: "pass",
message: `${stale.total_stale} stale items`,
details: "Stale item count is acceptable",
recommendation: null,
});
passed++;
}
// Orphan check: reuses cleanupOrphans in preview mode (no deletion).
const orphans = await this.cleanupOrphans({ preview: true });
if (orphans.total_orphans > 100) {
checks.push({
name: "orphans",
status: "warn",
message: `${orphans.total_orphans} orphaned records`,
details: "Many orphaned records found",
recommendation: "Run cleanup_orphans",
});
warnings++;
} else {
checks.push({
name: "orphans",
status: "pass",
message: `${orphans.total_orphans} orphaned records`,
details: "Orphan count is acceptable",
recommendation: null,
});
passed++;
}
// Calculate overall health: any failure is critical; more than one
// warning needs attention; otherwise good (score 90-100).
let overallHealth: "good" | "needs_attention" | "critical";
let score: number;
if (failures > 0) {
overallHealth = "critical";
score = Math.max(0, 50 - failures * 25);
} else if (warnings > 1) {
overallHealth = "needs_attention";
score = Math.max(50, 100 - warnings * 15);
} else {
overallHealth = "good";
score = 100 - warnings * 10;
}
// Surface every non-null per-check recommendation verbatim.
const recommendations = checks
.filter((c) => c.recommendation)
.map((c) => c.recommendation as string);
// Get last maintenance timestamp (null when no completed run exists).
const lastMaint = this.db.prepare(
"SELECT MAX(completed_at) as ts FROM maintenance_log WHERE status = 'completed'"
).get() as { ts: number | null };
return {
success: true,
overall_health: overallHealth,
score,
checks,
summary: { passed, warnings, failures },
recommendations,
last_maintenance: lastMaint.ts,
message: `Health: ${overallHealth} (score: ${score})`,
};
} catch (error) {
// Any thrown error is reported as a single critical failure.
return {
success: false,
overall_health: "critical",
score: 0,
checks: [],
summary: { passed: 0, warnings: 0, failures: 1 },
recommendations: [],
last_maintenance: null,
message: `Error getting health report: ${(error as Error).message}`,
};
}
}
/**
 * Run maintenance tasks.
 *
 * Executes the requested tasks in order, capturing each task's one-line
 * summary and duration. A failing task does not stop the run; the overall
 * status is 'success', 'partial', or 'failed' depending on how many failed.
 * In preview mode, destructive tasks preview instead of mutating (vacuum is
 * skipped entirely). The run is recorded in maintenance_log.
 */
async runMaintenance(args: Record<string, unknown>): Promise<Types.RunMaintenanceResponse> {
  const typedArgs = args as unknown as Types.RunMaintenanceArgs;
  const { tasks, options = {}, preview = true } = typedArgs;
  if (!tasks || tasks.length === 0) {
    return {
      success: false,
      tasks_run: [],
      total_duration_ms: 0,
      overall_status: "failed",
      log_id: 0,
      message: "tasks array is required",
    };
  }
  try {
    const startTime = Date.now();
    // Dispatch one named task and return its one-line summary.
    const execute = async (task: string): Promise<string> => {
      switch (task) {
        case "cleanup_stale":
          return (await this.cleanupStale({
            ...options,
            action: preview ? "preview" : "archive",
          })).message;
        case "cleanup_orphans":
          return (await this.cleanupOrphans({ preview })).message;
        case "vacuum":
          return preview
            ? "Vacuum (preview mode - skipped)"
            : (await this.vacuumDatabase(options)).message;
        case "find_duplicates":
          return (await this.findDuplicates(options)).message;
        case "health_report":
          return (await this.getHealthReport(options)).message;
        default:
          return "Unknown task";
      }
    };
    const tasksRun: Types.MaintenanceTaskResult[] = [];
    for (const task of tasks) {
      const taskStart = Date.now();
      try {
        const summary = await execute(task);
        tasksRun.push({
          task,
          status: "success",
          duration_ms: Date.now() - taskStart,
          result_summary: summary,
        });
      } catch (taskError) {
        // A task failure is recorded and the run continues.
        tasksRun.push({
          task,
          status: "failed",
          duration_ms: Date.now() - taskStart,
          result_summary: (taskError as Error).message,
        });
      }
    }
    const totalDuration = Date.now() - startTime;
    const failedCount = tasksRun.filter((t) => t.status === "failed").length;
    const overallStatus = failedCount === 0 ? "success" : failedCount < tasks.length ? "partial" : "failed";
    // Audit trail with full per-task results in details.
    const logResult = this.db.prepare(`
      INSERT INTO maintenance_log (task_type, started_at, completed_at, status, items_processed, details)
      VALUES (?, ?, ?, ?, ?, ?)
    `).run(
      "run_maintenance",
      startTime,
      Date.now(),
      overallStatus,
      tasks.length,
      JSON.stringify({ tasks, preview, results: tasksRun })
    );
    return {
      success: true,
      tasks_run: tasksRun,
      total_duration_ms: totalDuration,
      overall_status: overallStatus,
      log_id: Number(logResult.lastInsertRowid),
      message: `Completed ${tasksRun.length} task(s) in ${totalDuration}ms`,
    };
  } catch (error) {
    return {
      success: false,
      tasks_run: [],
      total_duration_ms: 0,
      overall_status: "failed",
      log_id: 0,
      message: `Error running maintenance: ${(error as Error).message}`,
    };
  }
}
/**
 * Get maintenance history.
 *
 * Returns maintenance_log entries, newest first, optionally filtered by a
 * start timestamp and/or task type, capped at `limit` (default 20).
 */
async getMaintenanceHistory(args: Record<string, unknown>): Promise<Types.GetMaintenanceHistoryResponse> {
  const typedArgs = args as Types.GetMaintenanceHistoryArgs;
  const { since, task_type, limit = 20 } = typedArgs;
  try {
    const conditions: string[] = [];
    const bind: unknown[] = [];
    if (since) {
      conditions.push(" AND started_at >= ?");
      bind.push(since);
    }
    if (task_type) {
      conditions.push(" AND task_type = ?");
      bind.push(task_type);
    }
    bind.push(limit);
    const sql =
      "SELECT * FROM maintenance_log WHERE 1=1" +
      conditions.join("") +
      " ORDER BY started_at DESC LIMIT ?";
    const rows = this.db.prepare(sql).all(...bind) as Types.MaintenanceLogEntry[];
    return {
      success: true,
      entries: rows,
      total: rows.length,
      message: `Found ${rows.length} maintenance log entr${rows.length === 1 ? "y" : "ies"}`,
    };
  } catch (error) {
    return {
      success: false,
      entries: [],
      total: 0,
      message: `Error getting maintenance history: ${(error as Error).message}`,
    };
  }
}
// ==================== Phase 9: Methodology & Research Tracking ====================
/**
 * Search for problem-solving methodologies.
 *
 * Uses the methodologies_fts FTS index to match the query, then filters the
 * matched rows by approach/outcome and orders newest first.
 *
 * Fixes vs previous version:
 * - an empty FTS result used to drop the ID filter entirely, so a
 *   non-matching query returned the most recent rows regardless of the
 *   search terms; it now returns an empty result set;
 * - matched IDs are bound as SQL parameters instead of being string-spliced
 *   into the IN (...) clause;
 * - double quotes in the query are escaped (FTS5 doubles quotes inside a
 *   quoted phrase) so user input cannot break the MATCH expression.
 *
 * @param args.query - Search query for problem statements or approaches
 * @param args.approach - Filter by approach type
 * @param args.outcome - Filter by outcome
 * @param args.limit - Maximum results (default: 10)
 * @returns Matching methodologies with problem statements, steps, and outcomes
 */
async getMethodologies(args: Record<string, unknown>): Promise<{
  query: string;
  methodologies: Array<{
    id: string;
    problem_statement: string;
    approach: string;
    steps_taken: Array<{ order: number; action: string; tool?: string; succeeded: boolean }>;
    tools_used: string[];
    files_involved: string[];
    outcome: string;
    what_worked?: string;
    what_didnt_work?: string;
    started_at: number;
    ended_at: number;
  }>;
  total_found: number;
}> {
  await this.maybeAutoIndex();
  const query = args.query as string;
  const approach = args.approach as string | undefined;
  const outcome = args.outcome as string | undefined;
  const limit = (args.limit as number) || 10;
  // FTS search (prefix match on a quoted phrase; embedded quotes escaped).
  const ftsMatch = `"${query.replace(/"/g, '""')}"*`;
  const ftsResults = this.db.prepare(`
    SELECT id, rank
    FROM methodologies_fts
    WHERE methodologies_fts MATCH ?
    ORDER BY rank
    LIMIT ?
  `).all(ftsMatch, limit * 2) as Array<{ id: string; rank: number }>;
  // No full-text match means no results.
  if (ftsResults.length === 0) {
    return { query, methodologies: [], total_found: 0 };
  }
  let sql = `
    SELECT m.*
    FROM methodologies m
    WHERE 1=1
  `;
  const params: (string | number)[] = [];
  sql += ` AND m.id IN (${ftsResults.map(() => "?").join(",")})`;
  params.push(...ftsResults.map((r) => r.id));
  if (approach) {
    sql += " AND m.approach = ?";
    params.push(approach);
  }
  if (outcome) {
    sql += " AND m.outcome = ?";
    params.push(outcome);
  }
  sql += " ORDER BY m.started_at DESC LIMIT ?";
  params.push(limit);
  const rows = this.db.prepare(sql).all(...params) as Array<{
    id: string;
    problem_statement: string;
    approach: string;
    steps_taken: string;
    tools_used: string;
    files_involved: string;
    outcome: string;
    what_worked: string | null;
    what_didnt_work: string | null;
    started_at: number;
    ended_at: number;
  }>;
  // JSON columns are stored as text; parse defensively with [] fallback.
  const methodologies = rows.map(row => ({
    id: row.id,
    problem_statement: row.problem_statement,
    approach: row.approach,
    steps_taken: safeJsonParse(row.steps_taken, []),
    tools_used: safeJsonParse(row.tools_used, []),
    files_involved: safeJsonParse(row.files_involved, []),
    outcome: row.outcome,
    what_worked: row.what_worked || undefined,
    what_didnt_work: row.what_didnt_work || undefined,
    started_at: row.started_at,
    ended_at: row.ended_at,
  }));
  return {
    query,
    methodologies,
    total_found: methodologies.length,
  };
}
/**
 * Search for research findings and discoveries.
 *
 * Uses the research_fts FTS index to match the query, then filters the
 * matched rows by source type / relevance / confidence, newest first.
 *
 * Fixes vs previous version (same as getMethodologies): an empty FTS result
 * no longer drops the query filter; matched IDs are bound as parameters
 * rather than string-spliced into IN (...); double quotes in the query are
 * escaped for the FTS5 MATCH expression.
 *
 * @param args.query - Search query for topics or discoveries
 * @param args.source_type - Filter by source type
 * @param args.relevance - Filter by relevance level
 * @param args.confidence - Filter by confidence level
 * @param args.limit - Maximum results (default: 10)
 * @returns Matching findings with topics, discoveries, and sources
 */
async getResearchFindings(args: Record<string, unknown>): Promise<{
  query: string;
  findings: Array<{
    id: string;
    topic: string;
    discovery: string;
    source_type: string;
    source_reference?: string;
    relevance: string;
    confidence: string;
    related_to: string[];
    timestamp: number;
  }>;
  total_found: number;
}> {
  await this.maybeAutoIndex();
  const query = args.query as string;
  const source_type = args.source_type as string | undefined;
  const relevance = args.relevance as string | undefined;
  const confidence = args.confidence as string | undefined;
  const limit = (args.limit as number) || 10;
  // FTS search (prefix match on a quoted phrase; embedded quotes escaped).
  const ftsMatch = `"${query.replace(/"/g, '""')}"*`;
  const ftsResults = this.db.prepare(`
    SELECT id, rank
    FROM research_fts
    WHERE research_fts MATCH ?
    ORDER BY rank
    LIMIT ?
  `).all(ftsMatch, limit * 2) as Array<{ id: string; rank: number }>;
  // No full-text match means no results.
  if (ftsResults.length === 0) {
    return { query, findings: [], total_found: 0 };
  }
  let sql = `
    SELECT r.*
    FROM research_findings r
    WHERE 1=1
  `;
  const params: (string | number)[] = [];
  sql += ` AND r.id IN (${ftsResults.map(() => "?").join(",")})`;
  params.push(...ftsResults.map((r) => r.id));
  if (source_type) {
    sql += " AND r.source_type = ?";
    params.push(source_type);
  }
  if (relevance) {
    sql += " AND r.relevance = ?";
    params.push(relevance);
  }
  if (confidence) {
    sql += " AND r.confidence = ?";
    params.push(confidence);
  }
  sql += " ORDER BY r.timestamp DESC LIMIT ?";
  params.push(limit);
  const rows = this.db.prepare(sql).all(...params) as Array<{
    id: string;
    topic: string;
    discovery: string;
    source_type: string;
    source_reference: string | null;
    relevance: string;
    confidence: string;
    related_to: string;
    timestamp: number;
  }>;
  // related_to is a JSON text column; parse with [] fallback.
  const findings = rows.map(row => ({
    id: row.id,
    topic: row.topic,
    discovery: row.discovery,
    source_type: row.source_type,
    source_reference: row.source_reference || undefined,
    relevance: row.relevance,
    confidence: row.confidence,
    related_to: safeJsonParse(row.related_to, []),
    timestamp: row.timestamp,
  }));
  return {
    query,
    findings,
    total_found: findings.length,
  };
}
/**
 * Search for solution patterns.
 *
 * Uses the patterns_fts FTS index to match the query, then filters the
 * matched rows by category / effectiveness / technology, newest first.
 *
 * Fixes vs previous version (same as getMethodologies): an empty FTS result
 * no longer drops the query filter; matched IDs are bound as parameters
 * rather than string-spliced into IN (...); double quotes in the query are
 * escaped for the FTS5 MATCH expression.
 *
 * @param args.query - Search query for problems or solutions
 * @param args.problem_category - Filter by problem category
 * @param args.effectiveness - Filter by effectiveness level
 * @param args.technology - Filter by technology (substring match)
 * @param args.limit - Maximum results (default: 10)
 * @returns Matching patterns with problems, solutions, and applicability
 */
async getSolutionPatterns(args: Record<string, unknown>): Promise<{
  query: string;
  patterns: Array<{
    id: string;
    problem_category: string;
    problem_description: string;
    solution_summary: string;
    solution_steps: string[];
    code_pattern?: string;
    technology: string[];
    prerequisites: string[];
    applies_when: string;
    avoid_when?: string;
    applied_to_files: string[];
    effectiveness: string;
    timestamp: number;
  }>;
  total_found: number;
}> {
  await this.maybeAutoIndex();
  const query = args.query as string;
  const problem_category = args.problem_category as string | undefined;
  const effectiveness = args.effectiveness as string | undefined;
  const technology = args.technology as string | undefined;
  const limit = (args.limit as number) || 10;
  // FTS search (prefix match on a quoted phrase; embedded quotes escaped).
  const ftsMatch = `"${query.replace(/"/g, '""')}"*`;
  const ftsResults = this.db.prepare(`
    SELECT id, rank
    FROM patterns_fts
    WHERE patterns_fts MATCH ?
    ORDER BY rank
    LIMIT ?
  `).all(ftsMatch, limit * 2) as Array<{ id: string; rank: number }>;
  // No full-text match means no results.
  if (ftsResults.length === 0) {
    return { query, patterns: [], total_found: 0 };
  }
  let sql = `
    SELECT p.*
    FROM solution_patterns p
    WHERE 1=1
  `;
  const params: (string | number)[] = [];
  sql += ` AND p.id IN (${ftsResults.map(() => "?").join(",")})`;
  params.push(...ftsResults.map((r) => r.id));
  if (problem_category) {
    sql += " AND p.problem_category = ?";
    params.push(problem_category);
  }
  if (effectiveness) {
    sql += " AND p.effectiveness = ?";
    params.push(effectiveness);
  }
  if (technology) {
    // technology is stored as JSON text, so a LIKE substring match is used.
    sql += " AND p.technology LIKE ?";
    params.push(`%${technology}%`);
  }
  sql += " ORDER BY p.timestamp DESC LIMIT ?";
  params.push(limit);
  const rows = this.db.prepare(sql).all(...params) as Array<{
    id: string;
    problem_category: string;
    problem_description: string;
    solution_summary: string;
    solution_steps: string;
    code_pattern: string | null;
    technology: string;
    prerequisites: string;
    applies_when: string;
    avoid_when: string | null;
    applied_to_files: string;
    effectiveness: string;
    timestamp: number;
  }>;
  // JSON text columns parsed with [] fallback.
  const patterns = rows.map(row => ({
    id: row.id,
    problem_category: row.problem_category,
    problem_description: row.problem_description,
    solution_summary: row.solution_summary,
    solution_steps: safeJsonParse(row.solution_steps, []),
    code_pattern: row.code_pattern || undefined,
    technology: safeJsonParse(row.technology, []),
    prerequisites: safeJsonParse(row.prerequisites, []),
    applies_when: row.applies_when,
    avoid_when: row.avoid_when || undefined,
    applied_to_files: safeJsonParse(row.applied_to_files, []),
    effectiveness: row.effectiveness,
    timestamp: row.timestamp,
  }));
  return {
    query,
    patterns,
    total_found: patterns.length,
  };
}
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/types/ToolTypes.ts | TypeScript | /**
* Type definitions for MCP Tool arguments and responses
* Replaces 'any' types with proper interfaces for type safety
*/
// ==================== Scope Type Helpers ====================
/**
 * Standard scope type used across multiple tools.
 *
 * - 'current': restrict to a single conversation (callers must also supply
 *   a conversation_id — enforced by WithScopeConversation below)
 * - 'all':     search across all conversations
 * - 'global':  search across everything indexed
 */
export type Scope = 'current' | 'all' | 'global';
/**
 * Helper type to create scope-aware discriminated unions.
 * When scope='current', conversation_id is required.
 *
 * Three branches:
 * 1. scope 'current' — conversation_id becomes mandatory;
 * 2. scope 'all' | 'global' (or the property omitted) — conversation_id optional;
 * 3. scope explicitly `undefined` — kept as a separate branch so passing
 *    `scope: undefined` still type-checks. NOTE(review): this branch only
 *    matters under exactOptionalPropertyTypes; presumed intentional — confirm
 *    before removing.
 */
export type WithScopeConversation<T> =
| (T & { scope: 'current'; conversation_id: string })
| (T & { scope?: 'all' | 'global'; conversation_id?: string })
| (T & { scope?: undefined; conversation_id?: string });
// ==================== Tool Arguments ====================
/** Arguments for indexing conversation transcripts into the database. */
export interface IndexConversationsArgs {
project_path?: string;
session_id?: string;
include_thinking?: boolean;
enable_git?: boolean;
// true/'all-mcp' excludes all MCP-originated conversations; 'self-only'
// excludes only this server's own.
exclude_mcp_conversations?: boolean | 'self-only' | 'all-mcp';
exclude_mcp_servers?: string[];
}
/** Shared search fields; scope/conversation_id are added by the wrapper type. */
interface SearchConversationsBaseArgs {
query: string;
limit?: number;
offset?: number;
// [start, end] timestamp pair
date_range?: [number, number];
}
/** Scope-aware search arguments (conversation_id required when scope='current'). */
export type SearchConversationsArgs = WithScopeConversation<SearchConversationsBaseArgs>;
/** Arguments for searching one project's conversations across sources. */
export interface SearchProjectConversationsArgs {
query: string;
project_path?: string;
limit?: number;
offset?: number;
date_range?: [number, number];
include_claude_code?: boolean;
include_codex?: boolean;
}
/** Shared decision-search fields; scope added by the wrapper type. */
interface GetDecisionsBaseArgs {
query: string;
file_path?: string;
limit?: number;
offset?: number;
}
/** Scope-aware decision-search arguments. */
export type GetDecisionsArgs = WithScopeConversation<GetDecisionsBaseArgs>;
/** Arguments for the pre-modification file check. */
export interface CheckBeforeModifyArgs {
file_path: string;
}
/** Arguments for retrieving a file's edit/commit/decision history. */
export interface GetFileEvolutionArgs {
file_path: string;
include_decisions?: boolean;
include_commits?: boolean;
limit?: number;
offset?: number;
}
/** Shared commit-linking fields; scope added by the wrapper type. */
interface LinkCommitsToConversationsBaseArgs {
query?: string;
limit?: number;
offset?: number;
}
/** Scope-aware commit-linking arguments. */
export type LinkCommitsToConversationsArgs = WithScopeConversation<LinkCommitsToConversationsBaseArgs>;
/** Shared mistake-search fields; scope added by the wrapper type. */
interface SearchMistakesBaseArgs {
query: string;
mistake_type?: string;
limit?: number;
offset?: number;
}
/** Scope-aware mistake-search arguments. */
export type SearchMistakesArgs = WithScopeConversation<SearchMistakesBaseArgs>;
/** Arguments for fetching requirements recorded against a component. */
export interface GetRequirementsArgs {
component: string;
type?: string;
}
/** Arguments for querying the tool-invocation history. */
export interface GetToolHistoryArgs {
tool_name?: string;
file_path?: string;
limit?: number;
offset?: number;
include_content?: boolean;
max_content_length?: number;
date_range?: [number, number];
conversation_id?: string;
errors_only?: boolean;
}
/** Shared similar-session fields; scope added by the wrapper type. */
interface FindSimilarSessionsBaseArgs {
query: string;
limit?: number;
offset?: number;
}
/** Scope-aware similar-session arguments. */
export type FindSimilarSessionsArgs = WithScopeConversation<FindSimilarSessionsBaseArgs>;
/** Arguments for generating project documentation from indexed history. */
export interface GenerateDocumentationArgs {
project_path?: string;
session_id?: string;
scope?: 'full' | 'architecture' | 'decisions' | 'quality';
module_filter?: string;
}
// ==================== Tool Responses ====================
/** Result of an indexing run, with per-entity counts. */
export interface IndexConversationsResponse {
success: boolean;
project_path: string;
indexed_folders?: string[];
database_path?: string;
stats: {
conversations: { count: number };
messages: { count: number };
decisions: { count: number };
mistakes: { count: number };
git_commits: { count: number };
};
embeddings_generated?: boolean;
embedding_error?: string;
message: string;
}
/** One search hit: message identity plus a relevance score and snippet. */
export interface SearchResult {
conversation_id: string;
message_id: string;
timestamp: string;
similarity: number;
snippet: string;
git_branch?: string;
message_type: string;
role?: string;
}
/** Paged search results with the effective scope echoed back. */
export interface SearchConversationsResponse {
query: string;
results: SearchResult[];
total_found: number;
has_more: boolean;
offset: number;
scope: 'current' | 'all' | 'global';
}
/** Search hit enriched with its project and originating source. */
export interface SearchProjectResult extends SearchResult {
project_path: string;
source_type: 'claude-code' | 'codex';
}
/** Paged project-wide search results with the source filters echoed back. */
export interface SearchProjectConversationsResponse {
query: string;
project_path: string;
results: SearchProjectResult[];
total_found: number;
has_more: boolean;
offset: number;
include_claude_code: boolean;
include_codex: boolean;
}
/** One recorded decision, including alternatives and why they were rejected. */
export interface DecisionResult {
decision_id: string;
decision_text: string;
rationale?: string;
alternatives_considered: string[];
// keyed by the rejected alternative's text
rejected_reasons: Record<string, string>;
context?: string;
related_files: string[];
related_commits: string[];
timestamp: string;
similarity: number;
}
/** Paged decision-search results with the effective scope echoed back. */
export interface GetDecisionsResponse {
query: string;
file_path?: string;
decisions: DecisionResult[];
total_found: number;
has_more: boolean;
offset: number;
scope: 'current' | 'all' | 'global';
}
/** A single edit event on a file, tied to its conversation. */
export interface EditInfo {
timestamp: string;
conversation_id: string;
}
/** Minimal git commit descriptor. */
export interface CommitInfo {
hash: string;
message: string;
timestamp: string;
}
/** Condensed decision record used in file-history responses. */
export interface DecisionInfo {
decision_text: string;
rationale?: string;
timestamp: string;
}
/** Condensed mistake record used in file-history responses. */
export interface MistakeInfo {
what_went_wrong: string;
correction?: string;
mistake_type: string;
}
export interface CheckBeforeModifyResponse {
file_path: string;
warning: string;
recent_changes: {
edits: EditInfo[];
commits: CommitInfo[];
};
related_decisions: DecisionInfo[];
mistakes_to_avoid: MistakeInfo[];
}
export interface TimelineEvent {
type: 'edit' | 'commit' | 'decision';
timestamp: string;
data: Record<string, unknown>;
}
export interface GetFileEvolutionResponse {
file_path: string;
total_edits: number;
timeline: TimelineEvent[];
has_more: boolean;
}
export interface CommitResult {
hash: string;
full_hash: string;
message: string;
author?: string;
timestamp: string;
branch?: string;
files_changed: string[];
conversation_id?: string;
}
export interface LinkCommitsToConversationsResponse {
query?: string;
conversation_id?: string;
commits: CommitResult[];
total_found: number;
has_more: boolean;
offset: number;
scope: 'current' | 'all' | 'global';
}
export interface MistakeResult {
mistake_id: string;
mistake_type: string;
what_went_wrong: string;
correction?: string;
user_correction_message?: string;
files_affected: string[];
timestamp: string;
}
export interface SearchMistakesResponse {
query: string;
mistake_type?: string;
mistakes: MistakeResult[];
total_found: number;
has_more: boolean;
offset: number;
scope: 'current' | 'all' | 'global';
}
export interface RequirementResult {
requirement_id: string;
type: string;
description: string;
rationale?: string;
affects_components: string[];
timestamp: string;
}
export interface GetRequirementsResponse {
component: string;
type?: string;
requirements: RequirementResult[];
total_found: number;
}
export interface ToolUseResult {
tool_use_id: string;
tool_name: string;
tool_input: Record<string, unknown>;
result: {
content?: string;
is_error: boolean;
stdout?: string;
stderr?: string;
content_truncated?: boolean;
stdout_truncated?: boolean;
stderr_truncated?: boolean;
};
timestamp: string;
}
export interface GetToolHistoryResponse {
tool_name?: string;
file_path?: string;
tool_uses: ToolUseResult[];
total_found: number; // Number of results returned in this page
total_in_database: number; // Total matching records in database
has_more: boolean; // Whether more results exist beyond current page
offset: number; // Current offset position
}
export interface RelevantMessage {
message_id: string;
snippet: string;
similarity: number;
}
export interface SessionResult {
conversation_id: string;
project_path: string;
first_message_at: string;
message_count: number;
git_branch?: string;
relevance_score: number;
relevant_messages: RelevantMessage[];
}
export interface FindSimilarSessionsResponse {
query: string;
sessions: SessionResult[];
total_found: number;
has_more: boolean;
offset: number;
scope: 'current' | 'all' | 'global';
}
interface RecallAndApplyBaseArgs {
query: string;
context_types?: Array<"conversations" | "decisions" | "mistakes" | "file_changes" | "commits">;
file_path?: string;
date_range?: [number, number];
limit?: number;
offset?: number;
}
export type RecallAndApplyArgs = WithScopeConversation<RecallAndApplyBaseArgs>;
export interface RecalledContext {
conversations?: Array<{
session_id: string;
timestamp: string;
snippet: string;
relevance_score?: number;
}>;
decisions?: Array<{
decision_id: string;
type: string;
description: string;
rationale?: string;
alternatives?: string[];
rejected_approaches?: string[];
affects_components: string[];
timestamp: string;
}>;
mistakes?: Array<{
mistake_id: string;
type: string;
description: string;
what_happened: string;
how_fixed?: string;
lesson_learned?: string;
files_affected: string[];
timestamp: string;
}>;
file_changes?: Array<{
file_path: string;
change_count: number;
last_modified: string;
related_conversations: string[];
}>;
commits?: Array<{
commit_hash: string;
message: string;
timestamp: string;
files_affected: string[];
}>;
}
export interface RecallAndApplyResponse {
query: string;
context_summary: string; // High-level summary of what was recalled
recalled_context: RecalledContext;
application_suggestions: string[]; // Suggested ways to apply this context
total_items_found: number;
}
export interface GenerateDocumentationResponse {
success: boolean;
project_path: string;
scope: string;
documentation: string; // Markdown formatted documentation
statistics: {
modules: number;
decisions: number;
mistakes: number;
commits: number;
};
}
// ==================== Database Row Types ====================
/**
 * Raw `conversations` table row as returned by the database driver.
 *
 * Timestamp fields are numeric epoch values (presumably milliseconds —
 * TODO confirm against the indexer that writes them), unlike the string
 * timestamps used in the tool-facing response types above.
 */
export interface ConversationRow {
  id: number;
  project_id: number;
  project_path: string;
  source_type: string; // e.g. 'claude-code' | 'codex' — NOTE(review): confirm full value set
  external_id: string; // id from the source conversation log, not the DB primary key
  first_message_at: number;
  last_message_at: number;
  message_count: number;
  git_branch?: string;
  claude_version?: string;
  metadata: string; // presumably JSON-serialized — verify against the writer
  created_at: number;
  updated_at: number;
}

/**
 * Raw `messages` table row.
 *
 * `external_id` / `parent_external_id` reference ids from the source log,
 * while `parent_message_id` is the DB-internal foreign key.
 */
export interface MessageRow {
  id: number;
  conversation_id: number;
  external_id: string;
  parent_message_id?: number | null;
  parent_external_id?: string | null;
  message_type: string;
  role?: string;
  content?: string;
  timestamp: number;
  is_sidechain: number; // presumably a 0/1 boolean flag — TODO confirm
  agent_id?: string;
  request_id?: string;
  git_branch?: string;
  cwd?: string;
  metadata: string; // presumably JSON-serialized — verify against the writer
}
export interface DecisionRow {
id: number;
external_id: string;
conversation_id: number;
message_id: number;
decision_text: string;
rationale?: string;
alternatives_considered: string;
rejected_reasons: string;
context?: string;
related_files: string;
related_commits: string;
timestamp: number;
}
export interface MistakeRow {
id: number;
external_id: string;
conversation_id: number;
message_id: number;
mistake_type: string;
what_went_wrong: string;
correction?: string;
user_correction_message?: string;
files_affected: string;
timestamp: number;
}
export interface GitCommitRow {
id: number;
project_id: number;
hash: string;
message: string;
author?: string;
timestamp: number;
branch?: string;
files_changed: string;
conversation_id?: number | null;
related_message_id?: number | null;
metadata: string;
}
export interface RequirementRow {
id: string;
type: string;
description: string;
rationale?: string;
affects_components: string;
conversation_id: string;
message_id: string;
timestamp: number;
}
export interface ToolUseRow {
id: string;
message_id: string;
tool_name: string;
tool_input: string;
timestamp: number;
result_content?: string;
is_error: number;
stdout?: string;
stderr?: string;
}
// Migration Tool Types
export interface DiscoverOldConversationsArgs {
current_project_path?: string;
}
export interface OldConversationCandidate {
folder_name: string;
folder_path: string;
stored_project_path: string | null;
score: number;
stats: {
conversations: number;
messages: number;
files: number;
last_activity: number | null;
};
}
export interface DiscoverOldConversationsResponse {
success: boolean;
current_project_path: string;
candidates: OldConversationCandidate[];
message: string;
}
export interface MigrateProjectArgs {
source_folder: string;
old_project_path: string;
new_project_path: string;
dry_run?: boolean;
mode?: "migrate" | "merge";
}
export interface MigrateProjectResponse {
success: boolean;
source_folder: string;
target_folder: string;
files_copied: number;
database_updated: boolean;
backup_created: boolean;
message: string;
}
// ============================================================================
// Forget By Topic Tool
// ============================================================================
export interface ForgetByTopicArgs {
keywords: string[];
project_path?: string;
confirm?: boolean;
}
export interface ForgetByTopicResponse {
success: boolean;
preview_mode: boolean;
conversations_found: number;
conversations_deleted: number;
messages_deleted: number;
decisions_deleted: number;
mistakes_deleted: number;
backup_path: string | null;
conversation_summaries: Array<{
id: string;
session_id: string;
created_at: string;
message_count: number;
}>;
message: string;
}
// ==================== High-Value Utility Tools ====================
export interface SearchByFileArgs {
file_path: string;
limit?: number;
}
export interface SearchByFileResponse {
file_path: string;
discussions: Array<{
id: string;
conversation_id: string;
content: string;
timestamp: number;
role: string;
}>;
decisions: Array<{
id: string;
decision_text: string;
rationale?: string;
context?: string;
timestamp: number;
}>;
mistakes: Array<{
id: string;
mistake_type: string;
what_went_wrong: string;
correction?: string;
timestamp: number;
}>;
total_mentions: number;
message: string;
}
export interface ListRecentSessionsArgs {
limit?: number;
offset?: number;
project_path?: string;
}
export interface ListRecentSessionsResponse {
sessions: Array<{
id: string;
session_id: string;
project_path: string;
created_at: number;
message_count: number;
first_message_preview?: string;
}>;
total_sessions: number;
has_more: boolean;
message: string;
}
export interface GetLatestSessionSummaryArgs {
project_path?: string;
source_type?: 'claude-code' | 'codex' | 'all';
limit_messages?: number;
include_tools?: boolean;
include_errors?: boolean;
}
export interface GetLatestSessionSummaryResponse {
success: boolean;
found: boolean;
session?: {
id: string;
session_id: string;
project_path: string;
source_type: 'claude-code' | 'codex';
created_at: number;
last_message_at: number;
message_count: number;
};
summary?: {
problem_statement?: string;
recent_user_messages: Array<{ timestamp: number; content: string }>;
recent_assistant_messages: Array<{ timestamp: number; content: string }>;
recent_actions?: Array<{
tool_name: string;
timestamp: number;
tool_input: Record<string, unknown>;
}>;
errors?: Array<{
tool_name: string;
timestamp: number;
message: string;
}>;
};
message: string;
}
// ==================== Global Cross-Project Tools ====================
export interface IndexAllProjectsArgs {
include_codex?: boolean;
include_claude_code?: boolean;
codex_path?: string;
claude_projects_path?: string;
/** If true, only index files modified since last indexing */
incremental?: boolean;
}
export interface IndexAllProjectsResponse {
success: boolean;
global_index_path: string;
projects_indexed: number;
claude_code_projects: number;
codex_projects: number;
total_messages: number;
total_conversations: number;
total_decisions: number;
total_mistakes: number;
projects: Array<{
project_path: string;
source_type: 'claude-code' | 'codex';
message_count: number;
conversation_count: number;
}>;
errors: Array<{
project_path: string;
error: string;
}>;
message: string;
}
export interface SearchAllConversationsArgs {
query: string;
limit?: number;
offset?: number;
date_range?: [number, number];
source_type?: 'claude-code' | 'codex' | 'all';
}
export interface GlobalSearchResult extends SearchResult {
project_path: string;
source_type: 'claude-code' | 'codex';
}
export interface SearchAllConversationsResponse {
query: string;
results: GlobalSearchResult[];
total_found: number;
has_more: boolean;
offset: number;
projects_searched: number;
projects_succeeded?: number;
failed_projects?: string[];
search_stats: {
claude_code_results: number;
codex_results: number;
};
message: string;
}
export interface GetAllDecisionsArgs {
query: string;
file_path?: string;
limit?: number;
offset?: number;
source_type?: 'claude-code' | 'codex' | 'all';
}
export interface GlobalDecision extends DecisionResult {
project_path: string;
source_type: 'claude-code' | 'codex';
}
export interface GetAllDecisionsResponse {
query: string;
decisions: GlobalDecision[];
total_found: number;
has_more: boolean;
offset: number;
projects_searched: number;
message: string;
}
export interface SearchAllMistakesArgs {
query: string;
mistake_type?: string;
limit?: number;
offset?: number;
source_type?: 'claude-code' | 'codex' | 'all';
}
export interface GlobalMistake extends MistakeResult {
project_path: string;
source_type: 'claude-code' | 'codex';
}
export interface SearchAllMistakesResponse {
query: string;
mistakes: GlobalMistake[];
total_found: number;
has_more: boolean;
offset: number;
projects_searched: number;
message: string;
}
// ==================== Live Context Layer Tools ====================
// Working Memory Types
/**
 * A working-memory item as exposed to MCP tool callers.
 *
 * Note: unlike the snake_case DB row types elsewhere in this file (which use
 * numeric epoch timestamps), the timestamp fields here are strings —
 * presumably ISO-8601; TODO confirm against the memory store serialization.
 */
export interface MemoryItem {
  id: string;
  key: string; // lookup key used by recall/forget — NOTE(review): confirm uniqueness scope (per project?)
  value: string;
  context?: string;
  tags: string[];
  created_at: string;
  updated_at: string;
  expires_at?: string; // absent when the item has no TTL
  // Phase 1: Memory Confidence fields
  confidence?: string; // expected values: see ConfidenceLevel
  importance?: string; // expected values: see ImportanceLevel
  pinned?: boolean; // pinned items can be excluded from stale cleanup (see FindStaleItemsArgs.exclude_pinned)
  archived?: boolean;
  archive_reason?: string;
  source?: string;
  source_session_id?: string;
  verified_at?: string;
  verified_by?: string;
}

/** A MemoryItem plus its semantic-similarity score from vector recall. */
export interface MemoryItemWithSimilarity extends MemoryItem {
  similarity: number; // NOTE(review): presumably cosine similarity — confirm range with the embedder
}

/** Confidence ladder for memory items, from least to most trusted. */
export type ConfidenceLevel = "uncertain" | "likely" | "confirmed" | "verified";

/** Importance ladder consulted by cleanup/prioritization tooling. */
export type ImportanceLevel = "low" | "normal" | "high" | "critical";
export interface RememberArgs {
key: string;
value: string;
context?: string;
tags?: string[];
ttl?: number;
confidence?: ConfidenceLevel;
importance?: ImportanceLevel;
source?: string;
pinned?: boolean;
project_path?: string;
}
export interface RememberResponse {
success: boolean;
item?: MemoryItem;
message: string;
}
export interface RecallArgs {
key: string;
project_path?: string;
}
export interface RecallResponse {
success: boolean;
found: boolean;
item?: MemoryItem;
message: string;
}
export interface RecallRelevantArgs {
query: string;
limit?: number;
project_path?: string;
}
export interface RecallRelevantResponse {
success: boolean;
items: MemoryItemWithSimilarity[];
total_found?: number;
message: string;
}
export interface ListMemoryArgs {
tags?: string[];
limit?: number;
offset?: number;
project_path?: string;
}
export interface ListMemoryResponse {
success: boolean;
items: MemoryItem[];
total_count: number;
has_more: boolean;
offset: number;
message: string;
}
export interface ForgetArgs {
key: string;
project_path?: string;
}
export interface ForgetResponse {
success: boolean;
message: string;
}
// Session Handoff Types
export interface PrepareHandoffArgs {
session_id?: string;
include?: Array<"decisions" | "files" | "tasks" | "memory">;
context_summary?: string;
project_path?: string;
}
export interface HandoffSummary {
id: string;
from_session_id: string;
project_path: string;
created_at: string;
summary: string;
decisions_count: number;
files_count: number;
tasks_count: number;
memory_count: number;
}
export interface HandoffDocument {
id: string;
from_session_id: string;
project_path: string;
created_at: string;
summary: string;
decisions: Array<{
text: string;
rationale?: string;
timestamp: string;
}>;
active_files: Array<{
path: string;
last_action: string;
}>;
pending_tasks: Array<{
description: string;
status: string;
}>;
memory_items: Array<{
key: string;
value: string;
}>;
}
export interface PrepareHandoffResponse {
success: boolean;
handoff?: HandoffSummary;
message: string;
}
export interface ResumeFromHandoffArgs {
handoff_id?: string;
new_session_id?: string;
inject_context?: boolean;
project_path?: string;
}
export interface ResumeFromHandoffResponse {
success: boolean;
found: boolean;
handoff?: HandoffDocument;
message: string;
}
export interface ListHandoffsArgs {
limit?: number;
include_resumed?: boolean;
project_path?: string;
}
export interface ListHandoffsResponse {
success: boolean;
handoffs: Array<{
id: string;
from_session_id: string;
created_at: string;
resumed_by?: string;
resumed_at?: string;
summary: string;
}>;
total_count: number;
message: string;
}
// Context Injection Types
export interface GetStartupContextArgs {
query?: string;
max_tokens?: number;
sources?: Array<"history" | "decisions" | "memory" | "handoffs">;
project_path?: string;
}
export interface GetStartupContextResponse {
success: boolean;
context: {
handoff?: HandoffDocument;
decisions: Array<{
id: string;
text: string;
rationale?: string;
timestamp: string;
}>;
memory: MemoryItem[];
recent_files: Array<{
path: string;
last_action: string;
timestamp: string;
}>;
summary: string;
};
token_estimate: number;
message: string;
}
export interface InjectRelevantContextArgs {
message: string;
max_tokens?: number;
sources?: Array<"history" | "decisions" | "memory" | "handoffs">;
project_path?: string;
}
export interface InjectRelevantContextResponse {
success: boolean;
injected_context: string;
sources_used: string[];
token_count: number;
message: string;
}
// ==================== Phase 1: Tag Management Types ====================

/** Kinds of items that can carry tags. */
export type TagItemType = "memory" | "decision" | "pattern" | "session" | "mistake";

/** Tag visibility scope: a single project, global-only, or everywhere. */
export type TagScope = "project" | "global" | "all";

/** Sort orders accepted when listing tags. */
export type TagSortBy = "name" | "usage_count" | "last_used" | "created";

/** A tag row together with its usage statistics. */
export interface TagInfo {
  id: number;
  name: string;
  project_path: string | null; // null presumably means a global tag — TODO confirm
  description: string | null;
  color: string | null;
  usage_count: number;
  last_used_at: number | null; // epoch timestamp — presumably ms; confirm against writer
  used_in_types: string[]; // NOTE(review): presumably a subset of TagItemType values — confirm
  created_at: number;
  updated_at: number;
}

/** Arguments for listing tags. */
export interface ListTagsArgs {
  project_path?: string;
  scope?: TagScope;
  sort_by?: TagSortBy;
  include_unused?: boolean; // when false, tags with usage_count 0 are presumably omitted — confirm
  limit?: number;
  offset?: number;
}

export interface ListTagsResponse {
  success: boolean;
  tags: TagInfo[];
  total: number;
  // NOTE(review): camelCase here is inconsistent with the snake_case `has_more`
  // used by most other responses in this file; renaming would break consumers,
  // so this is only flagged, not changed.
  hasMore: boolean;
  message: string;
}
export interface SearchByTagsArgs {
tags: string[];
match_mode?: "all" | "any";
item_types?: TagItemType[];
scope?: TagScope;
project_path?: string;
limit?: number;
offset?: number;
}
export interface TaggedItem {
item_type: TagItemType;
item_id: number;
item_summary: string;
matched_tags: string[];
all_tags: string[];
created_at: number;
}
export interface SearchByTagsResponse {
success: boolean;
items: TaggedItem[];
total: number;
hasMore: boolean;
tag_breakdown: Record<string, number>;
message: string;
}
export interface RenameTagArgs {
old_name: string;
new_name: string;
scope?: "project" | "global";
project_path?: string;
}
export interface RenameTagResponse {
success: boolean;
old_name: string;
new_name: string;
items_affected: number;
merged: boolean;
message: string;
}
export interface MergeTagsArgs {
source_tags: string[];
target_tag: string;
scope?: "project" | "global";
project_path?: string;
}
export interface MergeTagsResponse {
success: boolean;
merged_tags: string[];
target_tag: string;
items_retagged: number;
duplicates_removed: number;
message: string;
}
export interface DeleteTagArgs {
name: string;
scope?: "project" | "global";
project_path?: string;
force?: boolean;
}
export interface DeleteTagResponse {
success: boolean;
deleted: boolean;
items_untagged: number;
message: string;
}
export interface TagItemArgs {
item_type: TagItemType;
item_id: number | string;
tags: string[];
project_path?: string;
}
export interface TagItemResponse {
success: boolean;
item_type: TagItemType;
item_id: number | string;
tags_added: string[];
tags_existed: string[];
message: string;
}
export interface UntagItemArgs {
item_type: TagItemType;
item_id: number | string;
tags?: string[];
project_path?: string;
}
export interface UntagItemResponse {
success: boolean;
item_type: TagItemType;
item_id: number | string;
tags_removed: string[];
message: string;
}
// ==================== Phase 1: Memory Confidence Types ====================
export interface SetMemoryConfidenceArgs {
key: string;
confidence: ConfidenceLevel;
evidence?: string;
verified_by?: string;
project_path?: string;
}
export interface SetMemoryConfidenceResponse {
success: boolean;
key: string;
previous_confidence: string | null;
new_confidence: string;
verified_at: number | null;
message: string;
}
export interface SetMemoryImportanceArgs {
key: string;
importance: ImportanceLevel;
project_path?: string;
}
export interface SetMemoryImportanceResponse {
success: boolean;
key: string;
previous_importance: string | null;
new_importance: string;
message: string;
}
export interface PinMemoryArgs {
key: string;
pinned?: boolean;
project_path?: string;
}
export interface PinMemoryResponse {
success: boolean;
key: string;
pinned: boolean;
message: string;
}
export interface ArchiveMemoryArgs {
key: string;
reason?: string;
project_path?: string;
}
export interface ArchiveMemoryResponse {
success: boolean;
key: string;
archived: boolean;
reason: string | null;
message: string;
}
export interface UnarchiveMemoryArgs {
key: string;
project_path?: string;
}
export interface UnarchiveMemoryResponse {
success: boolean;
key: string;
message: string;
}
export interface SearchMemoryByQualityArgs {
query?: string;
confidence?: ConfidenceLevel[];
importance?: ImportanceLevel[];
pinned_only?: boolean;
include_archived?: boolean;
scope?: "project" | "global";
project_path?: string;
sort_by?: "relevance" | "importance" | "confidence" | "recent";
limit?: number;
offset?: number;
}
export interface SearchMemoryByQualityResponse {
success: boolean;
items: MemoryItem[];
total: number;
hasMore: boolean;
message: string;
}
export interface GetMemoryStatsArgs {
project_path?: string;
scope?: "project" | "global";
}
export interface GetMemoryStatsResponse {
success: boolean;
total: number;
active: number;
archived: number;
pinned: number;
by_confidence: {
uncertain: number;
likely: number;
confirmed: number;
verified: number;
};
by_importance: {
low: number;
normal: number;
high: number;
critical: number;
};
expired: number;
expiring_soon: number;
top_tags: Array<{ tag: string; count: number }>;
message: string;
}
// ==================== Phase 1: Cleanup/Maintenance Types ====================
export interface GetStorageStatsArgs {
project_path?: string;
detailed?: boolean;
}
export interface StorageTypeStats {
count: number;
size_bytes: number;
}
export interface GetStorageStatsResponse {
success: boolean;
database_path: string;
total_size_bytes: number;
total_size_human: string;
by_type: {
conversations: StorageTypeStats;
messages: StorageTypeStats;
decisions: StorageTypeStats;
mistakes: StorageTypeStats;
patterns: StorageTypeStats;
memories: StorageTypeStats;
learnings: StorageTypeStats;
embeddings: StorageTypeStats;
history: StorageTypeStats;
};
by_project?: Array<{
project_path: string;
size_bytes: number;
item_count: number;
}>;
oldest_item: number;
newest_item: number;
fragmentation_percent: number;
recommendations: string[];
message: string;
}
export interface FindStaleItemsArgs {
item_types?: Array<"memory" | "decision" | "pattern" | "session">;
stale_threshold_days?: number;
exclude_pinned?: boolean;
exclude_important?: boolean;
project_path?: string;
limit?: number;
}
export interface StaleItem {
item_type: string;
item_id: number;
identifier: string;
last_accessed: number;
days_stale: number;
importance: string;
size_estimate: number;
}
export interface FindStaleItemsResponse {
success: boolean;
stale_items: StaleItem[];
total_stale: number;
total_size_bytes: number;
by_type: Record<string, number>;
message: string;
}
export interface FindDuplicatesArgs {
item_types?: Array<"memory" | "decision" | "pattern">;
similarity_threshold?: number;
project_path?: string;
limit?: number;
}
export interface DuplicateItem {
id: number;
identifier: string;
content_preview: string;
created_at: number;
importance: string;
}
export interface DuplicateGroup {
group_id: number;
item_type: string;
items: DuplicateItem[];
similarity_score: number;
recommended_keep: number;
recommendation_reason: string;
}
export interface FindDuplicatesResponse {
success: boolean;
duplicate_groups: DuplicateGroup[];
total_groups: number;
potential_savings: number;
message: string;
}
export interface MergeDuplicatesArgs {
item_type: "memory" | "decision" | "pattern";
keep_id: number;
merge_ids: number[];
merge_strategy?: "keep_content" | "combine_content" | "keep_newest";
merge_tags?: boolean;
}
export interface MergeDuplicatesResponse {
success: boolean;
kept_id: number;
merged_count: number;
tags_merged: string[];
references_updated: number;
message: string;
}
export interface CleanupStaleArgs {
item_types?: string[];
stale_threshold_days?: number;
action?: "archive" | "delete" | "preview";
exclude_pinned?: boolean;
exclude_important?: boolean;
max_items?: number;
project_path?: string;
}
export interface CleanupStaleResponse {
success: boolean;
action: string;
preview_only: boolean;
items_affected: number;
by_type: Record<string, number>;
space_freed_bytes: number;
items: Array<{
type: string;
id: number;
identifier: string;
}>;
message: string;
}
export interface VacuumDatabaseArgs {
analyze?: boolean;
reindex?: boolean;
}
export interface VacuumDatabaseResponse {
success: boolean;
size_before: number;
size_after: number;
space_freed: number;
duration_ms: number;
message: string;
}
export interface CleanupOrphansArgs {
preview?: boolean;
}
export interface CleanupOrphansResponse {
success: boolean;
preview_only: boolean;
orphans_found: {
tags_without_items: number;
embeddings_without_items: number;
history_without_items: number;
links_without_targets: number;
};
total_orphans: number;
cleaned: number;
message: string;
}
export interface HealthCheck {
name: string;
status: "pass" | "warn" | "fail";
message: string;
details: string;
recommendation: string | null;
}
export interface GetHealthReportArgs {
project_path?: string;
}
export interface GetHealthReportResponse {
success: boolean;
overall_health: "good" | "needs_attention" | "critical";
score: number;
checks: HealthCheck[];
summary: {
passed: number;
warnings: number;
failures: number;
};
recommendations: string[];
last_maintenance: number | null;
message: string;
}
export interface RunMaintenanceArgs {
tasks: Array<"cleanup_stale" | "cleanup_orphans" | "vacuum" | "find_duplicates" | "health_report" | "cleanup_expired">;
options?: Record<string, unknown>;
preview?: boolean;
}
export interface MaintenanceTaskResult {
task: string;
status: "success" | "failed" | "skipped";
duration_ms: number;
result_summary: string;
}
export interface RunMaintenanceResponse {
success: boolean;
tasks_run: MaintenanceTaskResult[];
total_duration_ms: number;
overall_status: "success" | "partial" | "failed";
log_id: number;
message: string;
}
export interface GetMaintenanceHistoryArgs {
since?: number;
task_type?: string;
limit?: number;
}
export interface MaintenanceLogEntry {
id: number;
task_type: string;
started_at: number;
completed_at: number | null;
status: "running" | "completed" | "failed";
items_processed: number;
items_affected: number;
details: string | null;
error_message: string | null;
}
export interface GetMaintenanceHistoryResponse {
success: boolean;
entries: MaintenanceLogEntry[];
total: number;
message: string;
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/utils/Logger.ts | TypeScript | /**
* Logging Abstraction
*
* Centralized logging with configurable levels and formatting.
* Replaces scattered console.log/warn/error calls throughout codebase.
*/
export enum LogLevel {
  DEBUG = 0,
  INFO = 1,
  WARN = 2,
  ERROR = 3,
  SILENT = 4, // suppresses all output
}

export interface LoggerConfig {
  /** Minimum level that will be emitted. */
  level: LogLevel;
  /** Optional label prepended to every message, e.g. "[Indexer]". */
  prefix?: string;
  /** When true, prepend an ISO-8601 timestamp to each message. */
  timestamp?: boolean;
}

/**
 * Minimal leveled logger.
 *
 * ALL output — including debug/info/success — is written to stderr via
 * console.error so it never interferes with MCP JSON-RPC traffic on stdout.
 */
export class Logger {
  private config: LoggerConfig;

  constructor(config: Partial<LoggerConfig> = {}) {
    this.config = {
      level: config.level ?? LogLevel.INFO,
      prefix: config.prefix ?? '',
      timestamp: config.timestamp ?? false,
    };
  }

  /** Set the minimum log level. */
  setLevel(level: LogLevel): void {
    this.config.level = level;
  }

  /** Get the current minimum log level. */
  getLevel(): LogLevel {
    return this.config.level;
  }

  /** Format a message as "<timestamp?> [prefix?] [LEVEL] message". */
  private format(level: string, message: string): string {
    const parts: string[] = [];
    if (this.config.timestamp) {
      parts.push(new Date().toISOString());
    }
    if (this.config.prefix) {
      parts.push(`[${this.config.prefix}]`);
    }
    parts.push(`[${level}]`);
    parts.push(message);
    return parts.join(' ');
  }

  /** Debug level logging (most verbose). */
  debug(message: string, ...args: unknown[]): void {
    if (this.config.level <= LogLevel.DEBUG) {
      console.error(this.format('DEBUG', message), ...args);
    }
  }

  /** Info level logging (normal operations). */
  info(message: string, ...args: unknown[]): void {
    if (this.config.level <= LogLevel.INFO) {
      console.error(this.format('INFO', message), ...args);
    }
  }

  /** Warning level logging (potential issues). */
  warn(message: string, ...args: unknown[]): void {
    if (this.config.level <= LogLevel.WARN) {
      console.error(this.format('WARN', message), ...args);
    }
  }

  /** Error level logging (failures). */
  error(message: string, ...args: unknown[]): void {
    if (this.config.level <= LogLevel.ERROR) {
      console.error(this.format('ERROR', message), ...args);
    }
  }

  /** Success message — convenience wrapper emitted at INFO level with a '✓' tag. */
  success(message: string, ...args: unknown[]): void {
    if (this.config.level <= LogLevel.INFO) {
      console.error(this.format('✓', message), ...args);
    }
  }

  /**
   * Create a child logger whose prefix is `parent:child`, inheriting the
   * current level and timestamp setting.
   */
  child(prefix: string): Logger {
    return new Logger({
      level: this.config.level,
      prefix: this.config.prefix
        ? `${this.config.prefix}:${prefix}`
        : prefix,
      timestamp: this.config.timestamp,
    });
  }
}

/**
 * Parse the LOG_LEVEL environment variable (case-insensitive, whitespace
 * tolerated). Unknown or missing values fall back to INFO.
 */
function parseLogLevel(levelStr: string | undefined): LogLevel {
  if (!levelStr) {
    return LogLevel.INFO;
  }
  const normalized = levelStr.trim().toUpperCase();
  const level = LogLevel[normalized as keyof typeof LogLevel];
  // BUG FIX: numeric enums carry a reverse mapping (LogLevel['0'] === 'DEBUG'),
  // so a value like LOG_LEVEL=0 previously returned the *string* 'DEBUG' here,
  // which passed the old `!== undefined` check and broke every subsequent
  // numeric level comparison. Only accept genuine numeric members.
  return typeof level === 'number' ? level : LogLevel.INFO;
}

/**
 * Default logger instance, configured from LOG_LEVEL at module load.
 */
export const logger = new Logger({
  level: parseLogLevel(process.env.LOG_LEVEL),
  timestamp: false,
});

/**
 * Create a logger for a specific module (child of the default logger).
 */
export function createLogger(module: string): Logger {
  return logger.child(module);
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/utils/McpConfig.ts | TypeScript | /**
* MCP Configuration Management Utilities
* Handles reading, writing, and managing MCP server configuration in ~/.claude.json
*/
import { readFileSync, writeFileSync, existsSync, copyFileSync } from 'fs';
import { join } from 'path';
import { homedir } from 'os';
import { execSync } from 'child_process';
// Claude Code's user-level configuration file, shared with the Claude CLI
const CLAUDE_CONFIG_PATH = join(homedir(), '.claude.json');
// Key under `mcpServers` that identifies this server's entry
const SERVER_NAME = 'cccmemory';
/**
 * Shape of one MCP server entry inside ~/.claude.json.
 */
export interface McpServerConfig {
  type: string;                  // transport type, e.g. 'stdio'
  command: string;               // executable to launch
  args: string[];                // command-line arguments passed to it
  env?: Record<string, string>;  // extra environment variables
}
/**
 * Minimal typed view of the Claude config file; unknown keys are
 * preserved via the index signature so writes don't drop them.
 */
export interface ClaudeConfig {
  mcpServers?: Record<string, McpServerConfig>;
  [key: string]: unknown;
}
/**
 * Read and parse ~/.claude.json.
 * Returns null when the file does not exist; throws when the file exists
 * but cannot be read or parsed (corruption should surface, not be hidden).
 */
export function readClaudeConfig(): ClaudeConfig | null {
  if (!existsSync(CLAUDE_CONFIG_PATH)) {
    return null;
  }
  try {
    const raw = readFileSync(CLAUDE_CONFIG_PATH, 'utf-8');
    return JSON.parse(raw) as ClaudeConfig;
  } catch (error) {
    throw new Error(`Failed to parse ${CLAUDE_CONFIG_PATH}: ${(error as Error).message}`);
  }
}
/**
 * Persist the Claude configuration to ~/.claude.json (pretty-printed,
 * 2-space indent). When createBackup is true and the file already exists,
 * a timestamped backup copy is written first.
 */
export function writeClaudeConfig(config: ClaudeConfig, createBackup = true): void {
  const shouldBackup = createBackup && existsSync(CLAUDE_CONFIG_PATH);
  if (shouldBackup) {
    copyFileSync(CLAUDE_CONFIG_PATH, `${CLAUDE_CONFIG_PATH}.backup.${Date.now()}`);
  }
  const serialized = JSON.stringify(config, null, 2);
  writeFileSync(CLAUDE_CONFIG_PATH, serialized, 'utf-8');
}
/**
 * Get the command used to launch the MCP server.
 *
 * Always returns the bare command name and lets the OS resolve it via
 * PATH when the server is spawned. The previous probe of `npm bin -g`
 * was dead code: every branch (found, not found, or probe failure)
 * returned 'cccmemory' anyway, and `npm bin` was removed in npm 9, so
 * the probe always threw on modern npm.
 */
export function getMcpCommand(): string {
  return 'cccmemory';
}
/**
* Check if MCP server is configured
*/
export function isMcpConfigured(): { configured: boolean; config?: McpServerConfig; configPath?: string } {
const config = readClaudeConfig();
if (!config) {
return { configured: false };
}
const mcpConfig = config.mcpServers?.[SERVER_NAME];
if (mcpConfig) {
return {
configured: true,
config: mcpConfig,
configPath: CLAUDE_CONFIG_PATH
};
}
return { configured: false, configPath: CLAUDE_CONFIG_PATH };
}
/**
* Add MCP server to configuration
*/
export function addMcpServer(): void {
const config = readClaudeConfig();
if (!config) {
throw new Error('Claude Code configuration not found. Please install Claude Code first.');
}
// Initialize mcpServers if it doesn't exist
if (!config.mcpServers) {
config.mcpServers = {};
}
// Check if already configured
if (config.mcpServers[SERVER_NAME]) {
throw new Error('MCP server is already configured');
}
// Add MCP server configuration
config.mcpServers[SERVER_NAME] = {
type: 'stdio',
command: getMcpCommand(),
args: [],
env: {}
};
writeClaudeConfig(config, true);
}
/**
* Remove MCP server from configuration
*/
export function removeMcpServer(): void {
const config = readClaudeConfig();
if (!config) {
throw new Error('Claude Code configuration not found');
}
if (!config.mcpServers || !config.mcpServers[SERVER_NAME]) {
throw new Error('MCP server is not configured');
}
// Remove the server
delete config.mcpServers[SERVER_NAME];
writeClaudeConfig(config, true);
}
/**
 * Get MCP server status.
 *
 * Reports whether ~/.claude.json exists, whether the server entry is
 * present, and whether the 'cccmemory' executable exists in npm's global
 * bin directory.
 *
 * Fix: the previous probe ran `npm bin -g`, which was removed in npm 9,
 * so `commandExists` was always false on modern npm. The global bin dir
 * is now derived from `npm prefix -g` — executables live in {prefix}/bin
 * on Unix and directly in {prefix} on Windows.
 */
export function getMcpStatus(): {
  claudeConfigExists: boolean;
  mcpConfigured: boolean;
  serverConfig?: McpServerConfig;
  commandExists: boolean;
  commandPath?: string;
} {
  const claudeConfigExists = existsSync(CLAUDE_CONFIG_PATH);
  const { configured, config } = isMcpConfigured();
  let commandExists = false;
  let commandPath: string | undefined;
  try {
    const npmPrefix = execSync('npm prefix -g', { encoding: 'utf-8' }).trim();
    commandPath =
      process.platform === 'win32'
        ? join(npmPrefix, 'cccmemory')
        : join(npmPrefix, 'bin', 'cccmemory');
    commandExists = existsSync(commandPath);
  } catch (_error) {
    // npm unavailable or probe failed — leave commandExists false
  }
  return {
    claudeConfigExists,
    mcpConfigured: configured,
    serverConfig: config,
    commandExists,
    commandPath
  };
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/utils/ProjectMigration.ts | TypeScript | /**
* Project Migration Utility
* Handles migration of conversation history when project directories are renamed
*/
import { existsSync, readdirSync, mkdirSync, copyFileSync } from "fs";
import { basename, dirname, join } from "path";
import { homedir } from "os";
import { getSQLiteManager, type SQLiteManager } from "../storage/SQLiteManager.js";
import { getCanonicalProjectPath } from "./worktree.js";
import { pathToProjectFolderName } from "./sanitization.js";
/**
 * A candidate old conversation folder discovered on disk, together with
 * database-backed stats and the heuristic score used for ranking.
 */
export interface OldFolder {
  folderPath: string;                // absolute path of the candidate folder
  folderName: string;                // folder base name (sanitized project path)
  storedProjectPath: string | null;  // project path recorded in the DB, if any
  stats: {
    conversations: number;
    messages: number;
    lastActivity: number | null;     // MAX(last_message_at) from the DB; null when unknown
  };
  score: number;                     // combined heuristic score; higher = better match
}
/**
 * Outcome of validating a proposed migration before executing it.
 */
export interface ValidationResult {
  valid: boolean;
  errors: string[];
  stats?: {
    conversations: number;
    messages: number;
    files: number;
  };
}
/**
 * Summary returned by ProjectMigration.executeMigration.
 */
export interface MigrationResult {
  success: boolean;
  filesCopied: number;
  databaseUpdated: boolean;
  message: string;
}
/**
 * Inputs to ProjectMigration.calculateOverallScore.
 */
interface ScoreFactors {
  pathScore: number;     // 0-100 path-component similarity
  folderScore: number;   // 0-100 folder-name similarity
  hasDatabase: boolean;  // candidate folder contains a .cccmemory.db
  jsonlCount: number;    // number of .jsonl conversation files found
}
/**
 * Migrates conversation history when a project directory is renamed:
 * discovers likely old folders, validates a move or merge, copies JSONL
 * files, and rewrites project references inside the SQLite database.
 */
export class ProjectMigration {
  // Root directory that holds one conversation folder per project
  private projectsDir: string;
  private sqliteManager: SQLiteManager;
  private db: ReturnType<SQLiteManager["getDatabase"]>;
  /**
   * @param sqliteManager Storage manager; defaults to the shared singleton.
   * @param projectsDir Override of the Claude projects directory (used by tests).
   */
  constructor(sqliteManager?: SQLiteManager, projectsDir?: string) {
    this.sqliteManager = sqliteManager ?? getSQLiteManager();
    this.db = this.sqliteManager.getDatabase();
    // Allow override of projects directory for testing
    this.projectsDir = projectsDir || join(homedir(), ".claude", "projects");
  }
  /**
   * Map each sanitized project-folder name to the DB projects (canonical
   * paths plus aliases) that would produce that folder name on disk.
   */
  private buildFolderCandidates(): Map<
    string,
    Array<{ projectId: number; path: string }>
  > {
    const candidates = new Map<string, Array<{ projectId: number; path: string }>>();
    const addCandidate = (path: string, projectId: number) => {
      const folderName = pathToProjectFolderName(path);
      const list = candidates.get(folderName) ?? [];
      list.push({ projectId, path });
      candidates.set(folderName, list);
    };
    try {
      const projects = this.db
        .prepare("SELECT id, canonical_path FROM projects")
        .all() as Array<{ id: number; canonical_path: string }>;
      for (const project of projects) {
        addCandidate(project.canonical_path, project.id);
      }
      const aliases = this.db
        .prepare("SELECT alias_path, project_id FROM project_aliases")
        .all() as Array<{ alias_path: string; project_id: number }>;
      for (const alias of aliases) {
        addCandidate(alias.alias_path, alias.project_id);
      }
    } catch (_error) {
      // If DB is unavailable, skip DB-backed candidates
    }
    return candidates;
  }
  /**
   * Pick the candidate whose stored path scores highest against the
   * current project path; without a current path, the first candidate wins.
   */
  private selectBestCandidate(
    candidates: Array<{ projectId: number; path: string }>,
    currentProjectPath?: string
  ): { projectId: number; path: string } | null {
    if (candidates.length === 0) {
      return null;
    }
    if (!currentProjectPath) {
      return candidates[0];
    }
    let best = candidates[0];
    let bestScore = this.scorePath(currentProjectPath, candidates[0].path);
    for (let i = 1; i < candidates.length; i += 1) {
      const candidate = candidates[i];
      const score = this.scorePath(currentProjectPath, candidate.path);
      if (score > bestScore) {
        best = candidate;
        bestScore = score;
      }
    }
    return best;
  }
  /**
   * Conversation count, message count, and most recent activity timestamp
   * for one project id, queried from the database.
   */
  private getProjectStats(projectId: number): {
    conversations: number;
    messages: number;
    lastActivity: number | null;
  } {
    const statsRow = this.db
      .prepare(
        `
      SELECT
        COUNT(DISTINCT id) as conversations,
        MAX(last_message_at) as last_activity
      FROM conversations
      WHERE project_id = ?
    `
      )
      .get(projectId) as { conversations: number; last_activity: number | null } | undefined;
    const messageRow = this.db
      .prepare(
        `
      SELECT COUNT(*) as count
      FROM messages m
      JOIN conversations c ON c.id = m.conversation_id
      WHERE c.project_id = ?
    `
      )
      .get(projectId) as { count: number } | undefined;
    return {
      conversations: statsRow?.conversations ?? 0,
      messages: messageRow?.count ?? 0,
      lastActivity: statsRow?.last_activity ?? null
    };
  }
  /**
   * Resolve a project path to its DB id, trying in order: canonical path,
   * alias table, then any conversation row that recorded the path.
   * Returns null when nothing matches.
   */
  private resolveProjectId(projectPath: string): number | null {
    const canonical = getCanonicalProjectPath(projectPath).canonicalPath;
    const projectRow = this.db
      .prepare("SELECT id FROM projects WHERE canonical_path = ?")
      .get(canonical) as { id: number } | undefined;
    if (projectRow) {
      return projectRow.id;
    }
    const aliasRow = this.db
      .prepare("SELECT project_id FROM project_aliases WHERE alias_path = ?")
      .get(canonical) as { project_id: number } | undefined;
    if (aliasRow) {
      return aliasRow.project_id;
    }
    const conversationRow = this.db
      .prepare("SELECT project_id FROM conversations WHERE project_path = ? LIMIT 1")
      .get(canonical) as { project_id: number } | undefined;
    return conversationRow?.project_id ?? null;
  }
  /**
   * Get the projects directory (for use by other classes)
   */
  getProjectsDir(): string {
    return this.projectsDir;
  }
  /**
   * Discover old conversation folders using combined approach
   *
   * Combines DB-backed folder-name candidates, folder-name similarity,
   * and the presence of .jsonl files; results are sorted by score
   * (highest first). NOTE(review): the body contains no awaits; the
   * async signature appears to be kept for API stability — confirm.
   */
  async discoverOldFolders(currentProjectPath: string): Promise<OldFolder[]> {
    const candidates: OldFolder[] = [];
    const projectsDir = this.projectsDir;
    if (!existsSync(projectsDir)) {
      return [];
    }
    const folders = readdirSync(projectsDir);
    const expectedFolder = pathToProjectFolderName(currentProjectPath);
    const folderCandidates = this.buildFolderCandidates();
    for (const folder of folders) {
      const folderPath = join(projectsDir, folder);
      const dbPath = join(folderPath, ".cccmemory.db");
      let storedPath: string | null = null;
      let stats = { conversations: 0, messages: 0, lastActivity: null as number | null };
      let pathScore = 0;
      // Strategy 1: DB-backed match on the sanitized folder name
      const candidateList = folderCandidates.get(folder);
      const bestCandidate = candidateList
        ? this.selectBestCandidate(candidateList, currentProjectPath)
        : null;
      if (bestCandidate) {
        storedPath = bestCandidate.path;
        stats = this.getProjectStats(bestCandidate.projectId);
        pathScore = this.scorePath(currentProjectPath, bestCandidate.path);
      }
      // Strategy 2: Folder name similarity
      const folderScore = this.scoreFolderName(expectedFolder, folder);
      // Strategy 3: Check for JSONL files
      let jsonlCount = 0;
      try {
        jsonlCount = readdirSync(folderPath).filter(f => f.endsWith(".jsonl")).length;
      } catch (_error) {
        // Can't read folder
        continue;
      }
      // Calculate overall score
      const score = this.calculateOverallScore({
        pathScore,
        folderScore,
        hasDatabase: existsSync(dbPath),
        jsonlCount
      });
      if (score > 0 || storedPath !== null) {
        candidates.push({
          folderPath,
          folderName: folder,
          storedProjectPath: storedPath,
          stats,
          score
        });
      }
    }
    // Sort by score (highest first)
    return candidates.sort((a, b) => b.score - a.score);
  }
  /**
   * Validate migration is safe and possible
   *
   * @param sourceFolder Folder whose .jsonl files would be copied
   * @param targetFolder Destination folder
   * @param mode 'migrate' refuses a target that already has conversation
   *             data; 'merge' allows it
   */
  validateMigration(
    sourceFolder: string,
    targetFolder: string,
    mode: "migrate" | "merge" = "migrate"
  ): ValidationResult {
    const errors: string[] = [];
    let sourceFiles: string[] = [];
    // Check source exists
    if (!existsSync(sourceFolder)) {
      errors.push("Source folder does not exist");
      return { valid: false, errors };
    }
    // Check source has JSONL files
    sourceFiles = readdirSync(sourceFolder).filter(f => f.endsWith(".jsonl"));
    if (sourceFiles.length === 0) {
      errors.push("Source folder has no conversation files");
    }
    // Check target doesn't have data (conflict detection) - ONLY for migrate mode
    if (mode === "migrate" && existsSync(targetFolder)) {
      const targetFiles = readdirSync(targetFolder).filter(f => f.endsWith(".jsonl"));
      if (targetFiles.length > 0) {
        errors.push("Target folder already has conversation data");
      }
    }
    // Get statistics if validation passed so far
    let stats: { conversations: number; messages: number; files: number } | undefined;
    if (errors.length === 0) {
      stats = {
        conversations: sourceFiles.length,
        messages: 0,
        files: sourceFiles.length
      };
    }
    return {
      valid: errors.length === 0,
      errors,
      stats
    };
  }
  /**
   * Execute migration (copy files and update database)
   *
   * Validates first and throws on failure. With dryRun no files or DB
   * rows are touched. In 'merge' mode only files absent from the target
   * are copied; in 'migrate' mode all source files are copied.
   */
  async executeMigration(
    sourceFolder: string,
    targetFolder: string,
    oldProjectPath: string,
    newProjectPath: string,
    dryRun: boolean,
    mode: "migrate" | "merge" = "migrate"
  ): Promise<MigrationResult> {
    // Validate first
    const validation = this.validateMigration(sourceFolder, targetFolder, mode);
    if (!validation.valid) {
      throw new Error(`Migration validation failed: ${validation.errors.join(", ")}`);
    }
    if (dryRun) {
      return {
        success: true,
        filesCopied: validation.stats?.files || 0,
        databaseUpdated: false,
        message: "Dry run: No changes made"
      };
    }
    // Create target folder
    if (!existsSync(targetFolder)) {
      mkdirSync(targetFolder, { recursive: true });
    }
    const filesCopied =
      mode === "merge"
        ? this.copyNewJsonlFiles(sourceFolder, targetFolder)
        : this.copyAllJsonlFiles(sourceFolder, targetFolder);
    const databaseUpdated = this.updateProjectReferences(oldProjectPath, newProjectPath);
    return {
      success: true,
      filesCopied,
      databaseUpdated,
      message:
        mode === "merge"
          ? `Merged ${filesCopied} new conversation files into target`
          : `Migrated ${filesCopied} conversation files`
    };
  }
  /**
   * Copy every .jsonl file from source to target (overwriting name
   * collisions). Returns the number of files copied.
   */
  private copyAllJsonlFiles(sourceFolder: string, targetFolder: string): number {
    const jsonlFiles = readdirSync(sourceFolder).filter(f => f.endsWith(".jsonl"));
    let filesCopied = 0;
    for (const file of jsonlFiles) {
      const sourcePath = join(sourceFolder, file);
      const targetPath = join(targetFolder, file);
      copyFileSync(sourcePath, targetPath);
      filesCopied++;
    }
    return filesCopied;
  }
  /**
   * Copy only those .jsonl files whose names are not already present in
   * the target. Returns the number of files copied.
   */
  private copyNewJsonlFiles(sourceFolder: string, targetFolder: string): number {
    const sourceFiles = readdirSync(sourceFolder).filter(f => f.endsWith(".jsonl"));
    const existingFiles = existsSync(targetFolder)
      ? readdirSync(targetFolder).filter(f => f.endsWith(".jsonl"))
      : [];
    const existingSet = new Set(existingFiles);
    let filesCopied = 0;
    for (const file of sourceFiles) {
      if (!existingSet.has(file)) {
        const sourcePath = join(sourceFolder, file);
        const targetPath = join(targetFolder, file);
        copyFileSync(sourcePath, targetPath);
        filesCopied++;
      }
    }
    return filesCopied;
  }
  /**
   * Snapshot the live database next to it via VACUUM INTO, after forcing
   * a WAL checkpoint. Returns the backup path, or ':memory:' unchanged
   * for in-memory databases. Single quotes in the path are SQL-escaped.
   */
  private backupDatabase(): string {
    const dbPath = this.sqliteManager.getDbPath();
    if (dbPath === ":memory:") {
      return dbPath;
    }
    const backupName = `${basename(dbPath)}.bak.${Date.now()}`;
    const backupPath = join(dirname(dbPath), backupName);
    this.db.exec("PRAGMA wal_checkpoint(TRUNCATE)");
    this.db.exec(`VACUUM INTO '${backupPath.replace(/'/g, "''")}'`);
    return backupPath;
  }
  /**
   * Rewrite every DB reference from oldPath to newPath inside one
   * transaction, recording the old path as an alias so future lookups
   * still resolve. Backs up the database first. Returns false when the
   * paths are identical or the old path is unknown; throws when the new
   * path already belongs to a different project.
   */
  private updateProjectReferences(oldPath: string, newPath: string): boolean {
    const canonicalOld = getCanonicalProjectPath(oldPath).canonicalPath;
    const canonicalNew = getCanonicalProjectPath(newPath).canonicalPath;
    if (canonicalOld === canonicalNew) {
      return false;
    }
    const projectId = this.resolveProjectId(canonicalOld);
    if (!projectId) {
      return false;
    }
    const existingNew = this.resolveProjectId(canonicalNew);
    if (existingNew && existingNew !== projectId) {
      throw new Error(
        `Target project path already exists in database: ${canonicalNew}. ` +
          "Resolve duplicate projects before migrating."
      );
    }
    const now = Date.now();
    this.backupDatabase();
    try {
      this.db.exec("BEGIN TRANSACTION");
      this.db
        .prepare("UPDATE projects SET canonical_path = ?, display_path = ?, updated_at = ? WHERE id = ?")
        .run(canonicalNew, canonicalNew, now, projectId);
      this.db
        .prepare("UPDATE conversations SET project_path = ? WHERE project_id = ?")
        .run(canonicalNew, projectId);
      this.db
        .prepare("UPDATE working_memory SET project_path = ? WHERE project_path = ?")
        .run(canonicalNew, canonicalOld);
      this.db
        .prepare("UPDATE session_handoffs SET project_path = ? WHERE project_path = ?")
        .run(canonicalNew, canonicalOld);
      this.db
        .prepare("UPDATE session_checkpoints SET project_path = ? WHERE project_path = ?")
        .run(canonicalNew, canonicalOld);
      this.db
        .prepare(
          "INSERT OR IGNORE INTO project_aliases (alias_path, project_id, created_at) VALUES (?, ?, ?)"
        )
        .run(canonicalOld, projectId, now);
      this.db.exec("COMMIT");
      return true;
    } catch (error) {
      this.db.exec("ROLLBACK");
      throw error;
    }
  }
  /**
   * Score path similarity
   *
   * 100 for an exact match, 80 when exactly one component differs at
   * equal depth (likely a rename), otherwise the percentage of matching
   * leading components.
   */
  scorePath(currentPath: string, oldPath: string): number {
    // Exact match
    if (currentPath === oldPath) {
      return 100;
    }
    // Split into components using platform-aware separator
    // Handle both Unix (/) and Windows (\) paths
    const pathSeparatorRegex = /[\\/]/;
    const currentParts = currentPath.split(pathSeparatorRegex).filter(p => p.length > 0);
    const oldParts = oldPath.split(pathSeparatorRegex).filter(p => p.length > 0);
    // Count matching components
    let matches = 0;
    const minLength = Math.min(currentParts.length, oldParts.length);
    for (let i = 0; i < minLength; i++) {
      if (currentParts[i] === oldParts[i]) {
        matches++;
      }
    }
    // If only one component differs and same length, likely a rename
    if (
      currentParts.length === oldParts.length &&
      matches === currentParts.length - 1
    ) {
      return 80;
    }
    // General similarity score
    return (matches / Math.max(currentParts.length, oldParts.length)) * 100;
  }
  /**
   * Score folder name similarity
   *
   * Compares dash-separated parts positionally; returns the percentage
   * of matching parts (100 for an exact match).
   */
  scoreFolderName(expected: string, actual: string): number {
    // Exact match
    if (expected === actual) {
      return 100;
    }
    // Split by dashes
    const expectedParts = expected.split("-").filter(p => p.length > 0);
    const actualParts = actual.split("-").filter(p => p.length > 0);
    // Count matching parts
    let matches = 0;
    const minLength = Math.min(expectedParts.length, actualParts.length);
    for (let i = 0; i < minLength; i++) {
      if (expectedParts[i] === actualParts[i]) {
        matches++;
      }
    }
    // Calculate percentage
    return (matches / Math.max(expectedParts.length, actualParts.length)) * 100;
  }
  /**
   * Calculate overall score from multiple factors
   *
   * Weighted sum: full path score + half the folder score + 20 for a
   * present database + up to 30 for JSONL file count.
   */
  calculateOverallScore(factors: ScoreFactors): number {
    let score = 0;
    // Path similarity is most important (0-100 points)
    score += factors.pathScore;
    // Folder name similarity (weighted 50%)
    score += factors.folderScore * 0.5;
    // Having a database is good (20 points)
    if (factors.hasDatabase) {
      score += 20;
    }
    // More JSONL files = higher confidence (1 point per file, max 30)
    score += Math.min(factors.jsonlCount, 30);
    return score;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/utils/constants.ts | TypeScript | /**
* Application Constants
*
* Centralized location for magic numbers and configuration values.
* Extracted from scattered literals throughout the codebase.
*/
// Database Configuration
/** SQLite tuning values and database file-name constants. */
export const DB_CONFIG = {
  // Performance settings (from SQLiteManager)
  CACHE_SIZE_KB: 64000, // 64MB cache
  MMAP_SIZE: 30000000000, // 30GB memory-mapped I/O (upper bound per SQLite PRAGMA mmap_size, not an allocation)
  PAGE_SIZE: 4096, // 4KB page size
  WAL_AUTOCHECKPOINT: 1000, // Checkpoint WAL after 1000 pages
  // Database file name
  DB_FILE_NAME: '.cccmemory.db',
  // Backup suffix
  BACKUP_SUFFIX: '.bak',
} as const;
// Embedding Configuration
/** Default models, dimensions, and batching for embedding providers. */
export const EMBEDDING_CONFIG = {
  // Default model dimensions
  OLLAMA_DEFAULT_DIMENSIONS: 1024, // mxbai-embed-large
  TRANSFORMERS_DEFAULT_DIMENSIONS: 384, // Xenova/all-MiniLM-L6-v2
  OPENAI_DEFAULT_DIMENSIONS: 1536, // text-embedding-ada-002
  // Default models
  OLLAMA_DEFAULT_MODEL: 'mxbai-embed-large',
  TRANSFORMERS_DEFAULT_MODEL: 'Xenova/all-MiniLM-L6-v2',
  OPENAI_DEFAULT_MODEL: 'text-embedding-ada-002',
  // Batch size for embedding generation
  BATCH_SIZE: 100,
  // Similarity threshold
  DEFAULT_SIMILARITY_THRESHOLD: 0.7,
} as const;
// Search Configuration
/** Result limits, snippet sizing, and date-range defaults for search. */
export const SEARCH_CONFIG = {
  // Default result limits
  DEFAULT_LIMIT: 10,
  MAX_LIMIT: 100,
  // Context window for snippets
  SNIPPET_CONTEXT_CHARS: 200,
  // Date range defaults
  DEFAULT_DAYS_BACK: 30,
} as const;
// File Path Patterns
/** Well-known directory/file names used to locate conversation data. */
export const PATH_PATTERNS = {
  // Conversation directories
  CLAUDE_DIR: '.claude',
  PROJECTS_DIR: 'projects',
  // Legacy patterns
  LEGACY_PREFIX: '-Users-',
  // Config file
  CONFIG_FILE: '.claude-memory-config.jsonc',
} as const;
// Time Constants (milliseconds)
/** Common durations in milliseconds. */
export const TIME = {
  SECOND: 1000,
  MINUTE: 60 * 1000,
  HOUR: 60 * 60 * 1000,
  DAY: 24 * 60 * 60 * 1000,
  WEEK: 7 * 24 * 60 * 60 * 1000,
} as const;
// Validation Limits
/** Input-validation bounds for strings, arrays, and similarity scores. */
export const LIMITS = {
  // String length limits
  MAX_MESSAGE_LENGTH: 100000,
  MAX_FILE_PATH_LENGTH: 4096,
  MAX_DECISION_LENGTH: 10000,
  // Array size limits
  MAX_BATCH_SIZE: 1000,
  MAX_SEARCH_RESULTS: 1000,
  // Numeric limits
  MIN_SIMILARITY_SCORE: 0.0,
  MAX_SIMILARITY_SCORE: 1.0,
} as const;
// Migration Configuration
/** Thresholds and backup behavior for project migration. */
export const MIGRATION_CONFIG = {
  // Validation thresholds
  MIN_CONVERSATIONS_FOR_MIGRATION: 1,
  MIN_SIMILARITY_SCORE_FOR_MATCH: 0.7,
  // Backup behavior
  AUTO_BACKUP: true,
  KEEP_SOURCE_FILES: true,
} as const;
// MCP Configuration
/** Timeouts and batch sizes for the MCP server. */
export const MCP_CONFIG = {
  // Tool timeout
  TOOL_TIMEOUT_MS: 30000, // 30 seconds
  // Batch processing
  BATCH_PROCESSING_SIZE: 50,
} as const;
// Error Messages (commonly reused)
/** Shared error strings so messages stay consistent across modules. */
export const ERROR_MESSAGES = {
  NO_CONVERSATIONS_FOUND: 'No conversations found',
  INDEX_REQUIRED: 'Please index conversations first',
  INVALID_PROJECT_PATH: 'Invalid project path',
  DATABASE_ERROR: 'Database operation failed',
  EMBEDDING_ERROR: 'Embedding generation failed',
} as const;
// Success Messages (commonly reused)
/** Shared success strings so messages stay consistent across modules. */
export const SUCCESS_MESSAGES = {
  INDEX_COMPLETE: 'Indexing complete',
  MIGRATION_COMPLETE: 'Migration complete',
  BACKUP_CREATED: 'Backup created successfully',
} as const;
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/utils/safeJson.ts | TypeScript | /**
* Safe JSON parsing utilities
*
* Provides crash-safe JSON parsing with fallback values.
* Use this instead of raw JSON.parse() when parsing data from:
* - Database rows (may be corrupted)
* - User input
* - External sources
*/
/**
 * Parse a JSON string without ever throwing.
 *
 * Returns `fallback` when the input is not a non-empty string or when
 * JSON.parse rejects it. Intended for data that may be corrupted or
 * absent: database rows, user input, external payloads.
 *
 * @param value - Candidate JSON text (may be null/undefined)
 * @param fallback - Value returned when parsing is impossible
 * @returns The parsed value, or `fallback` on any failure
 *
 * @example
 * ```typescript
 * const meta = safeJsonParse(row.metadata, {});
 * const files = safeJsonParse<string[]>(row.files_changed, []);
 * ```
 */
export function safeJsonParse<T>(value: string | null | undefined, fallback: T): T {
  // Anything that is not a non-empty string can never be valid JSON input.
  if (typeof value !== "string" || value.length === 0) {
    return fallback;
  }
  try {
    return JSON.parse(value) as T;
  } catch {
    return fallback;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/utils/sanitization.ts | TypeScript | /**
* Input sanitization utilities for SQL LIKE queries and path validation
*/
import { normalize, sep } from 'path';
/**
 * Escape characters that are special in SQL LIKE patterns.
 * Prefixes each of %, _, " and \ with a backslash so user input is
 * matched literally (the query is expected to use a matching ESCAPE clause).
 */
export function sanitizeForLike(input: string): string {
  const specials = /[%_"\\]/g;
  return input.replace(specials, (ch) => `\\${ch}`);
}
/**
 * Check whether a path contains a traversal attempt.
 * Only an actual '..' segment counts; filenames that merely contain
 * consecutive dots (e.g. "a..b") are allowed. Splits on both Unix and
 * Windows separators.
 */
function hasPathTraversal(pathStr: string): boolean {
  for (const segment of pathStr.split(/[\\/]/)) {
    if (segment === '..') {
      return true;
    }
  }
  return false;
}
/**
 * Validate a file path: strips null bytes, rejects '..' traversal
 * segments, and blocks well-known system directories.
 * Cross-platform — Windows uses case-insensitive drive-letter patterns,
 * Unix uses an explicit prefix list.
 *
 * @returns The cleaned path when it is acceptable
 * @throws Error on a traversal segment or a system-directory target
 */
export function validateFilePath(filePath: string): string {
  // Null bytes could smuggle a different path past downstream APIs.
  const cleaned = filePath.replace(/\0/g, '');
  if (hasPathTraversal(cleaned)) {
    throw new Error('Path traversal detected: .. is not allowed in file paths');
  }
  if (process.platform === 'win32') {
    // Windows system directories (case-insensitive)
    const forbiddenPatterns = [
      /^[A-Z]:\\Windows\\/i,
      /^[A-Z]:\\Program Files/i,
      /^[A-Z]:\\ProgramData/i,
      /^[A-Z]:\\System/i,
      /^[A-Z]:\\System32/i,
      /^[A-Z]:\\Boot/i,
    ];
    if (forbiddenPatterns.some(pattern => pattern.test(cleaned))) {
      throw new Error('Access to system directories is not allowed');
    }
    return cleaned;
  }
  // Unix system directories — blocked as exact match or direct prefix
  const forbiddenDirs = [
    '/etc',
    '/sys',
    '/proc',
    '/dev',
    '/boot',
    '/sbin',
    '/bin',
    '/lib',
    '/lib64',
    '/usr/sbin',
    '/var/run',
    '/var/lock',
  ];
  const blocked = forbiddenDirs.some(
    dir => cleaned === dir || cleaned.startsWith(dir + '/')
  );
  if (blocked) {
    throw new Error('Access to system directories is not allowed');
  }
  return cleaned;
}
/**
 * Validate and normalize a project path.
 * Cross-platform: handles both Unix (/) and Windows (\) separators.
 * Used for converting file paths to Claude project directory names.
 *
 * Traversal is detected per path *segment* (only a literal '..'
 * component is rejected), so filenames merely containing consecutive
 * dots — e.g. "my..project" — are allowed. This matches the segment
 * policy used by validateFilePath; the previous substring check
 * (`includes('..')`) rejected such legitimate names.
 *
 * @throws Error when the path contains a '..' segment
 */
export function sanitizeProjectPath(path: string): string {
  // Remove null bytes
  const cleaned = path.replace(/\0/g, '');
  // Reject only actual '..' segments (split on both separator styles)
  const hasTraversalSegment = cleaned
    .split(/[\\/]/)
    .some(segment => segment === '..');
  if (hasTraversalSegment) {
    throw new Error('Path traversal detected in project path');
  }
  // First normalize with Node's native path module for current platform
  let normalized = normalize(cleaned);
  // Then handle any remaining separators from other platforms
  // Replace multiple consecutive slashes/backslashes with single separator
  normalized = normalized.replace(/[\\/]+/g, sep);
  // Remove trailing path separator(s)
  const trailingSepRegex = new RegExp(`${sep.replace(/\\/g, '\\\\')}+$`);
  return normalized.replace(trailingSepRegex, '');
}
/**
 * Validate a SQL identifier (table or column name).
 * Accepts only a letter or underscore followed by letters, digits, or
 * underscores; anything else throws.
 */
export function sanitizeSQLIdentifier(identifier: string): string {
  const validIdentifier = /^[a-zA-Z_][a-zA-Z0-9_]*$/;
  if (!validIdentifier.test(identifier)) {
    throw new Error(`Invalid SQL identifier: ${identifier}`);
  }
  return identifier;
}
/**
 * Escape a table name for use inside double-quoted SQL identifiers.
 * Doubles every embedded double-quote per the SQL standard; an empty
 * name is rejected.
 */
export function escapeTableName(tableName: string): string {
  if (!tableName) {
    throw new Error("Table name cannot be empty");
  }
  // "" is the standard escape for a literal " inside a quoted identifier
  return tableName.replace(/"/g, '""');
}
/**
 * Validate a database path for use in an ATTACH DATABASE statement.
 * Rejects empty paths, single quotes (would break the SQL string),
 * null bytes, and any '..' substring (path traversal).
 */
export function validateDatabasePath(dbPath: string): string {
  if (!dbPath) {
    throw new Error("Database path cannot be empty");
  }
  // Ordered checks; each forbidden substring has its own message.
  const rejections: Array<[string, string]> = [
    ["'", "Database path cannot contain single quotes"],
    ["\0", "Database path cannot contain null bytes"],
    ["..", "Path traversal detected in database path"],
  ];
  for (const [needle, message] of rejections) {
    if (dbPath.includes(needle)) {
      throw new Error(message);
    }
  }
  return dbPath;
}
/**
 * Convert a project path into Claude Code's on-disk project folder name.
 * Cross-platform: drive-letter colons are dropped and every run of
 * separators collapses into a single dash.
 *
 * Examples:
 * - macOS/Linux: /Users/joker/github/project → -Users-joker-github-project
 * - Windows: C:\Users\user\project → C-Users-user-project
 * - Windows UNC: \\server\share\project → -server-share-project
 */
export function pathToProjectFolderName(projectPath: string): string {
  const normalized = sanitizeProjectPath(projectPath);
  // 'C:' → 'C' so the drive letter survives as a plain prefix
  const withoutDriveColon = normalized.replace(/^([A-Z]):/i, '$1');
  return withoutDriveColon.replace(/[\\/]+/g, '-');
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/utils/worktree.ts | TypeScript | import { execFileSync } from "child_process";
import { basename, dirname, isAbsolute, resolve } from "path";
import { realpathSync } from "fs";
/**
 * Worktree topology for a project path, as reported by git.
 */
export interface WorktreeInfo {
  canonicalPath: string;     // resolved main-repo root, or the path itself when not a git repo
  worktreePaths: string[];   // known worktrees; always includes the queried path
  isGitRepo: boolean;        // true when a git common dir was found
  commonDir?: string;        // absolute git common dir, when inside a repo
}
/**
 * Make a path absolute and resolve symlinks when possible.
 * Falls back to the plain absolute form for paths that do not exist.
 */
function normalizePath(inputPath: string): string {
  const absolute = resolve(inputPath);
  try {
    return realpathSync(absolute);
  } catch {
    // Path does not exist (or is unreadable) — keep the resolved form.
    return absolute;
  }
}
/**
 * Run a git command in the given directory and return trimmed stdout.
 * Any failure (git missing, not a repo, command error) yields null.
 * stderr is discarded and stdin is ignored.
 */
function runGit(args: string[], cwd: string): string | null {
  try {
    const stdout = execFileSync("git", args, {
      cwd,
      encoding: "utf-8",
      stdio: ["ignore", "pipe", "ignore"],
    });
    return stdout.trim();
  } catch {
    return null;
  }
}
/**
 * Normalize a path reported by git: relative paths are resolved against
 * the working directory first, then symlinks are resolved.
 */
function resolveGitPath(rawPath: string, cwd: string): string {
  if (isAbsolute(rawPath)) {
    return normalizePath(rawPath);
  }
  return normalizePath(resolve(cwd, rawPath));
}
/**
 * Ask git for the common dir of the repository containing projectPath.
 * Returns the normalized absolute dir, or null when not inside a repo
 * (or git is unavailable).
 */
export function getGitCommonDir(projectPath: string): string | null {
  const base = normalizePath(projectPath);
  const commonDir = runGit(["rev-parse", "--git-common-dir"], base);
  return commonDir ? resolveGitPath(commonDir, base) : null;
}
export function getCanonicalProjectPath(projectPath: string): {
canonicalPath: string;
commonDir?: string;
isGitRepo: boolean;
} {
const normalizedProjectPath = normalizePath(projectPath);
const commonDir = getGitCommonDir(normalizedProjectPath);
if (!commonDir) {
return { canonicalPath: normalizedProjectPath, isGitRepo: false };
}
const commonBase = basename(commonDir);
const canonicalPath =
commonBase === ".git" ? normalizePath(dirname(commonDir)) : commonDir;
return { canonicalPath, commonDir, isGitRepo: true };
}
/**
 * List all worktree paths of the repository containing projectPath,
 * parsed from `git worktree list --porcelain`. Paths are normalized and
 * deduplicated in first-seen order; returns [] when git gives nothing.
 */
export function listWorktreePaths(projectPath: string): string[] {
  const base = normalizePath(projectPath);
  const porcelain = runGit(["worktree", "list", "--porcelain"], base);
  if (!porcelain) {
    return [];
  }
  // Set preserves insertion order, which keeps git's listing order.
  const seen = new Set<string>();
  for (const line of porcelain.split(/\r?\n/)) {
    if (!line.startsWith("worktree ")) {
      continue;
    }
    const rawPath = line.slice("worktree ".length).trim();
    if (rawPath) {
      seen.add(resolveGitPath(rawPath, base));
    }
  }
  return [...seen];
}
export function getWorktreeInfo(projectPath: string): WorktreeInfo {
const normalizedProjectPath = normalizePath(projectPath);
const { canonicalPath, commonDir, isGitRepo } = getCanonicalProjectPath(normalizedProjectPath);
let worktreePaths = listWorktreePaths(normalizedProjectPath);
if (worktreePaths.length === 0) {
worktreePaths = [normalizedProjectPath];
} else if (!worktreePaths.includes(normalizedProjectPath)) {
worktreePaths.push(normalizedProjectPath);
}
return {
canonicalPath,
worktreePaths,
isGitRepo,
commonDir,
};
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/cjk_text_formatter/__init__.py | Python | """CJK Text Formatter - A CLI tool for polishing text with CJK (Chinese, Japanese, Korean) typography rules."""
__version__ = "1.1.1"  # Package version string (also exposed to the CLI's --version flag per cli.py)
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
src/cjk_text_formatter/cli.py | Python | """Command-line interface for cjk-text-formatter."""
from __future__ import annotations
import sys
from pathlib import Path
import click
from . import __version__
from .config import load_config, validate_config as validate_config_file, DEFAULT_RULES, RULE_DESCRIPTIONS
from .polish import polish_text, polish_text_verbose
from .processors import process_file, find_files
# Import for accessing package data files
try:
from importlib.resources import files
except ImportError:
# Python 3.8 fallback
from importlib_resources import files
@click.command()
@click.version_option(version=__version__, prog_name='ctf')
@click.argument('input', required=False)
@click.option(
    '--output', '-o',
    type=click.Path(path_type=Path),
    help='Output file path (for single file processing)',
)
@click.option(
    '--inplace', '-i',
    is_flag=True,
    help='Modify files in place',
)
@click.option(
    '--recursive', '-r',
    is_flag=True,
    help='Process directories recursively',
)
@click.option(
    '--dry-run', '-n',
    is_flag=True,
    help='Show changes without writing files',
)
@click.option(
    '--extensions', '-e',
    multiple=True,
    help='File extensions to process (e.g., -e .txt -e .md). Default: .txt, .md, .html',
)
@click.option(
    '--verbose', '-v',
    is_flag=True,
    help='Show summary of changes made',
)
@click.option(
    '--config', '-c',
    type=click.Path(path_type=Path, exists=True),
    help='Path to custom config file',
)
@click.option(
    '--validate-config',
    type=click.Path(path_type=Path, exists=True),
    help='Validate a configuration file and exit',
)
@click.option(
    '--show-config',
    is_flag=True,
    help='Show effective configuration and exit',
)
@click.option(
    '--init-config',
    is_flag=True,
    help='Create example config file and exit',
)
@click.option(
    '--global',
    'config_global',
    is_flag=True,
    help='Use with --init-config to create global config (~/.config/)',
)
@click.option(
    '--force',
    is_flag=True,
    help='Use with --init-config to overwrite existing config',
)
@click.option(
    '--list-rules',
    is_flag=True,
    help='List all available formatting rules and exit',
)
@click.option(
    '--show-config-example',
    is_flag=True,
    help='Print example config to stdout and exit',
)
@click.option(
    '--where',
    'where_config',
    is_flag=True,
    help='Show config file locations and exit',
)
@click.option(
    '--disable',
    'disable_rules',
    multiple=True,
    help='Disable specific rule(s) (can be used multiple times)',
)
@click.option(
    '--enable',
    'enable_rules',
    multiple=True,
    help='Enable specific rule(s) (can be used multiple times)',
)
def main(
    input: str | None,
    output: Path | None,
    inplace: bool,
    recursive: bool,
    dry_run: bool,
    extensions: tuple[str, ...],
    verbose: bool,
    config: Path | None,
    validate_config: Path | None,
    show_config: bool,
    init_config: bool,
    config_global: bool,
    force: bool,
    list_rules: bool,
    show_config_example: bool,
    where_config: bool,
    disable_rules: tuple[str, ...],
    enable_rules: tuple[str, ...],
):
    """Format text with Chinese typography rules.
    Automatically applies:
    • CJK-English spacing (中文English → 中文 English)
    • Em-dash formatting (-- → ——)
    • Ellipsis normalization (. . . → ...)
    • Quote spacing and punctuation fixes
    Supports: Plain text (.txt), Markdown (.md), HTML (.html, .htm)
    Code blocks and <pre>/<code> tags are preserved.
    INPUT can be:
    - Text string (if not a file/directory path)
    - File path (.txt, .md, .html)
    - Directory path
    - Omitted (reads from stdin)
    Examples:
    \b
    # Format text directly
    ctf "文本English混合"
    \b
    # Read from stdin
    echo "文本English混合" | ctf
    cat input.txt | ctf
    \b
    # Format a file
    ctf input.txt
    ctf input.md --output formatted.md
    \b
    # Format files in a directory
    ctf ./docs/
    ctf ./docs/ --recursive --inplace
    \b
    # Dry run (preview changes)
    ctf input.txt --dry-run
    """
    # Utility flags below run (and call sys.exit) BEFORE any config is loaded;
    # their order determines precedence when several flags are combined.
    # Handle --validate-config command (validate and exit)
    if validate_config:
        result = validate_config_file(validate_config)
        click.echo(result.format_report())
        sys.exit(0 if result.is_valid else 1)
    # Handle --init-config command (create config file and exit)
    if init_config:
        _init_config_file(config_global, force)
        sys.exit(0)
    # Handle --list-rules command (list rules and exit)
    if list_rules:
        _list_available_rules()
        sys.exit(0)
    # Handle --show-config-example command (print example and exit)
    if show_config_example:
        _show_config_example()
        sys.exit(0)
    # Handle --where command (show config locations and exit)
    if where_config:
        _show_config_locations(config)
        sys.exit(0)
    # Load configuration
    rule_config = load_config(config_path=config)
    # Apply CLI rule overrides (--disable/--enable)
    if disable_rules or enable_rules:
        _apply_rule_overrides(rule_config, disable_rules, enable_rules)
    # Handle --show-config command (show config and exit)
    # Runs after overrides so the shown config reflects --disable/--enable.
    if show_config:
        _show_effective_config(rule_config, config)
        sys.exit(0)
    # If no input provided, read from stdin
    if input is None:
        if not sys.stdin.isatty():
            input_text = sys.stdin.read()
            if verbose:
                result, stats = polish_text_verbose(input_text, config=rule_config)
                click.echo(result)
                click.echo(stats.format_summary(), err=True)
            else:
                result = polish_text(input_text, config=rule_config)
                click.echo(result)
            return
        else:
            click.echo("Error: No input provided", err=True)
            click.echo("Try 'ctf --help' for usage information", err=True)
            sys.exit(1)
    # Check if input is a file or directory
    input_path = Path(input)
    if input_path.exists():
        # Input is a file or directory
        if input_path.is_file():
            process_single_file(input_path, output, inplace, dry_run, verbose, rule_config)
        elif input_path.is_dir():
            process_directory(input_path, inplace, recursive, dry_run, extensions, verbose, rule_config)
        else:
            click.echo(f"Error: {input_path} is not a file or directory", err=True)
            sys.exit(1)
    else:
        # Treat input as text string
        # Stats go to stderr so stdout stays pipeable formatted text.
        if verbose:
            result, stats = polish_text_verbose(input, config=rule_config)
            if output:
                if dry_run:
                    click.echo(f"Would write to: {output}")
                    click.echo(result)
                else:
                    output.write_text(result, encoding='utf-8')
                    click.echo(f"Formatted text written to: {output}")
                    click.echo(stats.format_summary(), err=True)
            else:
                click.echo(result)
                click.echo(stats.format_summary(), err=True)
        else:
            result = polish_text(input, config=rule_config)
            if output:
                if dry_run:
                    click.echo(f"Would write to: {output}")
                    click.echo(result)
                else:
                    output.write_text(result, encoding='utf-8')
                    click.echo(f"Formatted text written to: {output}")
            else:
                click.echo(result)
def process_single_file(
    file_path: Path,
    output: Path | None,
    inplace: bool,
    dry_run: bool,
    verbose: bool,
    config,
):
    """Process a single file.
    Args:
        file_path: Input file path
        output: Output file path (optional)
        inplace: Modify file in place
        dry_run: Preview changes without writing
        verbose: Show statistics about changes
        config: Rule configuration
    """
    try:
        # For now, verbose stats only work with plain text files
        # For other file types, use regular processing
        if verbose and file_path.suffix.lower() == '.txt':
            content = file_path.read_text(encoding='utf-8')
            result, stats = polish_text_verbose(content, config=config)
        else:
            result = process_file(file_path, config=config)
            stats = None
        # Destination priority: --dry-run > --inplace > --output > stdout.
        if dry_run:
            click.echo(f"=== {file_path} ===")
            click.echo(result)
            click.echo()
            if stats:
                click.echo(stats.format_summary(), err=True)
        elif inplace:
            # newline='' stops Python from translating '\n' to os.linesep,
            # so the formatter does not rewrite the file's line endings.
            with open(file_path, 'w', encoding='utf-8', newline='') as f:
                f.write(result)
            click.secho(f"✓ Formatted: {file_path}", fg='green')
            if stats:
                click.echo(stats.format_summary(), err=True)
        elif output:
            with open(output, 'w', encoding='utf-8', newline='') as f:
                f.write(result)
            click.secho(f"✓ Written to: {output}", fg='green')
            if stats:
                click.echo(stats.format_summary(), err=True)
        else:
            # Print to stdout
            click.echo(result)
            if stats:
                click.echo(stats.format_summary(), err=True)
    except ValueError as e:
        click.echo(f"Error processing {file_path}: {e}", err=True)
        sys.exit(1)
    except Exception as e:
        click.echo(f"Unexpected error processing {file_path}: {e}", err=True)
        sys.exit(1)
def process_directory(
    dir_path: Path,
    inplace: bool,
    recursive: bool,
    dry_run: bool,
    extensions: tuple[str, ...],
    verbose: bool,
    config,
):
    """Process all files in a directory.
    Args:
        dir_path: Directory path
        inplace: Modify files in place
        recursive: Process subdirectories
        dry_run: Preview changes without writing
        extensions: File extensions to process
        verbose: Show statistics about changes
        config: Rule configuration
    """
    # Batch mode refuses to run without an explicit destination choice,
    # so a bare `ctf ./docs/` can never silently overwrite files.
    if not inplace and not dry_run:
        click.echo("Error: Directory processing requires --inplace or --dry-run", err=True)
        sys.exit(1)
    # Convert extensions tuple to list, or use defaults
    ext_list = list(extensions) if extensions else None
    try:
        files = find_files(dir_path, recursive=recursive, extensions=ext_list)
    except FileNotFoundError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(1)
    if not files:
        click.echo(f"No files found in {dir_path}", err=True)
        sys.exit(0)
    click.echo(f"Found {len(files)} file(s) to process")
    success_count = 0
    error_count = 0
    # Per-file errors are counted but do not abort the batch.
    for file_path in files:
        try:
            # For verbose mode with plain text files, show stats
            if verbose and file_path.suffix.lower() == '.txt':
                content = file_path.read_text(encoding='utf-8')
                result, stats = polish_text_verbose(content, config=config)
            else:
                result = process_file(file_path, config=config)
                stats = None
            if dry_run:
                click.echo(f"\n=== {file_path} ===")
                click.echo(result)
                if stats:
                    click.echo(stats.format_summary(), err=True)
            else:
                with open(file_path, 'w', encoding='utf-8', newline='') as f:
                    f.write(result)
                if verbose and stats:
                    click.secho(f"✓ {file_path}", fg='green')
                    click.echo(f" {stats.format_summary()}", err=True)
                else:
                    click.secho(f"✓ {file_path}", fg='green')
            success_count += 1
        except ValueError as e:
            click.secho(f"✗ {file_path}: {e}", fg='red', err=True)
            error_count += 1
        except Exception as e:
            click.secho(f"✗ {file_path}: Unexpected error: {e}", fg='red', err=True)
            error_count += 1
    if not dry_run:
        click.echo(f"\nProcessed {success_count} file(s), {error_count} error(s)")
def _show_effective_config(rule_config, config_path: Path | None) -> None:
    """Print the configuration currently in effect and where it came from.

    Args:
        rule_config: The loaded rule configuration
        config_path: Path to custom config (if provided via --config)
    """
    click.secho("Effective Configuration:", bold=True)
    click.echo()
    # Report the source(s) of the settings.
    click.echo("Config Source:")
    if config_path:
        click.echo(f" Custom: {config_path}")
    else:
        project_cfg = Path.cwd() / "cjk-text-formatter.toml"
        user_cfg = Path.home() / ".config" / "cjk-text-formatter.toml"
        has_project = project_cfg.exists()
        has_user = user_cfg.exists()
        if has_project:
            click.echo(f" Project: {project_cfg}")
        if has_user:
            click.echo(f" User: {user_cfg}")
        if not (has_project or has_user):
            click.echo(" Defaults (no config file)")
    click.echo()
    # Built-in rules, alphabetical, colour-coded by state.
    click.secho("Built-in Rules:", bold=True)
    for name, setting in sorted(rule_config.rules.items()):
        marker, colour = ("✓", "green") if setting else ("✗", "red")
        click.secho(f" {marker} {name}: {setting}", fg=colour)
    click.echo()
    # User-defined regex rules, if any were configured.
    if not rule_config.custom_rules:
        click.echo("Custom Rules: None")
        click.echo()
        return
    click.secho("Custom Rules:", bold=True)
    for idx, rule in enumerate(rule_config.custom_rules):
        click.echo(f" [{idx+1}] {rule.get('name', f'rule_{idx}')}")
        click.echo(f" pattern: {rule.get('pattern', '')}")
        click.echo(f" replacement: {rule.get('replacement', '')}")
        desc = rule.get('description', '')
        if desc:
            click.echo(f" description: {desc}")
        click.echo()
def _init_config_file(config_global: bool, force: bool) -> None:
    """Create a config file from the example template.
    Args:
        config_global: If True, create global config in ~/.config/
        force: If True, overwrite existing config file
    """
    # Determine target path
    if config_global:
        target = Path.home() / ".config" / "cjk-text-formatter.toml"
        location_name = "global config"
    else:
        target = Path.cwd() / "cjk-text-formatter.toml"
        location_name = "project config"
    # Check if file exists
    # Refuse to clobber an existing config unless --force was given.
    if target.exists() and not force:
        click.secho(f"Error: {location_name} already exists at {target}", fg='red', err=True)
        click.echo("Use --force to overwrite", err=True)
        sys.exit(1)
    # Get example config content from package data
    try:
        package_files = files('cjk_text_formatter')
        example_content = (package_files / 'cjk-text-formatter.toml.example').read_text(encoding='utf-8')
    except Exception as e:
        click.secho(f"Error reading example config from package: {e}", fg='red', err=True)
        sys.exit(1)
    # Create parent directory if it doesn't exist (for global config)
    target.parent.mkdir(parents=True, exist_ok=True)
    # Write example content to target
    try:
        target.write_text(example_content, encoding='utf-8')
        click.secho(f"✓ Created {location_name}: {target}", fg='green')
        click.echo()
        click.echo("Next steps:")
        click.echo(f" 1. Edit the config: {target}")
        click.echo(" 2. Validate it: ctf --validate-config " + str(target))
        click.echo(" 3. Test it: ctf --show-config")
    except Exception as e:
        click.secho(f"Error creating config file: {e}", fg='red', err=True)
        sys.exit(1)
def _list_available_rules() -> None:
    """List all available formatting rules with descriptions."""
    click.secho("Available Formatting Rules:", bold=True)
    click.echo()
    # Group rules by category
    # NOTE: this grouping is display-only; rule names must still exist in
    # DEFAULT_RULES or they are silently skipped below.
    categories = {
        'Universal': ['ellipsis_normalization'],
        'Normalization': [
            'fullwidth_alphanumeric',
            'fullwidth_punctuation',
            'fullwidth_parentheses',
            'fullwidth_brackets',
        ],
        'Em-Dash': ['dash_conversion', 'emdash_spacing'],
        'Quotes': ['quote_spacing', 'single_quote_spacing'],
        'Spacing': ['cjk_english_spacing', 'currency_spacing', 'slash_spacing', 'space_collapsing'],
        'Cleanup': ['consecutive_punctuation_limit'],
    }
    for category, rule_names in categories.items():
        click.secho(f"{category}:", bold=True, fg='cyan')
        for rule_name in rule_names:
            if rule_name in DEFAULT_RULES:
                default_value = DEFAULT_RULES[rule_name]
                description = RULE_DESCRIPTIONS.get(rule_name, 'No description available')
                # Format status
                # Boolean rules show ON/OFF; numeric rules show their value.
                if isinstance(default_value, bool):
                    status = "✓ ON " if default_value else "✗ OFF"
                    color = "green" if default_value else "red"
                else:
                    status = f" {default_value}"
                    color = "yellow"
                click.echo(f" {click.style(status, fg=color)} {click.style(rule_name, bold=True)}")
                click.echo(f" {description}")
        click.echo()
    click.echo("Usage:")
    click.echo(" • Enable/disable in config file: [rules] section")
    click.echo(" • Temporarily disable: ctf --disable rule_name")
    click.echo(" • Temporarily enable: ctf --enable rule_name")
    click.echo(" • View current config: ctf --show-config")
def _show_config_example() -> None:
    """Dump the bundled example config file to stdout."""
    try:
        # The example template ships as package data next to the module.
        resource = files('cjk_text_formatter') / 'cjk-text-formatter.toml.example'
        # nl=False: the file already carries its own trailing newline.
        click.echo(resource.read_text(encoding='utf-8'), nl=False)
    except Exception as e:
        click.secho(f"Error reading example config from package: {e}", fg='red', err=True)
        sys.exit(1)
def _show_config_locations(config_path: Path | None) -> None:
    """Show config file search paths and which ones exist.
    Args:
        config_path: Custom config path (if provided via --config)
    """
    click.secho("Config File Locations (priority order):", bold=True)
    click.echo()
    # Check each location
    # Each entry: (priority, description, styled status, exists-flag).
    locations = []
    # 1. Custom path (--config)
    if config_path:
        exists = config_path.exists()
        status = click.style("[EXISTS] ✓", fg='green') if exists else click.style("[NOT FOUND]", fg='red')
        locations.append((1, f"Custom (--config): {config_path}", status, exists))
    else:
        locations.append((1, "Custom (--config): Not specified", click.style("[NOT USED]", fg='yellow'), False))
    # 2. Project config
    project_config = Path.cwd() / "cjk-text-formatter.toml"
    exists = project_config.exists()
    status = click.style("[EXISTS] ✓", fg='green') if exists else click.style("[NOT FOUND]", fg='yellow')
    locations.append((2, f"Project: {project_config}", status, exists))
    # 3. User config
    user_config = Path.home() / ".config" / "cjk-text-formatter.toml"
    exists = user_config.exists()
    status = click.style("[EXISTS] ✓", fg='green') if exists else click.style("[NOT FOUND]", fg='yellow')
    locations.append((3, f"User: {user_config}", status, exists))
    # 4. Defaults
    locations.append((4, "Defaults: Built-in rules", click.style("[ALWAYS AVAILABLE]", fg='green'), True))
    # Print locations
    for priority, location, status, _ in locations:
        click.echo(f" {priority}. {location}")
        click.echo(f" {status}")
    click.echo()
    # Determine which config is active
    # Mirrors load_config()'s precedence: custom > project > user > defaults.
    active_config = None
    if config_path and config_path.exists():
        active_config = f"Custom: {config_path}"
    elif project_config.exists():
        active_config = f"Project: {project_config}"
    elif user_config.exists():
        active_config = f"User: {user_config}"
    else:
        active_config = "Defaults (no config file)"
    click.secho("Active Configuration:", bold=True)
    click.echo(f" {active_config}")
    click.echo()
    click.echo("Tip: Use 'ctf --show-config' to see effective settings")
def _apply_rule_overrides(
    rule_config,
    disable_rules: tuple[str, ...],
    enable_rules: tuple[str, ...],
) -> None:
    """Apply --disable/--enable overrides from the command line.

    Args:
        rule_config: Loaded RuleConfig instance
        disable_rules: Tuple of rule names to disable
        enable_rules: Tuple of rule names to enable
    """
    known = set(DEFAULT_RULES.keys())

    def _apply(names: tuple[str, ...], enabled: bool) -> None:
        # An unknown rule name aborts immediately with the list of valid rules.
        for name in names:
            if name not in known:
                click.secho(f"Error: Unknown rule '{name}'", fg='red', err=True)
                click.echo(f"Available rules: {', '.join(sorted(known))}", err=True)
                sys.exit(1)
            rule_config.rules[name] = enabled

    # Disables are processed first, then enables — same order as before.
    _apply(disable_rules, False)
    _apply(enable_rules, True)
# Allow running this module directly (e.g. `python -m cjk_text_formatter.cli`).
if __name__ == '__main__':
    main()
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
src/cjk_text_formatter/config.py | Python | """Configuration loading and management for text-formater."""
from __future__ import annotations
import re
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
# Try to import tomllib (Python 3.11+)
try:
import tomllib
TOMLLIB_AVAILABLE = True
except ImportError:
TOMLLIB_AVAILABLE = False
# Default rule settings
# Values are usually booleans; `consecutive_punctuation_limit` is an int.
DEFAULT_RULES = {
    # Original rules
    'ellipsis_normalization': True,
    'dash_conversion': True,
    'emdash_spacing': True,
    'quote_spacing': True,
    'single_quote_spacing': True,
    'cjk_english_spacing': True,
    'cjk_parenthesis_spacing': True,
    'space_collapsing': True,
    # New normalization rules
    'fullwidth_punctuation': True,
    'fullwidth_parentheses': True,
    'fullwidth_brackets': False,  # Optional, off by default
    'fullwidth_alphanumeric': True,
    # Cleanup rules
    'consecutive_punctuation_limit': 0,  # 0=unlimited, 1=single, 2=double
    'currency_spacing': True,
    'slash_spacing': True,
}
# Rule descriptions for documentation and --list-rules
# Keys must match DEFAULT_RULES; lookups elsewhere fall back to a placeholder.
RULE_DESCRIPTIONS = {
    'ellipsis_normalization': 'Convert spaced ellipsis to standard form (. . . → ...)',
    'dash_conversion': 'Convert dashes to Chinese em-dash between CJK text (2+ dashes → ——)',
    'emdash_spacing': 'Fix spacing around em-dash (text——more → text —— more)',
    'quote_spacing': 'Smart spacing around double quotes "" (avoids CJK punctuation)',
    'single_quote_spacing': 'Smart spacing around single quotes \'\' (avoids CJK punctuation)',
    'cjk_english_spacing': 'Add spaces between CJK and English/numbers (中文English → 中文 English)',
    'cjk_parenthesis_spacing': 'Add spaces between CJK and half-width parentheses (中文(test) → 中文 (test))',
    'space_collapsing': 'Collapse multiple spaces to single space (preserves indentation)',
    'fullwidth_punctuation': 'Normalize punctuation width based on context (,. → ,。 in CJK)',
    'fullwidth_parentheses': 'Convert () to () in CJK context',
    'fullwidth_brackets': 'Convert [] to 【】 in CJK context',
    'fullwidth_alphanumeric': 'Convert full-width numbers/letters to half-width (123 → 123)',
    'consecutive_punctuation_limit': 'Limit consecutive punctuation (0=unlimited, 1=single, 2=double)',
    'currency_spacing': 'Remove space between currency symbols and amounts ($ 100 → $100)',
    'slash_spacing': 'Remove spaces around slashes (A / B → A/B, preserves URLs)',
}
@dataclass
class RuleConfig:
    """Holds the effective settings for the formatting rules.

    Attributes:
        rules: Mapping of rule name to its setting (bool or int); starts as a
            copy of DEFAULT_RULES so mutations never touch the shared defaults.
        custom_rules: User-defined regex rules from [[custom_rules]] tables.
    """
    # Copy the defaults so each instance can be mutated independently.
    rules: dict[str, bool] = field(default_factory=lambda: dict(DEFAULT_RULES))
    custom_rules: list[dict[str, Any]] = field(default_factory=list)

    def is_enabled(self, rule_name: str) -> bool:
        """Return True when ``rule_name`` is enabled.

        Unknown rule names default to enabled (True).

        Args:
            rule_name: Name of the rule to check

        Returns:
            True if rule is enabled, False otherwise
        """
        return self.rules.get(rule_name, True)

    def get_value(self, rule_name: str, default: Any = None) -> Any:
        """Return the raw setting for ``rule_name`` (for non-boolean rules).

        Args:
            rule_name: Name of the rule
            default: Default value if rule not found

        Returns:
            Rule value or default
        """
        return self.rules.get(rule_name, default)
def load_config(config_path: Path | None = None) -> RuleConfig:
    """Load configuration from file.
    Configuration priority (highest to lowest):
    1. config_path (if provided via --config flag)
    2. ./cjk-text-formatter.toml (project root)
    3. ~/.config/cjk-text-formatter.toml (user config)
    4. Default config (all rules enabled)
    Configs are merged: user config applied first, then project config overrides.
    Args:
        config_path: Optional explicit config file path
    Returns:
        RuleConfig instance with loaded configuration
    """
    if not TOMLLIB_AVAILABLE:
        # Fallback for Python <3.11
        # TODO: Could print warning to stderr
        return RuleConfig()
    # Start with defaults
    rules = DEFAULT_RULES.copy()
    custom_rules = []
    # Layers below merge in ascending priority: each later merge overwrites
    # built-in rule values and appends its custom rules.
    # Load user config first (if exists)
    user_config_path = Path.home() / ".config" / "cjk-text-formatter.toml"
    if user_config_path.exists():
        user_config = _load_toml_file(user_config_path)
        if user_config:
            _merge_config_data(rules, custom_rules, user_config)
    # Load project config (overrides user config)
    project_config_path = Path.cwd() / "cjk-text-formatter.toml"
    if project_config_path.exists():
        project_config = _load_toml_file(project_config_path)
        if project_config:
            _merge_config_data(rules, custom_rules, project_config)
    # Load explicit config path (highest priority)
    if config_path and config_path.exists():
        explicit_config = _load_toml_file(config_path)
        if explicit_config:
            _merge_config_data(rules, custom_rules, explicit_config)
    return RuleConfig(rules=rules, custom_rules=custom_rules)
def _load_toml_file(file_path: Path) -> dict[str, Any] | None:
"""Load and parse a TOML file.
Args:
file_path: Path to TOML file
Returns:
Parsed TOML data or None if loading fails
"""
try:
with open(file_path, 'rb') as f:
return tomllib.load(f)
except (FileNotFoundError, PermissionError, tomllib.TOMLDecodeError):
# Expected errors - file doesn't exist, can't read, or invalid TOML
return None
except Exception as e:
# Unexpected error - log for debugging but don't crash
import sys
print(f"Warning: Unexpected error loading config {file_path}: {e}", file=sys.stderr)
return None
def _merge_config_data(rules: dict[str, bool], custom_rules: list, config_data: dict) -> None:
    """Fold one parsed config file into the accumulated settings.

    Args:
        rules: Existing rules dict (modified in place)
        custom_rules: Existing custom rules list (modified in place)
        config_data: Config data to merge
    """
    # Only known built-in rule names are honoured; anything else is ignored.
    for key, value in config_data.get('rules', {}).items():
        if key in DEFAULT_RULES:
            rules[key] = value
    # Custom rules are additive across config layers.
    custom_rules.extend(config_data.get('custom_rules', []))
def merge_configs(base: RuleConfig, override: RuleConfig) -> RuleConfig:
    """Combine two configs; values from ``override`` win on conflicts.

    Args:
        base: Base configuration
        override: Override configuration

    Returns:
        New RuleConfig with merged settings (inputs are left untouched)
    """
    # Dict-unpacking: later keys (override) shadow earlier ones (base).
    combined_rules = {**base.rules, **override.rules}
    combined_custom = [*base.custom_rules, *override.custom_rules]
    return RuleConfig(rules=combined_rules, custom_rules=combined_custom)
@dataclass
class ValidationResult:
    """Outcome of validating a config file.

    Attributes:
        config_path: The file that was validated.
        is_valid: False when any error was recorded.
        errors: Fatal problems found in the file.
        warnings: Non-fatal issues worth reporting.
    """
    config_path: Path
    is_valid: bool = True
    errors: list[str] = field(default_factory=list)
    warnings: list[str] = field(default_factory=list)

    def format_report(self) -> str:
        """Build a human-readable validation report.

        Returns:
            Formatted validation report string
        """
        out = [f"Validating: {self.config_path}", ""]
        # Fast path: clean bill of health.
        if self.is_valid and not self.warnings:
            out.append("✓ Configuration is valid")
            return "\n".join(out)
        if self.errors:
            out.append("Errors:")
            out.extend(f" ✗ {err}" for err in self.errors)
            out.append("")
        if self.warnings:
            out.append("Warnings:")
            out.extend(f" ⚠ {warn}" for warn in self.warnings)
            out.append("")
        # Warnings-only files are still valid.
        if not self.errors:
            out.append("✓ Configuration is valid (with warnings)")
        return "\n".join(out)
def validate_config(config_path: Path) -> ValidationResult:
    """Validate a configuration file.
    Checks:
    - File exists and is readable
    - Valid TOML syntax
    - Valid structure ([rules], [[custom_rules]])
    - Rule names match known built-in rules
    - Custom rules have required fields (name, pattern, replacement)
    - Regex patterns compile successfully
    Args:
        config_path: Path to config file to validate
    Returns:
        ValidationResult with validation details
    """
    result = ValidationResult(config_path=config_path)
    # Check Python version
    if not TOMLLIB_AVAILABLE:
        result.is_valid = False
        result.errors.append(
            "Config validation requires Python 3.11+ (tomllib not available)"
        )
        return result
    # Check if file exists
    if not config_path.exists():
        result.is_valid = False
        result.errors.append(f"Config file not found: {config_path}")
        return result
    # Check if file is readable
    if not config_path.is_file():
        result.is_valid = False
        result.errors.append(f"Path is not a file: {config_path}")
        return result
    # Try to load and parse TOML
    try:
        with open(config_path, 'rb') as f:
            config_data = tomllib.load(f)
    except PermissionError:
        result.is_valid = False
        result.errors.append(f"Cannot read file (permission denied): {config_path}")
        return result
    except tomllib.TOMLDecodeError as e:
        result.is_valid = False
        result.errors.append(f"TOML syntax error: {e}")
        return result
    except Exception as e:
        result.is_valid = False
        result.errors.append(f"Failed to load config: {e}")
        return result
    # Validate built-in rules section
    # From here on errors accumulate; we keep checking so the report lists
    # every problem in one pass instead of stopping at the first.
    if 'rules' in config_data:
        if not isinstance(config_data['rules'], dict):
            result.is_valid = False
            result.errors.append("'rules' section must be a table/dict")
        else:
            # Check for unknown rule names
            for rule_name in config_data['rules']:
                if rule_name not in DEFAULT_RULES:
                    result.is_valid = False
                    result.errors.append(
                        f"Unknown rule name: '{rule_name}'. "
                        f"Valid rules: {', '.join(sorted(DEFAULT_RULES.keys()))}"
                    )
    # Validate custom rules section
    if 'custom_rules' in config_data:
        if not isinstance(config_data['custom_rules'], list):
            result.is_valid = False
            result.errors.append("'custom_rules' must be an array of tables")
        else:
            for i, rule in enumerate(config_data['custom_rules']):
                rule_id = f"custom_rules[{i}]"
                # Check required fields
                if 'name' not in rule:
                    result.is_valid = False
                    result.errors.append(f"{rule_id}: Missing required field 'name'")
                    continue  # Can't check other fields without name
                rule_name = rule.get('name', f'rule_{i}')
                if 'pattern' not in rule:
                    result.is_valid = False
                    result.errors.append(f"{rule_id} ({rule_name}): Missing required field 'pattern'")
                if 'replacement' not in rule:
                    result.is_valid = False
                    result.errors.append(f"{rule_id} ({rule_name}): Missing required field 'replacement'")
                # Validate regex pattern if present
                if 'pattern' in rule:
                    try:
                        re.compile(rule['pattern'])
                    except re.error as e:
                        result.is_valid = False
                        result.errors.append(
                            f"{rule_id} ({rule_name}): Invalid regex pattern: {e}"
                        )
    return result
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
src/cjk_text_formatter/polish.py | Python | """Text polishing functions for Chinese typography."""
from __future__ import annotations
import re
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .config import RuleConfig
# Regular expressions
CHINESE_RE = re.compile(r"[\u4e00-\u9fff]")
HANGUL_RE = re.compile(r"[\uac00-\ud7af]")
# CJK character ranges for pattern matching
HAN = r'\u4e00-\u9fff'  # Chinese characters + Japanese Kanji
HIRAGANA = r'\u3040-\u309f'  # Japanese Hiragana
KATAKANA = r'\u30a0-\u30ff'  # Japanese Katakana
HANGUL = r'\uac00-\ud7af'  # Korean Hangul
# Combined patterns for different use cases
CJK_ALL = f'{HAN}{HIRAGANA}{KATAKANA}{HANGUL}'  # All CJK scripts
CJK_NO_KOREAN = f'{HAN}{HIRAGANA}{KATAKANA}'  # Chinese + Japanese only
# CJK punctuation constants
CJK_TERMINAL_PUNCTUATION = ',。!?;:、'
CJK_CLOSING_BRACKETS = '》」』】)〉'
CJK_OPENING_BRACKETS = '《「『【(〈'
CJK_EM_DASH = '——'
# CJK character range for dash conversion (includes Han, Hiragana, Katakana, and CJK punctuation)
CJK_CHARS_PATTERN = rf'[{HAN}{HIRAGANA}{KATAKANA}《》「」『』【】()〈〉,。!?;:、]'
# Pre-compiled regex patterns for performance
ELLIPSIS_PATTERN = re.compile(r"\s*\.\s+\.\s+\.(?:\s+\.)*")
ELLIPSIS_SPACING_PATTERN = re.compile(r"\.\.\.\s*(?=\S)")
# Match 2+ dashes between CJK characters (with optional whitespace)
DASH_PATTERN = re.compile(rf'({CJK_CHARS_PATTERN})\s*-{{2,}}\s*({CJK_CHARS_PATTERN})')
EMDASH_SPACING_PATTERN = re.compile(r"([^\s])\s*——\s*([^\s])")
# CJK-parenthesis spacing patterns
CJK_OPENING_PAREN_PATTERN = re.compile(rf'([{CJK_ALL}])\(')
CLOSING_PAREN_CJK_PATTERN = re.compile(rf'\)([{CJK_ALL}])')
# Fullwidth normalization patterns
FULLWIDTH_PARENS_PATTERN = re.compile(rf'\(([{CJK_NO_KOREAN}][^()]*)\)')
FULLWIDTH_BRACKETS_PATTERN = re.compile(rf'\[([{CJK_NO_KOREAN}][^\[\]]*)\]')
# Currency token ($ ¥ € £ ₹ or a 3-letter code) followed by whitespace and a digit.
# BUGFIX: was r'([$¥€£₹USD|CNY|EUR|GBP])\s+(\d)' — the brackets made a character
# CLASS, so any single letter from {U,S,D,C,N,Y,E,R,G,B,P} or '|' before a digit
# matched (e.g. "B 5" → "B5"). Alternation of the currency codes is intended;
# the capture-group layout (group 1 = currency, group 2 = digit) is unchanged.
CURRENCY_SPACING_PATTERN = re.compile(r'((?:USD|CNY|EUR|GBP|[$¥€£₹]))\s+(\d)')
SLASH_SPACING_PATTERN = re.compile(r'(?<![/:])\s*/\s*(?!/)')
MULTI_SPACE_PATTERN = re.compile(r"(\S) {2,}")
TRAILING_SPACE_PATTERN = re.compile(r" +$", flags=re.MULTILINE)
EXCESSIVE_NEWLINE_PATTERN = re.compile(r"\n{3,}")
@dataclass
class PolishStats:
    """Counters describing what a polishing pass changed in a piece of text."""
    ellipsis_normalized: int = 0
    dash_converted: int = 0
    emdash_spacing_fixed: int = 0
    quote_spacing_fixed: int = 0
    cjk_english_spacing_added: int = 0
    spaces_collapsed: int = 0
    custom_rules_applied: dict[str, int] = field(default_factory=dict)

    def _builtin_counts(self) -> tuple[tuple[int, str], ...]:
        # (count, label) pairs in the order they are reported.
        return (
            (self.ellipsis_normalized, "ellipsis normalized"),
            (self.dash_converted, "em-dash converted"),
            (self.emdash_spacing_fixed, "em-dash spacing fixed"),
            (self.quote_spacing_fixed, "quote spacing fixed"),
            (self.cjk_english_spacing_added, "CJK-English spacing added"),
            (self.spaces_collapsed, "spaces collapsed"),
        )

    def has_changes(self) -> bool:
        """Check if any changes were made."""
        if any(count for count, _ in self._builtin_counts()):
            return True
        # A non-empty custom-rule map counts as a change, matching the
        # built-in behavior of truthiness on the dict itself.
        return bool(self.custom_rules_applied)

    def format_summary(self) -> str:
        """Format a human-readable summary of changes."""
        parts = [f"{count} {label}" for count, label in self._builtin_counts() if count]
        # Custom rules are listed after the built-in counters.
        parts.extend(
            f"{count} {rule_name} applied"
            for rule_name, count in self.custom_rules_applied.items()
            if count > 0
        )
        if not parts:
            return "No changes made"
        return "Changes: " + ", ".join(parts)
def contains_cjk(text: str) -> bool:
    """Check if text contains any CJK characters (Han/Kanji/Hangul).

    Used as a gate: CJK-specific typography rules (spacing with English/
    numbers, em-dash, quotes) only run on text containing Han characters
    (Chinese/Japanese Kanji) or Korean Hangul. Fullwidth punctuation rules
    separately use CJK_NO_KOREAN, since Korean uses Western punctuation.

    Args:
        text: Text to check

    Returns:
        True if text contains Han or Hangul characters, False otherwise
    """
    if CHINESE_RE.search(text) is not None:
        return True
    return HANGUL_RE.search(text) is not None
def _normalize_ellipsis(text: str) -> str:
    """Collapse spaced ellipsis (". . ." / ". . . .") into a standard "...".

    Targets patterns that can appear in AI translations; applied to all
    languages, not just CJK text.

    Args:
        text: Text to normalize

    Returns:
        Text with normalized ellipsis
    """
    # Spaced dots (and any leading space before them) become "...".
    collapsed = ELLIPSIS_PATTERN.sub("...", text)
    # Then force exactly one space between "..." and the next visible char.
    return ELLIPSIS_SPACING_PATTERN.sub("... ", collapsed)
def _replace_dash(text: str) -> str:
    """Convert runs of 2+ dashes between CJK characters into ——.

    Conversion only fires when BOTH neighbours are CJK characters or CJK
    punctuation (see DASH_PATTERN). Spacing rules:
    - No space between a closing quote/paren (》)) and ——.
    - No space between —— and an opening quote/paren (《().
    - Any other neighbour gets a single space on that side.

    Args:
        text: Text to process

    Returns:
        Text with dashes converted to —— with proper spacing
    """
    def _to_emdash(m: re.Match[str]) -> str:
        left, right = m.group(1), m.group(2)
        lead = "" if left in (")", "》") else " "
        trail = "" if right in ("(", "《") else " "
        return f"{left}{lead}——{trail}{right}"

    return DASH_PATTERN.sub(_to_emdash, text)
def _fix_emdash_spacing(text: str) -> str:
    """Normalize spacing around existing —— (em-dash) sequences.

    Spacing rules:
      * no space between a closing quote/paren (》 or )) and ——
      * no space between —— and an opening quote/paren (《 or ()
      * ordinary text gets a space on both sides

    Args:
        text: Input text.

    Returns:
        Text with corrected em-dash spacing.
    """

    def _emit(m: re.Match[str]) -> str:
        left, right = m.group(1), m.group(2)
        lead = "" if left in (")", "》") else " "
        trail = "" if right in ("(", "《") else " "
        return f"{left}{lead}——{trail}{right}"

    return EMDASH_SPACING_PATTERN.sub(_emit, text)
def _fix_quote_spacing(text: str, opening_quote: str, closing_quote: str) -> str:
    """Fix spacing around a pair of quotation marks with CJK-aware rules.

    Generic implementation usable for any quote pair (double, single, …).

    Rules:
      * a space is inserted before an opening quote preceded by an
        alphanumeric or CJK character;
      * a space is inserted after a closing quote followed by an
        alphanumeric or CJK character;
      * NO space is added next to CJK punctuation that already carries
        visual spacing: terminal marks ,。!?;:、, book titles 《》,
        corner brackets 「」『』, lenticular brackets 【】, parens (),
        angle brackets 〈〉, and the em-dash ——.

    Args:
        text: Text to process.
        opening_quote: Opening quote character (e.g. " or ').
        closing_quote: Closing quote character (e.g. " or ').

    Returns:
        Text with corrected quote spacing.
    """
    # Characters that already provide visual spacing: never pad next to them.
    skip_before = CJK_CLOSING_BRACKETS + CJK_TERMINAL_PUNCTUATION
    skip_after = CJK_OPENING_BRACKETS + CJK_TERMINAL_PUNCTUATION

    def _pad_opening(m: re.Match[str]) -> str:
        """Insert a space before the opening quote unless the previous
        character is CJK punctuation."""
        prev = m.group(1)
        sep = "" if prev in skip_before else " "
        return f'{prev}{sep}{opening_quote}'

    def _pad_closing(m: re.Match[str]) -> str:
        """Insert a space after the closing quote unless the next
        character is CJK punctuation."""
        nxt = m.group(1)
        sep = "" if nxt in skip_after else " "
        return f'{closing_quote}{sep}{nxt}'

    # The em-dash is a two-character sequence, hence the alternation
    # outside the character class.
    before_pat = (
        f'([A-Za-z0-9{CJK_ALL}{CJK_CLOSING_BRACKETS}{CJK_TERMINAL_PUNCTUATION}]'
        f'|{CJK_EM_DASH}){opening_quote}'
    )
    after_pat = (
        f'{closing_quote}([A-Za-z0-9{CJK_ALL}{CJK_OPENING_BRACKETS}{CJK_TERMINAL_PUNCTUATION}]'
        f'|{CJK_EM_DASH})'
    )
    text = re.sub(before_pat, _pad_opening, text)
    return re.sub(after_pat, _pad_closing, text)
def _fix_quotes(text: str) -> str:
    """Fix spacing around Chinese double quotation marks “”.

    Thin wrapper that delegates to _fix_quote_spacing with the curly
    double-quote pair.

    Args:
        text: Text to process.

    Returns:
        Text with corrected double-quote spacing.
    """
    # U+201C LEFT DOUBLE QUOTATION MARK / U+201D RIGHT DOUBLE QUOTATION MARK
    return _fix_quote_spacing(text, '\u201c', '\u201d')
def _fix_single_quotes(text: str) -> str:
    """Fix spacing around Chinese single quotation marks ‘’.

    Applies the same rules as the double-quote variant, via
    _fix_quote_spacing.

    Args:
        text: Text to process.

    Returns:
        Text with corrected single-quote spacing.
    """
    # U+2018 LEFT SINGLE QUOTATION MARK / U+2019 RIGHT SINGLE QUOTATION MARK
    return _fix_quote_spacing(text, '\u2018', '\u2019')
def _fix_cjk_parenthesis_spacing(text: str) -> str:
    """Pad half-width parentheses that touch CJK characters with spaces.

    Only half-width () are affected; full-width () are handled by the
    fullwidth_parentheses rule.

    Examples:
        这是测试(test)内容 → 这是测试 (test) 内容
        中文(注释)文本 → 中文 (注释) 文本

    Args:
        text: Text to process.

    Returns:
        Text with spaces inserted between CJK characters and parentheses.
    """
    # CJK character immediately followed by "(" → insert a space.
    spaced = CJK_OPENING_PAREN_PATTERN.sub(r'\1 (', text)
    # ")" immediately followed by a CJK character → insert a space.
    return CLOSING_PAREN_CJK_PATTERN.sub(r') \1', spaced)
def _normalize_fullwidth_punctuation(text: str) -> str:
    """Widen terminal punctuation that sits in CJK context.

    Half-width marks become full-width when surrounded by (or trailing)
    Chinese/Japanese characters. Korean is deliberately excluded via
    CJK_NO_KOREAN because Korean text uses Western punctuation.

    Args:
        text: Text to process.

    Returns:
        Text with context-appropriate punctuation widths.
    """
    # (half-width, full-width) pairs to normalize.
    pairs = (
        (',', ','),
        ('.', '。'),
        ('!', '!'),
        ('?', '?'),
        (';', ';'),
        (':', ':'),
    )
    for narrow, wide in pairs:
        escaped = re.escape(narrow)
        # CJK + narrow + CJK  →  CJK + wide + CJK
        text = re.sub(
            f'([{CJK_NO_KOREAN}]){escaped}([{CJK_NO_KOREAN}])',
            f'\\1{wide}\\2',
            text
        )
        # CJK + narrow at a whitespace/line boundary  →  CJK + wide
        text = re.sub(
            f'([{CJK_NO_KOREAN}]){escaped}(?=\\s|$)',
            f'\\1{wide}',
            text
        )
    return text
def _normalize_fullwidth_parentheses(text: str) -> str:
    """Convert half-width parens to full-width () when the wrapped
    content is CJK (adjacency decided by FULLWIDTH_PARENS_PATTERN)."""
    return FULLWIDTH_PARENS_PATTERN.sub(r'(\1)', text)
def _normalize_fullwidth_brackets(text: str) -> str:
    """Convert half-width brackets to full-width 【】 when the wrapped
    content is CJK (adjacency decided by FULLWIDTH_BRACKETS_PATTERN)."""
    return FULLWIDTH_BRACKETS_PATTERN.sub(r'【\1】', text)
def _cleanup_consecutive_punctuation(text: str, limit: int = 1) -> str:
"""Reduce consecutive punctuation marks.
Args:
text: Text to process
limit: Maximum allowed repetitions (0=unlimited, 1=single, 2=double)
Returns:
Text with reduced consecutive punctuation
"""
if limit == 0:
return text
# Punctuation to limit
marks = ['!', '?', '。']
for mark in marks:
if limit == 1:
text = re.sub(f'{re.escape(mark)}{{2,}}', mark, text)
elif limit == 2:
text = re.sub(f'{re.escape(mark)}{{3,}}', mark * 2, text)
return text
def _normalize_fullwidth_alphanumeric(text: str) -> str:
"""Convert full-width alphanumeric to half-width."""
result = []
for char in text:
code = ord(char)
# Full-width numbers (0-9): U+FF10-U+FF19
if 0xFF10 <= code <= 0xFF19:
result.append(chr(code - 0xFEE0))
# Full-width uppercase (A-Z): U+FF21-U+FF3A
elif 0xFF21 <= code <= 0xFF3A:
result.append(chr(code - 0xFEE0))
# Full-width lowercase (a-z): U+FF41-U+FF5A
elif 0xFF41 <= code <= 0xFF5A:
result.append(chr(code - 0xFEE0))
else:
result.append(char)
return ''.join(result)
def _fix_currency_spacing(text: str) -> str:
    """Drop the gap between a currency symbol and the amount that follows,
    using the pre-compiled CURRENCY_SPACING_PATTERN."""
    return CURRENCY_SPACING_PATTERN.sub(r'\1\2', text)
def _fix_slash_spacing(text: str) -> str:
    """Remove spaces around single slashes.

    SLASH_SPACING_PATTERN avoids '//' sequences so URLs are untouched.
    """
    return SLASH_SPACING_PATTERN.sub('/', text)
def _space_between(text: str) -> str:
    """Insert spaces between CJK characters and Latin/numeric runs.

    Covers plain English words, numbers with measurement units
    (5%, 25°C, 45°, 3‰, 25℃ …) and currency amounts ($100, ¥500,
    EUR200 …).

    Args:
        text: Text to process.

    Returns:
        Text with spaces inserted at CJK/alphanumeric boundaries.
    """
    # Optional currency prefix + alphanumeric run + optional unit or
    # trailing currency code.
    token = r"(?:[$¥€£₹][ ]?)?[A-Za-z0-9]+(?:[%‰℃℉]|°[CcFf]?|[ ]?(?:USD|CNY|EUR|GBP|RMB))?"
    # CJK character (any script) immediately followed by such a token.
    text = re.sub(f"([{CJK_ALL}])({token})", r"\1 \2", text)
    # Token immediately followed by a CJK character.
    return re.sub(f"({token})([{CJK_ALL}])", r"\1 \2", text)
def polish_text(text: str, config: RuleConfig | None = None) -> str:
    """Polish text with typography rules.

    Universal rules (all languages):
    - Normalize ellipsis patterns (. . . → ...)
    - Collapse excessive newlines (3+ → 2, one blank line max)

    Chinese-specific rules (gated on contains_cjk):
    - Convert -- to —— with proper spacing
    - Fix spacing around existing ——
    - Fix spacing around Chinese quotes “”
    - Add spaces between Chinese and English/numbers
    - Collapse multiple consecutive spaces
    - Remove trailing spaces at line endings

    Rule ordering matters: see the inline notes about
    cjk_parenthesis_spacing vs fullwidth_parentheses.

    Args:
        text: Text to polish
        config: Optional configuration for rule toggling

    Returns:
        Polished text with typography rules applied
    """
    # If no config, create default (all rules enabled)
    if config is None:
        from .config import RuleConfig
        config = RuleConfig()

    # Universal normalization (applies to all languages)
    if config.is_enabled('ellipsis_normalization'):
        text = _normalize_ellipsis(text)

    # CJK-specific polishing (triggered by presence of Han characters)
    if contains_cjk(text):
        # Normalization rules (run first)
        if config.is_enabled('fullwidth_alphanumeric'):
            text = _normalize_fullwidth_alphanumeric(text)
        if config.is_enabled('fullwidth_punctuation'):
            text = _normalize_fullwidth_punctuation(text)
        # Note: fullwidth_parentheses must run AFTER cjk_parenthesis_spacing
        if config.is_enabled('fullwidth_brackets'):
            text = _normalize_fullwidth_brackets(text)

        # Em-dash and quote rules
        if config.is_enabled('dash_conversion'):
            text = _replace_dash(text)
        if config.is_enabled('emdash_spacing'):
            text = _fix_emdash_spacing(text)
        if config.is_enabled('quote_spacing'):
            text = _fix_quotes(text)
        if config.is_enabled('single_quote_spacing'):
            text = _fix_single_quotes(text)

        # Spacing rules
        if config.is_enabled('cjk_english_spacing'):
            text = _space_between(text)
        # Note: cjk_parenthesis_spacing must run BEFORE fullwidth_parentheses
        if config.is_enabled('cjk_parenthesis_spacing'):
            text = _fix_cjk_parenthesis_spacing(text)
        # Now convert remaining () to () in CJK context
        if config.is_enabled('fullwidth_parentheses'):
            text = _normalize_fullwidth_parentheses(text)
        if config.is_enabled('currency_spacing'):
            text = _fix_currency_spacing(text)
        if config.is_enabled('slash_spacing'):
            text = _fix_slash_spacing(text)

        # Cleanup rules
        punct_limit = config.get_value('consecutive_punctuation_limit', 0)
        if punct_limit > 0:
            text = _cleanup_consecutive_punctuation(text, punct_limit)

        # Collapse multiple spaces to single space (preserve newlines and indentation)
        if config.is_enabled('space_collapsing'):
            # Match non-space + 2+ spaces to preserve leading indentation after newlines
            text = MULTI_SPACE_PATTERN.sub(r"\1 ", text)
        # Remove trailing spaces at end of lines
        text = TRAILING_SPACE_PATTERN.sub("", text)

    # Collapse excessive newlines (3+) to max 2 (one blank line)
    # UNIVERSAL RULE - applies to all files, not just CJK
    text = EXCESSIVE_NEWLINE_PATTERN.sub("\n\n", text)

    # Apply custom regex rules
    text = _apply_custom_rules(text, config.custom_rules)

    return text.rstrip()  # Preserve leading whitespace (for markdown indentation)
def _apply_custom_rules(text: str, custom_rules: list) -> str:
"""Apply custom regex rules to text.
Args:
text: Text to process
custom_rules: List of custom rule dicts with 'pattern' and 'replacement'
Returns:
Text with custom rules applied
"""
for rule in custom_rules:
try:
pattern = rule['pattern']
replacement = rule['replacement']
text = re.sub(pattern, replacement, text)
except (KeyError, re.error):
# Skip invalid rules
continue
return text
def polish_text_verbose(text: str, config: RuleConfig | None = None) -> tuple[str, PolishStats]:
    """Polish text with typography rules and return statistics.

    The counters are computed by scanning for rule patterns *before*
    each transformation is applied, so they are best-effort estimates
    rather than exact diff counts.

    Args:
        text: Text to polish
        config: Optional configuration for rule toggling

    Returns:
        Tuple of (polished text, statistics)
    """
    # If no config, create default (all rules enabled)
    if config is None:
        from .config import RuleConfig
        config = RuleConfig()

    stats = PolishStats()
    original = text

    # Universal normalization - count ellipsis patterns
    if config.is_enabled('ellipsis_normalization'):
        stats.ellipsis_normalized = len(ELLIPSIS_PATTERN.findall(text))
        text = _normalize_ellipsis(text)

    # CJK-specific polishing (triggered by presence of Han characters)
    if contains_cjk(text):
        # Count dash conversions (-- to ——)
        if config.is_enabled('dash_conversion'):
            stats.dash_converted = len(DASH_PATTERN.findall(text))
            text = _replace_dash(text)

        # Count em-dash spacing fixes
        if config.is_enabled('emdash_spacing'):
            matches = EMDASH_SPACING_PATTERN.findall(text)
            # Only count if spacing is actually wrong
            # NOTE(review): temp_text is never advanced past earlier
            # matches and search() runs twice per iteration, so repeated
            # identical contexts may be over- or under-counted — the
            # count is heuristic only.
            temp_text = text
            for before, after in matches:
                left_space = "" if before in (")", "》") else " "
                right_space = "" if after in ("(", "《") else " "
                correct = f"{before}{left_space}——{right_space}{after}"
                # Check if current version doesn't match correct version
                current_pattern = re.compile(re.escape(before) + r"\s*——\s*" + re.escape(after))
                if current_pattern.search(temp_text):
                    current_match = current_pattern.search(temp_text).group()
                    if current_match != correct:
                        stats.emdash_spacing_fixed += 1
            text = _fix_emdash_spacing(text)

        # Count quote spacing fixes
        if config.is_enabled('quote_spacing'):
            opening_quote = '\u201c'
            closing_quote = '\u201d'
            quote_before = len(re.findall(f'([A-Za-z0-9{CJK_ALL}]){opening_quote}', text))
            quote_after = len(re.findall(f'{closing_quote}([A-Za-z0-9{CJK_ALL}])', text))
            stats.quote_spacing_fixed = quote_before + quote_after
            text = _fix_quotes(text)

        # Count CJK-English spacing additions
        if config.is_enabled('cjk_english_spacing'):
            num_pattern = r"[A-Za-z0-9]+(?:[%‰℃℉]|°[CcFf]?)?"
            cjk_before_eng = len(re.findall(f"([{CJK_ALL}])({num_pattern})", text))
            eng_before_cjk = len(re.findall(f"({num_pattern})([{CJK_ALL}])", text))
            stats.cjk_english_spacing_added = cjk_before_eng + eng_before_cjk
            text = _space_between(text)

        # Count multiple spaces (preserve newlines and indentation)
        if config.is_enabled('space_collapsing'):
            # Match non-space + 2+ spaces to preserve leading indentation
            stats.spaces_collapsed = len(MULTI_SPACE_PATTERN.findall(text))
            text = MULTI_SPACE_PATTERN.sub(r"\1 ", text)
        # Remove trailing spaces at end of lines
        text = TRAILING_SPACE_PATTERN.sub("", text)

    # Collapse excessive newlines (3+) to max 2 (one blank line)
    # UNIVERSAL RULE - applies to all files, not just CJK
    text = EXCESSIVE_NEWLINE_PATTERN.sub("\n\n", text)

    # Apply custom regex rules and track counts
    for rule in config.custom_rules:
        try:
            pattern = rule['pattern']
            replacement = rule['replacement']
            rule_name = rule.get('name', 'custom')
            # Count matches before applying
            matches = re.findall(pattern, text)
            count = len(matches)
            if count > 0:
                stats.custom_rules_applied[rule_name] = count
            text = re.sub(pattern, replacement, text)
        except (KeyError, re.error):
            # Skip invalid rules
            continue

    return text.rstrip(), stats  # Preserve leading whitespace (for markdown indentation)
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
src/cjk_text_formatter/processors.py | Python | """File processors for different file types."""
from __future__ import annotations
import os
import re
from pathlib import Path
from typing import List
from .config import RuleConfig
from .polish import polish_text, EXCESSIVE_NEWLINE_PATTERN
def validate_safe_path(file_path: Path, base_dir: Path | None = None) -> Path:
"""Validate that a file path doesn't attempt path traversal.
Args:
file_path: Path to validate
base_dir: Optional base directory to restrict paths within
Returns:
Resolved absolute path
Raises:
ValueError: If path attempts to escape base directory or contains suspicious patterns
"""
# Resolve to absolute path
resolved = file_path.resolve()
# Check for suspicious patterns that might indicate path traversal attempts
path_str = str(file_path)
if '..' in path_str or path_str.startswith('/'):
# Only allow these if the resolved path is safe
pass
# If base_dir specified, ensure path doesn't escape it
if base_dir:
base_resolved = base_dir.resolve()
try:
# Check if resolved path is within base directory
resolved.relative_to(base_resolved)
except ValueError:
raise ValueError(
f"Path '{file_path}' attempts to access files outside allowed directory '{base_dir}'"
)
return resolved
class TextProcessor:
    """Processor for plain ``.txt`` files — polishing is applied directly
    to the whole content with no structure to preserve."""

    def process(self, text: str, config: RuleConfig | None = None) -> str:
        """Polish plain-text content.

        Args:
            text: Raw file content.
            config: Optional rule configuration (defaults used when None).

        Returns:
            The polished text.
        """
        return polish_text(text, config)
class MarkdownProcessor:
    """Processor for Markdown files that preserves code blocks."""

    def process(self, text: str, config: RuleConfig | None = None) -> str:
        """Process Markdown content, preserving code blocks.

        Preserves:
        - Fenced code blocks (```...```)
        - Indented code blocks (4-space indent)
        - Inline code (`...`)

        Args:
            text: Markdown content to process
            config: Optional rule configuration

        Returns:
            Polished markdown with code blocks preserved
        """
        # Strategy: Replace code blocks with placeholders, process, then restore
        # NOTE(review): assumes the literal text ___CODE_BLOCK_N___ never
        # occurs in the document itself — a collision would corrupt output.

        # Store code blocks
        code_blocks = []

        def save_code(match):
            # Stash the verbatim block and emit a stable placeholder.
            code_blocks.append(match.group(0))
            return f"___CODE_BLOCK_{len(code_blocks)-1}___"

        # Save fenced code blocks (```...```)
        text = re.sub(r'```[\s\S]*?```', save_code, text)

        # Save inline code (`...`)
        text = re.sub(r'`[^`\n]+?`', save_code, text)

        # Process lines, preserving indented code blocks
        lines = text.split('\n')
        processed_lines = []
        in_indented_code = False

        for line in lines:
            # Check if line is indented code block (4+ spaces or tab at start)
            # NOTE(review): the comment says 4+ spaces but the check matches
            # a single leading space — confirm which is intended.
            is_code_line = line.startswith(' ') or line.startswith('\t')

            # Detect start/end of indented code blocks; blank lines inside
            # a code block do not terminate it.
            if is_code_line and not in_indented_code:
                in_indented_code = True
            elif not is_code_line and not line.strip() == '' and in_indented_code:
                in_indented_code = False

            # Only process non-code lines
            if not in_indented_code and not is_code_line:
                line = polish_text(line, config)
            processed_lines.append(line)

        text = '\n'.join(processed_lines)

        # Apply universal newline collapsing (3+ → 2, one blank line max)
        # This must be done after joining lines, as line-by-line processing
        # prevents the pattern from matching consecutive newlines
        text = EXCESSIVE_NEWLINE_PATTERN.sub("\n\n", text)

        # Restore code blocks
        for i, code_block in enumerate(code_blocks):
            text = text.replace(f"___CODE_BLOCK_{i}___", code_block)

        return text
class HTMLProcessor:
    """Processor for HTML files that preserves structure.

    Uses BeautifulSoup when available; otherwise falls back to a
    simpler regex-based pass.
    """

    def __init__(self):
        """Probe whether BeautifulSoup is importable and remember the result."""
        try:
            from bs4 import BeautifulSoup  # noqa: F401 — availability probe only
            self._bs4_available = True
        except ImportError:
            self._bs4_available = False

    def process(self, html: str, config: RuleConfig | None = None) -> str:
        """Process HTML content, formatting text while preserving structure.

        Preserves:
        - All HTML tags and attributes
        - Content in <code> and <pre> tags

        Args:
            html: HTML content to process
            config: Optional rule configuration

        Returns:
            HTML with polished text content
        """
        if self._bs4_available:
            return self._process_with_bs4(html, config)
        return self._process_simple(html, config)

    def _process_with_bs4(self, html: str, config: RuleConfig | None = None) -> str:
        """Process HTML using BeautifulSoup (robust path)."""
        from bs4 import BeautifulSoup, NavigableString

        soup = BeautifulSoup(html, 'html.parser')

        # Tags whose content should NOT be formatted
        skip_tags = {'code', 'pre', 'script', 'style'}

        def process_element(element):
            """Recursively polish text nodes, skipping protected tags."""
            if element.name in skip_tags:
                return  # Don't process content in these tags
            for child in element.children:
                if isinstance(child, NavigableString):
                    # Only touch text nodes with non-whitespace content.
                    if child.string and child.string.strip():
                        polished = polish_text(str(child.string), config)
                        child.replace_with(polished)
                elif hasattr(child, 'children'):
                    # Recursively process child elements
                    process_element(child)

        # Process the document
        if soup.body:
            process_element(soup.body)
        else:
            # Fragment without <body>: walk every top-level element.
            for element in soup.children:
                if hasattr(element, 'children'):
                    process_element(element)

        return str(soup)

    def _process_simple(self, html: str, config: RuleConfig | None = None) -> str:
        """Process HTML with a simple regex-based approach (no BeautifulSoup).

        Fallback path: less robust, but handles simple documents.
        (A previously defined inner helper ``process_text`` was dead
        code — never called — and has been removed.)
        """
        # Shelter <code>/<pre> blocks behind placeholders so their
        # contents are never polished.
        code_blocks = []

        def save_code(match):
            code_blocks.append(match.group(0))
            return f"___HTML_CODE_{len(code_blocks)-1}___"

        # Save <code>...</code> and <pre>...</pre> blocks
        html = re.sub(r'<code[^>]*>[\s\S]*?</code>', save_code, html, flags=re.IGNORECASE)
        html = re.sub(r'<pre[^>]*>[\s\S]*?</pre>', save_code, html, flags=re.IGNORECASE)

        # Polish the text found between a closing '>' and the next '<'.
        html = re.sub(r'>([^<]+)<', lambda m: f'>{polish_text(m.group(1), config)}<', html)

        # Restore the sheltered blocks verbatim.
        for i, code_block in enumerate(code_blocks):
            html = html.replace(f"___HTML_CODE_{i}___", code_block)

        return html
def process_file(file_path: Path, config: RuleConfig | None = None) -> str:
    """Polish a single file, choosing the processor by extension.

    Args:
        file_path: Path of the file to process.
        config: Optional rule configuration.

    Returns:
        The processed file content.

    Raises:
        ValueError: If the extension is unsupported or the path is invalid.
    """
    # Resolve and safety-check the path first.
    safe_path = validate_safe_path(file_path)
    ext = safe_path.suffix.lower()

    if ext == '.txt':
        processor = TextProcessor()
    elif ext == '.md':
        processor = MarkdownProcessor()
    elif ext in ('.html', '.htm'):
        processor = HTMLProcessor()
    else:
        raise ValueError(f"Unsupported file type: {ext}")

    # newline='' keeps the file's original line endings intact.
    with open(safe_path, 'r', encoding='utf-8', newline='') as handle:
        raw = handle.read()

    return processor.process(raw, config)
def find_files(
path: Path,
recursive: bool = False,
extensions: List[str] | None = None,
) -> List[Path]:
"""Find files to process.
Args:
path: File or directory path
recursive: Whether to search recursively in subdirectories
extensions: List of file extensions to include (e.g., ['.txt', '.md'])
If None, defaults to ['.txt', '.md', '.html', '.htm']
Returns:
List of file paths to process
Raises:
FileNotFoundError: If path does not exist
"""
if not path.exists():
raise FileNotFoundError(f"Path does not exist: {path}")
if extensions is None:
extensions = ['.txt', '.md', '.html', '.htm']
# Normalize extensions to lowercase
extensions = [ext.lower() if ext.startswith('.') else f'.{ext.lower()}'
for ext in extensions]
if path.is_file():
return [path]
# Path is a directory
files = []
pattern = '**/*' if recursive else '*'
for file_path in path.glob(pattern):
if file_path.is_file() and file_path.suffix.lower() in extensions:
files.append(file_path)
return sorted(files)
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
tests/__init__.py | Python | """Tests for text-formatter."""
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
tests/test_config.py | Python | """Tests for configuration loading and parsing."""
from __future__ import annotations
import json
import tempfile
from pathlib import Path
from unittest.mock import patch
import pytest
# These imports will fail initially (TDD - RED phase)
from cjk_text_formatter.config import load_config, RuleConfig
class TestConfigLoading:
    """Test configuration file loading from different locations."""

    def test_load_default_config(self):
        """Test default config when no config files exist."""
        # Every Path.exists() probe fails, so no config file is found
        # and load_config() must fall back to built-in defaults.
        with patch('pathlib.Path.exists', return_value=False):
            config = load_config()

        # All built-in rules should be enabled by default
        assert config.rules['ellipsis_normalization'] is True
        assert config.rules['dash_conversion'] is True
        assert config.rules['emdash_spacing'] is True
        assert config.rules['quote_spacing'] is True
        assert config.rules['cjk_english_spacing'] is True
        assert config.rules['space_collapsing'] is True

        # No custom rules by default
        assert config.custom_rules == []

    def test_load_config_from_project_root(self, tmp_path):
        """Test loading config from project root (./cjk-text-formatter.toml)."""
        config_content = """
[rules]
ellipsis_normalization = true
dash_conversion = false
cjk_english_spacing = true
"""
        config_file = tmp_path / "cjk-text-formatter.toml"
        config_file.write_text(config_content)

        # Point cwd at the temp dir so the project-root probe finds the file.
        with patch('pathlib.Path.cwd', return_value=tmp_path):
            config = load_config()

        assert config.rules['ellipsis_normalization'] is True
        assert config.rules['dash_conversion'] is False
        assert config.rules['cjk_english_spacing'] is True

    def test_load_config_from_user_home(self, tmp_path):
        """Test loading config from user home (~/.config/cjk-text-formatter.toml)."""
        config_dir = tmp_path / ".config"
        config_dir.mkdir()
        config_file = config_dir / "cjk-text-formatter.toml"
        config_content = """
[rules]
quote_spacing = false
"""
        config_file.write_text(config_content)

        with patch('pathlib.Path.home', return_value=tmp_path):
            with patch('pathlib.Path.cwd') as mock_cwd:
                # Make sure project config doesn't exist
                mock_cwd.return_value = Path("/nonexistent")
                config = load_config()

        assert config.rules['quote_spacing'] is False

    def test_config_priority_project_over_user(self, tmp_path):
        """Test that project config takes priority over user config."""
        # Create user config
        user_config_dir = tmp_path / "home" / ".config"
        user_config_dir.mkdir(parents=True)
        user_config = user_config_dir / "cjk-text-formatter.toml"
        user_config.write_text("""
[rules]
dash_conversion = false
cjk_english_spacing = false
""")

        # Create project config (overrides the user setting for dash_conversion)
        project_dir = tmp_path / "project"
        project_dir.mkdir()
        project_config = project_dir / "cjk-text-formatter.toml"
        project_config.write_text("""
[rules]
dash_conversion = true
""")

        with patch('pathlib.Path.home', return_value=tmp_path / "home"):
            with patch('pathlib.Path.cwd', return_value=project_dir):
                config = load_config()

        # Project config value wins
        assert config.rules['dash_conversion'] is True
        # User config value for other rules
        assert config.rules['cjk_english_spacing'] is False

    def test_custom_regex_rules_parsing(self, tmp_path):
        """Test parsing custom regex rules from config."""
        config_content = """
[rules]
dash_conversion = true
[[custom_rules]]
name = "arrow_unicode"
pattern = '->'
replacement = '→'
description = "Use Unicode arrow"
[[custom_rules]]
name = "multiply_sign"
pattern = '(\\d+)\\s*x\\s*(\\d+)'
replacement = '\\1×\\2'
description = "Use multiplication sign"
"""
        config_file = tmp_path / "cjk-text-formatter.toml"
        config_file.write_text(config_content)

        # Patch both cwd and home to isolate test from real config files
        with patch('pathlib.Path.cwd', return_value=tmp_path):
            with patch('pathlib.Path.home', return_value=tmp_path / "fake_home"):
                config = load_config()

        assert len(config.custom_rules) == 2

        # First custom rule
        assert config.custom_rules[0]['name'] == 'arrow_unicode'
        assert config.custom_rules[0]['pattern'] == '->'
        assert config.custom_rules[0]['replacement'] == '→'

        # Second custom rule
        assert config.custom_rules[1]['name'] == 'multiply_sign'
        assert config.custom_rules[1]['pattern'] == r'(\d+)\s*x\s*(\d+)'
class TestPython310Fallback:
    """Test graceful degradation for Python <3.11 (no tomllib)."""

    def test_fallback_when_tomllib_unavailable(self):
        """Test that config falls back to defaults when tomllib is not available."""
        with patch('cjk_text_formatter.config.TOMLLIB_AVAILABLE', False):
            config = load_config()

        # Should return default config (all rules enabled)
        assert config.rules['ellipsis_normalization'] is True
        assert config.rules['dash_conversion'] is True
        assert config.custom_rules == []

    def test_warning_message_python_310(self, capsys):
        """Test that a warning is shown when config is unavailable."""
        # NOTE(review): this test makes no assertions — the capsys
        # fixture is requested but its output is never inspected, so it
        # only verifies load_config() doesn't raise. Consider asserting
        # on the captured warning text.
        with patch('cjk_text_formatter.config.TOMLLIB_AVAILABLE', False):
            # Try to load config from a file that exists
            config = load_config()

        # Should print warning (implementation detail)
        # This test ensures users are notified
class TestRuleConfig:
    """Test RuleConfig dataclass."""

    def test_rule_config_defaults(self):
        """Test RuleConfig with default values."""
        config = RuleConfig()

        # All built-in rules enabled by default
        assert config.rules['ellipsis_normalization'] is True
        assert config.rules['dash_conversion'] is True
        assert config.custom_rules == []

    def test_rule_config_is_enabled(self):
        """Test checking if a rule is enabled."""
        # Explicitly disabling one rule must not affect the defaults of
        # the other rules.
        config = RuleConfig(rules={'dash_conversion': False})

        assert config.is_enabled('dash_conversion') is False
        assert config.is_enabled('cjk_english_spacing') is True  # Default
class TestConfigWithPath:
    """Test loading config from a specific path."""

    def test_load_config_with_custom_path(self, tmp_path):
        """Test loading config from --config PATH argument."""
        config_file = tmp_path / "my_custom_config.toml"
        config_file.write_text("""
[rules]
ellipsis_normalization = false
""")

        # An explicit config_path bypasses the cwd/home search entirely.
        config = load_config(config_path=config_file)

        assert config.rules['ellipsis_normalization'] is False
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
tests/test_config_validation.py | Python | """Tests for config validation functionality."""
from __future__ import annotations
import sys
from pathlib import Path
import pytest
from cjk_text_formatter.config import ValidationResult, validate_config
class TestValidConfigValidation:
    """Test validation of valid configuration files."""

    def test_valid_config_passes_all_checks(self, tmp_path):
        """Test that a completely valid config passes validation."""
        config_file = tmp_path / "valid.toml"
        config_file.write_text("""
[rules]
ellipsis_normalization = true
dash_conversion = true
cjk_english_spacing = false
[[custom_rules]]
name = "arrow_unicode"
pattern = '->'
replacement = '→'
description = "Use Unicode arrow"
""")

        result = validate_config(config_file)

        # A fully well-formed config yields no errors and no warnings.
        assert result.is_valid
        assert len(result.errors) == 0
        assert len(result.warnings) == 0

    def test_valid_config_with_only_rules(self, tmp_path):
        """Test config with only built-in rules (no custom rules)."""
        config_file = tmp_path / "rules_only.toml"
        config_file.write_text("""
[rules]
ellipsis_normalization = false
dash_conversion = true
""")

        result = validate_config(config_file)

        assert result.is_valid
        assert len(result.errors) == 0

    def test_valid_config_with_only_custom_rules(self, tmp_path):
        """Test config with only custom rules (no built-in rules section)."""
        config_file = tmp_path / "custom_only.toml"
        config_file.write_text("""
[[custom_rules]]
name = "test_rule"
pattern = 'foo'
replacement = 'bar'
""")

        result = validate_config(config_file)

        assert result.is_valid
        assert len(result.errors) == 0

    def test_empty_config_is_valid(self, tmp_path):
        """Test that an empty config file is valid."""
        # An empty file parses to an empty document and must not error.
        config_file = tmp_path / "empty.toml"
        config_file.write_text("")

        result = validate_config(config_file)

        assert result.is_valid
        assert len(result.errors) == 0
class TestTOMLSyntaxValidation:
    """Test TOML syntax error detection."""

    @pytest.mark.skipif(sys.version_info < (3, 11), reason="Requires Python 3.11+")
    def test_invalid_toml_syntax(self, tmp_path):
        """Test that TOML syntax errors are detected."""
        config_file = tmp_path / "invalid.toml"
        # Unterminated table header -> parse error.
        config_file.write_text("""
[rules
ellipsis_normalization = true
""")

        result = validate_config(config_file)

        assert not result.is_valid
        assert len(result.errors) > 0
        # The error text should identify a syntax/parse problem.
        assert any("toml syntax" in err.lower() or "parse" in err.lower()
                   for err in result.errors)

    @pytest.mark.skipif(sys.version_info < (3, 11), reason="Requires Python 3.11+")
    def test_malformed_table(self, tmp_path):
        """Test detection of malformed TOML tables."""
        config_file = tmp_path / "malformed.toml"
        # Array-of-tables header missing its closing bracket.
        config_file.write_text("""
[[custom_rules]
name = "missing_bracket"
""")

        result = validate_config(config_file)

        assert not result.is_valid
        assert len(result.errors) > 0
class TestRuleNameValidation:
    """Unknown built-in rule names under [rules] must be reported."""

    def test_unknown_builtin_rule_name(self, tmp_path):
        """A single unrecognized key under [rules] fails validation and is named."""
        cfg = tmp_path / "unknown_rule.toml"
        cfg.write_text("""
[rules]
unknown_rule_name = true
ellipsis_normalization = true
""")
        outcome = validate_config(cfg)
        assert not outcome.is_valid
        assert any("unknown_rule_name" in e.lower() for e in outcome.errors)

    def test_multiple_unknown_rules(self, tmp_path):
        """Every unrecognized rule name is reported, not just the first."""
        cfg = tmp_path / "multiple_unknown.toml"
        cfg.write_text("""
[rules]
fake_rule_1 = true
fake_rule_2 = false
cjk_english_spacing = true
""")
        outcome = validate_config(cfg)
        assert not outcome.is_valid
        for bogus in ("fake_rule_1", "fake_rule_2"):
            assert any(bogus in e.lower() for e in outcome.errors)
class TestCustomRuleValidation:
    """Structural and regex validation of [[custom_rules]] entries."""

    def test_missing_pattern_field(self, tmp_path):
        """Omitting 'pattern' is an error that names the offending rule."""
        cfg = tmp_path / "missing_pattern.toml"
        cfg.write_text("""
[[custom_rules]]
name = "incomplete"
replacement = "bar"
""")
        outcome = validate_config(cfg)
        assert not outcome.is_valid
        assert any("pattern" in e.lower() and "incomplete" in e.lower()
                   for e in outcome.errors)

    def test_missing_replacement_field(self, tmp_path):
        """Omitting 'replacement' is an error that names the offending rule."""
        cfg = tmp_path / "missing_replacement.toml"
        cfg.write_text("""
[[custom_rules]]
name = "incomplete"
pattern = "foo"
""")
        outcome = validate_config(cfg)
        assert not outcome.is_valid
        assert any("replacement" in e.lower() and "incomplete" in e.lower()
                   for e in outcome.errors)

    def test_missing_name_field(self, tmp_path):
        """A custom rule without a 'name' field is rejected."""
        cfg = tmp_path / "missing_name.toml"
        cfg.write_text("""
[[custom_rules]]
pattern = "foo"
replacement = "bar"
""")
        outcome = validate_config(cfg)
        assert not outcome.is_valid
        assert any("name" in e.lower() for e in outcome.errors)

    def test_invalid_regex_pattern(self, tmp_path):
        """A pattern that fails to compile is reported together with the rule name."""
        cfg = tmp_path / "invalid_regex.toml"
        cfg.write_text("""
[[custom_rules]]
name = "bad_regex"
pattern = '(?P<invalid'
replacement = 'x'
""")
        outcome = validate_config(cfg)
        assert not outcome.is_valid
        assert any("regex" in e.lower() or "pattern" in e.lower()
                   for e in outcome.errors)
        assert any("bad_regex" in e.lower() for e in outcome.errors)
class TestFileAccessValidation:
    """Missing or unreadable config files are handled without crashing."""

    def test_nonexistent_file(self, tmp_path):
        """Validating a path that does not exist yields a clear error."""
        cfg = tmp_path / "nonexistent.toml"
        outcome = validate_config(cfg)
        assert not outcome.is_valid
        assert any("not found" in e.lower() or "does not exist" in e.lower()
                   for e in outcome.errors)

    def test_unreadable_file(self, tmp_path):
        """A file with mode 000 is either flagged or tolerated gracefully."""
        cfg = tmp_path / "unreadable.toml"
        cfg.write_text("[rules]\n")
        cfg.chmod(0o000)
        try:
            outcome = validate_config(cfg)
            # NOTE(review): chmod(0o000) does not block reads when running as
            # root (or on Windows), so this accepts both outcomes by design.
            assert not outcome.is_valid or len(outcome.errors) == 0
        finally:
            cfg.chmod(0o644)  # restore permissions so tmp_path cleanup can delete it
class TestValidationResult:
    """Behavior of the ValidationResult object returned by validate_config()."""

    def test_validation_result_with_errors(self, tmp_path):
        """An invalid config yields a result carrying the path and error list."""
        cfg = tmp_path / "errors.toml"
        cfg.write_text("""
[rules]
fake_rule = true
""")
        outcome = validate_config(cfg)
        assert not outcome.is_valid
        assert outcome.config_path == cfg
        assert len(outcome.errors) > 0

    def test_validation_result_with_warnings(self, tmp_path):
        """warnings is always a list, even when empty for a trivially valid config."""
        cfg = tmp_path / "warnings.toml"
        cfg.write_text("""
[rules]
ellipsis_normalization = true
""")
        outcome = validate_config(cfg)
        assert outcome.is_valid
        assert isinstance(outcome.warnings, list)

    def test_validation_result_format_report(self, tmp_path):
        """format_report() renders a non-empty string mentioning the config path."""
        cfg = tmp_path / "test.toml"
        cfg.write_text("""
[rules]
ellipsis_normalization = true
""")
        report = validate_config(cfg).format_report()
        assert isinstance(report, str)
        assert len(report) > 0
        assert str(cfg) in report
class TestPython311Compatibility:
    """validate_config() depends on tomllib, which ships with Python 3.11+."""

    @pytest.mark.skipif(sys.version_info >= (3, 11), reason="Only for Python <3.11")
    def test_validation_on_old_python(self, tmp_path):
        """On older interpreters the result is invalid with a helpful message."""
        cfg = tmp_path / "test.toml"
        cfg.write_text("""
[rules]
ellipsis_normalization = true
""")
        outcome = validate_config(cfg)
        assert not outcome.is_valid
        assert any("python 3.11" in e.lower() or "tomllib" in e.lower()
                   for e in outcome.errors)

    @pytest.mark.skipif(sys.version_info < (3, 11), reason="Requires Python 3.11+")
    def test_validation_on_new_python(self, tmp_path):
        """On 3.11+ the same config validates without errors."""
        cfg = tmp_path / "test.toml"
        cfg.write_text("""
[rules]
ellipsis_normalization = true
""")
        outcome = validate_config(cfg)
        assert outcome.is_valid
        assert not outcome.errors
class TestConfigSourceDetection:
    """The validation result records which config file was inspected."""

    def test_result_includes_config_path(self, tmp_path):
        """config_path on the result echoes the validated file."""
        cfg = tmp_path / "test.toml"
        cfg.write_text("""
[rules]
ellipsis_normalization = true
""")
        assert validate_config(cfg).config_path == cfg

    def test_detects_config_priority_source(self, tmp_path):
        """A custom (explicit) path is reflected in the result."""
        cfg = tmp_path / "test.toml"
        cfg.write_text("""
[rules]
ellipsis_normalization = true
""")
        assert validate_config(cfg).config_path == cfg
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
tests/test_polish.py | Python | """Tests for text polishing functions."""
import pytest
from cjk_text_formatter.polish import (
polish_text,
contains_cjk,
_replace_dash,
_fix_emdash_spacing,
_fix_quotes,
_fix_single_quotes,
_space_between,
_normalize_ellipsis,
)
class TestContainsCJK:
    """contains_cjk() detects Han characters and Korean Hangul only."""

    def test_contains_cjk_with_han(self):
        """Any Han character counts: Chinese, Japanese Kanji, Korean Hanja."""
        for sample in ("这是中文", "mixed 中文 text", "日本語です", "한자韓字"):
            assert contains_cjk(sample) is True

    def test_contains_cjk_with_hangul(self):
        """Pure Hangul (the Korean alphabet) also counts as CJK."""
        for sample in ("한글", "안녕하세요"):
            assert contains_cjk(sample) is True

    def test_contains_cjk_without_han_or_hangul(self):
        """Latin text, digits, the empty string, and pure kana do not count."""
        for sample in ("English only", "123 abc", "", "ひらがな", "カタカナ"):
            assert contains_cjk(sample) is False
class TestNormalizeEllipsis:
    """_normalize_ellipsis() collapses spaced dots and spaces after '...' (universal rule)."""

    def test_spaced_ellipsis_three_dots(self):
        for raw, expected in ((". . .", "..."), ("text . . . more", "text... more")):
            assert _normalize_ellipsis(raw) == expected

    def test_spaced_ellipsis_four_dots(self):
        for raw, expected in ((". . . .", "..."), ("end . . . .", "end...")):
            assert _normalize_ellipsis(raw) == expected

    def test_ellipsis_spacing_after(self):
        # A space is inserted after '...' if missing; an existing one is kept.
        for raw in ("wait...next", "wait... next"):
            assert _normalize_ellipsis(raw) == "wait... next"

    def test_already_normalized(self):
        assert _normalize_ellipsis("text... more") == "text... more"
class TestReplaceDash:
    """_replace_dash() converts runs of '-' to a spaced —— only in CJK context."""

    def test_double_dash_between_chinese(self):
        """'--' flanked by Chinese becomes ' —— '."""
        for raw, expected in (("文本--内容", "文本 —— 内容"), ("中文--更多", "中文 —— 更多")):
            assert _replace_dash(raw) == expected

    def test_triple_dash_between_chinese(self):
        """Three or more dashes collapse to a single ——."""
        for raw, expected in (
            ("中文---更多", "中文 —— 更多"),
            ("中文----更多", "中文 —— 更多"),
            ("文本-----内容", "文本 —— 内容"),
        ):
            assert _replace_dash(raw) == expected

    def test_dash_with_spaces_between_chinese(self):
        """Pre-existing spaces around the dashes are normalized."""
        assert _replace_dash("中文 -- 更多") == "中文 —— 更多"
        assert _replace_dash("中文 -- 更多") == "中文 —— 更多"
        assert _replace_dash("文本 --- 内容") == "文本 —— 内容"

    def test_closing_angle_quote_no_left_space(self):
        """No space is inserted between 》 and ——."""
        for raw in ("《书名》--作者", "《书名》---作者"):
            assert _replace_dash(raw) == "《书名》—— 作者"

    def test_opening_angle_quote_no_right_space(self):
        """No space is inserted between —— and 《."""
        for raw in ("作者--《书名》", "作者---《书名》"):
            assert _replace_dash(raw) == "作者 ——《书名》"

    def test_closing_paren_no_left_space(self):
        """No space is inserted between ) and ——."""
        for raw in ("(注释)--内容", "(注释)---内容"):
            assert _replace_dash(raw) == "(注释)—— 内容"

    def test_opening_paren_no_right_space(self):
        """No space is inserted between —— and (."""
        for raw in ("内容--(注释)", "内容---(注释)"):
            assert _replace_dash(raw) == "内容 ——(注释)"

    def test_both_quotes(self):
        """Quotes on both sides suppress spaces on both sides."""
        for raw in ("《书名》--《续集》", "《书名》---《续集》"):
            assert _replace_dash(raw) == "《书名》——《续集》"

    def test_dash_between_english_not_converted(self):
        """Latin-only context leaves dash runs untouched."""
        for raw in ("text--more", "hello---world", "foo----bar"):
            assert _replace_dash(raw) == raw

    def test_markdown_horizontal_rule_not_converted(self):
        """Bare dash runs (markdown horizontal rules) are preserved verbatim."""
        for raw in ("---", "----", "-----"):
            assert _replace_dash(raw) == raw

    def test_mixed_english_chinese_not_converted(self):
        """A dash run needs CJK on BOTH sides to convert."""
        for raw in ("text--中文", "中文--text"):
            assert _replace_dash(raw) == raw
class TestFixEmdashSpacing:
    """Spacing normalization around pre-existing —— em-dash pairs."""

    def test_regular_text_adds_spaces(self):
        """Plain neighbors get one space on each side of ——."""
        for raw, expected in (("text——more", "text —— more"), ("文本——内容", "文本 —— 内容")):
            assert _fix_emdash_spacing(raw) == expected

    def test_closing_angle_quote_no_left_space(self):
        """No space after 》; previously-inserted wrong spacing is repaired too."""
        for raw in ("《书名》——作者", "《书名》 —— 作者"):
            assert _fix_emdash_spacing(raw) == "《书名》—— 作者"

    def test_opening_angle_quote_no_right_space(self):
        """No space before 《; previously-inserted wrong spacing is repaired too."""
        for raw in ("作者——《书名》", "作者 —— 《书名》"):
            assert _fix_emdash_spacing(raw) == "作者 ——《书名》"

    def test_closing_paren_no_left_space(self):
        assert _fix_emdash_spacing("(注释)——内容") == "(注释)—— 内容"

    def test_opening_paren_no_right_space(self):
        assert _fix_emdash_spacing("内容——(注释)") == "内容 ——(注释)"

    def test_both_quotes(self):
        """Quotes on both sides mean no spaces on either side."""
        for raw in ("《书名》——《续集》", "《书名》 —— 《续集》"):
            assert _fix_emdash_spacing(raw) == "《书名》——《续集》"
class TestFixQuotes:
    """Spacing around Chinese double quotation marks (U+201C / U+201D).

    Unicode escapes are used for the quotes to keep the source ASCII-safe.
    """

    def test_opening_quote_adds_space_before(self):
        """A space is inserted before an opening curly quote."""
        for raw, expected in (
            ('text\u201cword\u201d', 'text \u201cword\u201d'),
            ('文本\u201c内容\u201d', '文本 \u201c内容\u201d'),
            ('123\u201ctest\u201d', '123 \u201ctest\u201d'),
        ):
            assert _fix_quotes(raw) == expected

    def test_closing_quote_adds_space_after(self):
        """A space is inserted after a closing curly quote."""
        for raw, expected in (
            ('\u201cword\u201dtext', '\u201cword\u201d text'),
            ('\u201c内容\u201d文本', '\u201c内容\u201d 文本'),
            ('\u201ctest\u201d123', '\u201ctest\u201d 123'),
        ):
            assert _fix_quotes(raw) == expected

    def test_quote_spacing_with_terminal_punctuation(self):
        """No space between quotes and CJK terminal punctuation (,。!?;:、)."""
        for raw, expected in (
            ('文本,\u201c引用\u201d。', '文本,\u201c引用\u201d。'),
            ('开始。\u201c内容\u201d结束', '开始。\u201c内容\u201d 结束'),
            ('问题!\u201c回答\u201d文本', '问题!\u201c回答\u201d 文本'),
            ('文本\u201c问题\u201d?结束', '文本 \u201c问题\u201d?结束'),
            ('列表;\u201c项目\u201d内容', '列表;\u201c项目\u201d 内容'),
            ('标题:\u201c内容\u201d文本', '标题:\u201c内容\u201d 文本'),
            ('一、\u201c项目\u201d二', '一、\u201c项目\u201d 二'),
        ):
            assert _fix_quotes(raw) == expected

    def test_quote_spacing_with_book_title_marks(self):
        """No space between quotes and book title marks 《》."""
        for raw, expected in (
            ('《书名》\u201c引用\u201d文本', '《书名》\u201c引用\u201d 文本'),
            ('文本\u201c引用\u201d《书名》', '文本 \u201c引用\u201d《书名》'),
            ('《A》\u201cB\u201d《C》', '《A》\u201cB\u201d《C》'),
        ):
            assert _fix_quotes(raw) == expected

    def test_quote_spacing_with_corner_brackets(self):
        """No space between quotes and corner brackets 「」『』."""
        for raw, expected in (
            ('「日文」\u201c引用\u201d文本', '「日文」\u201c引用\u201d 文本'),
            ('文本\u201c引用\u201d「日文」', '文本 \u201c引用\u201d「日文」'),
            ('『重点』\u201c引用\u201d文本', '『重点』\u201c引用\u201d 文本'),
            ('文本\u201c引用\u201d『重点』', '文本 \u201c引用\u201d『重点』'),
        ):
            assert _fix_quotes(raw) == expected

    def test_quote_spacing_with_lenticular_brackets(self):
        """No space between quotes and lenticular brackets 【】."""
        assert _fix_quotes('【注】\u201c引用\u201d文本') == '【注】\u201c引用\u201d 文本'
        assert _fix_quotes('文本\u201c引用\u201d【注】') == '文本 \u201c引用\u201d【注】'

    def test_quote_spacing_with_parentheses(self):
        """No space between quotes and full-width parentheses ()."""
        assert _fix_quotes('(备注)\u201c引用\u201d文本') == '(备注)\u201c引用\u201d 文本'
        assert _fix_quotes('文本\u201c引用\u201d(备注)') == '文本 \u201c引用\u201d(备注)'

    def test_quote_spacing_with_angle_brackets(self):
        """No space between quotes and angle brackets 〈〉."""
        assert _fix_quotes('〈标记〉\u201c引用\u201d文本') == '〈标记〉\u201c引用\u201d 文本'
        assert _fix_quotes('文本\u201c引用\u201d〈标记〉') == '文本 \u201c引用\u201d〈标记〉'

    def test_quote_spacing_with_emdash(self):
        """Spaces ARE required next to em-dash (curly quotes lack visual width)."""
        for raw, expected in (
            ('前文——\u201c引用\u201d后文', '前文—— \u201c引用\u201d 后文'),
            ('前文\u201c引用\u201d——后文', '前文 \u201c引用\u201d ——后文'),
            ('前——\u201c引用\u201d——后', '前—— \u201c引用\u201d ——后'),
        ):
            assert _fix_quotes(raw) == expected
class TestFixSingleQuotes:
    """Spacing around Chinese single quotation marks (U+2018 / U+2019)."""

    def test_single_quote_spacing_with_emdash(self):
        """Spaces ARE required between single quotes and em-dash."""
        for raw, expected in (
            ('前文——\u2018引用\u2019后文', '前文—— \u2018引用\u2019 后文'),
            ('前文\u2018引用\u2019——后文', '前文 \u2018引用\u2019 ——后文'),
            ('前——\u2018引用\u2019——后', '前—— \u2018引用\u2019 ——后'),
        ):
            assert _fix_single_quotes(raw) == expected
class TestSpaceBetween:
    """_space_between() inserts spaces between CJK and Latin letters, digits, and units."""

    def test_chinese_then_english(self):
        for raw, expected in (("中文English", "中文 English"), ("测试test文本", "测试 test 文本")):
            assert _space_between(raw) == expected

    def test_english_then_chinese(self):
        for raw, expected in (("English中文", "English 中文"), ("test测试text", "test 测试 text")):
            assert _space_between(raw) == expected

    def test_chinese_then_number(self):
        for raw, expected in (("数字123", "数字 123"), ("共100个", "共 100 个")):
            assert _space_between(raw) == expected

    def test_number_then_chinese(self):
        for raw, expected in (("123数字", "123 数字"), ("100个item", "100 个 item")):
            assert _space_between(raw) == expected

    def test_already_spaced(self):
        """Correctly spaced text is left alone."""
        for sample in ("中文 English", "test 测试"):
            assert _space_between(sample) == sample

    def test_percentage_spacing(self):
        """The percent sign stays glued to its number; spacing surrounds the pair."""
        for raw, expected in (
            ("占人口比例的5%甚至更多", "占人口比例的 5% 甚至更多"),
            ("增长20%左右", "增长 20% 左右"),
            ("的15%是", "的 15% 是"),
        ):
            assert _space_between(raw) == expected

    def test_temperature_spacing(self):
        """Unicode ℃/℉ and degree+letter units are treated as one token."""
        for raw, expected in (
            ("温度25℃很热", "温度 25℃ 很热"),
            ("约25℉左右", "约 25℉ 左右"),
            ("是25°C今天", "是 25°C 今天"),
            ("约25°c左右", "约 25°c 左右"),
            ("温度25°F较低", "温度 25°F 较低"),
            ("大约25°f吧", "大约 25°f 吧"),
        ):
            assert _space_between(raw) == expected

    def test_degree_spacing(self):
        """A bare degree symbol also binds to its number."""
        for raw, expected in (("角度45°比较", "角度 45° 比较"), ("转90°然后", "转 90° 然后")):
            assert _space_between(raw) == expected

    def test_permille_spacing(self):
        assert _space_between("浓度3‰的溶液") == "浓度 3‰ 的溶液"
class TestJapaneseSupport:
    """Japanese handling (Hiragana, Katakana, Kanji).

    CJK-specific rules only fire when Han characters (Kanji) are present.
    Pure kana is deliberately treated as non-CJK — most real Japanese text
    contains Kanji.
    """

    def test_japanese_mixed_kanji_kana_english_spacing(self):
        """Kanji-bearing text gets spaces around Latin words (original bug report)."""
        assert polish_text("私は毎日Raycastを使って仕事の効率化を助けてくれます") == (
            "私は毎日 Raycast を使って仕事の効率化を助けてくれます"
        )
        assert polish_text("日本語testテキスト") == "日本語 test テキスト"
        assert polish_text("東京でappleを買う") == "東京で apple を買う"

    def test_japanese_numbers_with_units(self):
        """Numbers with units are spaced in Japanese text."""
        assert polish_text("気温は25°Cです") == "気温は 25°C です"
        assert polish_text("価格は100円です") == "価格は 100 円です"
        assert polish_text("約5%程度") == "約 5% 程度"

    def test_japanese_quote_spacing_with_kanji(self):
        """Curly quotes (U+201C/D) get spacing in Kanji context."""
        assert polish_text('私は\u201chello\u201dと言いました') == '私は \u201chello\u201d と言いました'
        assert polish_text('東京は\u201cTokyo\u201dです') == '東京は \u201cTokyo\u201d です'

    def test_japanese_pure_kana_no_spacing(self):
        """Pure kana (no Kanji) deliberately gets no CJK spacing."""
        assert polish_text("これはtestですね") == "これはtestですね"
        assert polish_text("テストtestケース") == "テストtestケース"

    def test_japanese_fullwidth_punctuation_preserved(self):
        """Fullwidth punctuation used in Japanese is left intact."""
        assert polish_text("日本はアジアです。") == "日本はアジアです。"
        assert polish_text("質問は何ですか?") == "質問は何ですか?"
class TestKoreanSupport:
    """Korean (Hangul) handling.

    Hangul now triggers the CJK spacing rules (spaces between Hangul and
    Latin/digits, as with Chinese/Japanese), but Korean keeps its Western
    punctuation, so fullwidth punctuation conversion is excluded.
    """

    def test_korean_with_hanja_english_spacing(self):
        """Hanja-bearing Korean gets spaces around Latin words."""
        assert polish_text("韓國에서test를합니다") == "韓國에서 test 를합니다"
        assert polish_text("漢字apple사용") == "漢字 apple 사용"

    def test_korean_pure_hangul_with_spacing(self):
        """Pure Hangul also triggers the spacing rules."""
        for raw, expected in (
            ("나는매일raycast를사용합니다", "나는매일 raycast 를사용합니다"),
            ("이것은test입니다", "이것은 test 입니다"),
            ("한글apple텍스트", "한글 apple 텍스트"),
        ):
            assert polish_text(raw) == expected

    def test_korean_western_punctuation_preserved(self):
        """Halfwidth periods and commas used in Korean stay halfwidth."""
        for sample in ("이것은 테스트입니다.", "첫째, 둘째, 셋째입니다."):
            assert polish_text(sample) == sample
class TestCJKParenthesisSpacing:
    """Spacing between CJK characters and half-width parentheses."""

    def test_cjk_with_paren_enclosed_text(self):
        """Spaces are added around parens; CJK-only contents may then be converted."""
        # Latin contents: spaces added, parens stay half-width.
        assert polish_text('这是测试(test)内容') == '这是测试 (test) 内容'
        # CJK contents: spaces added, then the fullwidth-parentheses rule may apply.
        assert polish_text('文本(注释)继续') == '文本 (注释) 继续'
        # Hangul + Latin: spaces added, parens stay half-width.
        assert polish_text('한글(Korean)텍스트') == '한글 (Korean) 텍스트'

    def test_cjk_before_opening_paren(self):
        """A space is inserted between CJK and an opening parenthesis."""
        assert polish_text('中文(English') == '中文 (English'
        assert polish_text('日本語(test') == '日本語 (test'

    def test_closing_paren_before_cjk(self):
        """A space is inserted between a closing parenthesis and CJK."""
        assert polish_text('English)中文') == 'English) 中文'
        assert polish_text('test)日本語') == 'test) 日本語'

    def test_already_spaced_parentheses(self):
        """Doubled spaces created by the rule are collapsed back to one."""
        assert polish_text('中文 (test) 文本') == '中文 (test) 文本'
class TestPolishText:
    """End-to-end polishing with the full default rule set."""

    def test_universal_rules_applied_to_non_chinese(self):
        """Ellipsis normalization applies even to pure-Latin text."""
        assert polish_text("wait . . . more") == "wait... more"

    def test_chinese_rules_applied_together(self):
        """Dash spacing, CJK/Latin spacing and number spacing cooperate."""
        out = polish_text("《Python编程》--一本关于Python的书。中文English混合,数字123也包含。")
        assert "《Python编程》—— 一本" in out or "《Python 编程》—— 一本" in out
        assert "中文 English" in out
        assert "数字 123" in out

    def test_em_dash_with_quotes(self):
        """Em-dash spacing interacts correctly with angle quotes."""
        assert polish_text("《书名》——作者") == "《书名》—— 作者"
        assert polish_text("作者——《书名》") == "作者 ——《书名》"

    def test_multiple_spaces_collapsed(self):
        """Runs of spaces collapse to a single space."""
        out = polish_text("文本 太多 空格")
        # NOTE(review): whitespace inside these literals looks mangled in this
        # copy of the file — confirm the multi-space input against the repo.
        assert "  " not in out

    def test_space_collapsing_preserves_newlines(self):
        """Single and double newlines survive space collapsing."""
        assert polish_text("段落一。\n\n段落二。") == "段落一。\n\n段落二。"
        assert polish_text("第一行。\n第二行。") == "第一行。\n第二行。"

    def test_trailing_spaces_removed(self):
        """Spaces at end-of-line (including space-only lines) are stripped."""
        assert polish_text("第一行有空格 \n第二行也有 \n第三行。") == "第一行有空格\n第二行也有\n第三行。"
        assert polish_text("段落一。 \n\n段落二。") == "段落一。\n\n段落二。"
        assert polish_text("文本\n \n更多文本") == "文本\n\n更多文本"

    def test_excessive_newlines_collapsed(self):
        """Runs of 3+ newlines collapse to exactly one blank line."""
        for raw, expected in (
            ("章节一。\n\n\n章节二。", "章节一。\n\n章节二。"),
            ("段落一。\n\n\n\n段落二。", "段落一。\n\n段落二。"),
            ("部分一。\n\n\n\n\n部分二。", "部分一。\n\n部分二。"),
        ):
            assert polish_text(raw) == expected

    def test_space_collapsing_with_mixed_whitespace(self):
        """Leading indentation after a newline is preserved."""
        assert polish_text("文本 太多 空格") == "文本 太多 空格"
        assert polish_text("一些文本\n 更多文本") == "一些文本\n 更多文本"
        assert polish_text("行一\n 行二缩进\n 行三更多缩进") == "行一\n 行二缩进\n 行三更多缩进"

    def test_strip_whitespace(self):
        """Trailing whitespace is removed; leading (markdown indentation) kept."""
        assert polish_text(" text ") == " text"
        assert polish_text(" 中文 ") == " 中文"
        assert polish_text("text ") == "text"
        assert polish_text("中文 ") == "中文"

    def test_non_chinese_text_unchanged(self):
        """Latin-only text passes through untouched (universal rules aside)."""
        sample = "English text with no Chinese"
        assert polish_text(sample) == sample

    def test_mixed_complex_text(self):
        """Real-world mixed sample exercising several rules at once."""
        out = polish_text("作者——李华(生于1980年)。他写了《人生》--一部长篇小说。该书在2018年出版。")
        assert "作者 —— 李华" in out          # em-dash spacing with regular text
        assert "。他" in out or "。 他" in out  # period handling
        assert "《人生》—— 一部" in out        # dash converts; no space after 》
        assert "2018 年" in out                # number-Chinese spacing
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
tests/test_polish_with_config.py | Python | """Tests for polish functions with configuration."""
from __future__ import annotations
import pytest
from cjk_text_formatter.config import RuleConfig
from cjk_text_formatter.polish import polish_text, polish_text_verbose
class TestPolishWithDisabledRules:
    """Rules switched off in RuleConfig must not run."""

    def test_polish_with_dash_conversion_disabled(self):
        """Dashes survive when dash_conversion is off."""
        cfg = RuleConfig(rules={'dash_conversion': False})
        assert polish_text("文本--测试", config=cfg) == "文本--测试"

    def test_polish_with_cjk_spacing_disabled(self):
        """No space is added when cjk_english_spacing is off."""
        cfg = RuleConfig(rules={'cjk_english_spacing': False})
        assert polish_text("文本English", config=cfg) == "文本English"

    def test_polish_with_ellipsis_disabled(self):
        """Spaced dots survive when ellipsis_normalization is off."""
        cfg = RuleConfig(rules={'ellipsis_normalization': False})
        assert polish_text("wait . . . more", config=cfg) == "wait . . . more"

    def test_polish_with_quote_spacing_disabled(self):
        """Quote spacing is skipped when quote_spacing is off."""
        cfg = RuleConfig(rules={'quote_spacing': False})
        assert polish_text('文本"quoted"内容', config=cfg) == '文本"quoted"内容'

    def test_polish_with_multiple_rules_disabled(self):
        """Several rules can be switched off at once."""
        cfg = RuleConfig(rules={
            'dash_conversion': False,
            'cjk_english_spacing': False,
        })
        assert polish_text("文本--English混合", config=cfg) == "文本--English混合"
class TestPolishWithCustomRules:
    """Custom regex rules from config are applied, after the built-ins."""

    def test_single_custom_rule(self):
        """One custom rule performs its substitution."""
        cfg = RuleConfig(custom_rules=[{
            'name': 'arrow_unicode',
            'pattern': '->',
            'replacement': '→',
            'description': 'Use Unicode arrow',
        }])
        assert polish_text("A -> B", config=cfg) == "A → B"

    def test_multiple_custom_rules(self):
        """Several custom rules apply in sequence."""
        cfg = RuleConfig(custom_rules=[
            {'name': 'arrow_unicode', 'pattern': '->', 'replacement': '→'},
            {'name': 'multiply_sign',
             'pattern': r'(\d+)\s*x\s*(\d+)',
             'replacement': r'\1×\2'},
        ])
        assert polish_text("A -> B and 3 x 4", config=cfg) == "A → B and 3×4"

    def test_custom_rule_with_builtin_disabled(self):
        """Custom rules still fire when a built-in rule is switched off."""
        cfg = RuleConfig(
            rules={'cjk_english_spacing': False},
            custom_rules=[{'name': 'arrow_fix', 'pattern': '->', 'replacement': '→'}],
        )
        # No CJK spacing, but the arrow substitution still happens.
        assert polish_text("文本English -> test", config=cfg) == "文本English → test"

    def test_custom_rule_order_after_builtins(self):
        """Built-ins run first; custom rules see their output."""
        cfg = RuleConfig(
            rules={'cjk_english_spacing': True},
            custom_rules=[{'name': 'space_to_underscore', 'pattern': r' ', 'replacement': '_'}],
        )
        # Spacing first yields "文本 English"; the custom rule then replaces
        # that inserted space with an underscore.
        assert polish_text("文本English", config=cfg) == "文本_English"
class TestPolishVerboseWithConfig:
    """polish_text_verbose() statistics under configuration."""

    def test_verbose_tracks_disabled_rules(self):
        """Disabled rules report zero applications; enabled ones still count."""
        cfg = RuleConfig(rules={'dash_conversion': False})
        _, stats = polish_text_verbose("文本English测试", config=cfg)
        assert stats.dash_converted == 0            # disabled (and no dashes anyway)
        assert stats.cjk_english_spacing_added > 0  # spacing rule still active

    def test_verbose_tracks_custom_rules(self):
        """Per-rule application counts are recorded for custom rules."""
        cfg = RuleConfig(custom_rules=[
            {'name': 'arrow_fix', 'pattern': '->', 'replacement': '→'},
        ])
        _, stats = polish_text_verbose("A -> B -> C", config=cfg)
        assert hasattr(stats, 'custom_rules_applied')
        assert stats.custom_rules_applied['arrow_fix'] == 2

    def test_verbose_summary_includes_custom_rules(self):
        """The formatted summary mentions custom rule activity."""
        cfg = RuleConfig(custom_rules=[
            {'name': 'arrow_fix', 'pattern': '->', 'replacement': '→'},
        ])
        _, stats = polish_text_verbose("A -> B", config=cfg)
        summary = stats.format_summary()
        assert 'arrow_fix' in summary or 'custom' in summary.lower()
class TestConfigNoneDefault:
    """config=None (or omitted) means all built-in rules are enabled."""

    def test_polish_without_config_uses_defaults(self):
        """Explicit config=None keeps backward-compatible default behavior."""
        assert polish_text("文本English混合", config=None) == "文本 English 混合"

    def test_polish_without_config_parameter(self):
        """Calling polish_text() with no config argument uses the defaults too."""
        out = polish_text("文本--内容 and English")
        assert "——" in out             # dash converted between CJK
        assert "文本 —— 内容" in out   # with correct spacing
        assert " English" in out       # CJK/English spacing applied
class TestInvalidCustomRules:
    """Broken custom rules are skipped rather than crashing the pipeline."""

    def test_invalid_regex_pattern(self):
        """A rule whose pattern fails to compile is silently skipped."""
        cfg = RuleConfig(custom_rules=[{
            'name': 'bad_rule',
            'pattern': '(?P<invalid',  # invalid regex
            'replacement': 'x',
        }])
        assert polish_text("test", config=cfg) == "test"  # rule skipped, text intact

    def test_missing_required_fields(self):
        """A rule lacking pattern/replacement is silently skipped."""
        cfg = RuleConfig(custom_rules=[{
            'name': 'incomplete',
            # 'pattern' and 'replacement' intentionally absent
        }])
        assert polish_text("test", config=cfg) == "test"  # rule skipped, text intact
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
tests/test_processors.py | Python | """Tests for file processors."""
import pytest
from pathlib import Path
from cjk_text_formatter.processors import (
TextProcessor,
MarkdownProcessor,
HTMLProcessor,
process_file,
find_files,
)
class TestTextProcessor:
    """Test plain text file processing."""
    def test_process_simple_text(self):
        out = TextProcessor().process("文本English混合,数字123也包含。")
        # CJK/English and CJK/digit boundaries get a space.
        assert "文本 English" in out
        assert "数字 123" in out
    def test_process_with_ellipsis(self):
        out = TextProcessor().process("wait . . . more text")
        # Spaced dots collapse into a standard ellipsis.
        assert out == "wait... more text"
    def test_process_with_em_dash(self):
        out = TextProcessor().process("《书名》--作者")
        # Double hyphen between CJK punctuation becomes an em-dash.
        assert out == "《书名》—— 作者"
class TestMarkdownProcessor:
    """Test markdown file processing.

    The processor must format prose while leaving fenced code, indented
    code, and inline code byte-for-byte untouched; list-item indentation
    (2 spaces) must be preserved without being mistaken for code (4+ spaces).
    """
    def test_preserve_code_blocks_fenced(self):
        """Fenced ``` blocks pass through unchanged."""
        processor = MarkdownProcessor()
        text = """文本内容
```python
def hello():
    print("world")
```
更多文本"""
        result = processor.process(text)
        # Code blocks should be preserved exactly
        assert '```python' in result
        assert 'def hello():' in result
        assert '    print("world")' in result
    def test_preserve_code_blocks_indented(self):
        """4-space indented code blocks pass through unchanged."""
        processor = MarkdownProcessor()
        text = """文本内容
    code line 1
    code line 2
更多文本"""
        result = processor.process(text)
        # Indented code blocks should be preserved
        assert '    code line 1' in result
        assert '    code line 2' in result
    def test_preserve_inline_code(self):
        """Backtick-delimited inline code passes through unchanged."""
        processor = MarkdownProcessor()
        text = "文本`code here`更多文本"
        result = processor.process(text)
        assert "`code here`" in result
    def test_format_text_outside_code(self):
        """Prose around a code fence is formatted; fence content is not."""
        processor = MarkdownProcessor()
        text = """# 标题Title
文本English混合
```python
# This should not be formatted
text--more
```
数字123也包含"""
        result = processor.process(text)
        # Text outside code should be formatted
        assert "文本 English" in result
        assert "数字 123" in result
        # Code inside should NOT be formatted
        assert "text--more" in result  # Should remain unchanged
    def test_preserve_indented_list_items(self):
        """Test that indented list items preserve their indentation."""
        processor = MarkdownProcessor()
        # Use 2-space indents (list items), not 4+ spaces (code blocks)
        text = """- 第一级English项目
  - 第二级nested项目
  - 另一个second项目
- 回到first级别"""
        result = processor.process(text)
        # List items should be formatted
        assert "第一级 English" in result
        assert "第二级 nested" in result
        assert "另一个 second" in result
        assert "回到 first" in result
        # But indentation should be preserved
        assert "  - 第二级 nested" in result
        assert "  - 另一个 second" in result
    def test_preserve_list_item_indentation_two_space(self):
        """Test that 2-space indented list items are processed (not treated as code)."""
        processor = MarkdownProcessor()
        text = """普通text行
  缩进的content行
回到normal级别"""
        result = processor.process(text)
        # Text should be formatted
        assert "普通 text" in result
        assert "缩进的 content" in result
        assert "回到 normal" in result
        # Indentation should be preserved
        lines = result.split('\n')
        assert lines[0] == "普通 text 行"
        assert lines[1].startswith("  ")  # 2 spaces preserved
        assert "缩进的 content 行" in lines[1]
        assert not lines[2].startswith(" ")  # No leading spaces
class TestHTMLProcessor:
    """Test HTML file processing."""
    def test_format_text_preserve_tags(self):
        out = HTMLProcessor().process("<p>文本English混合</p>")
        # Text content is formatted; markup survives intact.
        assert "文本 English" in out
        assert "<p>" in out
        assert "</p>" in out
    def test_preserve_code_tags(self):
        out = HTMLProcessor().process("<p>文本<code>text--more</code>内容</p>")
        # <code> content must pass through unformatted.
        assert "text--more" in out
        assert "<code>" in out
    def test_preserve_pre_tags(self):
        out = HTMLProcessor().process("<pre>text--more\nline2--end</pre>")
        # <pre> content must pass through unformatted, including newlines.
        assert "text--more" in out
        assert "line2--end" in out
    def test_format_nested_elements(self):
        html = """<div>
<h1>标题Title</h1>
<p>文本English混合</p>
</div>"""
        out = HTMLProcessor().process(html)
        # Text inside nested elements is formatted; structure is preserved.
        assert "标题 Title" in out
        assert "文本 English" in out
        assert "<div>" in out
        assert "</div>" in out
    def test_handle_attributes(self):
        out = HTMLProcessor().process('<a href="link">文本English</a>')
        # Attribute values are never treated as formattable text.
        assert "文本 English" in out
        assert 'href="link"' in out
class TestProcessFile:
    """Test file processing dispatcher."""
    def test_process_txt_file(self, tmp_path):
        path = tmp_path / "test.txt"
        path.write_text("文本English混合")
        assert "文本 English" in process_file(path)
    def test_process_md_file(self, tmp_path):
        path = tmp_path / "test.md"
        path.write_text("# 标题Title\n\n文本English混合")
        out = process_file(path)
        assert "标题 Title" in out
        assert "文本 English" in out
    def test_process_html_file(self, tmp_path):
        path = tmp_path / "test.html"
        path.write_text("<p>文本English混合</p>")
        out = process_file(path)
        assert "文本 English" in out
        assert "<p>" in out
    def test_unsupported_file_type(self, tmp_path):
        path = tmp_path / "test.xyz"
        path.write_text("content")
        # Unknown extensions must be rejected loudly, not silently skipped.
        with pytest.raises(ValueError, match="Unsupported file type"):
            process_file(path)
class TestFindFiles:
    """Test file finding functionality."""
    def test_find_single_file(self, tmp_path):
        target = tmp_path / "test.txt"
        target.write_text("content")
        found = find_files(target)
        assert len(found) == 1
        assert found[0] == target
    def test_find_files_in_directory_non_recursive(self, tmp_path):
        # Supported and unsupported files in the root.
        for name, body in [("file1.txt", "1"), ("file2.md", "2"),
                           ("file3.html", "3"), ("file4.xyz", "4")]:
            (tmp_path / name).write_text(body)
        # A nested file that non-recursive search must not see.
        nested = tmp_path / "sub"
        nested.mkdir()
        (nested / "file5.txt").write_text("5")
        found = find_files(tmp_path, recursive=False)
        # Only .txt/.md/.html directly under tmp_path qualify.
        assert len(found) == 3
        assert all(f.suffix in ['.txt', '.md', '.html'] for f in found)
        assert all(f.parent == tmp_path for f in found)
    def test_find_files_in_directory_recursive(self, tmp_path):
        (tmp_path / "file1.txt").write_text("1")
        nested = tmp_path / "sub"
        nested.mkdir()
        (nested / "file2.md").write_text("2")
        deeper = nested / "subsub"
        deeper.mkdir()
        (deeper / "file3.html").write_text("3")
        found = find_files(tmp_path, recursive=True)
        # Files from every nesting level are reported.
        assert len(found) == 3
        assert {f.name for f in found} == {"file1.txt", "file2.md", "file3.html"}
    def test_find_specific_extensions(self, tmp_path):
        (tmp_path / "file1.txt").write_text("1")
        (tmp_path / "file2.md").write_text("2")
        (tmp_path / "file3.html").write_text("3")
        # Restrict to a single extension.
        only_txt = find_files(tmp_path, extensions=['.txt'])
        assert len(only_txt) == 1
        assert only_txt[0].suffix == '.txt'
        # Restrict to two extensions.
        txt_md = find_files(tmp_path, extensions=['.txt', '.md'])
        assert len(txt_md) == 2
        assert all(f.suffix in ['.txt', '.md'] for f in txt_md)
    def test_directory_not_found(self, tmp_path):
        with pytest.raises(FileNotFoundError):
            find_files(tmp_path / "does_not_exist")
| xiaolai/cjk-text-formatter | 3 | A Python CLI tool for polishing text with Chinese typography rules | Python | xiaolai | xiaolai | inblockchain |
src/extension.ts | TypeScript | /**
* CJK Text Formatter - VS Code Extension
* Main extension entry point
*/
import * as vscode from 'vscode';
import { formatText, RuleConfig } from './formatter';
import { countWords, formatWordCount } from './wordCounter';
// Formatter status bar item; created lazily in activate() when the
// cjkFormatter.showStatusBar setting is enabled.
let statusBarItem: vscode.StatusBarItem;
// Word-count status bar item; created lazily in activate() when the
// cjkFormatter.wordCount.enabled setting is enabled (markdown only).
let wordCountStatusBarItem: vscode.StatusBarItem;
/**
 * Read formatter configuration from VS Code settings.
 *
 * Each rule toggle is read from the `cjkFormatter.rules.*` namespace with
 * the same default the formatter itself applies, so a missing setting
 * behaves identically to an unconfigured formatter.
 *
 * @returns A RuleConfig snapshot of the current user settings.
 */
function getFormatterConfig(): RuleConfig {
  const rulesConfig = vscode.workspace.getConfiguration('cjkFormatter.rules');
  const mainConfig = vscode.workspace.getConfiguration('cjkFormatter');
  return {
    ellipsisNormalization: rulesConfig.get('ellipsisNormalization', true),
    dashConversion: rulesConfig.get('dashConversion', true),
    emdashSpacing: rulesConfig.get('emdashSpacing', true),
    straightToCurlyQuotes: rulesConfig.get('straightToCurlyQuotes', true),
    quoteSpacing: rulesConfig.get('quoteSpacing', true),
    singleQuoteSpacing: rulesConfig.get('singleQuoteSpacing', true),
    cjkEnglishSpacing: rulesConfig.get('cjkEnglishSpacing', true),
    // FIX: this entry was missing. RuleConfig declares cjkParenthesisSpacing
    // and formatText defaults it to on, so without reading the setting here
    // users had no way to disable the rule from their configuration.
    cjkParenthesisSpacing: rulesConfig.get('cjkParenthesisSpacing', true),
    fullwidthPunctuation: rulesConfig.get('fullwidthPunctuation', true),
    fullwidthParentheses: rulesConfig.get('fullwidthParentheses', true),
    fullwidthBrackets: rulesConfig.get('fullwidthBrackets', false),
    fullwidthAlphanumeric: rulesConfig.get('fullwidthAlphanumeric', true),
    currencySpacing: rulesConfig.get('currencySpacing', true),
    slashSpacing: rulesConfig.get('slashSpacing', true),
    spaceCollapsing: rulesConfig.get('spaceCollapsing', true),
    customRules: mainConfig.get('customRules', []),
  };
}
/**
 * Whether formatting should apply to documents of the given language.
 * Driven by the `cjkFormatter.supportedLanguages` setting.
 */
function isSupportedLanguage(languageId: string): boolean {
  const defaults = ['markdown', 'plaintext', 'restructuredtext'];
  return vscode.workspace
    .getConfiguration('cjkFormatter')
    .get<string[]>('supportedLanguages', defaults)
    .includes(languageId);
}
/**
 * Format the entire active document in place.
 * Shows a transient status-bar message describing the outcome.
 */
async function formatDocument() {
  const editor = vscode.window.activeTextEditor;
  if (!editor) {
    vscode.window.showWarningMessage('No active editor found');
    return;
  }
  const { document } = editor;
  const original = document.getText();
  try {
    const formatted = formatText(original, getFormatterConfig());
    if (formatted === original) {
      updateStatusBarMessage('No changes needed', true);
    } else {
      // Replace the whole document in a single workspace edit so the
      // change is one undo step.
      const edit = new vscode.WorkspaceEdit();
      const wholeDoc = new vscode.Range(
        document.positionAt(0),
        document.positionAt(original.length)
      );
      edit.replace(document.uri, wholeDoc, formatted);
      await vscode.workspace.applyEdit(edit);
      updateStatusBarMessage('Formatted', true);
    }
    // Restore the idle status text after a short delay.
    setTimeout(() => updateFormatterStatusBar(), 2000);
  } catch (error) {
    vscode.window.showErrorMessage(`CJK Formatter Error: ${error}`);
    updateStatusBarMessage('Error', false);
  }
}
/**
 * Format only the current selection in the active editor.
 * No-ops with a hint when nothing is selected.
 */
async function formatSelection() {
  const editor = vscode.window.activeTextEditor;
  if (!editor) {
    vscode.window.showWarningMessage('No active editor found');
    return;
  }
  const selection = editor.selection;
  if (selection.isEmpty) {
    vscode.window.showInformationMessage('No text selected. Use "CJK: Format Document" to format entire document.');
    return;
  }
  const original = editor.document.getText(selection);
  try {
    const formatted = formatText(original, getFormatterConfig());
    if (formatted === original) {
      updateStatusBarMessage('No changes needed', true);
    } else {
      await editor.edit(builder => {
        builder.replace(selection, formatted);
      });
      updateStatusBarMessage('Selection formatted', true);
    }
    // Restore the idle status text after a short delay.
    setTimeout(() => updateFormatterStatusBar(), 2000);
  } catch (error) {
    vscode.window.showErrorMessage(`CJK Formatter Error: ${error}`);
    updateStatusBarMessage('Error', false);
  }
}
/**
 * Show or hide the formatter status bar item based on the active editor's
 * language; when visible, reset it to the idle "Ready" state.
 */
function updateFormatterStatusBar() {
  if (!statusBarItem) {
    return;
  }
  const editor = vscode.window.activeTextEditor;
  // Hidden when there is no editor or the language is not supported.
  if (!editor || !isSupportedLanguage(editor.document.languageId)) {
    statusBarItem.hide();
    return;
  }
  statusBarItem.text = `$(edit) CJK: Ready`;
  statusBarItem.tooltip = 'Click to format document';
  statusBarItem.command = 'cjk-formatter.formatDocument';
  statusBarItem.show();
}
/**
 * Put a message into the formatter status bar item.
 *
 * @param text      Message shown after the "CJK:" prefix.
 * @param temporary When true the tooltip mirrors the message and the click
 *                  command is left as-is; otherwise the idle tooltip/command
 *                  are restored.
 */
function updateStatusBarMessage(text: string, temporary = false) {
  if (!statusBarItem) {
    return;
  }
  statusBarItem.text = `$(edit) CJK: ${text}`;
  if (temporary) {
    statusBarItem.tooltip = text;
  } else {
    statusBarItem.tooltip = 'Click to format document';
    statusBarItem.command = 'cjk-formatter.formatDocument';
  }
  statusBarItem.show();
}
/**
* Handle format on save
*/
async function onWillSaveDocument(event: vscode.TextDocumentWillSaveEvent) {
const config = vscode.workspace.getConfiguration('cjkFormatter');
const formatOnSave = config.get('formatOnSave', false);
if (!formatOnSave) {
return;
}
// Only format supported languages
const document = event.document;
if (!isSupportedLanguage(document.languageId)) {
return;
}
const text = document.getText();
const formatterConfig = getFormatterConfig();
const formatted = formatText(text, formatterConfig);
if (formatted !== text) {
const edit = new vscode.WorkspaceEdit();
const fullRange = new vscode.Range(
document.positionAt(0),
document.positionAt(text.length)
);
edit.replace(document.uri, fullRange, formatted);
event.waitUntil(vscode.workspace.applyEdit(edit));
}
}
/**
 * Refresh the word-count status bar item for the active markdown editor.
 * Counts the selection when one exists, otherwise the whole document.
 */
function updateWordCount() {
  if (!wordCountStatusBarItem) {
    return;
  }
  const editor = vscode.window.activeTextEditor;
  // Word count is only shown for markdown documents.
  if (!editor || editor.document.languageId !== 'markdown') {
    wordCountStatusBarItem.hide();
    return;
  }
  const config = vscode.workspace.getConfiguration('cjkFormatter.wordCount');
  const format = config.get<'total' | 'detailed'>('format', 'total');
  const selection = editor.selection;
  const countingSelection = !selection.isEmpty;
  const text = countingSelection
    ? editor.document.getText(selection)
    : editor.document.getText();
  const result = countWords(text);
  const displayText = formatWordCount(result, format);
  if (countingSelection) {
    wordCountStatusBarItem.text = `$(symbol-string) Selection: ${result.total.toLocaleString()}`;
    wordCountStatusBarItem.tooltip = `Selection Word Count\n${displayText}\nCharacters: ${result.chars.toLocaleString()}`;
  } else {
    wordCountStatusBarItem.text = `$(symbol-string) ${displayText}`;
    wordCountStatusBarItem.tooltip = `Document Word Count\nCJK: ${result.cjk.toLocaleString()}\nEnglish: ${result.english.toLocaleString()}\nTotal: ${result.total.toLocaleString()}\nCharacters: ${result.chars.toLocaleString()}`;
  }
  wordCountStatusBarItem.show();
}
/**
 * Extension activation.
 *
 * Sets up (honouring their enable settings) the formatter status bar item,
 * the word-count status bar item, the two formatting commands, the
 * format-on-save hook, and a configuration listener that creates/destroys
 * the status bar items when their settings are toggled at runtime.
 */
export function activate(context: vscode.ExtensionContext) {
  console.log('CJK Text Formatter is now active');
  // Create formatter status bar item
  const showStatusBar = vscode.workspace.getConfiguration('cjkFormatter').get('showStatusBar', true);
  if (showStatusBar) {
    statusBarItem = vscode.window.createStatusBarItem(
      vscode.StatusBarAlignment.Right,
      100
    );
    context.subscriptions.push(statusBarItem);
    // Update status bar on editor change
    const formatterEditorChangeHandler = vscode.window.onDidChangeActiveTextEditor(() => {
      updateFormatterStatusBar();
    });
    context.subscriptions.push(formatterEditorChangeHandler);
    // Initial status bar update
    updateFormatterStatusBar();
  }
  // Create word count status bar item
  const wordCountConfig = vscode.workspace.getConfiguration('cjkFormatter.wordCount');
  const wordCountEnabled = wordCountConfig.get('enabled', true);
  if (wordCountEnabled) {
    wordCountStatusBarItem = vscode.window.createStatusBarItem(
      vscode.StatusBarAlignment.Right,
      99 // Position next to formatter status bar
    );
    context.subscriptions.push(wordCountStatusBarItem);
    // Update word count on editor change
    const editorChangeHandler = vscode.window.onDidChangeActiveTextEditor(() => {
      updateWordCount();
    });
    context.subscriptions.push(editorChangeHandler);
    // Update word count on document change
    const documentChangeHandler = vscode.workspace.onDidChangeTextDocument(event => {
      const editor = vscode.window.activeTextEditor;
      if (editor && event.document === editor.document) {
        updateWordCount();
      }
    });
    context.subscriptions.push(documentChangeHandler);
    // Update word count on selection change
    const selectionChangeHandler = vscode.window.onDidChangeTextEditorSelection(event => {
      const editor = vscode.window.activeTextEditor;
      if (editor && event.textEditor === editor) {
        updateWordCount();
      }
    });
    context.subscriptions.push(selectionChangeHandler);
    // Initial word count update
    updateWordCount();
  }
  // Register format document command
  const formatDocumentCommand = vscode.commands.registerCommand(
    'cjk-formatter.formatDocument',
    formatDocument
  );
  context.subscriptions.push(formatDocumentCommand);
  // Register format selection command
  const formatSelectionCommand = vscode.commands.registerCommand(
    'cjk-formatter.formatSelection',
    formatSelection
  );
  context.subscriptions.push(formatSelectionCommand);
  // Register format on save handler
  const willSaveHandler = vscode.workspace.onWillSaveTextDocument(onWillSaveDocument);
  context.subscriptions.push(willSaveHandler);
  // Listen to configuration changes
  const configChangeHandler = vscode.workspace.onDidChangeConfiguration(event => {
    if (event.affectsConfiguration('cjkFormatter.showStatusBar')) {
      const show = vscode.workspace.getConfiguration('cjkFormatter').get('showStatusBar', true);
      if (show && !statusBarItem) {
        statusBarItem = vscode.window.createStatusBarItem(
          vscode.StatusBarAlignment.Right,
          100
        );
        context.subscriptions.push(statusBarItem);
        updateFormatterStatusBar();
      } else if (!show && statusBarItem) {
        statusBarItem.dispose();
        // FIX: clear the stale reference, mirroring the word-count branch
        // below. Previously the disposed item was kept, so `!statusBarItem`
        // never became true again and re-enabling the setting was a no-op.
        statusBarItem = undefined as any;
      }
    }
    if (event.affectsConfiguration('cjkFormatter.supportedLanguages')) {
      updateFormatterStatusBar();
    }
    if (event.affectsConfiguration('cjkFormatter.wordCount.enabled')) {
      const enabled = vscode.workspace.getConfiguration('cjkFormatter.wordCount').get('enabled', true);
      if (enabled && !wordCountStatusBarItem) {
        wordCountStatusBarItem = vscode.window.createStatusBarItem(
          vscode.StatusBarAlignment.Right,
          99
        );
        context.subscriptions.push(wordCountStatusBarItem);
        updateWordCount();
      } else if (!enabled && wordCountStatusBarItem) {
        wordCountStatusBarItem.dispose();
        wordCountStatusBarItem = undefined as any;
      }
    }
    if (event.affectsConfiguration('cjkFormatter.wordCount.format')) {
      updateWordCount();
    }
  });
  context.subscriptions.push(configChangeHandler);
}
/**
 * Extension deactivation — release both status bar items if they exist.
 */
export function deactivate() {
  statusBarItem?.dispose();
  wordCountStatusBarItem?.dispose();
}
| xiaolai/cjk-text-formatter-vscode | 4 | VS Code extension for formatting CJK (Chinese, Japanese, Korean) and English mixed text with proper typography rules | TypeScript | xiaolai | xiaolai | inblockchain |
src/formatter.ts | TypeScript | /**
* CJK Text Formatter - Core formatting logic
* Ported from Python cjk-text-formatter project
*/
// CJK character ranges (regex-source strings, hence the doubled backslashes;
// they are interpolated into `new RegExp(...)` below)
const HAN = '\\u4e00-\\u9fff'; // Chinese characters + Japanese Kanji
const HIRAGANA = '\\u3040-\\u309f'; // Japanese Hiragana
const KATAKANA = '\\u30a0-\\u30ff'; // Japanese Katakana
const HANGUL = '\\uac00-\\ud7af'; // Korean Hangul
// Combined patterns
const CJK_ALL = `${HAN}${HIRAGANA}${KATAKANA}${HANGUL}`;
const CJK_NO_KOREAN = `${HAN}${HIRAGANA}${KATAKANA}`; // punctuation rules skip Hangul
// CJK punctuation constants (full-width characters)
const CJK_TERMINAL_PUNCTUATION = ',。!?;:、';
const CJK_CLOSING_BRACKETS = '》」』】)〉';
const CJK_OPENING_BRACKETS = '《「『【(〈';
const CJK_EM_DASH = '——'; // two U+2014 characters, the CJK-style em-dash
// CJK characters pattern for dash conversion (Han + kana + common punctuation;
// Hangul intentionally excluded)
const CJK_CHARS_PATTERN = `[${HAN}${HIRAGANA}${KATAKANA}《》「」『』【】()〈〉,。!?;:、]`;
// Pre-compiled regex patterns (hoisted so they are built once per module load)
const CHINESE_RE = /[\u4e00-\u9fff]/; // gate for the CJK-specific pipeline
const ELLIPSIS_PATTERN = /\s*\.\s+\.\s+\.(?:\s+\.)*/g; // spaced dots: ". . ."
const ELLIPSIS_SPACING_PATTERN = /\.\.\.\s*(?=\S)/g; // "..." glued to next word
const CURRENCY_SPACING_PATTERN = /([$¥€£₹]|USD|CNY|EUR|GBP)\s+(\d)/g;
const SLASH_SPACING_PATTERN = /(?<![/:])\s*\/\s*(?!\/)/g; // skips "//" and "://"
const TRAILING_SPACE_PATTERN = / +$/gm;
const EXCESSIVE_NEWLINE_PATTERN = /\n{3,}/g; // 3+ newlines → one blank line
const CJK_OPENING_PAREN_PATTERN = new RegExp(`([${CJK_ALL}])\\(`, 'g');
const CLOSING_PAREN_CJK_PATTERN = new RegExp(`\\)([${CJK_ALL}])`, 'g');
// Custom rule interface
/** A user-defined regex find/replace rule, applied after all built-in rules. */
export interface CustomRule {
  /** Identifier used in warnings (and required for the rule to run). */
  name: string;
  /** JavaScript regex source; compiled with the global flag at apply time. */
  pattern: string;
  /** Replacement string; may use $1-style capture references. */
  replacement: string;
  /** Optional human-readable explanation of the rule. */
  description?: string;
  /** Set to false to keep the rule configured but inactive. */
  enabled?: boolean;
}
// Rule configuration interface
/**
 * Per-rule toggles for formatText. Unset rules fall back to the defaults
 * declared in formatText (everything on except fullwidthBrackets).
 */
export interface RuleConfig {
  ellipsisNormalization?: boolean;
  dashConversion?: boolean;
  emdashSpacing?: boolean;
  straightToCurlyQuotes?: boolean;
  quoteSpacing?: boolean;
  singleQuoteSpacing?: boolean;
  cjkEnglishSpacing?: boolean;
  cjkParenthesisSpacing?: boolean;
  fullwidthPunctuation?: boolean;
  fullwidthParentheses?: boolean;
  fullwidthBrackets?: boolean;
  fullwidthAlphanumeric?: boolean;
  currencySpacing?: boolean;
  slashSpacing?: boolean;
  spaceCollapsing?: boolean;
  customRules?: CustomRule[];
}
/**
 * True when the text contains at least one Han character (CJK Unified
 * Ideographs block). Acts as the gate for the CJK-specific pipeline.
 */
function containsCJK(text: string): boolean {
  return /[\u4e00-\u9fff]/.test(text);
}
/**
 * Normalize spaced ellipsis patterns (". . ." with any run of dots) to a
 * standard "..." and ensure exactly one space follows it before a word.
 */
function normalizeEllipsis(text: string): string {
  return text
    .replace(/\s*\.\s+\.\s+\.(?:\s+\.)*/g, '...')
    .replace(/\.\.\.\s*(?=\S)/g, '... ');
}
/**
 * Convert a run of two-or-more hyphens to the CJK em-dash (——) when it sits
 * between CJK characters/punctuation, normalizing the surrounding spaces.
 */
function replaceDash(text: string): string {
  // Han + kana + common CJK punctuation; Hangul intentionally excluded,
  // matching the scope of the module-level CJK_CHARS_PATTERN.
  const cjkClass =
    '[\\u4e00-\\u9fff\\u3040-\\u309f\\u30a0-\\u30ff《》「」『』【】()〈〉,。!?;:、]';
  const dashRun = new RegExp(`(${cjkClass})\\s*-{2,}\\s*(${cjkClass})`, 'g');
  return text.replace(dashRun, (_m, left, right) => {
    // A closing quote/paren hugs the dash on the left; an opening one on the right.
    const pre = left === ')' || left === '》' ? '' : ' ';
    const post = right === '(' || right === '《' ? '' : ' ';
    return `${left}${pre}——${post}${right}`;
  });
}
/**
 * Normalize the spacing around existing —— (em-dash) characters: one space
 * on each side, except none next to a closing/opening CJK quote or paren.
 */
function fixEmdashSpacing(text: string): string {
  return text.replace(/([^\s])\s*——\s*([^\s])/g, (_m, left, right) => {
    const pre = left === ')' || left === '》' ? '' : ' ';
    const post = right === '(' || right === '《' ? '' : ' ';
    return `${left}${pre}——${post}${right}`;
  });
}
/**
 * Convert straight double quotes to curly quotes when the quoted run
 * contains at least one CJK character; purely-English quotes are left alone.
 * Example: "中文" → “中文”, "English" stays unchanged.
 */
function convertStraightDoubleQuotes(text: string): string {
  // U+201C / U+201D are the left/right double quotation marks.
  const cjkQuoted =
    /"([^"]*[\u4e00-\u9fff\u3040-\u309f\u30a0-\u30ff\uac00-\ud7af][^"]*)"/g;
  return text.replace(cjkQuoted, '\u201c$1\u201d');
}
/**
 * Convert straight single quotes to curly quotes when the quoted run
 * contains at least one CJK character. English apostrophes (don't, it's)
 * are unaffected because they never form a balanced pair around CJK text.
 * Example: '中文' → ‘中文’, don't stays unchanged.
 */
function convertStraightSingleQuotes(text: string): string {
  // U+2018 / U+2019 are the left/right single quotation marks.
  const cjkQuoted =
    /'([^']*[\u4e00-\u9fff\u3040-\u309f\u30a0-\u30ff\uac00-\ud7af][^']*)'/g;
  return text.replace(cjkQuoted, '\u2018$1\u2019');
}
/**
 * Convert straight double then single quotes to curly quotes when they
 * surround CJK text.
 */
function convertStraightToCurlyQuotes(text: string): string {
  return convertStraightSingleQuotes(convertStraightDoubleQuotes(text));
}
/**
 * Fix spacing around quotation marks with smart CJK punctuation handling.
 *
 * Inserts a single space between a quote mark and an adjacent word character
 * (Latin, digit, CJK), but suppresses the space when the neighbour is CJK
 * punctuation that already provides visual separation, or the CJK em-dash.
 *
 * @param openingQuote opening quote character (e.g. U+201C or U+2018)
 * @param closingQuote matching closing quote character
 */
function fixQuoteSpacing(text: string, openingQuote: string, closingQuote: string): string {
  // All punctuation that should not have space before opening quote
  const noSpaceBefore = CJK_CLOSING_BRACKETS + CJK_TERMINAL_PUNCTUATION;
  // All punctuation that should not have space after closing quote
  const noSpaceAfter = CJK_OPENING_BRACKETS + CJK_TERMINAL_PUNCTUATION;
  // Add space before quote if preceded by alphanumeric/CJK/em-dash.
  // The no-space characters are deliberately part of the character class so
  // the callback can decide, per capture, whether a space is needed.
  const beforePattern = new RegExp(
    `([A-Za-z0-9${CJK_ALL}${CJK_CLOSING_BRACKETS}${CJK_TERMINAL_PUNCTUATION}]|${CJK_EM_DASH})${openingQuote}`,
    'g'
  );
  text = text.replace(beforePattern, (_match, before) => {
    // The em-dash alternative captures two characters, so it is checked
    // separately from the single-character includes() test.
    if (noSpaceBefore.includes(before) || before === CJK_EM_DASH) {
      return `${before}${openingQuote}`;
    }
    return `${before} ${openingQuote}`;
  });
  // Add space after quote if followed by alphanumeric/CJK/em-dash
  const afterPattern = new RegExp(
    `${closingQuote}([A-Za-z0-9${CJK_ALL}${CJK_OPENING_BRACKETS}${CJK_TERMINAL_PUNCTUATION}]|${CJK_EM_DASH})`,
    'g'
  );
  text = text.replace(afterPattern, (_match, after) => {
    if (noSpaceAfter.includes(after) || after === CJK_EM_DASH) {
      return `${closingQuote}${after}`;
    }
    return `${closingQuote} ${after}`;
  });
  return text;
}
/**
 * Fix spacing around curly double quotes (U+201C “ / U+201D ”).
 */
function fixQuotes(text: string): string {
  return fixQuoteSpacing(text, '\u201c', '\u201d');
}
/**
 * Fix spacing around curly single quotes (U+2018 ‘ / U+2019 ’).
 */
function fixSingleQuotes(text: string): string {
  return fixQuoteSpacing(text, '\u2018', '\u2019');
}
/**
 * Convert half-width terminal punctuation to its full-width form when it is
 * adjacent to CJK text (between two CJK chars, or CJK followed by end/space).
 * Hangul is excluded, as Korean conventionally uses half-width punctuation.
 */
function normalizeFullwidthPunctuation(text: string): string {
  const cjk = '\\u4e00-\\u9fff\\u3040-\\u309f\\u30a0-\\u30ff'; // no Hangul
  const pairs: Array<[string, string]> = [
    [',', ','],
    ['.', '。'],
    ['!', '!'],
    ['?', '?'],
    [';', ';'],
    [':', ':'],
  ];
  for (const [half, full] of pairs) {
    // CJK + half + CJK → CJK + full + CJK
    const between = new RegExp(`([${cjk}])\\${half}([${cjk}])`, 'g');
    text = text.replace(between, `$1${full}$2`);
    // CJK + half + (whitespace or end of string) → CJK + full
    const trailing = new RegExp(`([${cjk}])\\${half}(?=\\s|$)`, 'g');
    text = text.replace(trailing, `$1${full}`);
  }
  return text;
}
/**
 * Convert half-width parentheses to full-width when their content starts
 * with a CJK character (Hangul excluded).
 */
function normalizeFullwidthParentheses(text: string): string {
  const cjkContent = /\(([\u4e00-\u9fff\u3040-\u309f\u30a0-\u30ff][^()]*)\)/g;
  return text.replace(cjkContent, '($1)');
}
/**
 * Convert half-width square brackets to CJK lenticular brackets 【】 when
 * their content starts with a CJK character (Hangul excluded).
 */
function normalizeFullwidthBrackets(text: string): string {
  const cjkContent = /\[([\u4e00-\u9fff\u3040-\u309f\u30a0-\u30ff][^\[\]]*)\]/g;
  return text.replace(cjkContent, '【$1】');
}
/**
 * Convert full-width ASCII letters and digits to their half-width forms.
 * Full-width 0-9 (U+FF10-FF19), A-Z (U+FF21-FF3A) and a-z (U+FF41-FF5A)
 * all sit exactly 0xFEE0 above their ASCII counterparts.
 */
function normalizeFullwidthAlphanumeric(text: string): string {
  return text.replace(/[\uFF10-\uFF19\uFF21-\uFF3A\uFF41-\uFF5A]/g, ch =>
    String.fromCharCode(ch.charCodeAt(0) - 0xfee0)
  );
}
/**
 * Remove whitespace between a currency symbol/code and the digits that
 * follow it (e.g. "$ 100" → "$100").
 */
function fixCurrencySpacing(text: string): string {
  return text.replace(/([$¥€£₹]|USD|CNY|EUR|GBP)\s+(\d)/g, '$1$2');
}
/**
 * Remove spaces around single slashes ("a / b" → "a/b") while leaving
 * "//" runs and "://" protocol separators untouched.
 */
function fixSlashSpacing(text: string): string {
  return text.replace(/(?<![/:])\s*\/\s*(?!\/)/g, '/');
}
/**
 * Insert a space at every CJK ↔ alphanumeric boundary. The alphanumeric run
 * may carry a leading currency symbol and a trailing unit suffix so those
 * travel with the word (e.g. "共$100元" → "共 $100 元").
 */
function spaceBetween(text: string): string {
  const cjk = '\\u4e00-\\u9fff\\u3040-\\u309f\\u30a0-\\u30ff\\uac00-\\ud7af';
  // Latin/digit run with optional currency prefix and measurement suffix.
  const word =
    '(?:[$¥€£₹][ ]?)?[A-Za-z0-9]+(?:[%‰℃℉]|°[CcFf]?|[ ]?(?:USD|CNY|EUR|GBP|RMB))?';
  text = text.replace(new RegExp(`([${cjk}])(${word})`, 'g'), '$1 $2');
  text = text.replace(new RegExp(`(${word})([${cjk}])`, 'g'), '$1 $2');
  return text;
}
/**
 * Insert a space between a CJK character and an adjacent half-width
 * parenthesis, on both sides: "中(x)文" → "中 (x) 文".
 */
function fixCJKParenthesisSpacing(text: string): string {
  const cjk = '[\\u4e00-\\u9fff\\u3040-\\u309f\\u30a0-\\u30ff\\uac00-\\ud7af]';
  text = text.replace(new RegExp(`(${cjk})\\(`, 'g'), '$1 (');
  text = text.replace(new RegExp(`\\)(${cjk})`, 'g'), ') $1');
  return text;
}
/**
 * Collapse runs of multiple spaces to one, per line, while preserving
 * leading indentation and normalizing markdown list markers to exactly one
 * trailing space.
 */
function collapseSpaces(text: string): string {
  // "-", "*", "+" or "1." / "1)" list markers, with their indent captured.
  const listItem = /^(\s*)([-*+]|\d+[.)])(\s+)(.*)$/;
  // Only squeezes runs that follow a non-space char, so indentation survives.
  const squeeze = (s: string) => s.replace(/(\S) {2,}/g, '$1 ');
  return text
    .split('\n')
    .map(line => {
      const m = line.match(listItem);
      if (m) {
        const [, indent, marker, , rest] = m;
        return `${indent}${marker} ${squeeze(rest)}`;
      }
      return squeeze(line);
    })
    .join('\n');
}
/**
 * Apply user-supplied regex rules, in order, after the built-in passes.
 * Disabled rules, rules missing required fields, and rules with invalid
 * regex syntax are skipped (the latter with a console warning).
 */
function applyCustomRules(text: string, customRules?: CustomRule[]): string {
  if (!customRules?.length) {
    return text;
  }
  for (const rule of customRules) {
    if (rule.enabled === false) {
      continue; // explicitly disabled
    }
    if (!rule.name || !rule.pattern || rule.replacement === undefined) {
      continue; // missing a required field
    }
    try {
      text = text.replace(new RegExp(rule.pattern, 'g'), rule.replacement);
    } catch (error) {
      // Bad pattern: warn and move on rather than break formatting.
      console.warn(`Invalid regex pattern in custom rule "${rule.name}":`, error);
    }
  }
  return text;
}
/**
 * Main formatting function.
 *
 * Pipeline: universal normalization (ellipsis) → CJK-specific rules (only
 * when the text contains Han characters) → excessive-newline cleanup →
 * custom regex rules → trim. Rules left unset in `config` keep their
 * defaults; `fullwidthBrackets` is the only rule that defaults to off.
 *
 * @param text   Input text to format.
 * @param config Optional per-rule overrides and custom rules.
 * @returns Formatted, trimmed text.
 */
export function formatText(text: string, config?: RuleConfig): string {
  // Default: all rules enabled (except fullwidthBrackets).
  const rules: Required<Omit<RuleConfig, 'customRules'>> & Pick<RuleConfig, 'customRules'> = {
    ellipsisNormalization: true,
    dashConversion: true,
    emdashSpacing: true,
    straightToCurlyQuotes: true,
    quoteSpacing: true,
    singleQuoteSpacing: true,
    cjkEnglishSpacing: true,
    cjkParenthesisSpacing: true,
    fullwidthPunctuation: true,
    fullwidthParentheses: true,
    fullwidthBrackets: false, // Off by default
    fullwidthAlphanumeric: true,
    currencySpacing: true,
    slashSpacing: true,
    spaceCollapsing: true,
    customRules: config?.customRules ?? [],
  };
  // FIX: merge caller overrides while skipping keys explicitly set to
  // undefined. The previous `...config` spread let a legal value like
  // `{ dashConversion: undefined }` clobber the default and silently
  // disable the rule.
  if (config) {
    for (const [key, value] of Object.entries(config)) {
      if (value !== undefined) {
        (rules as Record<string, unknown>)[key] = value;
      }
    }
  }
  // Universal normalization (applies to all languages)
  if (rules.ellipsisNormalization) {
    text = normalizeEllipsis(text);
  }
  // CJK-specific polishing (triggered by presence of Han characters)
  if (containsCJK(text)) {
    // Normalization rules (run first so later rules see canonical widths)
    if (rules.fullwidthAlphanumeric) {
      text = normalizeFullwidthAlphanumeric(text);
    }
    if (rules.fullwidthPunctuation) {
      text = normalizeFullwidthPunctuation(text);
    }
    if (rules.fullwidthParentheses) {
      text = normalizeFullwidthParentheses(text);
    }
    if (rules.fullwidthBrackets) {
      text = normalizeFullwidthBrackets(text);
    }
    // Em-dash and quote rules
    if (rules.dashConversion) {
      text = replaceDash(text);
    }
    if (rules.emdashSpacing) {
      text = fixEmdashSpacing(text);
    }
    // Convert straight quotes to curly BEFORE applying quote spacing
    if (rules.straightToCurlyQuotes) {
      text = convertStraightToCurlyQuotes(text);
    }
    if (rules.quoteSpacing) {
      text = fixQuotes(text);
    }
    if (rules.singleQuoteSpacing) {
      text = fixSingleQuotes(text);
    }
    // Spacing rules
    if (rules.cjkEnglishSpacing) {
      text = spaceBetween(text);
    }
    if (rules.cjkParenthesisSpacing) {
      text = fixCJKParenthesisSpacing(text);
    }
    if (rules.currencySpacing) {
      text = fixCurrencySpacing(text);
    }
    if (rules.slashSpacing) {
      text = fixSlashSpacing(text);
    }
    // Cleanup rules (preserves markdown list indentation)
    if (rules.spaceCollapsing) {
      text = collapseSpaces(text);
    }
    // Remove trailing spaces at end of lines
    text = text.replace(TRAILING_SPACE_PATTERN, '');
  }
  // Collapse excessive newlines (3+) to max 2 (one blank line)
  text = text.replace(EXCESSIVE_NEWLINE_PATTERN, '\n\n');
  // Apply custom regex rules (runs after all built-in rules)
  text = applyCustomRules(text, rules.customRules);
  return text.trim();
}
| xiaolai/cjk-text-formatter-vscode | 4 | VS Code extension for formatting CJK (Chinese, Japanese, Korean) and English mixed text with proper typography rules | TypeScript | xiaolai | xiaolai | inblockchain |
src/wordCounter.ts | TypeScript | /**
* Word Counter for Markdown Files
* Counts words in CJK/English mixed text, excluding markdown formatting
*/
// CJK character ranges (reused from formatter.ts)
const HAN = '\\u4e00-\\u9fff'; // Chinese characters + Japanese Kanji
const HIRAGANA = '\\u3040-\\u309f'; // Japanese Hiragana
const KATAKANA = '\\u30a0-\\u30ff'; // Japanese Katakana
const HANGUL = '\\uac00-\\ud7af'; // Korean Hangul
// Character-class body covering every CJK range above.
const CJK_ALL = `${HAN}${HIRAGANA}${KATAKANA}${HANGUL}`;
// Regex patterns
// Matches one CJK character; global so String#match returns every occurrence.
const CJK_PATTERN = new RegExp(`[${CJK_ALL}]`, 'g');
// Matches a maximal run of ASCII letters/digits bounded by word breaks.
const ALPHANUMERIC_PATTERN = /\b[A-Za-z0-9]+\b/g;
/**
 * Result of counting words in a markdown document.
 * In CJK text each character counts as one "word"; in Latin text a word is
 * an alphanumeric run. `total` is the sum of both.
 */
export interface WordCountResult {
  cjk: number; // CJK character count
  english: number; // English/alphanumeric word count
  total: number; // Total word count
  chars: number; // Total character count (excluding spaces)
}
/**
 * Strip markdown formatting syntax from text
 * This removes formatting while preserving the actual content
 *
 * Order matters: code spans are removed first so their contents cannot be
 * mistaken for other markdown; images are removed before links so the link
 * pattern does not consume them; stray symbol cleanup runs last.
 */
function stripMarkdown(text: string): string {
  let stripped = text;
  // Remove code blocks (fenced and indented)
  stripped = stripped.replace(/```[\s\S]*?```/g, ''); // Fenced code blocks
  stripped = stripped.replace(/~~~[\s\S]*?~~~/g, ''); // Alternative fenced code blocks
  stripped = stripped.replace(/^(?: {4}|\t).+$/gm, ''); // Indented code blocks
  // Remove inline code
  stripped = stripped.replace(/`[^`\n]+`/g, '');
  // Remove HTML comments
  stripped = stripped.replace(/<!--[\s\S]*?-->/g, '');
  // Remove YAML front matter.
  // BUGFIX: anchored to the very start of the document (no `m` flag) so a
  // pair of horizontal rules later in the text is not deleted as front matter.
  stripped = stripped.replace(/^---\r?\n[\s\S]*?\r?\n---[ \t]*(\r?\n|$)/, '');
  // Remove horizontal rules
  stripped = stripped.replace(/^(?:[-*_]){3,}$/gm, '');
  // Remove headers (keep the text, remove the # symbols)
  stripped = stripped.replace(/^#{1,6}\s+/gm, '');
  // Remove blockquote markers (keep the text)
  stripped = stripped.replace(/^>\s+/gm, '');
  // Remove list markers (keep the text)
  stripped = stripped.replace(/^[\s]*[-*+]\s+/gm, ''); // Unordered lists
  stripped = stripped.replace(/^[\s]*\d+\.\s+/gm, ''); // Ordered lists
  // Remove images: ![alt](url) -> (remove completely)
  // BUGFIX: must run BEFORE link removal — otherwise the link pattern
  // matches the "[alt](url)" part of an image and leaves "!alt" behind.
  stripped = stripped.replace(/!\[([^\]]*)\]\([^)]+\)/g, '');
  // Remove links but keep link text: [text](url) -> text
  stripped = stripped.replace(/\[([^\]]+)\]\([^)]+\)/g, '$1');
  // Remove reference-style links: [text][ref] -> text
  stripped = stripped.replace(/\[([^\]]+)\]\[[^\]]*\]/g, '$1');
  // Remove link references: [ref]: url
  stripped = stripped.replace(/^\[[^\]]+\]:\s+.+$/gm, '');
  // Remove bold: **text** or __text__ -> text
  stripped = stripped.replace(/\*\*([^*]+)\*\*/g, '$1');
  stripped = stripped.replace(/__([^_]+)__/g, '$1');
  // Remove italic: *text* or _text_ -> text
  stripped = stripped.replace(/\*([^*]+)\*/g, '$1');
  stripped = stripped.replace(/_([^_]+)_/g, '$1');
  // Remove strikethrough: ~~text~~ -> text
  stripped = stripped.replace(/~~([^~]+)~~/g, '$1');
  // Remove HTML tags
  stripped = stripped.replace(/<[^>]+>/g, '');
  // Remove footnotes: [^1] or [^note]
  stripped = stripped.replace(/\[\^[^\]]+\]/g, '');
  // Remove task list markers: - [ ] or - [x]
  stripped = stripped.replace(/^[\s]*-\s+\[[x ]\]\s+/gmi, '');
  // Remove tables (simple approach - remove entire table structure)
  stripped = stripped.replace(/^\|.+\|$/gm, '');
  stripped = stripped.replace(/^\|?[\s]*:?-+:?[\s]*\|.*$/gm, '');
  // Remove remaining isolated markdown symbols
  stripped = stripped.replace(/[*_~`#]/g, '');
  return stripped;
}
/**
 * Count CJK characters in text
 *
 * Matches Han, Hiragana, Katakana, and Hangul code points (the same ranges
 * as the module-level CJK_PATTERN); each CJK character counts as one.
 */
function countCJKCharacters(text: string): number {
  const cjkChar = /[\u4e00-\u9fff\u3040-\u309f\u30a0-\u30ff\uac00-\ud7af]/g;
  let count = 0;
  while (cjkChar.exec(text) !== null) {
    count += 1;
  }
  return count;
}
/**
 * Count English/alphanumeric words (including numbers)
 *
 * A "word" is a maximal run of ASCII letters/digits bounded by word breaks.
 */
function countAlphanumericWords(text: string): number {
  const words = text.match(/\b[A-Za-z0-9]+\b/g) ?? [];
  return words.length;
}
/**
 * Count total characters excluding whitespace
 *
 * The result is in UTF-16 code units, matching String#length semantics.
 */
function countCharacters(text: string): number {
  // Splitting on each whitespace character and re-joining is equivalent to
  // deleting all whitespace, then measuring what remains.
  return text.split(/\s/).join('').length;
}
/**
 * Main word counting function for markdown text
 * Strips markdown formatting and counts CJK characters + English words
 */
export function countWords(text: string): WordCountResult {
  // Work on plain text so markdown syntax does not inflate the counts.
  const plainText = stripMarkdown(text);

  const cjk = countCJKCharacters(plainText);
  const english = countAlphanumericWords(plainText);

  return {
    cjk,
    english,
    total: cjk + english,
    chars: countCharacters(plainText)
  };
}
/**
 * Format word count for display
 *
 * @param result - counts produced by countWords
 * @param format - 'detailed' shows per-category counts; 'total' (default) shows only the total
 */
export function formatWordCount(result: WordCountResult, format: 'total' | 'detailed' = 'total'): string {
  const fmt = (n: number) => n.toLocaleString();
  return format === 'detailed'
    ? `CJK: ${fmt(result.cjk)} | EN: ${fmt(result.english)} | Total: ${fmt(result.total)}`
    : `Words: ${fmt(result.total)}`;
}
| xiaolai/cjk-text-formatter-vscode | 4 | VS Code extension for formatting CJK (Chinese, Japanese, Korean) and English mixed text with proper typography rules | TypeScript | xiaolai | xiaolai | inblockchain |
install.sh | Shell | #!/bin/bash
# Claude Genie Installation Script
# Copies agents and commands folders to $HOME/.claude/
set -e # Exit on any error
# Colors for output (ANSI escape sequences; rendered via `echo -e`)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Function to print colored messages
# print_success MESSAGE — green check mark followed by MESSAGE
print_success() {
    echo -e "${GREEN}✓${NC} $1"
}
# print_error MESSAGE — red cross followed by MESSAGE
print_error() {
    echo -e "${RED}✗${NC} $1"
}
# print_info MESSAGE — yellow arrow followed by MESSAGE
print_info() {
    echo -e "${YELLOW}➜${NC} $1"
}
# Main installation
# Copies the agents/ and commands/ folders that ship next to this script into
# $HOME/.claude/, lists what was installed, and points the user at the setup
# wizard. Exits with status 1 if either source folder is missing.
main() {
    print_info "Starting Claude Genie installation..."
    # Resolve the directory containing this script (works for relative or
    # absolute invocation paths).
    SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
    TARGET_DIR="$HOME/.claude"
    # Create target directory if it doesn't exist
    if [ ! -d "$TARGET_DIR" ]; then
        print_info "Creating $TARGET_DIR directory..."
        mkdir -p "$TARGET_DIR"
        print_success "Created $TARGET_DIR"
    else
        print_info "$TARGET_DIR already exists"
    fi
    # Copy agents folder
    if [ -d "$SCRIPT_DIR/agents" ]; then
        print_info "Copying agents folder..."
        # Create agents directory if it doesn't exist
        if [ ! -d "$TARGET_DIR/agents" ]; then
            mkdir -p "$TARGET_DIR/agents"
        fi
        # Copy all agent files (an empty source folder is not an error)
        cp -r "$SCRIPT_DIR/agents/"* "$TARGET_DIR/agents/" 2>/dev/null || true
        print_success "Copied agents to $TARGET_DIR/agents/"
    else
        print_error "agents folder not found in $SCRIPT_DIR"
        exit 1
    fi
    # Copy commands folder
    if [ -d "$SCRIPT_DIR/commands" ]; then
        print_info "Copying commands folder..."
        # Create commands directory if it doesn't exist
        if [ ! -d "$TARGET_DIR/commands" ]; then
            mkdir -p "$TARGET_DIR/commands"
        fi
        # Copy all command files (an empty source folder is not an error)
        cp -r "$SCRIPT_DIR/commands/"* "$TARGET_DIR/commands/" 2>/dev/null || true
        print_success "Copied commands to $TARGET_DIR/commands/"
    else
        print_error "commands folder not found in $SCRIPT_DIR"
        exit 1
    fi
    # List installed items
    print_info "Installed items:"
    # List agents
    if [ -d "$TARGET_DIR/agents" ]; then
        echo ""
        # BUGFIX: use `echo -e` so the ANSI color variables are rendered
        # instead of being printed literally as "\033[...".
        echo -e "  ${GREEN}Agents installed:${NC}"
        echo -e "    - ${YELLOW}@genie${NC} - Claude Code optimization expert"
        echo -e "    - ${YELLOW}@guru${NC} - Natural Language Programming master"
    fi
    # List commands
    if [ -d "$TARGET_DIR/commands" ]; then
        echo ""
        echo -e "  ${GREEN}Commands installed:${NC}"
        echo -e "    - ${YELLOW}/claude-setup-wizard${NC} - Smart setup orchestrator"
        echo -e "    - ${YELLOW}/claude-setup-wizard-global${NC} - User identity setup"
        echo -e "    - ${YELLOW}/claude-setup-wizard-project${NC} - Project configuration"
    fi
    echo ""
    print_success "Installation complete!"
    # Check if CLAUDE.md exists and provide clear next steps
    if [ ! -f "$TARGET_DIR/CLAUDE.md" ]; then
        echo ""
        echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        # BUGFIX: `echo -e` so the color codes in this banner render.
        echo -e "  ${YELLOW}Next Step:${NC} Run ${GREEN}/claude-setup-wizard${NC} in Claude Code"
        echo "  This will configure your personalized environment"
        echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    else
        print_info "Your Claude environment is already configured!"
        print_info "You can run /claude-setup-wizard anytime to reconfigure"
    fi
}
# Run main function
main
scripts/codex-preflight.sh | Shell | #!/usr/bin/env bash
# codex-preflight.sh — Discover available Codex models by probing in parallel.
#
# Usage: bash scripts/codex-preflight.sh
# Output: JSON to stdout (human summary to stderr)
#
# Caching: Results are cached for 5 minutes in $TMPDIR/codex-preflight-cache.json.
# Set CODEX_PREFLIGHT_NO_CACHE=1 to skip cache.
#
# How it works:
# Invalid models fail fast (~1s) with "not supported" in stderr.
# Valid models start processing (no error within timeout) — we kill & mark available.
set -euo pipefail
# ── Configuration ────────────────────────────────────────────────────────────
PROBE_TIMEOUT=5 # seconds to wait before declaring "available"
CLOUD_TIMEOUT=5 # seconds to wait for codex cloud list
CACHE_TTL=300 # seconds (5 minutes)
PROBE_PROMPT="Respond with ok."
# Candidate models — stale entries are harmless (probe as unavailable).
# Add new model names here as OpenAI releases them.
# ORDER MATTERS: within each family, list newest version FIRST.
# The dedup step keeps only the first available model per family.
CANDIDATE_MODELS=(
  # gpt-codex family (newest first)
  gpt-5.3-codex
  gpt-5.2-codex
  # gpt-codex-spark family
  gpt-5.3-codex-spark
  # gpt-codex-max family
  gpt-5.1-codex-max
  # gpt-codex-mini family
  gpt-5-codex-mini
  # o-mini family
  o4-mini
  # o family
  o3
  # standalone
  codex-mini-latest
  # gpt (non-codex) family
  gpt-4.1
  # gpt-mini (non-codex) family
  gpt-4.1-mini
)
# Static options (stable CLI flags — no need to probe).
# These JSON fragments are interpolated verbatim into the output document.
REASONING_EFFORTS='["low","medium","high"]'
SANDBOX_LEVELS='["read-only","workspace-write","danger-full-access"]'
# ── Helpers ──────────────────────────────────────────────────────────────────
# Print a status line to stderr (stdout is reserved for the JSON result).
info() { echo "$*" >&2; }
# Escape a string for safe JSON interpolation (handles quotes, backslashes, newlines).
json_escape() {
  local s="$1"
  s="${s//\\/\\\\}" # backslashes — must be first so later escapes aren't doubled
  s="${s//\"/\\\"}" # double quotes
  s="${s//$'\n'/\\n}" # newlines
  s="${s//$'\r'/}" # carriage returns (dropped entirely)
  s="${s//$'\t'/\\t}" # tabs
  printf '%s' "$s"
}
# Resolve the timeout command (timeout on Linux, gtimeout on macOS via coreutils).
# Prints the first one found, preferring `timeout`; prints an empty string
# when neither is installed.
resolve_timeout_cmd() {
  local candidate
  for candidate in timeout gtimeout; do
    if command -v "$candidate" &>/dev/null; then
      echo "$candidate"
      return
    fi
  done
  echo ""
}
# Map a model name to its family (version stripped).
# Within each family, only the newest available model is kept.
# NOTE: pattern order matters — the more specific suffixes must come first.
get_family() {
  case "$1" in
    gpt-*-codex-spark) echo "gpt-codex-spark" ;;
    gpt-*-codex-max)   echo "gpt-codex-max" ;;
    gpt-*-codex-mini)  echo "gpt-codex-mini" ;;
    gpt-*-codex)       echo "gpt-codex" ;;
    gpt-*-mini)        echo "gpt-mini" ;;
    gpt-[0-9]*)        echo "gpt" ;;
    o[0-9]*-mini)      echo "o-mini" ;;
    o[0-9]*)           echo "o" ;;
    *)                 echo "$1" ;; # unique — no family
  esac
}
# Build a JSON array from positional arguments. Handles empty arrays correctly.
# NOTE: items are interpolated as-is (callers pass model names that need no
# JSON escaping).
json_array() {
  local out="[" sep=""
  local item
  for item in "$@"; do
    out+="${sep}\"${item}\""
    sep=","
  done
  echo "${out}]"
}
# ── Step 0: Check cache ─────────────────────────────────────────────────────
CACHE_FILE="${TMPDIR:-/tmp}/codex-preflight-cache.json"
if [[ -z "${CODEX_PREFLIGHT_NO_CACHE:-}" && -f "$CACHE_FILE" ]]; then
  # Check if cache is fresh (< CACHE_TTL seconds old).
  # stat's mtime flag differs between BSD (macOS) and GNU coreutils.
  if [[ "$(uname)" == "Darwin" ]]; then
    cache_age=$(( $(date +%s) - $(stat -f %m "$CACHE_FILE") ))
  else
    cache_age=$(( $(date +%s) - $(stat -c %Y "$CACHE_FILE") ))
  fi
  if [[ $cache_age -lt $CACHE_TTL ]]; then
    info "Using cached results (${cache_age}s old, TTL ${CACHE_TTL}s)"
    cat "$CACHE_FILE"
    exit 0
  fi
fi
# ── Step 1: Check codex CLI ──────────────────────────────────────────────────
if ! command -v codex &>/dev/null; then
  cat <<'JSON'
{"status":"error","error":"codex CLI not found. Install: npm install -g @openai/codex","models":[],"reasoning_efforts":[],"sandbox_levels":[]}
JSON
  exit 1
fi
# ── Step 2: Get codex version ────────────────────────────────────────────────
CODEX_VERSION=$(codex --version 2>/dev/null || echo "unknown")
info "Codex version: $CODEX_VERSION"
# ── Step 3: Check authentication ─────────────────────────────────────────────
AUTH_MODE="unknown"
# Prefer `codex login status` (available in v0.101+) over parsing auth.json directly.
LOGIN_STATUS=$(codex login status 2>&1) || true
if echo "$LOGIN_STATUS" | grep -qi "logged in"; then
  # Extract auth mode from status output if possible
  if echo "$LOGIN_STATUS" | grep -qi "chatgpt"; then
    AUTH_MODE="chatgpt_login"
  elif echo "$LOGIN_STATUS" | grep -qi "api.key\|api_key"; then
    AUTH_MODE="api_key"
  else
    AUTH_MODE="authenticated"
  fi
elif echo "$LOGIN_STATUS" | grep -qi "not logged in\|not authenticated"; then
  AUTH_MODE="unknown"
else
  # Fallback: parse auth.json directly (older Codex versions)
  AUTH_FILE="$HOME/.codex/auth.json"
  if [[ -f "$AUTH_FILE" ]]; then
    if command -v jq &>/dev/null; then
      AUTH_MODE=$(jq -r '.auth_mode // "unknown"' "$AUTH_FILE" 2>/dev/null || echo "unknown")
    else
      # No jq installed: crude grep/sed extraction of the "auth_mode" field.
      AUTH_MODE=$(grep -o '"auth_mode"[[:space:]]*:[[:space:]]*"[^"]*"' "$AUTH_FILE" 2>/dev/null \
        | head -1 | sed 's/.*"\([^"]*\)"$/\1/' || echo "unknown")
    fi
  fi
fi
# Only fall back to API key if no subscription auth was found.
# Subscription (codex login) is preferred — it reflects the user's actual plan.
if [[ "$AUTH_MODE" == "unknown" && -n "${OPENAI_API_KEY:-}" ]]; then
  AUTH_MODE="api_key"
fi
if [[ "$AUTH_MODE" == "unknown" ]]; then
  CODEX_VERSION_SAFE=$(json_escape "$CODEX_VERSION")
  cat <<JSON
{"status":"error","error":"Not authenticated. Run: codex login","auth_mode":"none","codex_version":"$CODEX_VERSION_SAFE","models":[],"reasoning_efforts":$REASONING_EFFORTS,"sandbox_levels":$SANDBOX_LEVELS}
JSON
  exit 1
fi
info "Auth mode: $AUTH_MODE"
info "Probing ${#CANDIDATE_MODELS[@]} candidate models (timeout ${PROBE_TIMEOUT}s each)..."
# ── Step 4: Probe models in parallel ─────────────────────────────────────────
TMPDIR_PROBE=$(mktemp -d)
# On exit: kill anything still running in this process group (`kill 0`),
# then remove the scratch directory.
trap 'kill 0 2>/dev/null; rm -rf "$TMPDIR_PROBE"' EXIT
TIMEOUT_CMD=$(resolve_timeout_cmd)
# Probe one model in the background. An invalid model errors fast (stderr is
# captured for Step 5); one still running at the timeout is assumed valid.
probe_model() {
  local model="$1"
  local outfile="$TMPDIR_PROBE/$model"
  local stderr_file="$TMPDIR_PROBE/${model}.stderr"
  if [[ -n "$TIMEOUT_CMD" ]]; then
    $TIMEOUT_CMD "$PROBE_TIMEOUT" \
      codex exec -m "$model" "$PROBE_PROMPT" \
      >"$outfile" 2>"$stderr_file" &
  else
    # Manual timeout fallback (no coreutils): run the probe and a killer
    # subshell in parallel; whichever finishes first cancels the other.
    (
      codex exec -m "$model" "$PROBE_PROMPT" \
        >"$outfile" 2>"$stderr_file"
    ) &
    local pid=$!
    (
      sleep "$PROBE_TIMEOUT"
      kill "$pid" 2>/dev/null
    ) &
    local killer=$!
    wait "$pid" 2>/dev/null
    kill "$killer" 2>/dev/null
    wait "$killer" 2>/dev/null
  fi
}
# Launch all probes in parallel
for model in "${CANDIDATE_MODELS[@]}"; do
  probe_model "$model" &
done
# Wait for all background probes
wait
# ── Step 5: Collect and deduplicate results ──────────────────────────────────
AVAILABLE=()
UNAVAILABLE=()
for model in "${CANDIDATE_MODELS[@]}"; do
  stderr_file="$TMPDIR_PROBE/${model}.stderr"
  # A probe whose stderr contains "not supported" failed fast → unavailable.
  # NOTE(review): any other failure mode (network/auth errors) is still
  # classified as available — confirm that is the intended behavior.
  if [[ -f "$stderr_file" ]] && grep -qi "not supported" "$stderr_file" 2>/dev/null; then
    UNAVAILABLE+=("$model")
    info " $model --> unavailable"
  else
    AVAILABLE+=("$model")
    info " $model --> available"
  fi
done
# Deduplicate — keep only newest per family.
# CANDIDATE_MODELS is ordered newest-first within each family, so the first
# available model we see for a family is the newest. Older versions move to
# UNAVAILABLE (with a "superseded" note).
SEEN_FAMILIES=""
FILTERED=()
for model in "${AVAILABLE[@]}"; do
  family=$(get_family "$model")
  # Families already seen are tracked as a "|family|"-delimited string.
  if echo "$SEEN_FAMILIES" | grep -qF "|${family}|"; then
    UNAVAILABLE+=("$model")
    info " $model --> superseded (keeping newer from $family family)"
  else
    FILTERED+=("$model")
    SEEN_FAMILIES="${SEEN_FAMILIES}|${family}|"
  fi
done
# Correctly handle empty arrays (bash "${arr[@]}" expands to "" when empty)
if [[ ${#FILTERED[@]} -gt 0 ]]; then
  AVAILABLE=("${FILTERED[@]}")
else
  AVAILABLE=()
fi
# ── Step 6: Check Codex Cloud availability ───────────────────────────────────
CODEX_CLOUD="false"
if [[ -n "$TIMEOUT_CMD" ]]; then
  if $TIMEOUT_CMD "$CLOUD_TIMEOUT" codex cloud list &>/dev/null; then
    CODEX_CLOUD="true"
  fi
else
  # No timeout command available — skip cloud check rather than risk hanging
  info " Skipping cloud check (no timeout command available)"
fi
# ── Step 7: Output JSON ─────────────────────────────────────────────────────
# The ${arr[@]+...} expansion guards against "unbound variable" errors on
# empty arrays under `set -u` in older bash versions.
available_json=$(json_array "${AVAILABLE[@]+"${AVAILABLE[@]}"}")
unavailable_json=$(json_array "${UNAVAILABLE[@]+"${UNAVAILABLE[@]}"}")
CODEX_VERSION_SAFE=$(json_escape "$CODEX_VERSION")
AUTH_MODE_SAFE=$(json_escape "$AUTH_MODE")
OUTPUT=$(cat <<JSON
{"status":"ok","codex_version":"$CODEX_VERSION_SAFE","auth_mode":"$AUTH_MODE_SAFE","codex_cloud":$CODEX_CLOUD,"models":$available_json,"unavailable":$unavailable_json,"reasoning_efforts":$REASONING_EFFORTS,"sandbox_levels":$SANDBOX_LEVELS}
JSON
)
# Write to cache and stdout
echo "$OUTPUT" > "$CACHE_FILE"
echo "$OUTPUT"
| xiaolai/codex-toolkit-for-claude | 4 | OpenAI Codex MCP integration for Claude Code — audit, implement, verify, and debug via Codex | Shell | xiaolai | xiaolai | inblockchain |
background/service-worker.js | JavaScript | import { notifyMessage } from '../modules/messaging.js';
import {
saveConversation,
findConversationByConversationId
} from '../modules/history-manager.js';
import { t, initializeLanguage } from '../modules/i18n.js';
// T008 & T065: Install event - setup context menus and configure side panel
// Default passed to chrome.storage.sync.get: shortcut handling enabled.
const DEFAULT_SHORTCUT_SETTING = { keyboardShortcutEnabled: true };
// In-memory mirror of the synced setting, kept fresh by a storage listener.
let keyboardShortcutEnabled = true;
// T070: Track side panel state per window
const sidePanelState = new Map(); // windowId -> boolean (true = open, false = closed)
// Load the keyboard-shortcut preference from synced storage into the
// in-memory flag, defaulting to enabled when storage is unavailable.
async function loadShortcutSetting() {
  try {
    const result = await chrome.storage.sync.get(DEFAULT_SHORTCUT_SETTING);
    keyboardShortcutEnabled = result.keyboardShortcutEnabled;
  } catch (error) {
    // Fallback to default if storage unavailable
    keyboardShortcutEnabled = true;
  }
}
// T070: Helper to toggle side panel
// Opens the panel for `windowId` when we believe it is closed; otherwise asks
// the sidebar to close itself via messaging. Tracks the resulting state in
// sidePanelState. `action` is accepted for signature compatibility but unused.
async function toggleSidePanel(windowId, action = null) {
  if (!windowId) {
    return;
  }

  if (sidePanelState.get(windowId)) {
    // Panel believed open — ask the sidebar to close itself.
    try {
      await notifyMessage({ action: 'closeSidePanel', payload: {} });
    } catch (error) {
      // Messaging may fail if the sidebar is already gone; treat as closed.
    }
    sidePanelState.set(windowId, false);
  } else {
    // Panel believed closed — open it.
    try {
      await chrome.sidePanel.open({ windowId });
      sidePanelState.set(windowId, true);
    } catch (error) {
      // Silently fail - side panel may not be available
    }
  }
}
// Disable Chrome's built-in "open panel on action click" so our own
// chrome.action.onClicked handler can implement toggle semantics.
async function configureActionBehavior() {
  // Always handle action clicks ourselves so we can respect the toggle state.
  try {
    await chrome.sidePanel.setPanelBehavior({ openPanelOnActionClick: false });
  } catch (error) {
    // Silently fail if API not available
  }
}
// On install: build context menus and load settings before configuring clicks.
chrome.runtime.onInstalled.addListener(async () => {
  await createContextMenus();
  await loadShortcutSetting();
  await configureActionBehavior();
});
// On browser startup: menus persist, but settings and click behavior must be
// re-established for the fresh service-worker instance.
chrome.runtime.onStartup.addListener(async () => {
  await loadShortcutSetting();
  await configureActionBehavior();
});
// T065-T068: Create/update context menus dynamically based on enabled providers
// Rebuilds the whole menu tree from scratch: one parent entry, one child per
// enabled provider, plus a Prompt Library child.
async function createContextMenus() {
  // Remove all existing menus
  await chrome.contextMenus.removeAll();
  // Initialize language before creating menus
  await initializeLanguage();
  // Get enabled providers from settings (the array below is the default)
  const settings = await chrome.storage.sync.get({
    enabledProviders: ['chatgpt', 'claude', 'gemini', 'google', 'grok', 'deepseek', 'copilot']
  });
  const enabledProviders = settings.enabledProviders;
  // Create main context menu item
  chrome.contextMenus.create({
    id: 'open-smarter-panel',
    title: t('contextMenuSendTo'),
    contexts: ['page', 'selection', 'link']
  });
  // Create submenu for each enabled provider
  const providerNames = {
    chatgpt: 'ChatGPT',
    claude: 'Claude',
    gemini: 'Gemini',
    grok: 'Grok',
    deepseek: 'DeepSeek',
    google: 'Google',
    copilot: 'Microsoft Copilot'
  };
  enabledProviders.forEach(providerId => {
    chrome.contextMenus.create({
      // The 'provider-' prefix is parsed back out by the click handler.
      id: `provider-${providerId}`,
      parentId: 'open-smarter-panel',
      title: providerNames[providerId] || providerId,
      contexts: ['page', 'selection', 'link']
    });
  });
  // Add Prompt Library option
  chrome.contextMenus.create({
    id: 'open-prompt-library',
    parentId: 'open-smarter-panel',
    title: t('contextMenuPromptLibrary'),
    contexts: ['page', 'selection', 'link']
  });
}
// T066: Listen for settings changes and update context menus
// (fire-and-forget: menu rebuild errors are not awaited here)
chrome.storage.onChanged.addListener((changes, namespace) => {
  if (changes.enabledProviders || changes.language) {
    createContextMenus();
  }
});
// Build the text payload for a context-menu click: when text is selected,
// attach the source URL per the user's `sourceUrlPlacement` preference;
// otherwise ask the page's content script to extract the page content.
// Returns '' when no selection exists and extraction fails.
async function resolveClickContent(info, tab) {
  const settings = await chrome.storage.sync.get({ sourceUrlPlacement: 'end' });
  const placement = settings.sourceUrlPlacement;
  if (info.selectionText) {
    if (placement === 'none') {
      return info.selectionText;
    }
    if (placement === 'beginning') {
      return `Source: ${info.pageUrl}\n\n${info.selectionText}`;
    }
    // default: 'end'
    return `${info.selectionText}\n\nSource: ${info.pageUrl}`;
  }
  // No text selected - extract page content via the content script.
  try {
    const response = await chrome.tabs.sendMessage(tab.id, {
      action: 'extractPageContent'
    });
    if (response && response.success) {
      return response.content;
    }
  } catch (error) {
    // Content script not ready or extraction failed — fall through to ''.
  }
  return '';
}

// Send a message to the sidebar after a short delay so the just-opened
// sidebar has time to load its message listeners.
function notifySidebarSoon(message) {
  setTimeout(() => {
    notifyMessage(message).catch(() => {
      // Sidebar may not be ready yet, silently ignore
    });
  }, 100);
}

// T009 & T067-T068 & T070: Context menu click handler with state tracking
// Opens the side panel, resolves the content to send (selection or extracted
// page text), then routes it either to a specific provider or to the
// Prompt Library.
chrome.contextMenus.onClicked.addListener(async (info, tab) => {
  try {
    if (!tab || !tab.windowId) {
      return;
    }
    const isProvider = typeof info.menuItemId === 'string' && info.menuItemId.startsWith('provider-');
    const isPromptLibrary = info.menuItemId === 'open-prompt-library';
    if (!isProvider && !isPromptLibrary) {
      return;
    }
    // Open side panel and track state
    await chrome.sidePanel.open({ windowId: tab.windowId });
    sidePanelState.set(tab.windowId, true);

    const selectedText = await resolveClickContent(info, tab);
    if (isProvider) {
      const providerId = info.menuItemId.replace('provider-', '');
      notifySidebarSoon({
        action: 'switchProvider',
        payload: { providerId, selectedText }
      });
    } else {
      notifySidebarSoon({
        action: 'openPromptLibrary',
        payload: { selectedText }
      });
    }
  } catch (error) {
    // Silently handle context menu errors
  }
});
// T010 & T070: Handle action clicks (toolbar or `_execute_action` command) with toggle
chrome.action.onClicked.addListener(async (tab) => {
  if (!tab || !tab.windowId) {
    return;
  }
  // Respect the user's preference to disable the shortcut/click toggle.
  if (!keyboardShortcutEnabled) {
    return;
  }
  await toggleSidePanel(tab.windowId);
});
// Keep the in-memory shortcut flag in sync with the synced setting
// (anything other than an explicit `false` counts as enabled).
chrome.storage.onChanged.addListener((changes, namespace) => {
  if (namespace !== 'sync') return;
  if (changes.keyboardShortcutEnabled) {
    keyboardShortcutEnabled = changes.keyboardShortcutEnabled.newValue !== false;
  }
});
// T070: Clean up state when windows are closed
chrome.windows.onRemoved.addListener((windowId) => {
  sidePanelState.delete(windowId);
});
// T070: Listen for sidebar close notifications, conversation saves, and duplicate checks
// Returns true only for branches that respond asynchronously; returning true
// for every message would leave senders of unhandled messages waiting on a
// response that never arrives.
chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
  if (message.action === 'sidePanelClosed') {
    // Sidebar reports it closed — update tracked state for that window.
    if (sender.tab && sender.tab.windowId) {
      sidePanelState.set(sender.tab.windowId, false);
    }
    sendResponse({ success: true });
    return false; // response already sent synchronously
  }
  if (message.action === 'saveConversationFromPage') {
    // Handle conversation save from ChatGPT page
    handleSaveConversation(message.payload, sender).then(sendResponse);
    return true; // Keep channel open for async response
  }
  if (message.action === 'checkDuplicateConversation') {
    // Handle duplicate check request
    handleCheckDuplicate(message.payload).then(sendResponse);
    return true; // Keep channel open for async response
  }
  if (message.action === 'fetchLatestCommit') {
    // T073: Handle version check request from options page
    handleFetchLatestCommit().then(sendResponse);
    return true; // Keep channel open for async response
  }
  // Unhandled message: close the channel immediately.
  return false;
});
// T073: Handle version check by fetching latest commit from GitHub API
// Resolves to { success: true, data: { sha, shortSha, date, message } } or
// { success: false, error } on any network/API failure.
async function handleFetchLatestCommit() {
  const GITHUB_API_URL = 'https://api.github.com/repos/xiaolai/insidebar-ai/commits/main';
  try {
    const response = await fetch(GITHUB_API_URL, {
      headers: { 'Accept': 'application/vnd.github.v3+json' }
    });
    if (!response.ok) {
      throw new Error(`GitHub API error: ${response.status}`);
    }
    const payload = await response.json();
    return {
      success: true,
      data: {
        sha: payload.sha,
        shortSha: payload.sha.substring(0, 7),
        date: payload.commit.committer.date,
        message: payload.commit.message
      }
    };
  } catch (error) {
    console.error('[Background] Error fetching latest commit:', error);
    return { success: false, error: error.message };
  }
}
// Handle duplicate conversation check - now with direct database access
// Resolves to { isDuplicate: false } or
// { isDuplicate: true, existingConversation } and rethrows lookup errors.
async function handleCheckDuplicate(payload) {
  try {
    const { conversationId } = payload;
    if (!conversationId) {
      return { isDuplicate: false };
    }
    // Query IndexedDB directly without requiring sidebar
    const existing = await findConversationByConversationId(conversationId);
    return existing
      ? { isDuplicate: true, existingConversation: existing }
      : { isDuplicate: false };
  } catch (error) {
    console.error('[Background] Error checking duplicate:', error);
    // Propagate error instead of silently returning false
    throw error;
  }
}
// Handle saving conversation - now with direct database access
// Persists `conversationData` to IndexedDB, notifies an open sidebar to
// refresh, and optionally opens the sidebar on the saving tab's window.
// Resolves to { success: true, data } or { success: false, error }.
async function handleSaveConversation(conversationData, sender) {
  try {
    // Save directly to IndexedDB without requiring sidebar
    const savedConversation = await saveConversation(conversationData);
    // Notify sidebar to refresh chat history if it's open
    try {
      await notifyMessage({
        action: 'refreshChatHistory',
        payload: { conversationId: savedConversation.id }
      });
    } catch (error) {
      // Sidebar may not be open, that's okay
    }
    // Get user setting for auto-opening sidebar
    const settings = await chrome.storage.sync.get({
      autoOpenSidebarOnSave: false
    });
    // Optionally open sidebar and switch to chat history
    if (settings.autoOpenSidebarOnSave && sender.tab) {
      const windowId = sender.tab.windowId;
      const isOpen = sidePanelState.get(windowId) || false;
      if (!isOpen && windowId) {
        try {
          // This will work because it's within the user gesture flow
          await chrome.sidePanel.open({ windowId });
          sidePanelState.set(windowId, true);
          // Wait for sidebar to load, then switch to chat history
          setTimeout(() => {
            notifyMessage({
              action: 'switchToChatHistory',
              payload: { conversationId: savedConversation.id }
            }).catch(() => {
              // Sidebar may not be ready, ignore
            });
          }, 300);
        } catch (error) {
          // If sidebar opening fails, it's okay - the save already succeeded
          console.warn('[Background] Could not open sidebar after save:', error.message);
        }
      }
    }
    return { success: true, data: savedConversation };
  } catch (error) {
    console.error('[Background] Error saving conversation:', error);
    return { success: false, error: error.message };
  }
}
// T069 & T070: Listen for keyboard shortcuts with toggle support
// 'open-prompt-library' toggles the panel, landing on the Prompt Library when
// opening; 'toggle-focus' bounces keyboard focus between sidebar and page.
chrome.commands.onCommand.addListener(async (command, tab) => {
  if (!tab || !tab.windowId) {
    return;
  }
  const windowId = tab.windowId;
  const isOpen = sidePanelState.get(windowId) || false;
  if (command === 'open-prompt-library') {
    if (!isOpen) {
      // Open and switch to Prompt Library
      try {
        await chrome.sidePanel.open({ windowId });
        sidePanelState.set(windowId, true);
        // Wait for sidebar to load, then switch to Prompt Library
        setTimeout(() => {
          notifyMessage({
            action: 'openPromptLibrary',
            payload: {}
          }).catch(() => {
            // Sidebar may not be ready yet, ignore error
          });
        }, 100);
      } catch (error) {
        // Silently handle errors
      }
    } else {
      // Close side panel (toggle off)
      try {
        await notifyMessage({ action: 'closeSidePanel', payload: {} });
        sidePanelState.set(windowId, false);
      } catch (error) {
        // Even if message fails, assume it's closed
        sidePanelState.set(windowId, false);
      }
    }
  } else if (command === 'toggle-focus') {
    // Toggle focus between sidebar and main page
    if (!isOpen) {
      // Sidebar not open - open it (it will auto-focus)
      try {
        await chrome.sidePanel.open({ windowId });
        sidePanelState.set(windowId, true);
      } catch (error) {
        // Silently handle errors
      }
    } else {
      // Sidebar is open - toggle focus between sidebar and page
      try {
        // Ask the sidebar whether it currently has focus
        const sidebarResponse = await notifyMessage({
          action: 'checkFocus',
          payload: {}
        });
        if (sidebarResponse && sidebarResponse.hasFocus) {
          // Sidebar has focus - switch to page input
          if (tab && tab.id) {
            try {
              await chrome.tabs.sendMessage(tab.id, { action: 'takeFocus' });
            } catch (error) {
              // Content script may not be available
            }
          }
        } else {
          // Page has focus (or unknown) - switch to sidebar
          await notifyMessage({
            action: 'takeFocus',
            payload: {}
          });
        }
      } catch (error) {
        // If sidebar messaging fails, try to focus sidebar anyway
        try {
          await notifyMessage({
            action: 'takeFocus',
            payload: {}
          });
        } catch (e) {
          // Silently handle errors
        }
      }
    }
  }
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/button-finder-utils.js | JavaScript | /**
* Button Finder Utility for Content Scripts
* Provides multi-language, priority-based button finding with fallback strategies
*
* NOTE: This file must be loaded BEFORE any enter-behavior-*.js files in manifest.json
* It exports functions to window.ButtonFinderUtils
*/
(function() {
  'use strict';
  // Create global namespace for button finder utilities (idempotent: safe if
  // this script is injected more than once).
  window.ButtonFinderUtils = window.ButtonFinderUtils || {};
  /**
   * Multi-language text maps for button matching.
   * Each array contains translations in priority order. Duplicate entries
   * (e.g. '保存', which is identical in Simplified/Traditional Chinese and
   * Japanese) are harmless: lookups test membership, not position.
   */
  const TEXT_MAPS = {
    // Send button texts across languages
    send: [
      'Send', // English
      '发送', // Chinese Simplified
      '送信', // Japanese
      'Отправить', // Russian
      'Enviar', // Spanish
      'Envoyer', // French
      'Senden', // German
      'Invia', // Italian
      '보내기' // Korean
    ],
    // Submit button texts
    submit: [
      'Submit', // English
      '提交', // Chinese Simplified
      '送信', // Japanese
      'Отправить', // Russian
      'Enviar', // Spanish
      'Soumettre', // French
      'Senden', // German
      'Invia', // Italian
      '제출' // Korean
    ],
    // Save button texts
    save: [
      'Save', // English
      '保存', // Chinese Simplified
      '保存', // Chinese Traditional
      '保存', // Japanese
      'Сохранить', // Russian
      'Guardar', // Spanish
      'Enregistrer', // French
      'Speichern', // German
      'Salva', // Italian
      '저장' // Korean
    ],
    // Update button texts
    update: [
      'Update', // English
      '更新', // Chinese Simplified
      '更新', // Chinese Traditional & Japanese
      'Обновить', // Russian
      'Actualizar', // Spanish
      'Mettre à jour', // French
      'Aktualisieren', // German
      'Aggiorna', // Italian
      '업데이트' // Korean
    ],
    // Share button texts (for language detection)
    share: [
      'Share', // English
      '分享', // Chinese Simplified
      '分享', // Chinese Traditional
      '共有', // Japanese
      'Поделиться', // Russian
      'Compartir', // Spanish
      'Partager', // French
      'Teilen', // German
      'Condividi', // Italian
      '공유' // Korean
    ]
  };
  /**
   * Selector types for button finding strategies.
   * Used as the `type` field of advanced selector configs in findButton.
   */
  const SELECTOR_TYPES = {
    CSS: 'css', // Direct CSS selector
    TEXT: 'text', // Text content matching (multi-language)
    ARIA: 'aria', // ARIA label matching (multi-language)
    FUNCTION: 'function' // Custom matcher function
  };
/**
* Find button using prioritized selector array
* Tries each selector in order until a match is found
*
* @param {Array} selectors - Array of selector configurations
* @returns {HTMLElement|null} Found button element or null
*
* @example
* const button = window.ButtonFinderUtils.findButton([
* 'button[data-testid="send"]', // Try data-testid first
* { type: 'css', value: 'button[type="submit"]' }, // Then structural selector
* { type: 'aria', textKey: 'send' }, // Then multi-language ARIA
* { type: 'text', textKey: 'send' } // Finally multi-language text
* ]);
*/
window.ButtonFinderUtils.findButton = function(selectors) {
if (!Array.isArray(selectors)) {
console.error('[Button Finder] selectors must be an array');
return null;
}
for (let i = 0; i < selectors.length; i++) {
const selector = selectors[i];
try {
const element = trySelector(selector);
if (element) {
// Log which selector succeeded (helpful for debugging)
if (typeof selector === 'string') {
console.debug(`[Button Finder] Found via selector[${i}]:`, selector);
} else {
console.debug(`[Button Finder] Found via selector[${i}]:`, selector.type);
}
return element;
}
} catch (error) {
console.warn(`[Button Finder] Error trying selector[${i}]:`, error);
continue;
}
}
console.warn('[Button Finder] No button found after trying all selectors');
return null;
};
/**
* Try a single selector configuration
* @private
*/
function trySelector(config) {
// String selector = direct CSS querySelector
if (typeof config === 'string') {
const element = document.querySelector(config);
return element;
}
// Object = advanced selector configuration
if (typeof config !== 'object' || config === null) {
console.warn('[Button Finder] Invalid selector config:', config);
return null;
}
const { type, value, textKey, matcher } = config;
switch (type) {
case SELECTOR_TYPES.CSS:
return document.querySelector(value);
case SELECTOR_TYPES.TEXT:
return findByTextContent(textKey);
case SELECTOR_TYPES.ARIA:
return findByAriaLabel(textKey);
case SELECTOR_TYPES.FUNCTION:
if (typeof matcher !== 'function') {
console.warn('[Button Finder] FUNCTION type requires matcher function');
return null;
}
return matcher();
default:
console.warn('[Button Finder] Unknown selector type:', type);
return null;
}
}
/**
* Find button by text content using multi-language text map
* @private
*/
function findByTextContent(textKey) {
const texts = TEXT_MAPS[textKey];
if (!texts || !Array.isArray(texts)) {
console.warn('[Button Finder] Invalid textKey for TEXT search:', textKey);
return null;
}
const buttons = document.querySelectorAll('button');
return Array.from(buttons).find(btn => {
const btnText = btn.textContent?.trim();
return btnText && texts.some(text => btnText === text);
});
}
/**
* Find button by aria-label using multi-language text map
* @private
*/
function findByAriaLabel(textKey) {
const texts = TEXT_MAPS[textKey];
if (!texts || !Array.isArray(texts)) {
console.warn('[Button Finder] Invalid textKey for ARIA search:', textKey);
return null;
}
const buttons = document.querySelectorAll('button');
return Array.from(buttons).find(btn => {
const ariaLabel = btn.getAttribute('aria-label');
return ariaLabel && texts.some(text => ariaLabel.includes(text));
});
}
  // Public API: expose the shared data tables so per-site content scripts
  // (loaded after this file, per manifest.json ordering) can build their own
  // selector configurations.
  window.ButtonFinderUtils.TEXT_MAPS = TEXT_MAPS;
  window.ButtonFinderUtils.SELECTOR_TYPES = SELECTOR_TYPES;
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/chatgpt-history-extractor.js | JavaScript | // ChatGPT Conversation History Extractor
// Extracts current conversation from ChatGPT.com DOM and saves to extension
//
// IMPORTANT: Requires conversation-extractor-utils.js to be loaded first
(function() {
  'use strict';
  console.log('[ChatGPT Extractor] Script loaded');
  // Import shared utilities from global namespace.
  // conversation-extractor-utils.js must be listed before this file in
  // manifest.json, otherwise this destructuring throws at injection time.
  const {
    extractMarkdownFromElement,
    formatMessagesAsText,
    generateConversationId,
    checkForDuplicate,
    showDuplicateWarning,
    showNotification,
    setupKeyboardShortcut
  } = window.ConversationExtractorUtils;
  // Share button selector for language detection
  const SHARE_BUTTON_SELECTOR = '[data-testid="share-chat-button"]';
  // Reference to the injected save button (null until inserted)
  let saveButton = null;
  // Initialize after page loads
  if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', init);
  } else {
    init();
  }
  // Bootstrap: bail out on non-conversation URLs, then give ChatGPT's SPA
  // time to render before the first insertion attempt.
  function init() {
    console.log('[ChatGPT Extractor] Initializing...');
    console.log('[ChatGPT Extractor] In iframe?', window !== window.top);
    console.log('[ChatGPT Extractor] URL:', window.location.href);
    // Only run on conversation pages (not homepage)
    if (!window.location.href.startsWith('https://chatgpt.com/c/')) {
      console.log('[ChatGPT Extractor] Not on conversation page, skipping');
      return;
    }
    // Wait a bit for ChatGPT to fully render; the MutationObserver installed
    // by observeForShareButton retries if this first attempt is too early.
    setTimeout(() => {
      console.log('[ChatGPT Extractor] Attempting to insert save button...');
      insertSaveButton();
      observeForShareButton();
    }, 2000);
  }
// Create save button matching ChatGPT's UI
function createSaveButton() {
// Detect provider's UI language and get matching Save button text
const { text, tooltip } = window.LanguageDetector.getSaveButtonText(SHARE_BUTTON_SELECTOR);
const button = document.createElement('button');
button.id = 'insidebar-save-conversation';
button.className = 'btn relative btn-ghost text-token-text-primary mx-2';
button.setAttribute('aria-label', text);
button.innerHTML = `
<div class="flex w-full items-center justify-center gap-1.5">
<svg width="20" height="20" viewBox="0 0 20 20" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="-ms-0.5 icon">
<path d="M2.66820931,12.6663 L2.66820931,12.5003 C2.66820931,12.1331 2.96598,11.8353 3.33325,11.8353 C3.70052,11.8353 3.99829,12.1331 3.99829,12.5003 L3.99829,12.6663 C3.99829,13.3772 3.9992,13.8707 4.03052,14.2542 C4.0612,14.6298 4.11803,14.8413 4.19849,14.9993 L4.2688,15.1263 C4.44511,15.4137 4.69813,15.6481 5.00024,15.8021 L5.13013,15.8577 C5.2739,15.9092 5.46341,15.947 5.74536,15.97 C6.12888,16.0014 6.62221,16.0013 7.33325,16.0013 L12.6663,16.0013 C13.3771,16.0013 13.8707,16.0014 14.2542,15.97 C14.6295,15.9394 14.8413,15.8825 14.9993,15.8021 L15.1262,15.7308 C15.4136,15.5545 15.6481,15.3014 15.802,14.9993 L15.8577,14.8695 C15.9091,14.7257 15.9469,14.536 15.97,14.2542 C16.0013,13.8707 16.0012,13.3772 16.0012,12.6663 L16.0012,12.5003 C16.0012,12.1332 16.2991,11.8355 16.6663,11.8353 C17.0335,11.8353 17.3313006,12.1331 17.3313006,12.5003 L17.3313006,12.6663 C17.3313006,13.3553 17.3319,13.9124 17.2952,14.3626 C17.2624,14.7636 17.1974,15.1247 17.053,15.4613 L16.9866,15.6038 C16.7211,16.1248 16.3172,16.5605 15.8215,16.8646 L15.6038,16.9866 C15.227,17.1786 14.8206,17.2578 14.3625,17.2952 C13.9123,17.332 13.3553,17.3314006 12.6663,17.3314006 L7.33325,17.3314006 C6.64416,17.3314006 6.0872,17.332 5.63696,17.2952 C5.23642,17.2625 4.87552,17.1982 4.53931,17.054 L4.39673,16.9866 C3.87561,16.7211 3.43911,16.3174 3.13501,15.8216 L3.01294,15.6038 C2.82097,15.2271 2.74177,14.8206 2.70435,14.3626 C2.66758,13.9124 2.66820931,13.3553 2.66820931,12.6663 Z M9.33521,3.33339 L9.33521,10.89489 L7.13696,8.69665 C6.87732,8.43701 6.45625,8.43712 6.19653,8.69665 C5.93684,8.95635 5.93684,9.37738 6.19653,9.63708 L9.52954,12.97106 L9.6311,13.05407 C9.73949,13.12627 9.86809,13.1654 10.0002,13.1654 C10.1763,13.1654 10.3454,13.0955 10.47,12.97106 L13.804,9.63708 C14.0633,9.37741 14.0634,8.95625 13.804,8.69665 C13.5443,8.43695 13.1222,8.43695 12.8625,8.69665 L10.6653,10.89392 L10.6653,3.33339 C10.6651,2.96639 10.3673,2.66849 10.0002,2.66829 C9.63308,2.66829 9.33538,2.96629 9.33521,3.33339 
Z"></path>
</svg>
${text}
</div>
`;
button.title = tooltip;
button.addEventListener('click', handleSaveClick);
return button;
}
// Insert save button after share button
function insertSaveButton() {
// Only insert button on conversation pages
if (!window.location.href.startsWith('https://chatgpt.com/c/')) {
console.log('[ChatGPT Extractor] Not a conversation page, skipping save button');
return;
}
// Check if button already exists
if (document.getElementById('insidebar-save-conversation')) {
console.log('[ChatGPT Extractor] Save button already exists');
return;
}
// Find share button
const shareButton = document.querySelector('[data-testid="share-chat-button"]');
console.log('[ChatGPT Extractor] Looking for share button...');
console.log('[ChatGPT Extractor] Share button found?', !!shareButton);
if (!shareButton) {
console.log('[ChatGPT Extractor] Share button not found yet, will retry');
console.log('[ChatGPT Extractor] All buttons on page:',
Array.from(document.querySelectorAll('button')).map(b => ({
text: b.textContent.substring(0, 30),
testId: b.getAttribute('data-testid'),
classes: b.className
}))
);
return;
}
// Check if conversation exists
const hasConversation = detectConversation();
console.log('[ChatGPT Extractor] Has conversation?', hasConversation);
if (!hasConversation) {
console.log('[ChatGPT Extractor] No conversation detected, skipping button insertion');
return;
}
// Create and insert save button after share button
saveButton = createSaveButton();
shareButton.parentElement.insertBefore(saveButton, shareButton.nextSibling);
console.log('[ChatGPT Extractor] Save button inserted after share button');
}
// Detect if there's a conversation on the page
function detectConversation() {
// Look for conversation container
const conversationContainer = document.querySelector('main [class*="react-scroll-to-bottom"]') ||
document.querySelector('main [role="presentation"]') ||
document.querySelector('main');
if (!conversationContainer) return false;
// Look for messages
const messages = getMessages();
return messages && messages.length > 0;
}
// Observe DOM for share button appearance and conversation changes
function observeForShareButton() {
const observer = new MutationObserver(() => {
// Try to insert button if it doesn't exist
insertSaveButton();
// Remove button if conversation no longer exists
const existingButton = document.getElementById('insidebar-save-conversation');
if (existingButton && !detectConversation()) {
existingButton.remove();
saveButton = null;
}
});
// Observe the entire document for changes
observer.observe(document.body, {
childList: true,
subtree: true
});
}
// Extract conversation title
function getConversationTitle() {
// Priority 1: Extract conversation ID from URL and find matching sidebar link
const urlMatch = window.location.pathname.match(/\/c\/([^\/]+)/);
if (urlMatch) {
const conversationId = urlMatch[1];
const historyList = document.getElementById('history');
if (historyList) {
console.log('[ChatGPT Extractor] Found #history list, looking for conversation ID:', conversationId);
// Find the sidebar link that matches this conversation ID
const matchingLink = historyList.querySelector(`a[href*="${conversationId}"]`);
if (matchingLink) {
const titleSpan = matchingLink.querySelector('span[dir="auto"]');
if (titleSpan) {
const title = titleSpan.textContent.trim();
if (title && !title.includes('New chat') && title.length > 0) {
console.log('[ChatGPT Extractor] Found title from URL-matched sidebar link:', title);
return title;
}
}
// Fallback: use the entire link text content
const title = matchingLink.textContent.trim();
if (title && !title.includes('New chat') && title.length > 0) {
console.log('[ChatGPT Extractor] Found title from URL-matched link (fallback):', title);
return title;
}
}
}
}
// Priority 2: Try to get active conversation using data-active attribute
const historyList = document.getElementById('history');
if (historyList) {
console.log('[ChatGPT Extractor] Found #history list, looking for active item...');
// Look for the active item with data-active attribute
const activeItem = historyList.querySelector('[data-active]');
if (activeItem) {
// Find the span with the title text inside the active item
const titleSpan = activeItem.querySelector('span[dir="auto"]');
if (titleSpan) {
const title = titleSpan.textContent.trim();
if (title && !title.includes('New chat') && title.length > 0) {
console.log('[ChatGPT Extractor] Found title from active item ([data-active] fallback):', title);
return title;
}
}
// Fallback: use the entire text content
const title = activeItem.textContent.trim();
if (title && !title.includes('New chat') && title.length > 0) {
console.log('[ChatGPT Extractor] Found title from active item (content fallback):', title);
return title;
}
} else {
console.log('[ChatGPT Extractor] No [data-active] item found in #history');
}
} else {
console.log('[ChatGPT Extractor] #history list not found');
}
// Priority 3: Try other selectors
const fallbackSelectors = [
'nav [aria-current="page"]',
'h1',
'[data-testid="conversation-title"]',
'nav button[class*="font-semibold"]',
'nav button > div'
];
for (const selector of fallbackSelectors) {
const element = document.querySelector(selector);
if (element && element.textContent.trim()) {
console.log('[ChatGPT Extractor] Found title from fallback selector:', element.textContent.trim());
return element.textContent.trim();
}
}
// Ultimate fallback: Use default
console.log('[ChatGPT Extractor] No title found, using default');
return 'Untitled Conversation';
}
// Extract all messages from the conversation
function getMessages() {
const messages = [];
// Try multiple selectors for message containers
const messageContainers = document.querySelectorAll('[data-message-author-role]') ||
document.querySelectorAll('[class*="group/conversation-turn"]') ||
document.querySelectorAll('main [class*="flex"][class*="gap"]');
messageContainers.forEach(container => {
try {
const message = extractMessageFromContainer(container);
if (message) {
messages.push(message);
}
} catch (error) {
console.warn('[ChatGPT Extractor] Error extracting message:', error);
}
});
return messages;
}
// Extract a single message from its container
function extractMessageFromContainer(container) {
// Determine role (user or assistant)
let role = 'unknown';
const roleAttr = container.getAttribute('data-message-author-role');
if (roleAttr) {
role = roleAttr;
} else {
// Try to detect based on structure/classes
const classes = container.className;
if (classes.includes('user') || container.querySelector('[class*="user"]')) {
role = 'user';
} else if (classes.includes('assistant') || container.querySelector('[class*="assistant"]')) {
role = 'assistant';
}
}
// Get message content
const contentElement = container.querySelector('[class*="markdown"]') ||
container.querySelector('[data-message-id]') ||
container.querySelector('div[class*="whitespace"]') ||
container;
if (!contentElement) return null;
// Extract text content, preserving code blocks
const content = extractContentWithFormatting(contentElement);
if (!content.trim()) return null;
return {
role,
content: content.trim()
};
}
// Extract content while preserving markdown formatting
function extractContentWithFormatting(element) {
// Clone the element so we don't modify the original DOM
const clone = element.cloneNode(true);
return extractMarkdownFromElement(clone);
}
// NOTE: Markdown extraction and formatting functions moved to conversation-extractor-utils.js
// Extract full conversation data
function extractConversation() {
try {
const title = getConversationTitle();
const messages = getMessages();
if (!messages || messages.length === 0) {
throw new Error('No messages found in conversation');
}
const content = formatMessagesAsText(messages);
return {
title,
content,
messages,
timestamp: Date.now(),
url: window.location.href,
provider: 'ChatGPT'
};
} catch (error) {
console.error('[ChatGPT Extractor] Error extracting conversation:', error);
throw error;
}
}
  // Handle save button click.
  // Flow: extract conversation -> dedupe check (skip if content identical,
  // overwrite keeping the original timestamp if content changed) -> send to
  // the background script. The button is disabled while the save is in
  // flight and restored on every exit path.
  async function handleSaveClick(e) {
    if (e) {
      e.preventDefault();
      e.stopPropagation();
    }
    console.log('[ChatGPT Extractor] Save button clicked');
    console.log('[ChatGPT Extractor] chrome object exists?', typeof chrome !== 'undefined');
    console.log('[ChatGPT Extractor] chrome.runtime exists?', typeof chrome?.runtime !== 'undefined');
    if (!saveButton) return;
    // Check if chrome API is available (it disappears when the extension is
    // reloaded while this page stays open)
    if (typeof chrome === 'undefined' || !chrome.runtime) {
      console.error('[ChatGPT Extractor] Chrome extension API not available');
      showNotification('Extension API not available. Try reloading the page.', 'error');
      return;
    }
    // Disable button during save and swap in a "Saving..." placeholder
    saveButton.disabled = true;
    const originalHTML = saveButton.innerHTML;
    saveButton.innerHTML = '<div class="flex w-full items-center justify-center gap-1.5"><span>Saving...</span></div>';
    try {
      const conversation = extractConversation();
      console.log('[ChatGPT Extractor] Extracted conversation:', {
        title: conversation.title,
        messageCount: conversation.messages.length,
        contentLength: conversation.content.length,
        url: conversation.url,
        provider: conversation.provider
      });
      // Generate conversation ID for deduplication
      const conversationId = generateConversationId(conversation.url, conversation.title);
      conversation.conversationId = conversationId;
      console.log('[ChatGPT Extractor] Generated conversation ID:', conversationId);
      // Check for duplicates
      const duplicateCheck = await checkForDuplicate(conversationId);
      console.log('[ChatGPT Extractor] Duplicate check result:', duplicateCheck);
      if (duplicateCheck.isDuplicate) {
        console.log('[ChatGPT Extractor] Duplicate found, comparing content...');
        // Compare content to decide whether to save
        const existingContent = (duplicateCheck.existingConversation.content || '').trim();
        const newContent = (conversation.content || '').trim();
        if (existingContent === newContent) {
          // Content identical - silently skip save
          console.log('[ChatGPT Extractor] Content identical, skipping save');
          saveButton.disabled = false;
          saveButton.innerHTML = originalHTML;
          return;
        }
        // Content changed - overwrite, keeping the original timestamp so the
        // entry does not jump to the top of the saved list
        console.log('[ChatGPT Extractor] Content changed, will overwrite with original timestamp');
        conversation.overwriteId = duplicateCheck.existingConversation.id;
        conversation.timestamp = duplicateCheck.existingConversation.timestamp;
      }
      // Send to background script
      chrome.runtime.sendMessage({
        action: 'saveConversationFromPage',
        payload: conversation
      }, (response) => {
        if (chrome.runtime.lastError) {
          console.error('[ChatGPT Extractor] Chrome runtime error:', chrome.runtime.lastError);
          const errorMsg = chrome.runtime.lastError.message;
          // Provide user-friendly message for context invalidation
          if (errorMsg.includes('Extension context invalidated')) {
            showNotification('Extension was reloaded. Please reload this page and try saving again.', 'error');
          } else {
            showNotification('Failed to save: ' + errorMsg, 'error');
          }
          saveButton.disabled = false;
          saveButton.innerHTML = originalHTML;
          return;
        }
        console.log('[ChatGPT Extractor] Response from background:', response);
        if (response && response.success) {
          console.log('[ChatGPT Extractor] Conversation saved successfully');
          // Success notification now shown in sidebar
        } else {
          console.error('[ChatGPT Extractor] Save failed. Response:', response);
          const errorMsg = response?.error || 'Unknown error';
          showNotification('Failed to save: ' + errorMsg, 'error');
        }
        // Re-enable button
        saveButton.disabled = false;
        saveButton.innerHTML = originalHTML;
      });
    } catch (error) {
      console.error('[ChatGPT Extractor] Error during extraction:', error);
      console.error('[ChatGPT Extractor] Error stack:', error.stack);
      showNotification('Failed to extract conversation: ' + error.message, 'error');
      // Re-enable button
      saveButton.disabled = false;
      saveButton.innerHTML = originalHTML;
    }
  }
  // Setup keyboard shortcut (Ctrl+Shift+S or Cmd+Shift+S)
  setupKeyboardShortcut(handleSaveClick, detectConversation);
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/chatgpt-save-button.css | CSS | /* Save conversation button for ChatGPT pages */
/* Button styling is handled by ChatGPT's native classes */
/* Disabled state while a save is in flight; base styling comes from
   ChatGPT's own btn classes applied by the content script. */
#insidebar-save-conversation:disabled {
  opacity: 0.5;
  cursor: not-allowed;
}
/* Toast notification shown by the content scripts; starts off-screen to the
   right and slides in when .show is added. */
.insidebar-notification {
  position: fixed;
  top: 24px;
  right: 24px;
  z-index: 10000;
  padding: 16px 24px;
  background: #10a37f;
  color: white;
  border-radius: 8px;
  font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
  font-size: 14px;
  font-weight: 500;
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
  opacity: 0;
  transform: translateX(400px);
  transition: all 0.3s ease;
}
/* Added after insertion into the DOM to trigger the slide-in transition */
.insidebar-notification.show {
  opacity: 1;
  transform: translateX(0);
}
/* Severity variants: success (ChatGPT green), error (red), info (blue) */
.insidebar-notification-success {
  background: #10a37f;
}
.insidebar-notification-error {
  background: #ef4444;
}
.insidebar-notification-info {
  background: #3b82f6;
}
/* Responsive adjustments: stretch across narrow viewports */
@media (max-width: 768px) {
  .insidebar-notification {
    top: 16px;
    right: 16px;
    left: 16px;
    max-width: none;
  }
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/claude-history-extractor.js | JavaScript | // Claude Conversation History Extractor
// Extracts current conversation from Claude.ai DOM and saves to extension
//
// IMPORTANT: Requires conversation-extractor-utils.js to be loaded first
(function() {
  'use strict';
  console.log('[Claude Extractor] Script loaded');
  // Import shared utilities from global namespace.
  // conversation-extractor-utils.js must be listed before this file in
  // manifest.json, otherwise this destructuring throws at injection time.
  const {
    extractMarkdownFromElement,
    formatMessagesAsText,
    generateConversationId,
    checkForDuplicate,
    showDuplicateWarning,
    showNotification,
    setupKeyboardShortcut
  } = window.ConversationExtractorUtils;
  // Share button selector for language detection
  const SHARE_BUTTON_SELECTOR = '[data-testid="wiggle-controls-actions-share"]';
  // Reference to the injected save button (null until inserted)
  let saveButton = null;
  // Initialize after page loads
  if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', init);
  } else {
    init();
  }
  // Bootstrap: bail out on non-conversation URLs, then give Claude's SPA
  // time to render before the first insertion attempt.
  function init() {
    console.log('[Claude Extractor] Initializing...');
    console.log('[Claude Extractor] In iframe?', window !== window.top);
    console.log('[Claude Extractor] URL:', window.location.href);
    // Only run on conversation pages (not homepage)
    if (!window.location.href.startsWith('https://claude.ai/chat/')) {
      console.log('[Claude Extractor] Not on conversation page, skipping');
      return;
    }
    // Wait a bit for Claude to fully render; the MutationObserver installed
    // by observeForShareButton retries if this first attempt is too early.
    setTimeout(() => {
      console.log('[Claude Extractor] Attempting to insert save button...');
      insertSaveButton();
      observeForShareButton();
    }, 2000);
  }
// Create save button matching Claude's UI
function createSaveButton() {
// Detect provider's UI language and get matching Save button text
const { text, tooltip } = window.LanguageDetector.getSaveButtonText(SHARE_BUTTON_SELECTOR);
const button = document.createElement('button');
button.id = 'insidebar-save-conversation';
button.className = `inline-flex
items-center
justify-center
relative
shrink-0
can-focus
select-none
disabled:pointer-events-none
disabled:opacity-50
disabled:shadow-none
disabled:drop-shadow-none
text-text-000
font-base-bold
border-0.5
border-border-200
relative
overflow-hidden
transition
duration-100
hover:border-border-300/0
bg-bg-300/0
hover:bg-bg-400
backface-hidden h-8 rounded-md px-3 min-w-[4rem] active:scale-[0.985] whitespace-nowrap !text-xs`;
button.textContent = text;
button.type = 'button';
button.title = tooltip;
button.style.marginLeft = '8px';
button.addEventListener('click', handleSaveClick);
return button;
}
// Insert save button after share button
function insertSaveButton() {
// Only insert button on conversation pages
if (!window.location.href.startsWith('https://claude.ai/chat/')) {
console.log('[Claude Extractor] Not a conversation page, skipping save button');
return;
}
// Check if button already exists
if (document.getElementById('insidebar-save-conversation')) {
console.log('[Claude Extractor] Save button already exists');
return;
}
// Find share button
const shareButton = document.querySelector('[data-testid="wiggle-controls-actions-share"]');
console.log('[Claude Extractor] Looking for share button...');
console.log('[Claude Extractor] Share button found?', !!shareButton);
if (!shareButton) {
console.log('[Claude Extractor] Share button not found yet, will retry');
return;
}
// Check if conversation exists
const hasConversation = detectConversation();
console.log('[Claude Extractor] Has conversation?', hasConversation);
if (!hasConversation) {
console.log('[Claude Extractor] No conversation detected, skipping button insertion');
return;
}
// Create and insert save button after share button
saveButton = createSaveButton();
shareButton.parentElement.insertBefore(saveButton, shareButton.nextSibling);
console.log('[Claude Extractor] Save button inserted after share button');
}
// Detect if there's a conversation on the page
function detectConversation() {
// Look for messages in Claude's structure
const messages = getMessages();
return messages && messages.length > 0;
}
// Observe DOM for share button appearance and conversation changes
function observeForShareButton() {
const observer = new MutationObserver(() => {
// Try to insert button if it doesn't exist
insertSaveButton();
// Remove button if conversation no longer exists
const existingButton = document.getElementById('insidebar-save-conversation');
if (existingButton && !detectConversation()) {
existingButton.remove();
saveButton = null;
}
});
// Observe the entire document for changes
observer.observe(document.body, {
childList: true,
subtree: true
});
}
// Extract conversation title from URL or active chat item
function getConversationTitle() {
// Priority 1: Extract conversation ID from URL and find matching sidebar link
const urlMatch = window.location.pathname.match(/\/chat\/([^\/]+)/);
if (urlMatch) {
const conversationId = urlMatch[1];
// Find the sidebar link that matches this conversation ID
const matchingLink = document.querySelector(`a[href*="/chat/${conversationId}"]`);
if (matchingLink) {
const titleSpan = matchingLink.querySelector('span[class*="truncate"]');
if (titleSpan) {
const title = titleSpan.textContent.trim();
if (title && title.length > 0) {
console.log('[Claude Extractor] Found title from URL-matched sidebar link:', title);
return title;
}
}
}
// Fallback: Try the old method (!bg-bg-400 class)
const activeChat = document.querySelector('a[class*="!bg-bg-400"]');
if (activeChat) {
const titleSpan = activeChat.querySelector('span[class*="truncate"]');
if (titleSpan) {
const title = titleSpan.textContent.trim();
if (title && title.length > 0) {
console.log('[Claude Extractor] Found title from active chat (bg-bg-400 fallback):', title);
return title;
}
}
}
// Ultimate fallback: Use URL-based title
console.log('[Claude Extractor] Falling back to URL-based title');
return `Claude Conversation ${conversationId.substring(0, 8)}`;
}
// No URL match - use default
console.log('[Claude Extractor] No conversation ID in URL, using default');
return 'Untitled Claude Conversation';
}
// Extract all messages from the conversation
function getMessages() {
const messages = [];
// Claude uses different structure - messages are in divs with specific patterns
// Look for message containers
const messageContainers = document.querySelectorAll('[data-test-render-count]');
console.log('[Claude Extractor] Found message containers:', messageContainers.length);
messageContainers.forEach(container => {
try {
const message = extractMessageFromContainer(container);
if (message) {
messages.push(message);
}
} catch (error) {
console.warn('[Claude Extractor] Error extracting message:', error);
}
});
return messages;
}
// Extract a single message from its container
function extractMessageFromContainer(container) {
// Determine role based on container structure
// Claude typically has user messages and assistant messages
let role = 'unknown';
// Check for role indicators in the container
const contentDiv = container.querySelector('div[class*="font-user-message"]');
if (contentDiv) {
role = 'user';
} else {
// Likely assistant message
role = 'assistant';
}
// Get message content - Claude uses different selectors
const contentElement = container.querySelector('[class*="font-claude-message"]') ||
container.querySelector('[class*="font-user-message"]') ||
container;
if (!contentElement) return null;
// Extract markdown from the content
const content = extractMarkdownFromElement(contentElement);
if (!content.trim()) return null;
return {
role,
content: content.trim()
};
}
// NOTE: Markdown extraction and formatting functions moved to conversation-extractor-utils.js
// Extract full conversation data
function extractConversation() {
try {
const title = getConversationTitle();
const messages = getMessages();
if (!messages || messages.length === 0) {
throw new Error('No messages found in conversation');
}
const content = formatMessagesAsText(messages);
return {
title,
content,
messages,
timestamp: Date.now(),
url: window.location.href,
provider: 'Claude'
};
} catch (error) {
console.error('[Claude Extractor] Error extracting conversation:', error);
throw error;
}
}
// Handle save button click (also invoked by the Ctrl/Cmd+Shift+S shortcut).
// Extracts the conversation, deduplicates against already-saved copies, and
// hands the payload to the background script for persistence.
async function handleSaveClick(e) {
  // The keyboard-shortcut path passes an event too, but guard for safety.
  if (e) {
    e.preventDefault();
    e.stopPropagation();
  }
  console.log('[Claude Extractor] Save button clicked');
  // Button is removed when no conversation is detected; nothing to do then.
  if (!saveButton) return;
  // Check if chrome API is available (it disappears after an extension reload).
  if (typeof chrome === 'undefined' || !chrome.runtime) {
    console.error('[Claude Extractor] Chrome extension API not available');
    showNotification('Extension API not available. Try reloading the page.', 'error');
    return;
  }
  // Disable button during save to prevent double submission; remember the
  // label so every exit path below can restore it.
  saveButton.disabled = true;
  const originalText = saveButton.textContent;
  saveButton.textContent = 'Saving...';
  try {
    const conversation = extractConversation();
    console.log('[Claude Extractor] Extracted conversation:', {
      title: conversation.title,
      messageCount: conversation.messages.length,
      contentLength: conversation.content.length,
      url: conversation.url,
      provider: conversation.provider
    });
    // Generate conversation ID for deduplication (URL-derived when possible).
    const conversationId = generateConversationId(conversation.url, conversation.title);
    conversation.conversationId = conversationId;
    // Ask the background script whether this conversation was saved before.
    const duplicateCheck = await checkForDuplicate(conversationId);
    if (duplicateCheck.isDuplicate) {
      // Compare content to decide whether to save
      const existingContent = (duplicateCheck.existingConversation.content || '').trim();
      const newContent = (conversation.content || '').trim();
      if (existingContent === newContent) {
        // Content identical - silently skip save (restore button state first)
        saveButton.disabled = false;
        saveButton.textContent = originalText;
        return;
      }
      // Content changed - automatically overwrite, keeping the original timestamp
      conversation.overwriteId = duplicateCheck.existingConversation.id;
      conversation.timestamp = duplicateCheck.existingConversation.timestamp;
    }
    // Send to background script; the button is re-enabled inside the callback.
    chrome.runtime.sendMessage({
      action: 'saveConversationFromPage',
      payload: conversation
    }, (response) => {
      if (chrome.runtime.lastError) {
        console.error('[Claude Extractor] Chrome runtime error:', chrome.runtime.lastError);
        const errorMsg = chrome.runtime.lastError.message;
        // Provide user-friendly message for context invalidation
        if (errorMsg.includes('Extension context invalidated')) {
          showNotification('Extension was reloaded. Please reload this page and try saving again.', 'error');
        } else {
          showNotification('Failed to save: ' + errorMsg, 'error');
        }
        saveButton.disabled = false;
        saveButton.textContent = originalText;
        return;
      }
      if (response && response.success) {
        console.log('[Claude Extractor] Conversation saved successfully');
        // Success notification now shown in sidebar
      } else {
        const errorMsg = response?.error || 'Unknown error';
        showNotification('Failed to save: ' + errorMsg, 'error');
      }
      // Re-enable button
      saveButton.disabled = false;
      saveButton.textContent = originalText;
    });
  } catch (error) {
    // Extraction failures (e.g. empty conversation) surface here.
    console.error('[Claude Extractor] Error during extraction:', error);
    showNotification('Failed to extract conversation: ' + error.message, 'error');
    saveButton.disabled = false;
    saveButton.textContent = originalText;
  }
}
// Setup keyboard shortcut (Ctrl+Shift+S or Cmd+Shift+S)
setupKeyboardShortcut(handleSaveClick, detectConversation);
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/claude-save-button.css | CSS | /* Claude Save Button Styles */
#insidebar-save-conversation {
  /* Button matches Claude's style */
  font-family: inherit;
  cursor: pointer;
  user-select: none;
}
/* Dim the button while a save is in progress (set via the disabled attribute). */
#insidebar-save-conversation:disabled {
  opacity: 0.5;
  cursor: not-allowed;
}
/* Slight press-in effect on hover, only while enabled. */
#insidebar-save-conversation:hover:not(:disabled) {
  transform: scale(0.985);
}
/* Notification animations: toast slides in from / out to the right edge. */
@keyframes slideIn {
  from {
    transform: translateX(100%);
    opacity: 0;
  }
  to {
    transform: translateX(0);
    opacity: 1;
  }
}
@keyframes slideOut {
  from {
    transform: translateX(0);
    opacity: 1;
  }
  to {
    transform: translateX(100%);
    opacity: 0;
  }
}
/* Base transition used by the notification toast element. */
.insidebar-notification {
  transition: all 0.3s ease;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/conversation-extractor-utils.js | JavaScript | // Shared Utilities for Conversation Extractors
// Common functions used across all AI provider history extractors
// This module eliminates duplication across ChatGPT, Claude, Gemini, Grok, DeepSeek, and Perplexity extractors
//
// NOTE: This file must be loaded BEFORE any *-history-extractor.js files in manifest.json
// It exports functions to window.ConversationExtractorUtils
(function() {
'use strict';
// Create global namespace for shared utilities
window.ConversationExtractorUtils = window.ConversationExtractorUtils || {};
// ============================================================================
// Markdown Extraction Functions
// ============================================================================
/**
 * Recursively convert a DOM subtree into markdown text.
 * Handles fenced/inline code, headings, bold/italic, links, lists,
 * blockquotes, line breaks and paragraphs; any other element simply
 * recurses into its children.
 * @param {Node} node - DOM node to extract from
 * @returns {string} Markdown-formatted text
 */
window.ConversationExtractorUtils.extractMarkdownFromElement = function extractMarkdownFromElement(node) {
  if (!node) return '';
  // Plain text nodes pass through unchanged.
  if (node.nodeType === Node.TEXT_NODE) {
    return node.textContent;
  }
  // Non-element, non-text nodes (comments, etc.) contribute nothing.
  if (node.nodeType !== Node.ELEMENT_NODE) {
    return '';
  }
  const tag = node.tagName.toLowerCase();
  switch (tag) {
    // Fenced code blocks (highest priority).
    case 'pre': {
      const codeElement = node.querySelector('code');
      if (!codeElement) {
        return `\n\`\`\`\n${node.textContent}\n\`\`\`\n\n`;
      }
      const language = codeElement.className.match(/language-(\w+)/)?.[1] || '';
      const codeContent = codeElement.textContent;
      return language
        ? `\n\`\`\`${language}\n${codeContent}\n\`\`\`\n\n`
        : `\n\`\`\`\n${codeContent}\n\`\`\`\n\n`;
    }
    // Inline code.
    case 'code':
      return `\`${node.textContent}\``;
    // Headings h1-h6 become '#'-prefixed lines (one '#' per level).
    case 'h1': case 'h2': case 'h3': case 'h4': case 'h5': case 'h6': {
      const hashes = '#'.repeat(Number(tag.charAt(1)));
      return `\n${hashes} ${getChildrenText(node)}\n\n`;
    }
    // Bold / strong.
    case 'strong': case 'b':
      return `**${getChildrenText(node)}**`;
    // Italic / emphasis.
    case 'em': case 'i':
      return `*${getChildrenText(node)}*`;
    // Links.
    case 'a': {
      const href = node.getAttribute('href') || '';
      const text = getChildrenText(node);
      return `[${text}](${href})`;
    }
    // Unordered list: one "- " line per <li>.
    case 'ul': {
      let listText = '\n';
      for (const li of Array.from(node.children)) {
        if (li.tagName.toLowerCase() === 'li') {
          listText += `- ${extractMarkdownFromElement(li).trim()}\n`;
        }
      }
      return listText + '\n';
    }
    // Ordered list: numbered by child index, matching the original behavior.
    case 'ol': {
      let listText = '\n';
      Array.from(node.children).forEach((li, index) => {
        if (li.tagName.toLowerCase() === 'li') {
          listText += `${index + 1}. ${extractMarkdownFromElement(li).trim()}\n`;
        }
      });
      return listText + '\n';
    }
    // Blockquotes.
    case 'blockquote':
      return `\n> ${getChildrenText(node)}\n\n`;
    // Line breaks.
    case 'br':
      return '\n';
    // Paragraphs.
    case 'p':
      return `${getChildrenMarkdown(node)}\n\n`;
    // <div> and everything else: recurse into children.
    default:
      return getChildrenMarkdown(node);
  }
};
/**
* Helper to get plain text from all children (for simple formatting like headings, bold)
* @param {Node} node - DOM node
* @returns {string} Plain text
*/
function getChildrenText(node) {
return Array.from(node.childNodes)
.map(child => {
if (child.nodeType === Node.TEXT_NODE) {
return child.textContent;
}
return child.textContent || '';
})
.join('');
}
/**
* Helper to get markdown from all children (for complex formatting)
* @param {Node} node - DOM node
* @returns {string} Markdown-formatted text
*/
function getChildrenMarkdown(node) {
return Array.from(node.childNodes)
.map(child => window.ConversationExtractorUtils.extractMarkdownFromElement(child))
.join('');
}
// ============================================================================
// Message Formatting Functions
// ============================================================================
/**
 * Format messages array as text with role labels.
 * 'user'/'assistant' map to fixed labels; any other role is capitalized.
 * Messages are separated by a horizontal-rule divider.
 * @param {Array} messages - Array of {role, content} objects
 * @returns {string} Formatted text
 */
window.ConversationExtractorUtils.formatMessagesAsText = function(messages) {
  const parts = [];
  for (const msg of messages) {
    let roleLabel;
    if (msg.role === 'user') {
      roleLabel = 'User';
    } else if (msg.role === 'assistant') {
      roleLabel = 'Assistant';
    } else {
      roleLabel = msg.role.charAt(0).toUpperCase() + msg.role.slice(1);
    }
    parts.push(`${roleLabel}:\n${msg.content}`);
  }
  return parts.join('\n\n---\n\n');
};
// ============================================================================
// Conversation ID and Duplication Functions
// ============================================================================
/**
 * Generate a unique conversation ID from URL or title hash.
 * Prefers a URL-derived ID (stable across visits) for deduplication;
 * falls back to title + timestamp, which cannot dedupe but avoids collisions.
 * @param {string} url - Conversation URL (if available)
 * @param {string} title - Conversation title
 * @returns {string} Unique conversation ID
 */
window.ConversationExtractorUtils.generateConversationId = function(url, title) {
  if (!url) {
    // No URL: title + timestamp fallback (won't catch duplicates).
    return `${title}_${Date.now()}`;
  }
  // Google AI Mode: derive the ID from the normalized query parameter only.
  if (url.includes('google.com/search') && url.includes('udm=50')) {
    try {
      const query = new URL(url).searchParams.get('q');
      if (query) {
        // Normalize: lowercase, trim, collapse runs of whitespace.
        const normalized = query.toLowerCase().trim().replace(/\s+/g, ' ');
        return `google-ai-${normalized}`;
      }
    } catch (e) {
      console.error('[Extractor Utils] Error parsing Google AI URL:', e);
    }
  }
  // Conversation-ID path segment, e.g.:
  //   ChatGPT: https://chatgpt.com/c/abc123
  //   Claude:  https://claude.ai/chat/abc-123
  const idMatch = url.match(/\/(c|chat)\/([a-zA-Z0-9-]+)/);
  // Otherwise the full URL itself serves as the ID.
  return idMatch ? idMatch[2] : url;
};
/**
 * Ask the background script whether a conversation with this ID already exists.
 * @param {string} conversationId - The conversation ID to check
 * @returns {Promise<Object>} {isDuplicate: boolean, existingConversation: Object|null}
 * @throws {Error} When messaging the background script fails
 */
window.ConversationExtractorUtils.checkForDuplicate = async function(conversationId) {
  try {
    return await chrome.runtime.sendMessage({
      action: 'checkDuplicateConversation',
      payload: { conversationId }
    });
  } catch (error) {
    console.error('[Extractor Utils] Error checking for duplicate:', error);
    // Surface the failure to the caller; with direct database access in the
    // service worker this is not expected to happen.
    throw new Error(`Failed to check for duplicate: ${error.message}`);
  }
};
/**
 * Show duplicate warning modal and get user choice.
 * Fix: the overlay's cssText contained `justify-center;` (an invalid
 * declaration, silently dropped by the browser), so the dialog was never
 * horizontally centered. Replaced with `justify-content: center;`.
 * @param {string} title - Conversation title
 * @param {Object} existingConversation - The existing conversation data
 * @returns {Promise<string>} User choice: 'cancel' or 'overwrite'
 */
window.ConversationExtractorUtils.showDuplicateWarning = function(title, existingConversation) {
  return new Promise((resolve) => {
    // Full-screen overlay that centers the dialog.
    const modal = document.createElement('div');
    modal.id = 'insidebar-duplicate-modal';
    modal.style.cssText = `
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background: rgba(0, 0, 0, 0.6);
display: flex;
align-items: center;
justify-content: center;
z-index: 10000;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
`;
    const existingDate = existingConversation?.timestamp
      ? new Date(existingConversation.timestamp).toLocaleString()
      : 'Unknown date';
    modal.innerHTML = `
<div style="
background: white;
color: #333;
border-radius: 12px;
padding: 24px;
max-width: 480px;
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.3);
">
<h3 style="margin: 0 0 16px 0; font-size: 18px; font-weight: 600;">
${chrome.i18n.getMessage('dlgDuplicateTitle')}
</h3>
<p style="margin: 0 0 8px 0; font-size: 14px; line-height: 1.5;">
${chrome.i18n.getMessage('dlgDuplicateDesc')}
</p>
<p style="margin: 0 0 16px 0; font-size: 13px; color: #666; font-weight: 500;">
"${title}"<br>
<span style="font-size: 12px;">${chrome.i18n.getMessage('dlgDuplicateSaved', [existingDate])}</span>
</p>
<p style="margin: 0 0 20px 0; font-size: 14px; line-height: 1.5;">
${chrome.i18n.getMessage('dlgDuplicateQuestion')}
</p>
<div style="display: flex; gap: 12px;">
<button id="insidebar-dup-cancel" style="
flex: 1;
padding: 10px 16px;
border: 1px solid #ddd;
border-radius: 6px;
background: white;
color: #333;
font-size: 14px;
cursor: pointer;
font-weight: 500;
">
${chrome.i18n.getMessage('btnCancel')}
</button>
<button id="insidebar-dup-overwrite" style="
flex: 1;
padding: 10px 16px;
border: none;
border-radius: 6px;
background: #f59e0b;
color: white;
font-size: 14px;
cursor: pointer;
font-weight: 500;
">
${chrome.i18n.getMessage('btnOverwrite')}
</button>
</div>
</div>
`;
    document.body.appendChild(modal);
    // Remove the modal and settle the promise with the user's choice.
    const cleanup = (choice) => {
      modal.remove();
      resolve(choice);
    };
    document.getElementById('insidebar-dup-cancel').addEventListener('click', () => cleanup('cancel'));
    document.getElementById('insidebar-dup-overwrite').addEventListener('click', () => cleanup('overwrite'));
    // Clicking the dimmed backdrop (outside the dialog) counts as cancel.
    modal.addEventListener('click', (e) => {
      if (e.target === modal) {
        cleanup('cancel');
      }
    });
  });
};
// ============================================================================
// Notification Functions
// ============================================================================
/**
 * Show notification to user on the provider page.
 * NOTE: Success notifications are now shown in sidebar instead;
 * this is primarily for error notifications.
 * The toast animates in from the right, stays 3s, then animates out.
 * @param {string} message - Message to display
 * @param {string} type - 'info', 'success', or 'error'
 */
window.ConversationExtractorUtils.showNotification = function(message, type = 'info') {
  // Background color per notification type (info is the default blue).
  const color = type === 'error' ? '#ef4444' : type === 'success' ? '#10b981' : '#3b82f6';
  const toast = document.createElement('div');
  toast.className = `insidebar-notification insidebar-notification-${type}`;
  toast.style.cssText = `
position: fixed;
top: 24px;
right: 24px;
background: ${color};
color: white;
padding: 16px 24px;
border-radius: 8px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
z-index: 10000;
max-width: 400px;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
font-size: 14px;
line-height: 1.5;
opacity: 0;
transform: translateX(100%);
transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
`;
  toast.textContent = message;
  document.body.appendChild(toast);
  // Slide in on the next tick so the CSS transition fires.
  setTimeout(() => {
    toast.style.opacity = '1';
    toast.style.transform = 'translateX(0)';
  }, 10);
  // After 3s, slide out, then detach once the 300ms transition finishes.
  setTimeout(() => {
    toast.style.opacity = '0';
    toast.style.transform = 'translateX(100%)';
    setTimeout(() => {
      toast.remove();
    }, 300);
  }, 3000);
};
// ============================================================================
// Keyboard Shortcut Function
// ============================================================================
/**
 * Setup keyboard shortcut (Ctrl+Shift+S / Cmd+Shift+S) for saving.
 * @param {Function} callback - Function to call when shortcut is pressed
 * @param {Function} shouldEnable - Optional function to check if shortcut should be enabled
 */
window.ConversationExtractorUtils.setupKeyboardShortcut = function(callback, shouldEnable = null) {
  document.addEventListener('keydown', (event) => {
    const modifierHeld = event.ctrlKey || event.metaKey;
    if (!modifierHeld || !event.shiftKey || event.key !== 'S') return;
    // Swallow the browser's own handling of the combination first.
    event.preventDefault();
    // Honor the optional enablement gate.
    if (shouldEnable && !shouldEnable()) return;
    callback(event);
  });
};
// ============================================================================
// URL Change Observer (for SPAs)
// ============================================================================
/**
 * Observe URL changes for single-page applications.
 * Polls every second (SPAs often don't fire popstate on pushState navigation)
 * and also listens for popstate. Note: the pattern filter applies only to the
 * polling path; popstate always invokes the callback.
 * @param {Function} callback - Function to call when URL changes
 * @param {RegExp|string} urlPattern - Optional pattern to filter URLs
 * @returns {Function} Cleanup function to stop observing and clear resources
 */
window.ConversationExtractorUtils.observeUrlChanges = function(callback, urlPattern = null) {
  let lastUrl = window.location.href;

  // Decide whether a polled URL change should trigger the callback.
  const matchesPattern = (url) => {
    if (!urlPattern) return true;
    if (typeof urlPattern === 'string') return url.includes(urlPattern);
    if (urlPattern instanceof RegExp) return urlPattern.test(url);
    return false;
  };

  const intervalId = setInterval(() => {
    const currentUrl = window.location.href;
    if (currentUrl === lastUrl) return;
    lastUrl = currentUrl;
    if (matchesPattern(currentUrl)) {
      callback(currentUrl);
    }
  }, 1000);

  // Back/forward navigation.
  const popstateHandler = () => {
    callback(window.location.href);
  };
  window.addEventListener('popstate', popstateHandler);

  // Auto-cleanup on page unload.
  const unloadHandler = () => {
    clearInterval(intervalId);
    window.removeEventListener('popstate', popstateHandler);
  };
  window.addEventListener('beforeunload', unloadHandler);

  // Manual cleanup for callers that stop observing early.
  return function cleanup() {
    clearInterval(intervalId);
    window.removeEventListener('popstate', popstateHandler);
    window.removeEventListener('beforeunload', unloadHandler);
  };
};
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/copilot-history-extractor.js | JavaScript | // Microsoft Copilot Conversation History Extractor
// Extracts current conversation from copilot.microsoft.com and bing.com/chat DOM and saves to extension
//
// IMPORTANT: Requires conversation-extractor-utils.js to be loaded first
(function() {
'use strict';
// Import shared utilities from global namespace
const {
extractMarkdownFromElement,
formatMessagesAsText,
generateConversationId,
checkForDuplicate,
showDuplicateWarning,
showNotification,
setupKeyboardShortcut
} = window.ConversationExtractorUtils;
// Locate Copilot's Share button, which differs between page layouts:
//   /pages/ uses: title="Share"
//   /chats/ uses: title="Share conversation, [conversation name]"
function findShareButton() {
  // 1) Exact title match (/pages/ layout).
  const exact = document.querySelector('button[title="Share"]');
  if (exact) return exact;
  // 2) Title prefix match (/chats/ layout).
  const titledButtons = Array.from(document.querySelectorAll('button[title]'));
  const chatsButton = titledButtons.find(btn => btn.title.startsWith('Share conversation'));
  if (chatsButton) return chatsButton;
  // 3) Fallback: "Share" text plus the known share-icon SVG path.
  return titledButtons.find(btn =>
    btn.textContent.includes('Share') &&
    btn.querySelector('svg path[d*="M10.25 3.00011"]')
  );
}
// Kept only as a fallback selector for the UI-language detector.
const SHARE_BUTTON_SELECTOR = 'button[title="Share"]'; // Used for language detection fallback
// Reference to the injected save button (null until inserted).
let saveButton = null;
// Initialize after page loads (run immediately if the DOM is already parsed).
if (document.readyState === 'loading') {
  document.addEventListener('DOMContentLoaded', init);
} else {
  init();
}
// Entry point: inject the save button only on Copilot conversation pages.
function init() {
  const href = window.location.href;
  // Accept /chats/* and /pages/* on copilot.microsoft.com, plus bing.com/chat.
  const onConversationPage =
    href.includes('copilot.microsoft.com/chats/') ||
    href.includes('copilot.microsoft.com/pages/') ||
    href.includes('bing.com/chat');
  if (!onConversationPage) {
    return;
  }
  // Give Copilot's SPA time to finish rendering before touching the DOM.
  setTimeout(() => {
    insertSaveButton();
    observeForShareButton();
  }, 2000);
}
// Create save button matching Copilot's UI.
// Two variants exist because /chats/ and /pages/ style their Share buttons
// differently; the injected button copies the neighboring Share button's
// class string and icon so it blends in. The SVG markup below is copied
// verbatim from Copilot's own share icon — do not reformat it.
function createSaveButton(shareButton) {
  // Detect provider's UI language and get matching Save button text.
  const { text, tooltip } = window.LanguageDetector.getSaveButtonText(SHARE_BUTTON_SELECTOR);
  const button = document.createElement('button');
  button.id = 'insidebar-save-conversation';
  button.setAttribute('type', 'button');
  button.setAttribute('data-spatial-navigation-autofocus', 'false');
  // Detect page type and match Share button styling.
  const isChatsPage = window.location.href.includes('/chats/');
  const isPagesPage = window.location.href.includes('/pages/');
  if (isChatsPage && shareButton) {
    // Match /chats/ Share button style (label shown inline next to the icon).
    button.className = 'relative flex items-center text-foreground-800 fill-foreground-800 active:text-foreground-600 active:fill-foreground-600 dark:active:text-foreground-650 dark:active:fill-foreground-650 bg-transparent safe-hover:bg-black/5 active:bg-black/3 dark:safe-hover:bg-black/30 dark:active:bg-black/20 text-sm justify-start min-h-9 min-w-9 px-2.5 py-1 gap-x-1.5 rounded-xl after:rounded-xl after:absolute after:inset-0 after:pointer-events-none after:border after:border-transparent after:contrast-more:border-2 outline-2 outline-offset-1 focus-visible:z-[1] focus-visible:outline focus-visible:outline-stroke-900';
    button.setAttribute('aria-label', text);
    button.title = `${text} conversation`;
    button.innerHTML = `
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="me-1 size-4">
<path d="M13.75,3.00011 C13.3358,3.00011 13.0001,3.33596 13,3.75011 C13,4.16432 13.3358,4.50011 13.75,4.50011 L17.25,4.50011 C18.49259,4.50011 19.49992,5.50753 19.5,6.75011 L19.5,17.2501 L19.48828,17.4806 C19.37286,18.6149 18.41483,19.50011 17.25,19.50011 L6.75,19.50011 C5.5074,19.50011 4.5,18.4927 4.5,17.2501 L4.5,15.2501 C4.4999,14.836 4.1642,14.50011 3.75,14.50011 C3.3358,14.50011 3,14.836 3,15.2501 L3,17.2501 C3,19.3212 4.6789,21.00011 6.75,21.00011 L17.25,21.00011 C19.25622,21.00011 20.89449,19.4247 20.99512,17.4435 L21,17.2501 L21,6.75011 C21,4.6791 19.32102,3.00011 17.25,3.00011 L13.75,3.00011 Z M16.7820044,9.9365879 C16.9043344,10.2026879 16.8602444,10.5159879 16.6697044,10.7383879 L10.6697044,17.7383879 C10.5322444,17.8986879 10.3329144,17.9938879 10.1218544,18.0000879 C9.91087436,18.0060879 9.70686436,17.9224879 9.56032436,17.7705879 L2.81037436,10.7705879 C2.60167436,10.5540879 2.54247436,10.2338879 2.65997436,9.9570879 C2.77757436,9.6802879 3.04947436,9.5000879 3.35037436,9.5000879 L6.32597436,9.5000879 C6.26237436,8.4857879 6.05317436,7.5295879 5.55737436,6.5605879 C4.97597436,5.4241879 3.97587436,4.2165079 2.25467436,2.8711779 L1.90017436,2.5996979 C1.64217436,2.4059679 1.53637436,2.0689179 1.63847436,1.7627879 C1.74057436,1.4565479 2.02757436,1.2500879 2.35037436,1.2500879 C5.20767436,1.2500879 7.93947436,1.9366779 9.97829436,3.4180579 C11.8715944,4.7938879 13.1249344,6.8288879 13.3220444,9.5000879 L16.1003644,9.5000879 L16.2087644,9.5078879 C16.4575144,9.5441879 16.6749444,9.7037879 16.7820044,9.9365879 Z M12.64609,10.788175 C12.23188,10.788175 11.89609,10.452375 11.89609,10.038175 C11.89609,7.481475 10.82612,5.643175 9.14316,4.420015 C7.935,3.542045 6.386,2.966115 4.65,2.703215 C5.6974,3.703515 6.4351,4.680155 6.9391,5.665175 C7.6979,7.148275 7.8961,8.589175 7.8961,10.038175 C7.8961,10.237075 7.817,10.427775 7.6764,10.568475 C7.5357,10.709075 7.345,10.788175 7.1461,10.788175 L5.1617,10.788175 L10.11387,15.922975 L14.51621,10.788175 
L12.64609,10.788175 Z"></path>
</svg>${text}
`;
  } else {
    // Match /pages/ Share button style (default; label hidden on small screens).
    button.className = 'relative flex items-center text-foreground-800 fill-foreground-800 active:text-foreground-600 active:fill-foreground-600 dark:active:text-foreground-650 dark:active:fill-foreground-650 bg-transparent safe-hover:bg-black/5 active:bg-black/3 dark:safe-hover:bg-white/8 dark:active:bg-white/5 text-sm justify-center min-h-9 min-w-9 py-1 gap-x-1.5 rounded-xl after:rounded-xl after:absolute after:inset-0 after:pointer-events-none after:border after:border-transparent after:contrast-more:border-2 outline-2 outline-offset-1 focus-visible:z-[1] focus-visible:outline focus-visible:outline-stroke-900 shrink-0 px-1';
    button.setAttribute('aria-label', text);
    button.title = text;
    button.innerHTML = `
<svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="size-5">
<path d="M13.75,3.00011 C13.3358,3.00011 13.0001,3.33596 13,3.75011 C13,4.16432 13.3358,4.50011 13.75,4.50011 L17.25,4.50011 C18.49259,4.50011 19.49992,5.50753 19.5,6.75011 L19.5,17.2501 L19.48828,17.4806 C19.37286,18.6149 18.41483,19.50011 17.25,19.50011 L6.75,19.50011 C5.5074,19.50011 4.5,18.4927 4.5,17.2501 L4.5,15.2501 C4.4999,14.836 4.1642,14.50011 3.75,14.50011 C3.3358,14.50011 3,14.836 3,15.2501 L3,17.2501 C3,19.3212 4.6789,21.00011 6.75,21.00011 L17.25,21.00011 C19.25622,21.00011 20.89449,19.4247 20.99512,17.4435 L21,17.2501 L21,6.75011 C21,4.6791 19.32102,3.00011 17.25,3.00011 L13.75,3.00011 Z M16.7820044,9.9365879 C16.9043344,10.2026879 16.8602444,10.5159879 16.6697044,10.7383879 L10.6697044,17.7383879 C10.5322444,17.8986879 10.3329144,17.9938879 10.1218544,18.0000879 C9.91087436,18.0060879 9.70686436,17.9224879 9.56032436,17.7705879 L2.81037436,10.7705879 C2.60167436,10.5540879 2.54247436,10.2338879 2.65997436,9.9570879 C2.77757436,9.6802879 3.04947436,9.5000879 3.35037436,9.5000879 L6.32597436,9.5000879 C6.26237436,8.4857879 6.05317436,7.5295879 5.55737436,6.5605879 C4.97597436,5.4241879 3.97587436,4.2165079 2.25467436,2.8711779 L1.90017436,2.5996979 C1.64217436,2.4059679 1.53637436,2.0689179 1.63847436,1.7627879 C1.74057436,1.4565479 2.02757436,1.2500879 2.35037436,1.2500879 C5.20767436,1.2500879 7.93947436,1.9366779 9.97829436,3.4180579 C11.8715944,4.7938879 13.1249344,6.8288879 13.3220444,9.5000879 L16.1003644,9.5000879 L16.2087644,9.5078879 C16.4575144,9.5441879 16.6749444,9.7037879 16.7820044,9.9365879 Z M12.64609,10.788175 C12.23188,10.788175 11.89609,10.452375 11.89609,10.038175 C11.89609,7.481475 10.82612,5.643175 9.14316,4.420015 C7.935,3.542045 6.386,2.966115 4.65,2.703215 C5.6974,3.703515 6.4351,4.680155 6.9391,5.665175 C7.6979,7.148275 7.8961,8.589175 7.8961,10.038175 C7.8961,10.237075 7.817,10.427775 7.6764,10.568475 C7.5357,10.709075 7.345,10.788175 7.1461,10.788175 L5.1617,10.788175 L10.11387,15.922975 L14.51621,10.788175 
L12.64609,10.788175 Z"></path>
</svg>
<span class="mx-1.5 hidden sm:inline">${text}</span>
`;
  }
  button.addEventListener('click', handleSaveClick);
  return button;
}
// Insert the save button immediately after Copilot's Share button.
// No-ops when already injected, when Share is missing, or when there is
// no conversation to save.
function insertSaveButton() {
  // Already injected?
  if (document.getElementById('insidebar-save-conversation')) {
    return;
  }
  // Find share button (handles both /chats/ and /pages/ layouts).
  const shareButton = findShareButton();
  if (!shareButton) {
    return;
  }
  // Only offer saving when there is actual content on the page.
  if (!detectConversation()) {
    return;
  }
  saveButton = createSaveButton(shareButton);
  shareButton.parentElement.insertBefore(saveButton, shareButton.nextSibling);
}
// Detect whether the current page has saveable content.
function detectConversation() {
  // /pages/ documents: any editable surface counts as content.
  if (window.location.href.includes('/pages/')) {
    const editor =
      document.querySelector('[contenteditable="true"]') ||
      document.querySelector('[role="textbox"]') ||
      document.querySelector('textarea');
    return !!editor;
  }
  // /chats/ conversations: require at least one extracted message.
  const messages = getMessages();
  return !!(messages && messages.length > 0);
}
// Watch the DOM for the Share button appearing and for the conversation
// disappearing, keeping the injected save button in sync with the SPA.
function observeForShareButton() {
  const observer = new MutationObserver(() => {
    // (Re-)insert the button whenever the Share button shows up.
    insertSaveButton();
    // Drop the button if the conversation has gone away.
    const existing = document.getElementById('insidebar-save-conversation');
    if (existing && !detectConversation()) {
      existing.remove();
      saveButton = null;
    }
  });
  // Watch the whole document: Copilot re-renders large subtrees.
  observer.observe(document.body, {
    childList: true,
    subtree: true
  });
}
// Extract the conversation title, trying specific selectors first, then
// falling back to a truncated first user message, then a default label.
function getConversationTitle() {
  // Candidate selectors, most specific first. The sidebar chat list renders
  // titles in <p class="truncate" title="...">.
  const titleSelectors = [
    'p.truncate[title]', // Primary selector for conversation title
    'h1',
    '[data-testid="conversation-title"]',
    'header h1',
    '[role="heading"]'
  ];
  for (const selector of titleSelectors) {
    const element = document.querySelector(selector);
    const candidate = element && element.textContent.trim();
    if (candidate) {
      console.log('[Copilot Extractor] Found title from selector:', selector, '->', candidate);
      return candidate;
    }
  }
  // Fallback: first 50 chars of the first user message.
  const messages = getMessages();
  const firstUserMessage = messages && messages.find(m => m.role === 'user');
  if (firstUserMessage) {
    const suffix = firstUserMessage.content.length > 50 ? '...' : '';
    const title = firstUserMessage.content.substring(0, 50) + suffix;
    console.log('[Copilot Extractor] Using first user message as title:', title);
    return title;
  }
  console.log('[Copilot Extractor] No title found, using default');
  return 'Untitled Conversation';
}
// Extract all messages from the conversation, trying increasingly generic
// container selectors until one matches.
function getMessages() {
  const messages = [];
  // 1) Explicit message-id attributes.
  let containers = document.querySelectorAll('[data-message-id]');
  // 2) Class-name heuristic.
  if (!containers || containers.length === 0) {
    containers = document.querySelectorAll('[class*="message"]');
  }
  // 3) Role-based elements inside the main conversation area.
  if (!containers || containers.length === 0) {
    const root = document.querySelector('main') || document.body;
    containers = root.querySelectorAll('[role="article"], [role="region"] > div');
  }
  containers.forEach(container => {
    try {
      const message = extractMessageFromContainer(container);
      if (message) {
        messages.push(message);
      }
    } catch (error) {
      // A single broken container should not abort the whole extraction.
      console.warn('[Copilot Extractor] Error extracting message:', error);
    }
  });
  console.log('[Copilot Extractor] Found', messages.length, 'messages');
  return messages;
}
// Extract a single message from its container element.
// Returns {role, content} or null when the container holds no usable text.
function extractMessageFromContainer(container) {
  let role = 'unknown';
  // 1) Explicit role metadata on the container.
  const roleAttr = container.getAttribute('data-message-role') ||
    container.getAttribute('data-author') ||
    container.getAttribute('data-sender') ||
    container.getAttribute('role');
  if (roleAttr) {
    const roleLower = roleAttr.toLowerCase();
    if (roleLower.includes('user') || roleLower.includes('human')) {
      role = 'user';
    } else if (roleLower.includes('assistant') || roleLower.includes('bot') ||
               roleLower.includes('copilot') || roleLower.includes('ai')) {
      role = 'assistant';
    }
  }
  // 2) Heuristics on class names / markup when metadata is absent.
  if (role === 'unknown') {
    const classes = container.className.toLowerCase();
    const html = container.innerHTML.toLowerCase();
    if (classes.includes('user') || html.includes('user')) {
      role = 'user';
    } else if (classes.includes('assistant') || classes.includes('bot') ||
               classes.includes('copilot') || html.includes('copilot')) {
      role = 'assistant';
    }
  }
  // Locate the content element, falling back to the container itself.
  const contentElement =
    container.querySelector('[class*="markdown"]') ||
    container.querySelector('[class*="message-content"]') ||
    container.querySelector('[data-content]') ||
    container.querySelector('p, div[class*="text"]') ||
    container;
  // Extract text, preserving code blocks and other markdown structure.
  const content = extractContentWithFormatting(contentElement);
  if (!content || !content.trim()) return null;
  return {
    // Messages whose role could not be determined default to assistant.
    role: role === 'unknown' ? 'assistant' : role,
    content: content.trim()
  };
}
// Extract content while preserving markdown formatting
function extractContentWithFormatting(element) {
// Clone the element so we don't modify the original DOM
const clone = element.cloneNode(true);
return extractMarkdownFromElement(clone);
}
// Gather the full conversation payload for saving.
// /pages/ documents are saved as a single assistant message; /chats/
// conversations use the extracted message list. Throws when nothing
// extractable is found.
function extractConversation() {
  try {
    const title = getConversationTitle();
    // Pages mode: the document body is the content.
    if (window.location.href.includes('/pages/')) {
      const pageContent = extractPageContent();
      if (!pageContent || !pageContent.trim()) {
        throw new Error('No content found on page');
      }
      return {
        title,
        content: pageContent,
        messages: [{
          role: 'assistant',
          content: pageContent
        }],
        timestamp: Date.now(),
        url: window.location.href,
        provider: 'Microsoft Copilot'
      };
    }
    // Chat mode: extract the message list.
    const messages = getMessages();
    if (!messages || messages.length === 0) {
      throw new Error('No messages found in conversation');
    }
    return {
      title,
      content: formatMessagesAsText(messages),
      messages,
      timestamp: Date.now(),
      url: window.location.href,
      provider: 'Microsoft Copilot'
    };
  } catch (error) {
    console.error('[Copilot Extractor] Error extracting conversation:', error);
    throw error;
  }
}
// Pull the editable document body out of a Copilot Pages view as text.
function extractPageContent() {
  // Try the likeliest containers from most to least specific.
  const candidates = ['[contenteditable="true"]', '[role="textbox"]', 'main'];
  let contentArea = null;
  for (const selector of candidates) {
    contentArea = document.querySelector(selector);
    if (contentArea) break;
  }
  if (!contentArea) {
    console.warn('[Copilot Extractor] No content area found on page');
    return '';
  }
  // Clone so markdown extraction cannot disturb the live editor.
  return extractMarkdownFromElement(contentArea.cloneNode(true));
}
// Handle clicks on the injected save button: extract the conversation,
// deduplicate against previously saved copies, then hand the payload to the
// background script for persistence.
async function handleSaveClick(e) {
  if (e) {
    e.preventDefault();
    e.stopPropagation();
  }
  console.log('[Copilot Extractor] Save button clicked');
  if (!saveButton) return;
  // Bail out early when the extension context is gone (e.g. after a reload).
  if (typeof chrome === 'undefined' || !chrome.runtime) {
    console.error('[Copilot Extractor] Chrome extension API not available');
    showNotification('Extension API not available. Try reloading the page.', 'error');
    return;
  }
  // Disable the button while the save is in flight, remembering its markup
  // so it can be restored afterwards.
  saveButton.disabled = true;
  const originalHTML = saveButton.innerHTML;
  saveButton.innerHTML = '<div class="flex items-center gap-2"><span>Saving...</span></div>';
  // Single place to re-enable the button (was duplicated four times before).
  const restoreButton = () => {
    saveButton.disabled = false;
    saveButton.innerHTML = originalHTML;
  };
  try {
    const conversation = extractConversation();
    console.log('[Copilot Extractor] Extracted conversation:', {
      title: conversation.title,
      messageCount: conversation.messages.length,
      contentLength: conversation.content.length,
      url: conversation.url,
      provider: conversation.provider
    });
    // Stable id derived from URL + title so re-saves update in place.
    const conversationId = generateConversationId(conversation.url, conversation.title);
    conversation.conversationId = conversationId;
    console.log('[Copilot Extractor] Generated conversation ID:', conversationId);
    const duplicateCheck = await checkForDuplicate(conversationId);
    console.log('[Copilot Extractor] Duplicate check result:', duplicateCheck);
    if (duplicateCheck.isDuplicate) {
      console.log('[Copilot Extractor] Duplicate found, comparing content...');
      const existingContent = (duplicateCheck.existingConversation.content || '').trim();
      const newContent = (conversation.content || '').trim();
      if (existingContent === newContent) {
        // Nothing changed - silently skip the save.
        console.log('[Copilot Extractor] Content identical, skipping save');
        restoreButton();
        return;
      }
      // Content changed - overwrite the stored copy, keeping its original timestamp.
      console.log('[Copilot Extractor] Content changed, will overwrite with original timestamp');
      conversation.overwriteId = duplicateCheck.existingConversation.id;
      conversation.timestamp = duplicateCheck.existingConversation.timestamp;
    }
    // Persist via the background script (content scripts cannot write storage
    // used by the sidebar directly).
    chrome.runtime.sendMessage({
      action: 'saveConversationFromPage',
      payload: conversation
    }, (response) => {
      if (chrome.runtime.lastError) {
        console.error('[Copilot Extractor] Chrome runtime error:', chrome.runtime.lastError);
        const errorMsg = chrome.runtime.lastError.message;
        // Context invalidation happens when the extension reloads underneath the page.
        if (errorMsg.includes('Extension context invalidated')) {
          showNotification('Extension was reloaded. Please reload this page and try saving again.', 'error');
        } else {
          showNotification('Failed to save: ' + errorMsg, 'error');
        }
        restoreButton();
        return;
      }
      console.log('[Copilot Extractor] Response from background:', response);
      if (response && response.success) {
        console.log('[Copilot Extractor] Conversation saved successfully');
        // Success notification now shown in sidebar
      } else {
        console.error('[Copilot Extractor] Save failed. Response:', response);
        const errorMsg = response?.error || 'Unknown error';
        showNotification('Failed to save: ' + errorMsg, 'error');
      }
      restoreButton();
    });
  } catch (error) {
    console.error('[Copilot Extractor] Error during extraction:', error);
    console.error('[Copilot Extractor] Error stack:', error.stack);
    showNotification('Failed to extract conversation: ' + error.message, 'error');
    restoreButton();
  }
}
// Keyboard shortcut (Ctrl+Shift+S / Cmd+Shift+S) triggers the same save flow
// as the injected button.
setupKeyboardShortcut(handleSaveClick, detectConversation);
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/copilot-save-button.css | CSS | /* Save conversation button for Microsoft Copilot pages */
/* Button styling is handled by Copilot's native Tailwind utility classes */
/* Disabled state while a save is in flight; the rest of the button styling
   comes from Copilot's native Tailwind utility classes. */
#insidebar-save-conversation:disabled {
  opacity: 0.5;
  cursor: not-allowed;
}
/* Reproduce Copilot's escaped safe-hover utilities so hover feedback only
   applies on devices with a real pointer. */
@media (hover: hover) {
  #insidebar-save-conversation.safe-hover\:bg-black\/5:hover {
    background-color: rgba(0, 0, 0, 0.05);
  }
  #insidebar-save-conversation.dark\:safe-hover\:bg-white\/8:hover {
    background-color: rgba(255, 255, 255, 0.08);
  }
}
/* Toast notification shown by the extractor; starts transparent and
   off-screen to the right, slides in when .show is added. */
.insidebar-notification {
  position: fixed;
  top: 24px;
  right: 24px;
  z-index: 10000;
  padding: 16px 24px;
  background: #0078d4;
  color: white;
  border-radius: 8px;
  font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
  font-size: 14px;
  font-weight: 500;
  box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
  opacity: 0;
  transform: translateX(400px);
  transition: all 0.3s ease;
}
.insidebar-notification.show {
  opacity: 1;
  transform: translateX(0);
}
/* Per-type background variants. */
.insidebar-notification-success {
  background: #0078d4;
}
.insidebar-notification-error {
  background: #ef4444;
}
.insidebar-notification-info {
  background: #3b82f6;
}
/* On narrow screens, stretch the toast across the viewport width. */
@media (max-width: 768px) {
  .insidebar-notification {
    top: 16px;
    right: 16px;
    left: 16px;
    max-width: none;
  }
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/deepseek-history-extractor.js | JavaScript | // DeepSeek Conversation History Extractor
// Extracts current conversation from DeepSeek DOM and saves to extension
//
// IMPORTANT: Requires conversation-extractor-utils.js and language-detector.js to be loaded first
(function() {
  'use strict';
  // Shared helpers provided by conversation-extractor-utils.js (must be
  // loaded before this script, per the header note).
  const {
    extractMarkdownFromElement,
    formatMessagesAsText,
    generateConversationId,
    checkForDuplicate,
    showDuplicateWarning,
    showNotification,
    setupKeyboardShortcut,
    observeUrlChanges
  } = window.ConversationExtractorUtils;
  // Share button selector for language detection.
  // DeepSeek has no text-based share button; null makes the language
  // detector fall back to the document language.
  const SHARE_BUTTON_SELECTOR = null;
  // Reference to the injected save button, or null when it is not in the DOM.
  let saveButton = null;
  // Initialize once the DOM is ready (the script may run before or after
  // DOMContentLoaded depending on injection timing).
  if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', init);
  } else {
    init();
  }
  // Entry point: inject the save button on conversation pages only.
  function init() {
    console.log('[DeepSeek Extractor] Initializing...');
    console.log('[DeepSeek Extractor] In iframe?', window !== window.top);
    console.log('[DeepSeek Extractor] URL:', window.location.href);
    // Only run on conversation pages (not the homepage).
    if (!window.location.href.startsWith('https://chat.deepseek.com/a/chat/')) {
      console.log('[DeepSeek Extractor] Not on conversation page, skipping');
      return;
    }
    // Give DeepSeek's SPA time to render before touching the DOM.
    setTimeout(() => {
      console.log('[DeepSeek Extractor] Attempting to insert save button...');
      insertSaveButton();
      observeForChanges();
    }, 2000);
  }
// Create save button matching DeepSeek's icon button style
function createSaveButton() {
// Detect provider's UI language and get matching Save button text
const { text, tooltip, lang } = window.LanguageDetector.getSaveButtonText(SHARE_BUTTON_SELECTOR);
console.log('[DeepSeek Extractor] Creating Save button in language:', lang);
const button = document.createElement('div');
button.id = 'insidebar-save-conversation';
button.className = 'ds-icon-button _57370c5 _5dedc1e';
button.setAttribute('tabindex', '0');
button.setAttribute('role', 'button');
button.setAttribute('aria-disabled', 'false');
button.setAttribute('aria-label', text);
button.style.cssText = '--hover-size: 34px; width: 34px; height: 34px;';
button.title = tooltip;
// Create button structure matching share button exactly
button.innerHTML = `
<div class="ds-icon-button__hover-bg"></div>
<div class="ds-icon" style="font-size: 22px; width: 22px; height: 22px;">
<svg width="22" height="22" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M2.13457,10.1331 L2.13457,10.0002 C2.13457,9.70646 2.37278,9.46826 2.6666,9.46826 C2.96042,9.46826 3.19863,9.70646 3.19863,10.0002 L3.19863,10.1331 C3.19863,10.7018 3.19936,11.0966 3.22442,11.4034 C3.24896,11.7038 3.29442,11.8731 3.35879,11.9994 L3.41504,12.101 C3.55609,12.331 3.75850,12.5185 4.00019,12.6417 L4.10410,12.686 C4.21912,12.7274 4.37073,12.7576 4.59629,12.776 C4.90310,12.8011 5.29777,12.801 5.8666,12.801 L10.1330,12.801 C10.7017,12.801 11.0966,12.8011 11.4034,12.776 C11.7036,12.7515 11.873,12.706 11.9994,12.6417 L12.1010,12.5846 C12.3309,12.4436 12.5185,12.2411 12.6416,11.9994 L12.6862,11.8956 C12.7273,11.7806 12.7575,11.6288 12.776,11.4034 C12.8010,11.0966 12.801,10.7018 12.801,10.1330 L12.801,10.0002 C12.801,9.70656 13.0393,9.46836 13.3330,9.46826 C13.6268,9.46826 13.8650,9.70646 13.8650,10.0002 L13.8650,10.1331 C13.8650,10.6842 13.8655,11.1299 13.8362,11.4901 C13.8099,11.8109 13.7579,12.0998 13.6424,12.369 L13.5893,12.483 C13.3769,12.8998 13.0538,13.2484 12.6572,13.4917 L12.483,13.5893 C12.1816,13.7429 11.8565,13.8062 11.49,13.8362 C11.1298,13.8656 10.6842,13.8651 10.1330,13.8651 L5.8666,13.8651 C5.31533,13.8651 4.86976,13.8656 4.50957,13.8362 C4.18914,13.81 3.90042,13.7586 3.63145,13.6432 L3.51738,13.5893 C3.10049,13.3769 2.75129,13.0539 2.50801,12.6573 L2.41035,12.483 C2.25678,12.1817 2.19342,11.8565 2.16348,11.4901 C2.13406,11.1299 2.13457,10.6842 2.13457,10.1331 Z M7.46817,2.66671 L7.46817,8.71591 L5.70957,6.95732 C5.50186,6.74961 5.165,6.7497 4.95722,6.95732 C4.74947,7.16508 4.74947,7.5019 4.95722,7.70966 L7.62363,10.37685 L7.70488,10.44326 C7.79159,10.50102 7.89447,10.53232 8.00016,10.53232 C8.13704,10.53232 8.27632,10.47640 8.37760,10.37685 L11.0432,7.70966 C11.2506,7.50193 11.2507,7.165 11.0432,6.95732 C10.8354,6.74956 10.4978,6.74956 10.29,6.95732 L8.53224,8.71514 L8.53224,2.66671 C8.53208,2.37311 8.29384,2.13479 8.00016,2.13463 C7.70646,2.13463 7.4683,2.37303 7.46817,2.66671 Z" fill="currentColor" fill-rule="nonzero"></path>
</svg>
</div>
`;
button.addEventListener('click', handleSaveClick);
return button;
}
// Insert save button after the share/upload button in main conversation area
function insertSaveButton() {
// Only insert button on conversation pages
if (!window.location.href.startsWith('https://chat.deepseek.com/a/chat/')) {
console.log('[DeepSeek Extractor] Not a conversation page, skipping save button');
return;
}
// Check if button already exists
if (document.getElementById('insidebar-save-conversation')) {
console.log('[DeepSeek Extractor] Save button already exists');
return;
}
// Find the share/upload button (has upload/share icon SVG)
const shareButtons = document.querySelectorAll('.ds-icon-button._57370c5._5dedc1e');
console.log('[DeepSeek Extractor] Looking for share button...');
console.log('[DeepSeek Extractor] Found icon buttons:', shareButtons.length);
let shareButton = null;
for (const btn of shareButtons) {
// Check if it has the upload/share icon
const svg = btn.querySelector('svg path[d*="M15.7484"]');
if (svg) {
shareButton = btn;
break;
}
}
if (!shareButton) {
console.log('[DeepSeek Extractor] Share button not found yet, will retry');
return;
}
console.log('[DeepSeek Extractor] Share button found');
// Check if conversation exists
const hasConversation = detectConversation();
console.log('[DeepSeek Extractor] Has conversation?', hasConversation);
// If share button exists, assume there's a conversation
if (!hasConversation) {
console.log('[DeepSeek Extractor] No conversation detected via messages, but share button exists');
console.log('[DeepSeek Extractor] Inserting button anyway - messages may load later');
}
// Get the parent container
const parentContainer = shareButton.parentElement;
const shareStyle = window.getComputedStyle(shareButton);
// Create save button with exact same positioning as share button
saveButton = createSaveButton();
// If share button is absolutely positioned, we need to position save button accordingly
if (shareStyle.position === 'absolute' || shareStyle.position === 'fixed') {
saveButton.style.position = shareStyle.position;
saveButton.style.top = shareStyle.top;
// Calculate right position: share button's right + button width + gap
const rightValue = parseFloat(shareStyle.right) || 0;
saveButton.style.right = (rightValue + 34 + 8) + 'px'; // 34px button width + 8px gap
}
// Insert after share button in DOM
if (shareButton.nextSibling) {
parentContainer.insertBefore(saveButton, shareButton.nextSibling);
} else {
parentContainer.appendChild(saveButton);
}
console.log('[DeepSeek Extractor] Save button inserted after share button');
}
// Detect if there's a conversation on the page
function detectConversation() {
const messages = getMessages();
return messages && messages.length > 0;
}
// Observe DOM for changes
function observeForChanges() {
const observer = new MutationObserver(() => {
insertSaveButton();
const existingButton = document.getElementById('insidebar-save-conversation');
if (existingButton) {
if (!window.location.href.startsWith('https://chat.deepseek.com/a/chat/')) {
existingButton.remove();
saveButton = null;
}
}
});
observer.observe(document.body, {
childList: true,
subtree: true
});
}
// Extract conversation title from current chat in sidebar
function getConversationTitle() {
const currentChat = document.querySelector('a._546d736.b64fb9ae');
if (currentChat) {
const titleDiv = currentChat.querySelector('.c08e6e93');
if (titleDiv) {
const title = titleDiv.textContent.trim();
if (title && title.length > 0) {
return title;
}
}
}
// Fallback: Try to extract from URL
const urlMatch = window.location.pathname.match(/\/a\/chat\/s\/([^\/]+)/);
if (urlMatch) {
return `DeepSeek Conversation ${urlMatch[1].substring(0, 8)}`;
}
return 'Untitled DeepSeek Conversation';
}
// Extract all messages from the conversation using .ds-message selector
function getMessages() {
const messages = [];
// Use stable ds-message selector (design system class)
const messageContainers = document.querySelectorAll('.ds-message');
console.log('[DeepSeek Extractor] Found message containers:', messageContainers.length);
messageContainers.forEach((container, index) => {
try {
const message = extractMessageFromContainer(container, index);
if (message) {
messages.push(message);
}
} catch (error) {
console.warn('[DeepSeek Extractor] Error extracting message:', error);
}
});
return messages;
}
// Extract a single message from its container using .ds-message > div pattern
function extractMessageFromContainer(container, index) {
// Get the first child div as content wrapper (based on investigation)
const contentDiv = container.querySelector(':scope > div');
if (!contentDiv) {
console.warn('[DeepSeek Extractor] No content div found in message container');
return null;
}
// Extract text content
const content = extractMarkdownFromElement(contentDiv);
if (!content.trim()) {
console.warn('[DeepSeek Extractor] Empty content extracted');
return null;
}
// Determine role using alternating pattern (user, assistant, user, assistant...)
// This is a fallback strategy when no explicit role markers exist
const role = index % 2 === 0 ? 'user' : 'assistant';
console.log('[DeepSeek Extractor] Extracted message:', {
index,
role,
contentLength: content.length,
preview: content.substring(0, 100)
});
return {
role,
content: content.trim()
};
}
// NOTE: Markdown extraction and formatting functions moved to conversation-extractor-utils.js
// Extract full conversation data
function extractConversation() {
try {
const title = getConversationTitle();
const messages = getMessages();
if (!messages || messages.length === 0) {
throw new Error('No messages found in conversation');
}
const content = formatMessagesAsText(messages);
return {
title,
content,
messages,
timestamp: Date.now(),
url: window.location.href,
provider: 'DeepSeek'
};
} catch (error) {
console.error('[DeepSeek Extractor] Error extracting conversation:', error);
throw error;
}
}
// Handle save button click
async function handleSaveClick(e) {
if (e) {
e.preventDefault();
e.stopPropagation();
}
if (!saveButton) return;
if (typeof chrome === 'undefined' || !chrome.runtime) {
console.error('[DeepSeek Extractor] Chrome extension API not available');
showNotification('Extension API not available. Try reloading the page.', 'error');
return;
}
// Disable button during save
saveButton.setAttribute('aria-disabled', 'true');
saveButton.style.opacity = '0.6';
saveButton.style.cursor = 'not-allowed';
try {
const conversation = extractConversation();
// Generate conversation ID for deduplication
const conversationId = generateConversationId(conversation.url, conversation.title);
conversation.conversationId = conversationId;
// Check for duplicates
const duplicateCheck = await checkForDuplicate(conversationId);
if (duplicateCheck.isDuplicate) {
// Compare content to decide whether to save
const existingContent = (duplicateCheck.existingConversation.content || '').trim();
const newContent = (conversation.content || '').trim();
if (existingContent === newContent) {
// Content identical - silently skip save
saveButton.setAttribute('aria-disabled', 'false');
saveButton.style.opacity = '1';
saveButton.style.cursor = 'pointer';
return;
}
// Content changed - automatically overwrite with original timestamp
conversation.overwriteId = duplicateCheck.existingConversation.id;
conversation.timestamp = duplicateCheck.existingConversation.timestamp;
}
// Send to background script
chrome.runtime.sendMessage({
action: 'saveConversationFromPage',
payload: conversation
}, (response) => {
if (chrome.runtime.lastError) {
console.error('[DeepSeek Extractor] Chrome runtime error:', chrome.runtime.lastError);
const errorMsg = chrome.runtime.lastError.message;
// Provide user-friendly message for context invalidation
if (errorMsg.includes('Extension context invalidated')) {
showNotification('Extension was reloaded. Please reload this page and try saving again.', 'error');
} else {
showNotification('Failed to save: ' + errorMsg, 'error');
}
saveButton.setAttribute('aria-disabled', 'false');
saveButton.style.opacity = '1';
saveButton.style.cursor = 'pointer';
return;
}
if (response && response.success) {
// Success notification now shown in sidebar
} else {
const errorMsg = response?.error || 'Unknown error';
showNotification('Failed to save: ' + errorMsg, 'error');
}
// Re-enable button
saveButton.setAttribute('aria-disabled', 'false');
saveButton.style.opacity = '1';
saveButton.style.cursor = 'pointer';
});
} catch (error) {
console.error('[DeepSeek Extractor] Error during extraction:', error);
showNotification('Failed to extract conversation: ' + error.message, 'error');
saveButton.setAttribute('aria-disabled', 'false');
saveButton.style.opacity = '1';
saveButton.style.cursor = 'pointer';
}
}
  // Keyboard shortcut (Ctrl+Shift+S / Cmd+Shift+S); only active on chat pages.
  setupKeyboardShortcut(() => {
    if (window.location.href.startsWith('https://chat.deepseek.com/a/chat/')) {
      handleSaveClick();
    }
  }, detectConversation);
  // DeepSeek is a SPA: watch URL changes to add/remove the button as the
  // user navigates between chats and other views.
  observeUrlChanges((url) => {
    console.log('[DeepSeek Extractor] URL changed to:', url);
    if (!url.startsWith('https://chat.deepseek.com/a/chat/')) {
      const existingButton = document.getElementById('insidebar-save-conversation');
      if (existingButton) {
        existingButton.remove();
        saveButton = null;
      }
    } else {
      // Short delay gives the SPA time to render the target toolbar.
      setTimeout(() => insertSaveButton(), 1000);
    }
  });
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/deepseek-save-button.css | CSS | /* DeepSeek Save Button Styles */
/* Base look for the injected save button; matches DeepSeek's icon buttons. */
#insidebar-save-conversation {
  font-family: inherit;
  cursor: pointer;
  user-select: none;
}
/* The button is a <div>, so its disabled state is driven by aria-disabled. */
#insidebar-save-conversation[aria-disabled="true"] {
  opacity: 0.6;
  cursor: not-allowed;
}
/* Reuse DeepSeek's hover-layer variable, with a neutral fallback color. */
#insidebar-save-conversation:hover:not([aria-disabled="true"]) .ds-icon-button__hover-bg {
  background-color: var(--dsw-alias-bg-hover, rgba(0, 0, 0, 0.05));
}
/* Notification slide-in/out animations (enter from / exit to the right). */
@keyframes slideIn {
  from {
    transform: translateX(100%);
    opacity: 0;
  }
  to {
    transform: translateX(0);
    opacity: 1;
  }
}
@keyframes slideOut {
  from {
    transform: translateX(0);
    opacity: 1;
  }
  to {
    transform: translateX(100%);
    opacity: 0;
  }
}
.insidebar-notification {
  transition: all 0.3s ease;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/enter-behavior-chatgpt.js | JavaScript | // ChatGPT Enter/Shift+Enter behavior swap
// Supports customizable key combinations via settings
// Build a synthetic Enter keydown event carrying the requested modifier keys.
// keyCode/which are included for legacy listeners that still read them.
function createEnterEvent(modifiers = {}) {
  const { shift = false, ctrl = false, meta = false, alt = false } = modifiers;
  return new KeyboardEvent("keydown", {
    key: "Enter",
    code: "Enter",
    keyCode: 13,
    which: 13,
    bubbles: true,
    cancelable: true,
    shiftKey: shift,
    ctrlKey: ctrl,
    metaKey: meta,
    altKey: alt
  });
}
// Locate ChatGPT's Send button, covering both the main composer and the
// message-edit view. Returns undefined when no candidate is found.
function findSendButton() {
  // Prefer the stable data-testid hooks (fast path).
  for (const testId of ['send-button', 'fruitjuice-send-button']) {
    const candidate = document.querySelector(`button[data-testid="${testId}"]`);
    if (candidate) return candidate;
  }
  // Otherwise scan all buttons for Send-like text or aria-labels.
  return Array.from(document.querySelectorAll('button')).find(btn =>
    btn.textContent.trim() === 'Send' ||
    btn.getAttribute('aria-label')?.includes('Send') ||
    btn.getAttribute('aria-label')?.includes('send')
  );
}
// Intercept Enter presses in ChatGPT's input areas and remap them according
// to the user's configured newline/send modifier combinations.
// Relies on globals supplied by the shared enter-behavior bootstrap
// (enterKeyConfig, matchesModifiers) - assumed loaded first; TODO confirm.
function handleEnterSwap(event) {
  // Only handle trusted (real user) Enter key events, and never interfere
  // while an IME composition is in progress (e.g. Chinese/Japanese input).
  if (!event.isTrusted || event.code !== "Enter" || event.isComposing) {
    return;
  }
  // Feature disabled or not yet configured: leave native behavior alone.
  if (!enterKeyConfig || !enterKeyConfig.enabled) {
    return;
  }
  // Get the currently focused element
  const activeElement = document.activeElement;
  // ChatGPT has two kinds of input area:
  // 1. Main prompt: ProseMirror contenteditable div with id="prompt-textarea".
  // 2. Editing area: a plain <textarea> shown when editing old messages.
  const isMainPrompt = activeElement &&
    activeElement.id === "prompt-textarea" &&
    activeElement.contentEditable === "true" &&
    activeElement.classList.contains("ProseMirror");
  const isEditingTextarea = activeElement &&
    activeElement.tagName === "TEXTAREA" &&
    activeElement.offsetParent !== null; // offsetParent !== null => element is visible
  if (!isMainPrompt && !isEditingTextarea) {
    return;
  }
  // Newline action requested.
  if (matchesModifiers(event, enterKeyConfig.newlineModifiers)) {
    if (isEditingTextarea) {
      // Plain textarea: native Enter already inserts a newline; let it through
      // without preventDefault.
      return;
    } else {
      // ProseMirror: swallow the event and replay it as Shift+Enter, which
      // ProseMirror treats as a newline.
      event.preventDefault();
      event.stopImmediatePropagation();
      const newEvent = createEnterEvent({ shift: true });
      activeElement.dispatchEvent(newEvent);
    }
  }
  // Send action requested.
  else if (matchesModifiers(event, enterKeyConfig.sendModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // Clicking the Send button is reliable for both element types.
    const sendButton = findSendButton();
    if (sendButton && !sendButton.disabled) {
      sendButton.click();
    } else {
      // Fallback: dispatch Meta+Enter for ProseMirror.
      const newEvent = createEnterEvent({ meta: true });
      activeElement.dispatchEvent(newEvent);
    }
  }
  else {
    // Block any other Enter combinations (Ctrl+Enter, Alt+Enter, Meta+Enter...).
    // ChatGPT natively maps some of these (e.g. Ctrl+Enter sends); when the
    // user configured swapped keys, those native chords would conflict, so
    // only the configured combinations are allowed to do anything.
    event.preventDefault();
    event.stopImmediatePropagation();
  }
}
// Apply the configured Enter behavior as soon as this script loads.
applyEnterSwapSetting();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/enter-behavior-claude.js | JavaScript | // Claude Enter/Shift+Enter behavior swap
// Supports customizable key combinations via settings
// Build a synthetic Enter keydown with the requested modifiers, tagged with a
// private marker so handleEnterSwap can tell it apart from real user input.
function createEnterEvent(modifiers = {}) {
  const synthetic = new KeyboardEvent('keydown', {
    key: 'Enter',
    code: 'Enter',
    keyCode: 13,
    which: 13,
    bubbles: true,
    cancelable: true,
    shiftKey: Boolean(modifiers.shift),
    ctrlKey: Boolean(modifiers.ctrl),
    metaKey: Boolean(modifiers.meta),
    altKey: Boolean(modifiers.alt)
  });
  // Non-writable marker: handleEnterSwap skips events carrying this flag,
  // preventing re-entrant processing of our own dispatches.
  Object.defineProperty(synthetic, '_synthetic_from_extension', {
    value: true,
    writable: false
  });
  return synthetic;
}
// Locate Claude's Send button (new-message composer) or Save button
// (message-edit view).
function findSendButton() {
  // Direct selectors, from most to least specific.
  const selectors = [
    'button[aria-label*="Send"]',
    'button[aria-label*="send"]',
    '[data-testid="send-button"]'
  ];
  for (const selector of selectors) {
    const match = document.querySelector(selector);
    if (match) return match;
  }
  // Last resort: recognize the button by its label or submit type.
  return Array.from(document.querySelectorAll('button')).find(btn =>
    btn.textContent.trim() === 'Save' ||
    btn.textContent.trim() === 'Send' ||
    btn.getAttribute('type') === 'submit'
  );
}
// Helper: Manually insert newline into textarea at cursor position
function insertTextareaNewline(textarea) {
const start = textarea.selectionStart;
const end = textarea.selectionEnd;
const value = textarea.value;
// Insert newline at cursor position
textarea.value = value.substring(0, start) + '\n' + value.substring(end);
// Move cursor after the newline
textarea.selectionStart = textarea.selectionEnd = start + 1;
// Trigger input event so Claude detects the change
textarea.dispatchEvent(new Event('input', { bubbles: true }));
}
// Intercept Enter presses in Claude's input areas and remap them according
// to the user's configured newline/send modifier combinations.
// Relies on globals supplied by the shared enter-behavior bootstrap
// (enterKeyConfig, matchesModifiers) - assumed loaded first; TODO confirm.
function handleEnterSwap(event) {
  // Only handle Enter; never interfere while an IME composition is in
  // progress (e.g. Chinese/Japanese input methods).
  if (event.code !== "Enter" || event.isComposing) {
    return;
  }
  // Skip events we synthesized ourselves (marked in createEnterEvent) so we
  // don't reprocess them in a loop - let them pass through to Claude.
  if (event._synthetic_from_extension) {
    return;
  }
  // Feature disabled or not yet configured: leave native behavior alone.
  if (!enterKeyConfig || !enterKeyConfig.enabled) {
    return;
  }
  // Get the currently focused element
  const activeElement = document.activeElement;
  if (!activeElement) {
    return;
  }
  // Claude edits old messages in a plain <textarea>; the main composer is
  // contenteditable. Everything non-textarea is treated as the latter.
  const isTextarea = activeElement.tagName === "TEXTAREA";
  // Newline action requested.
  if (matchesModifiers(event, enterKeyConfig.newlineModifiers)) {
    // MUST preventDefault for both element types, or Claude's own handler
    // would send the message.
    event.stopImmediatePropagation();
    event.preventDefault();
    if (isTextarea) {
      // Plain textarea: insert the newline manually at the caret.
      insertTextareaNewline(activeElement);
    } else {
      // ProseMirror/contenteditable: replay as Shift+Enter, Claude's native
      // newline chord.
      const enterEvent = new KeyboardEvent('keydown', {
        key: 'Enter',
        code: 'Enter',
        bubbles: true,
        shiftKey: true
      });
      // Mark as synthetic so the guard above skips it on re-entry.
      Object.defineProperty(enterEvent, '_synthetic_from_extension', {
        value: true,
        writable: false
      });
      activeElement.dispatchEvent(enterEvent);
    }
    return;
  }
  // Send action requested.
  else if (matchesModifiers(event, enterKeyConfig.sendModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // Clicking the Send/Save button is reliable for both element types.
    const sendButton = findSendButton();
    if (sendButton && !sendButton.disabled) {
      sendButton.click();
    } else {
      // Fallback: dispatch a plain (synthetic-marked) Enter.
      const enterEvent = createEnterEvent();
      activeElement.dispatchEvent(enterEvent);
    }
    return;
  }
  else {
    // Block any other Enter combinations (Ctrl+Enter, Alt+Enter, Meta+Enter...)
    // so Claude's native chords cannot conflict with the configured keys.
    event.preventDefault();
    event.stopImmediatePropagation();
  }
}
// Apply the configured Enter behavior as soon as this script loads.
applyEnterSwapSetting();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/enter-behavior-copilot.js | JavaScript | // Microsoft Copilot Enter/Shift+Enter behavior swap
// Supports customizable key combinations via settings
// Build a synthetic Enter keydown event carrying the requested modifier keys.
// keyCode/which are kept for legacy listeners that still read them.
function createEnterEvent(modifiers = {}) {
  const { shift = false, ctrl = false, meta = false, alt = false } = modifiers;
  return new KeyboardEvent("keydown", {
    key: "Enter",
    code: "Enter",
    keyCode: 13,
    which: 13,
    bubbles: true,
    cancelable: true,
    shiftKey: shift,
    ctrlKey: ctrl,
    metaKey: meta,
    altKey: alt
  });
}
// Locate Copilot's Send button, from most to least specific selector.
function findSendButton() {
  // Copilot tags the send button with data-testid="submit-button"; the
  // aria-label is a secondary direct hook.
  const direct =
    document.querySelector('button[data-testid="submit-button"]') ||
    document.querySelector('button[aria-label="Submit message"]');
  if (direct) return direct;
  // Broad fallback: any button advertising "Submit" in its label or title.
  return Array.from(document.querySelectorAll('button')).find(btn =>
    btn.getAttribute('aria-label')?.includes('Submit') ||
    btn.getAttribute('title')?.includes('Submit')
  );
}
// Intercept Enter presses in Copilot's input areas and remap them according
// to the user's configured newline/send modifier combinations.
// Relies on globals supplied by the shared enter-behavior bootstrap
// (enterKeyConfig, matchesModifiers) - assumed loaded first; TODO confirm.
function handleEnterSwap(event) {
  // Only handle trusted (real user) Enter key events, and never interfere
  // while an IME composition is in progress (e.g. Chinese/Japanese input).
  if (!event.isTrusted || event.code !== "Enter" || event.isComposing) {
    return;
  }
  // Feature disabled or not yet configured: leave native behavior alone.
  if (!enterKeyConfig || !enterKeyConfig.enabled) {
    return;
  }
  // Get the currently focused element
  const activeElement = document.activeElement;
  // Main composer: textarea with id="userInput" / data-testid="composer-input".
  const isMainComposer = activeElement &&
    activeElement.tagName === "TEXTAREA" &&
    (activeElement.id === "userInput" ||
    activeElement.getAttribute('data-testid') === 'composer-input') &&
    activeElement.offsetParent !== null;
  // Floating/inline textarea shown while editing an earlier message,
  // recognized by its placeholder text.
  const isFloatingTextarea = activeElement &&
    activeElement.tagName === "TEXTAREA" &&
    activeElement.placeholder?.includes('question or edit') &&
    activeElement.offsetParent !== null;
  const isCopilotInput = isMainComposer || isFloatingTextarea;
  if (!isCopilotInput) {
    return;
  }
  // IMPORTANT: Copilot's native behavior is the OPPOSITE of ChatGPT/Claude:
  // - Enter (no shift) = send message
  // - Shift+Enter = newline
  // so both actions must be fully intercepted and reimplemented here.
  if (matchesModifiers(event, enterKeyConfig.newlineModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // Insert a newline character at the caret (replacing any selection).
    const start = activeElement.selectionStart;
    const end = activeElement.selectionEnd;
    const value = activeElement.value;
    activeElement.value = value.substring(0, start) + '\n' + value.substring(end);
    activeElement.selectionStart = activeElement.selectionEnd = start + 1;
    // Fire 'input' so Copilot's state picks up the change.
    activeElement.dispatchEvent(new Event('input', { bubbles: true }));
  }
  // Send action requested.
  else if (matchesModifiers(event, enterKeyConfig.sendModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    const sendButton = findSendButton();
    if (sendButton && !sendButton.disabled) {
      sendButton.click();
    } else {
      // Fallback: replay a plain Enter and let Copilot's native send fire.
      const newEvent = createEnterEvent({});
      activeElement.dispatchEvent(newEvent);
    }
  }
  else {
    // Block any other Enter combinations to avoid conflicts.
    event.preventDefault();
    event.stopImmediatePropagation();
  }
}
// Read the saved Enter-key configuration and attach/detach the keydown
// interceptor as soon as this content script is injected.
applyEnterSwapSetting();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/enter-behavior-deepseek.js | JavaScript | // DeepSeek Enter/Shift+Enter behavior swap
// Supports customizable key combinations via settings
// Helper: Build a synthetic "keydown" Enter event carrying the requested
// modifier flags. Omitted modifiers default to "not pressed".
function createEnterEvent(modifiers = {}) {
  const { shift = false, ctrl = false, meta = false, alt = false } = modifiers;
  return new KeyboardEvent("keydown", {
    key: "Enter",
    code: "Enter",
    // Legacy numeric properties kept for sites that still read them
    keyCode: 13,
    which: 13,
    bubbles: true,
    cancelable: true,
    shiftKey: shift,
    ctrlKey: ctrl,
    metaKey: meta,
    altKey: alt
  });
}
/**
 * Selector array for finding DeepSeek's Submit/Send button.
 * Priority order: icon-button detection → generic icon-button fallback.
 * Each entry is either a CSS descriptor or a matcher() function returning
 * the button element (or null/undefined when nothing qualifies).
 */
const SEND_BUTTON_SELECTORS = [
  // Priority 1: Icon button with SVG (language-independent, most reliable for new messages)
  {
    type: 'function',
    matcher: () => {
      const iconButton = document.querySelector('button.ds-icon-button:not([aria-disabled="true"])') ||
                         document.querySelector('.ds-icon-button[role="button"]:not([aria-disabled="true"])');
      if (iconButton) {
        // Verify it's the send button by checking if it has an up arrow SVG.
        // NOTE(review): the `|| querySelector('svg')` arm accepts ANY svg, so
        // the path check is effectively advisory — confirm the path data is
        // still current in DeepSeek's markup.
        const hasSendIcon = iconButton.querySelector('svg path[d*="M8.3125"]') ||
                            iconButton.querySelector('svg');
        if (hasSendIcon) return iconButton;
      }
      return null;
    }
  },
  // Priority 2: Generic icon button fallback
  {
    type: 'function',
    matcher: () => {
      return Array.from(document.querySelectorAll('button, [role="button"]')).find(btn => {
        // BUG FIX: getAttribute('aria-disabled') returns the truthy string
        // "false" for an explicitly-enabled button, so the previous
        // `!btn.getAttribute('aria-disabled')` check rejected enabled buttons.
        // Only aria-disabled="true" means disabled (now consistent with the
        // :not([aria-disabled="true"]) CSS used in Priority 1).
        return btn.classList.contains('ds-icon-button') &&
               btn.getAttribute('aria-disabled') !== 'true' &&
               btn.querySelector('svg');
      });
    }
  }
];
// Helper: Find DeepSeek's Submit/Send button.
// When editing an old message the Send button lives next to the focused
// textarea, so we walk up to 10 ancestors and search inside each; for new
// messages we fall back to the global SEND_BUTTON_SELECTORS list.
function findSendButton(activeElement, isEditingTextarea) {
  if (isEditingTextarea && activeElement) {
    let node = activeElement.parentElement;
    for (let depth = 0; depth < 10 && node; depth++, node = node.parentElement) {
      for (const btn of node.querySelectorAll('button')) {
        const label = btn.textContent.trim();
        // Localized "Send" text plus one of DeepSeek's button classes
        const hasSendText = window.ButtonFinderUtils.TEXT_MAPS.send.some(t => t === label);
        const hasSendClass = btn.classList.contains('ds-basic-button--primary') ||
                             btn.classList.contains('ds-atom-button');
        if (hasSendText && hasSendClass) return btn;
      }
    }
  }
  // New-message flow: global lookup via the shared selector machinery.
  return window.ButtonFinderUtils.findButton(SEND_BUTTON_SELECTORS);
}
// Helper: splice a "\n" into the textarea at the caret (replacing any
// selection), park the caret after it, and notify the page via input/change.
function insertTextareaNewline(textarea) {
  const caretStart = textarea.selectionStart;
  const caretEnd = textarea.selectionEnd;
  const text = textarea.value;
  const before = text.substring(0, caretStart);
  const after = text.substring(caretEnd);
  textarea.value = `${before}\n${after}`;
  // Place the caret just past the inserted newline
  const newCaret = caretStart + 1;
  textarea.selectionStart = newCaret;
  textarea.selectionEnd = newCaret;
  // Fire the events DeepSeek listens to so its state stays in sync
  textarea.dispatchEvent(new Event('input', { bubbles: true }));
  textarea.dispatchEvent(new Event('change', { bubbles: true }));
}
// Capture-phase keydown interceptor: remaps Enter in DeepSeek's composer (and
// its message-editing textarea) to the user's configured newline/send combos.
// DeepSeek's native behavior is Enter = send, Shift+Enter = newline, so once
// the event is recognized as ours we always preventDefault and perform the
// configured action manually.
function handleEnterSwap(event) {
  // Only handle trusted Enter key events.
  // Skip if IME composition is in progress (e.g., Chinese/Japanese input method).
  if (!event.isTrusted || event.code !== "Enter" || event.isComposing) {
    return;
  }
  // Bail out until settings have loaded, or when the feature is disabled.
  if (!enterKeyConfig || !enterKeyConfig.enabled) {
    return;
  }
  // Get the currently focused element
  const activeElement = document.activeElement;
  // Check if this is DeepSeek's input area:
  // 1. Main prompt: textarea with placeholder "Message DeepSeek" or ds-scroll-area class
  // 2. Editing area: textarea with name="user query" or parent has ds-textarea class
  const isMainPrompt = activeElement &&
    activeElement.tagName === "TEXTAREA" &&
    (activeElement.placeholder?.includes("DeepSeek") ||
     (activeElement.classList.contains("ds-scroll-area") &&
      !activeElement.getAttribute("name")));
  const isEditingTextarea = activeElement &&
    activeElement.tagName === "TEXTAREA" &&
    (activeElement.getAttribute("name") === "user query" ||
     activeElement.classList.contains("ds-textarea__textarea") ||
     activeElement.closest('.ds-textarea')) &&
    activeElement.offsetParent !== null; // visible check
  if (!isMainPrompt && !isEditingTextarea) {
    return;
  }
  // Newline action: swallow the event and splice the newline in ourselves.
  if (matchesModifiers(event, enterKeyConfig.newlineModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    insertTextareaNewline(activeElement);
    return;
  }
  // Send action: swallow the event and click the Submit/Send button.
  else if (matchesModifiers(event, enterKeyConfig.sendModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // Find the Submit/Send button (context-aware for editing vs new messages)
    const sendButton = findSendButton(activeElement, isEditingTextarea);
    // BUG FIX: getAttribute('aria-disabled') returns the truthy string "false"
    // for an explicitly-enabled button, so the previous
    // `!sendButton.getAttribute('aria-disabled')` check treated such buttons
    // as disabled. Only aria-disabled="true" means disabled.
    if (sendButton && sendButton.getAttribute('aria-disabled') !== 'true') {
      sendButton.click();
    } else {
      // Fallback: dispatch plain Enter (synthetic, so isTrusted === false —
      // the page may choose to ignore it).
      const newEvent = createEnterEvent();
      activeElement.dispatchEvent(newEvent);
    }
    return;
  }
  else {
    // Block any other Enter combinations (Ctrl+Enter, Alt+Enter, Meta+Enter, etc.)
    // so DeepSeek's native shortcuts cannot bypass the user's configured keys.
    event.preventDefault();
    event.stopImmediatePropagation();
  }
}
// Read the saved Enter-key configuration and attach/detach the keydown
// interceptor as soon as this content script is injected.
applyEnterSwapSetting();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/enter-behavior-gemini.js | JavaScript | // Gemini Enter/Shift+Enter behavior swap
// Supports customizable key combinations via settings
// Helper: Build a synthetic "keydown" Enter event carrying the requested
// modifier flags. Omitted modifiers default to "not pressed".
function createEnterEvent(modifiers = {}) {
  const { shift = false, ctrl = false, meta = false, alt = false } = modifiers;
  return new KeyboardEvent("keydown", {
    key: "Enter",
    code: "Enter",
    // Legacy numeric properties kept for sites that still read them
    keyCode: 13,
    which: 13,
    bubbles: true,
    cancelable: true,
    shiftKey: shift,
    ctrlKey: ctrl,
    metaKey: meta,
    altKey: alt
  });
}
// Helper: Find Gemini's Send/Update button.
// Editing flow: the Update button sits near the focused textarea, so walk up
// to 5 ancestors and search inside each. New-message flow: global lookups,
// most specific selector first.
function findSendButton(activeElement, isEditingTextarea) {
  if (isEditingTextarea && activeElement) {
    let node = activeElement.parentElement;
    for (let depth = 0; depth < 5 && node; depth++, node = node.parentElement) {
      for (const btn of node.querySelectorAll('button')) {
        const label = btn.textContent.trim();
        if (label === 'Update' ||
            btn.classList.contains('update-button') ||
            (btn.classList.contains('submit') && label !== 'Send')) {
          return btn;
        }
      }
    }
  }
  // Global search for the Send button, by accessible name first
  const byAriaLabel =
    document.querySelector('button[aria-label*="Send"]') ||
    document.querySelector('button[aria-label*="send"]');
  if (byAriaLabel) return byAriaLabel;
  // Gemini-specific class name
  const byClass = document.querySelector('button.send-button');
  if (byClass) return byClass;
  // Last resort: visible text, material send icon, or submit class
  for (const btn of document.querySelectorAll('button')) {
    if (btn.textContent.trim() === 'Send' ||
        btn.querySelector('mat-icon[fonticon="send"]') ||
        btn.classList.contains('submit')) {
      return btn;
    }
  }
  return undefined;
}
// Helper: Insert a newline into Gemini's Quill contentEditable editor.
// Prefers the (deprecated but widely supported) execCommand path; falls back
// to manual DOM surgery when the command is unavailable OR fails.
function insertQuillNewline(div) {
  try {
    // BUG FIX: execCommand returns false when the command fails; the old code
    // only checked for the function's existence, so a failed command silently
    // inserted nothing. Now we fall through to the manual path in that case.
    const usedCommand = !!document.execCommand &&
                        document.execCommand('insertLineBreak');
    if (!usedCommand) {
      // Manual DOM manipulation fallback
      const selection = window.getSelection();
      // Guard: getRangeAt(0) throws when there is no selection range
      if (!selection || selection.rangeCount === 0) {
        return;
      }
      const range = selection.getRangeAt(0);
      const br = document.createElement('br');
      range.deleteContents();
      range.insertNode(br);
      // Quill needs a trailing <br> when inserting at the end of a line
      const isAtEnd = !br.nextSibling || br.nextSibling.nodeName === 'BR';
      if (isAtEnd) {
        const br2 = document.createElement('br');
        br.parentNode.insertBefore(br2, br.nextSibling);
      }
      // Move the caret past the inserted break
      range.setStartAfter(br);
      range.setEndAfter(br);
      selection.removeAllRanges();
      selection.addRange(range);
    }
    // Notify the framework that the editor content changed
    div.dispatchEvent(new Event('input', { bubbles: true }));
    div.dispatchEvent(new Event('change', { bubbles: true }));
  } catch (e) {
    // Silent fail - if newline insertion fails, just do nothing
  }
}
// Helper: splice a "\n" into the textarea at the caret (replacing any
// selection), park the caret after it, and notify Gemini via an input event.
function insertTextareaNewline(textarea) {
  const caretStart = textarea.selectionStart;
  const caretEnd = textarea.selectionEnd;
  const text = textarea.value;
  const before = text.substring(0, caretStart);
  const after = text.substring(caretEnd);
  textarea.value = `${before}\n${after}`;
  // Place the caret just past the inserted newline
  const newCaret = caretStart + 1;
  textarea.selectionStart = newCaret;
  textarea.selectionEnd = newCaret;
  // Fire the event Gemini listens to so its state stays in sync
  textarea.dispatchEvent(new Event('input', { bubbles: true }));
}
// Capture-phase keydown interceptor for Gemini: remaps Enter in the Quill
// composer and in the message-editing textarea to the user's configured
// newline/send key combinations (loaded into enterKeyConfig by
// applyEnterSwapSetting in enter-behavior-utils.js).
function handleEnterSwap(event) {
  // Only handle trusted Enter key events
  // Skip if IME composition is in progress (e.g., Chinese/Japanese input method)
  if (!event.isTrusted || event.code !== "Enter" || event.isComposing) {
    return;
  }
  // Bail out until the settings read completes, or when the feature is off.
  if (!enterKeyConfig || !enterKeyConfig.enabled) {
    return;
  }
  // Get the currently focused element
  const activeElement = document.activeElement;
  // Check if this is Gemini's input area:
  // 1. Main prompt: Quill editor (contentEditable div with ql-editor class)
  // 2. Editing area: Regular textarea (appears when editing old messages)
  const isQuillEditor = activeElement &&
    activeElement.tagName === "DIV" &&
    activeElement.contentEditable === "true" &&
    (activeElement.classList.contains("ql-editor") ||
     (activeElement.classList.contains("textarea") &&
      activeElement.getAttribute("role") === "textbox"));
  const isEditingTextarea = activeElement &&
    activeElement.tagName === "TEXTAREA" &&
    activeElement.offsetParent !== null; // visible check
  if (!isQuillEditor && !isEditingTextarea) {
    return;
  }
  // Check if this matches newline action
  if (matchesModifiers(event, enterKeyConfig.newlineModifiers)) {
    // MUST preventDefault for both types, or Gemini's handler will send the message
    event.preventDefault();
    event.stopImmediatePropagation();
    if (isEditingTextarea) {
      // For regular textarea: manually insert newline
      insertTextareaNewline(activeElement);
      return;
    } else {
      // For Quill editor: use DOM manipulation
      insertQuillNewline(activeElement);
      return;
    }
  }
  // Check if this matches send action
  else if (matchesModifiers(event, enterKeyConfig.sendModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // Find and click the Send/Update button (context-aware for editing vs new messages)
    const sendButton = findSendButton(activeElement, isEditingTextarea);
    if (sendButton && !sendButton.disabled) {
      sendButton.click();
    } else {
      // Fallback: dispatch plain Enter
      // NOTE(review): the synthetic event has isTrusted === false, so Gemini
      // may ignore it — confirm the fallback actually sends.
      const newEvent = createEnterEvent();
      activeElement.dispatchEvent(newEvent);
    }
    return;
  }
  else {
    // Block any other Enter combinations (Ctrl+Enter, Alt+Enter, Meta+Enter, etc.)
    // This prevents Gemini's native keyboard shortcuts from interfering with user settings.
    // For example, if the user configured "swapped" mode (Enter=newline, Shift+Enter=send),
    // then Ctrl+Enter should do nothing to avoid confusion and ensure only the configured keys work.
    event.preventDefault();
    event.stopImmediatePropagation();
  }
}
// Read the saved Enter-key configuration and attach/detach the keydown
// interceptor as soon as this content script is injected.
applyEnterSwapSetting();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/enter-behavior-google.js | JavaScript | // Google AI Mode Enter/Shift+Enter behavior swap
// Supports customizable key combinations via settings
// Helper: Build a synthetic "keydown" Enter event carrying the requested
// modifier flags. Omitted modifiers default to "not pressed".
function createEnterEvent(modifiers = {}) {
  const { shift = false, ctrl = false, meta = false, alt = false } = modifiers;
  return new KeyboardEvent("keydown", {
    key: "Enter",
    code: "Enter",
    // Legacy numeric properties kept for sites that still read them
    keyCode: 13,
    which: 13,
    bubbles: true,
    cancelable: true,
    shiftKey: shift,
    ctrlKey: ctrl,
    metaKey: meta,
    altKey: alt
  });
}
// Helper: Locate Google AI Mode's Send button.
// Tries selectors from most to least reliable, then falls back to scanning
// all buttons for a "Send" accessible name.
function findSendButton() {
  const orderedSelectors = [
    'button[data-xid="input-plate-send-button"]', // most stable hook
    'button[aria-label="Send"]',
    'button.OEueve' // Google's (obfuscated) class name
  ];
  for (const selector of orderedSelectors) {
    const hit = document.querySelector(selector);
    if (hit) return hit;
  }
  for (const btn of document.querySelectorAll('button')) {
    if (btn.getAttribute('aria-label')?.includes('Send')) {
      return btn;
    }
  }
  return undefined;
}
// Helper: splice a "\n" into the textarea at the caret (replacing any
// selection), park the caret after it, and notify the page via input/change.
function insertTextareaNewline(textarea) {
  const caretStart = textarea.selectionStart;
  const caretEnd = textarea.selectionEnd;
  const text = textarea.value;
  const before = text.substring(0, caretStart);
  const after = text.substring(caretEnd);
  textarea.value = `${before}\n${after}`;
  // Place the caret just past the inserted newline
  const newCaret = caretStart + 1;
  textarea.selectionStart = newCaret;
  textarea.selectionEnd = newCaret;
  // Fire the events Google listens to so its state stays in sync
  textarea.dispatchEvent(new Event('input', { bubbles: true }));
  textarea.dispatchEvent(new Event('change', { bubbles: true }));
}
// Capture-phase keydown interceptor for Google AI Mode: remaps Enter in the
// search/ask textarea to the user's configured newline/send key combinations
// (loaded into enterKeyConfig by applyEnterSwapSetting in
// enter-behavior-utils.js).
function handleEnterSwap(event) {
  // Only handle trusted Enter key events
  // Skip if IME composition is in progress (e.g., Chinese/Japanese input method)
  if (!event.isTrusted || event.code !== "Enter" || event.isComposing) {
    return;
  }
  // Bail out until the settings read completes, or when the feature is off.
  if (!enterKeyConfig || !enterKeyConfig.enabled) {
    return;
  }
  // Get the currently focused element
  const activeElement = document.activeElement;
  // Check if this is Google AI Mode's textarea element
  // Note: Google AI Mode does not support editing old messages
  // NOTE(review): the ITIRGe class and maxlength=8192 look like fragile,
  // generated hooks — verify against the current Google markup.
  const isGoogleInput = activeElement &&
    activeElement.tagName === "TEXTAREA" &&
    (activeElement.classList.contains("ITIRGe") ||
     activeElement.getAttribute("aria-label") === "Ask anything" ||
     activeElement.getAttribute("maxlength") === "8192") &&
    activeElement.offsetParent !== null; // visible check
  if (!isGoogleInput) {
    return;
  }
  // Check if this matches newline action
  if (matchesModifiers(event, enterKeyConfig.newlineModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // For textarea: manually insert newline
    insertTextareaNewline(activeElement);
    return;
  }
  // Check if this matches send action
  else if (matchesModifiers(event, enterKeyConfig.sendModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // Find and click the Send button
    const sendButton = findSendButton();
    if (sendButton && !sendButton.disabled) {
      sendButton.click();
    } else {
      // Fallback: dispatch plain Enter
      // NOTE(review): the synthetic event has isTrusted === false, so the page
      // may ignore it — confirm the fallback actually sends.
      const newEvent = createEnterEvent();
      activeElement.dispatchEvent(newEvent);
    }
    return;
  }
  else {
    // Block any other Enter combinations (Ctrl+Enter, Alt+Enter, Meta+Enter, etc.)
    // This prevents Google's native keyboard shortcuts from interfering with user settings.
    // For example, if the user configured "swapped" mode (Enter=newline, Shift+Enter=send),
    // then Ctrl+Enter should do nothing to avoid confusion and ensure only the configured keys work.
    event.preventDefault();
    event.stopImmediatePropagation();
  }
}
// Read the saved Enter-key configuration and attach/detach the keydown
// interceptor as soon as this content script is injected.
applyEnterSwapSetting();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/enter-behavior-grok.js | JavaScript | // Grok Enter/Shift+Enter behavior swap
// Supports customizable key combinations via settings
// Helper: Build a synthetic "keydown" Enter event carrying the requested
// modifier flags. Omitted modifiers default to "not pressed".
function createEnterEvent(modifiers = {}) {
  const { shift = false, ctrl = false, meta = false, alt = false } = modifiers;
  return new KeyboardEvent("keydown", {
    key: "Enter",
    code: "Enter",
    // Legacy numeric properties kept for sites that still read them
    keyCode: 13,
    which: 13,
    bubbles: true,
    cancelable: true,
    shiftKey: shift,
    ctrlKey: ctrl,
    metaKey: meta,
    altKey: alt
  });
}
/**
 * Ordered selector descriptors used to locate Grok's Submit/Save button.
 * Earlier entries are more reliable: structural CSS first, then accessible
 * names, then visible text (both localized via ButtonFinderUtils.TEXT_MAPS).
 */
const SEND_BUTTON_SELECTORS = [
  // 1) Structural: submit-typed button, independent of UI language.
  { type: 'css', value: 'button[type="submit"]' },
  // 2) Accessible name containing a localized "Submit" or "Save".
  {
    type: 'function',
    matcher: () => {
      const { submit, save } = window.ButtonFinderUtils.TEXT_MAPS;
      for (const btn of document.querySelectorAll('button')) {
        const aria = btn.getAttribute('aria-label');
        if (!aria) continue;
        if (submit.some(t => aria.includes(t)) || save.some(t => aria.includes(t))) {
          return btn;
        }
      }
      return undefined;
    }
  },
  // 3) Visible text exactly equal to a localized "Submit" or "Save".
  {
    type: 'function',
    matcher: () => {
      const { submit, save } = window.ButtonFinderUtils.TEXT_MAPS;
      for (const btn of document.querySelectorAll('button')) {
        const text = btn.textContent?.trim();
        if (!text) continue;
        if (submit.some(t => t === text) || save.some(t => t === text)) {
          return btn;
        }
      }
      return undefined;
    }
  }
];
// Helper: Find Grok's Submit/Save button.
// Editing flow: the Save button sits near the focused textarea, so walk up
// to 5 ancestors and search inside each. New-message flow: global lookup via
// the shared SEND_BUTTON_SELECTORS list.
function findSendButton(activeElement, isEditingTextarea) {
  if (isEditingTextarea && activeElement) {
    let node = activeElement.parentElement;
    for (let depth = 0; depth < 5 && node; depth++, node = node.parentElement) {
      for (const btn of node.querySelectorAll('button')) {
        const label = btn.textContent.trim();
        const isSaveText = window.ButtonFinderUtils.TEXT_MAPS.save.some(t => t === label);
        // Filled primary button that is NOT the Submit button also counts as Save
        const isFilledNonSubmit =
          btn.classList.contains('bg-button-filled') &&
          !window.ButtonFinderUtils.TEXT_MAPS.submit.some(t => t === label);
        if (isSaveText || isFilledNonSubmit) return btn;
      }
    }
  }
  return window.ButtonFinderUtils.findButton(SEND_BUTTON_SELECTORS);
}
// Helper: splice a "\n" into the textarea at the caret (replacing any
// selection), park the caret after it, and notify Grok via an input event.
function insertTextareaNewline(textarea) {
  const caretStart = textarea.selectionStart;
  const caretEnd = textarea.selectionEnd;
  const text = textarea.value;
  const before = text.substring(0, caretStart);
  const after = text.substring(caretEnd);
  textarea.value = `${before}\n${after}`;
  // Place the caret just past the inserted newline
  const newCaret = caretStart + 1;
  textarea.selectionStart = newCaret;
  textarea.selectionEnd = newCaret;
  // Fire the event Grok listens to so its state stays in sync
  textarea.dispatchEvent(new Event('input', { bubbles: true }));
}
// Capture-phase keydown interceptor for Grok: remaps Enter in the TipTap/
// ProseMirror composer and in the message-editing textarea to the user's
// configured newline/send key combinations (loaded into enterKeyConfig by
// applyEnterSwapSetting in enter-behavior-utils.js).
function handleEnterSwap(event) {
  // Only handle trusted Enter key events
  // Skip if IME composition is in progress (e.g., Chinese/Japanese input method)
  if (!event.isTrusted || event.code !== "Enter" || event.isComposing) {
    return;
  }
  // Bail out until the settings read completes, or when the feature is off.
  if (!enterKeyConfig || !enterKeyConfig.enabled) {
    return;
  }
  // Get the currently focused element
  const activeElement = document.activeElement;
  // Check if this is Grok's input area:
  // 1. Main prompt: TipTap/ProseMirror editor (contentEditable div)
  // 2. Editing area: Regular textarea (appears when editing old messages)
  const isMainPrompt = activeElement &&
    activeElement.tagName === "DIV" &&
    activeElement.contentEditable === "true" &&
    (activeElement.classList.contains("tiptap") ||
     activeElement.classList.contains("ProseMirror"));
  const isEditingTextarea = activeElement &&
    activeElement.tagName === "TEXTAREA" &&
    activeElement.offsetParent !== null; // visible check
  if (!isMainPrompt && !isEditingTextarea) {
    return;
  }
  // Check if this matches newline action
  if (matchesModifiers(event, enterKeyConfig.newlineModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    if (isEditingTextarea) {
      // For regular textarea: manually insert newline
      insertTextareaNewline(activeElement);
      return;
    } else {
      // For ProseMirror: Shift+Enter inserts newline
      // NOTE(review): the synthetic Shift+Enter has isTrusted === false;
      // confirm ProseMirror still honors it for newline insertion.
      const newEvent = createEnterEvent({ shift: true });
      activeElement.dispatchEvent(newEvent);
      return;
    }
  }
  // Check if this matches send action
  else if (matchesModifiers(event, enterKeyConfig.sendModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // Find and click the Submit/Save button (context-aware for editing vs new messages)
    const sendButton = findSendButton(activeElement, isEditingTextarea);
    if (sendButton && !sendButton.disabled) {
      sendButton.click();
    } else {
      // Fallback: dispatch plain Enter
      const newEvent = createEnterEvent();
      activeElement.dispatchEvent(newEvent);
    }
    return;
  }
  else {
    // Block any other Enter combinations (Ctrl+Enter, Alt+Enter, Meta+Enter, etc.)
    // This prevents Grok's native keyboard shortcuts from interfering with user settings.
    // For example, if the user configured "swapped" mode (Enter=newline, Shift+Enter=send),
    // then Ctrl+Enter should do nothing to avoid confusion and ensure only the configured keys work.
    event.preventDefault();
    event.stopImmediatePropagation();
  }
}
// Read the saved Enter-key configuration and attach/detach the keydown
// interceptor as soon as this content script is injected.
applyEnterSwapSetting();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/enter-behavior-perplexity.js | JavaScript | // Perplexity Enter/Shift+Enter behavior swap
// Supports customizable key combinations via settings
// Helper: Build a synthetic "keydown" Enter event carrying the requested
// modifier flags. Omitted modifiers default to "not pressed".
// `composed: true` lets the event cross shadow-DOM boundaries on Perplexity.
function createEnterEvent(modifiers = {}) {
  const { shift = false, ctrl = false, meta = false, alt = false } = modifiers;
  return new KeyboardEvent("keydown", {
    key: "Enter",
    code: "Enter",
    // Legacy numeric properties kept for sites that still read them
    keyCode: 13,
    which: 13,
    bubbles: true,
    cancelable: true,
    composed: true,
    shiftKey: shift,
    ctrlKey: ctrl,
    metaKey: meta,
    altKey: alt
  });
}
// Helper: Find Perplexity's Submit/Save button.
// Editing flow: the "Save and rewrite" button sits near the focused Lexical
// editor, so walk up to 10 ancestors and search inside each. New-message
// flow: global lookups, most specific first.
function findSendButton(activeElement, isEditingLexical) {
  if (isEditingLexical && activeElement) {
    let node = activeElement.parentElement;
    for (let depth = 0; depth < 10 && node; depth++, node = node.parentElement) {
      const byTestIdLocal = node.querySelector('button[data-testid="confirm-edit-query-button"]');
      if (byTestIdLocal) return byTestIdLocal;
      for (const btn of node.querySelectorAll('button')) {
        const aria = btn.getAttribute('aria-label');
        if (aria && (aria.includes('Save and rewrite') || aria.includes('confirm-edit'))) {
          return btn;
        }
      }
    }
  }
  // Global search for the Submit button
  const byTestId = document.querySelector('button[data-testid="submit-button"]');
  if (byTestId) return byTestId;
  const byAriaLabel = document.querySelector('button[aria-label="Submit"]');
  if (byAriaLabel) return byAriaLabel;
  // Last resort: any button whose accessible name mentions "Submit"
  for (const btn of document.querySelectorAll('button')) {
    if (btn.getAttribute('aria-label')?.includes('Submit')) {
      return btn;
    }
  }
  return undefined;
}
// Capture-phase keydown interceptor for Perplexity: remaps Enter in the
// Lexical composer (main prompt and inline-edit variants) to the user's
// configured newline/send key combinations (loaded into enterKeyConfig by
// applyEnterSwapSetting in enter-behavior-utils.js).
function handleEnterSwap(event) {
  // Only handle trusted Enter key events
  // Skip if IME composition is in progress (e.g., Chinese/Japanese input method)
  if (!event.isTrusted || event.code !== "Enter" || event.isComposing) {
    return;
  }
  // Bail out until the settings read completes, or when the feature is off.
  if (!enterKeyConfig || !enterKeyConfig.enabled) {
    return;
  }
  // Get the currently focused element
  const activeElement = document.activeElement;
  // Check if this is Perplexity's input area:
  // 1. Main prompt: Lexical editor with id="ask-input"
  // 2. Editing area: Lexical editor without id (but has data-lexical-editor and role="textbox")
  const isMainPrompt = activeElement &&
    activeElement.id === "ask-input" &&
    activeElement.isContentEditable &&
    activeElement.getAttribute("data-lexical-editor") === "true" &&
    activeElement.getAttribute("role") === "textbox";
  const isEditingLexical = activeElement &&
    !activeElement.id && // No id for editing
    activeElement.isContentEditable &&
    activeElement.getAttribute("data-lexical-editor") === "true" &&
    activeElement.getAttribute("role") === "textbox" &&
    activeElement.offsetParent !== null; // visible check
  if (!isMainPrompt && !isEditingLexical) {
    return;
  }
  // Check if this matches newline action
  if (matchesModifiers(event, enterKeyConfig.newlineModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // For Lexical editor (both main and editing): Shift+Enter inserts newline
    // NOTE(review): the synthetic Shift+Enter has isTrusted === false; confirm
    // Lexical still honors it for newline insertion.
    const newEvent = createEnterEvent({ shift: true });
    activeElement.dispatchEvent(newEvent);
    return;
  }
  // Check if this matches send action
  else if (matchesModifiers(event, enterKeyConfig.sendModifiers)) {
    event.preventDefault();
    event.stopImmediatePropagation();
    // Find and click the Submit/Save button (context-aware for editing vs new messages)
    const sendButton = findSendButton(activeElement, isEditingLexical);
    if (sendButton && !sendButton.disabled) {
      sendButton.click();
    } else {
      // Fallback: dispatch plain Enter
      const newEvent = createEnterEvent();
      activeElement.dispatchEvent(newEvent);
    }
    return;
  }
  else {
    // Block any other Enter combinations (Ctrl+Enter, Alt+Enter, Meta+Enter, etc.)
    // This prevents Perplexity's native keyboard shortcuts from interfering with user settings.
    // For example, if the user configured "swapped" mode (Enter=newline, Shift+Enter=send),
    // then Ctrl+Enter should do nothing to avoid confusion and ensure only the configured keys work.
    event.preventDefault();
    event.stopImmediatePropagation();
  }
}
// Read the saved Enter-key configuration and attach/detach the keydown
// interceptor as soon as this content script is injected.
applyEnterSwapSetting();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/enter-behavior-utils.js | JavaScript | // Shared utilities for Enter key behavior modification
// Supports customizable key combinations for newline and send actions
// Cached copy of the user's enterKeyBehavior setting; null until the first
// chrome.storage read completes. Consulted by handleEnterSwap on every keydown.
let enterKeyConfig = null;
// Register the capture-phase keydown interceptor. Capture ensures we see the
// Enter event before the host page's own handlers. The options object must
// stay identical to the one in disableEnterSwap for removal to match.
function enableEnterSwap() {
  window.addEventListener("keydown", handleEnterSwap, { capture: true });
}
// Remove the capture-phase keydown interceptor (capture flag must match the
// one used in enableEnterSwap for the removal to take effect).
function disableEnterSwap() {
  window.removeEventListener("keydown", handleEnterSwap, { capture: true });
}
// True when the event's modifier-key state exactly equals the configured set.
// Modifier fields missing from `modifiers` are treated as "must not be pressed".
function matchesModifiers(event, modifiers) {
  const wanted = {
    shift: modifiers.shift || false,
    ctrl: modifiers.ctrl || false,
    alt: modifiers.alt || false,
    meta: modifiers.meta || false
  };
  return event.shiftKey === wanted.shift &&
         event.ctrlKey === wanted.ctrl &&
         event.altKey === wanted.alt &&
         event.metaKey === wanted.meta;
}
// Return the configured modifier set for the given action ('newline' or
// 'send'), or null when settings are not loaded or the action is unknown.
function getTargetModifiers(actionType) {
  if (!enterKeyConfig) return null;
  switch (actionType) {
    case 'newline':
      return enterKeyConfig.newlineModifiers;
    case 'send':
      return enterKeyConfig.sendModifiers;
    default:
      return null;
  }
}
// Load the persisted enterKeyBehavior setting (falling back to the documented
// defaults) and (re)register or remove the capture-phase keydown handler to
// match it. Called once at startup and again whenever the setting changes.
function applyEnterSwapSetting() {
  const DEFAULTS = {
    enterKeyBehavior: {
      enabled: true,
      preset: 'swapped',
      newlineModifiers: { shift: false, ctrl: false, alt: false, meta: false },
      sendModifiers: { shift: true, ctrl: false, alt: false, meta: false }
    }
  };
  chrome.storage.sync.get(DEFAULTS, (data) => {
    enterKeyConfig = data.enterKeyBehavior;
    // Attach or detach the listener so "disabled" is truly inert.
    if (enterKeyConfig.enabled) {
      enableEnterSwap();
    } else {
      disableEnterSwap();
    }
  });
}
// Re-apply the configuration whenever the synced enterKeyBehavior setting
// changes (e.g. the user edits it in the options page of another tab).
chrome.storage.onChanged.addListener((changes, area) => {
  if (area === "sync" && changes.enterKeyBehavior) {
    applyEnterSwapSetting();
  }
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/focus-toggle.js | JavaScript | // Focus toggle content script
// Handles focus switching between sidebar and main page input
/**
 * Locate the main chat input element for whichever AI site is loaded.
 * Host checks are ordered — gemini.google.com must be tested before the
 * generic google.com rule. Returns null when nothing suitable is found.
 */
function findProviderInput() {
  const host = window.location.hostname;
  // Return the first element matched by any of the given selectors, else null.
  const firstMatch = (...selectors) => {
    for (const sel of selectors) {
      const el = document.querySelector(sel);
      if (el) return el;
    }
    return null;
  };
  // ChatGPT
  if (host.includes('chatgpt.com') || host.includes('chat.openai.com')) {
    return firstMatch('#prompt-textarea', 'textarea[data-id="root"]');
  }
  // Claude
  if (host.includes('claude.ai')) {
    return firstMatch('[contenteditable="true"].ProseMirror',
                      'div[contenteditable="true"]',
                      'textarea');
  }
  // Gemini (must precede the google.com catch-all)
  if (host.includes('gemini.google.com')) {
    return firstMatch('.ql-editor[contenteditable="true"]',
                      'div.textarea[role="textbox"]',
                      'textarea');
  }
  // Google AI Mode
  if (host.includes('google.com')) {
    return firstMatch('textarea.ITIRGe',
                      'textarea[aria-label="Ask anything"]',
                      'textarea[maxlength="8192"]');
  }
  // Grok
  if (host.includes('grok.com')) {
    return firstMatch('.tiptap.ProseMirror',
                      'div[contenteditable="true"].ProseMirror');
  }
  // DeepSeek
  if (host.includes('chat.deepseek.com')) {
    return firstMatch('textarea[placeholder*="DeepSeek"]',
                      'textarea.ds-scroll-area');
  }
  // Perplexity
  if (host.includes('perplexity.ai')) {
    return firstMatch('#ask-input[data-lexical-editor="true"]',
                      'div[data-lexical-editor="true"][role="textbox"]');
  }
  // Copilot
  if (host.includes('copilot.microsoft.com') || host.includes('bing.com')) {
    return firstMatch('#userInput', 'textarea[data-testid="composer-input"]');
  }
  // Unknown host: take the first *visible* textarea or contenteditable.
  const textarea = document.querySelector('textarea:not([hidden])');
  if (textarea && textarea.offsetParent !== null) return textarea;
  const editable = document.querySelector('[contenteditable="true"]:not([hidden])');
  if (editable && editable.offsetParent !== null) return editable;
  return null;
}
/**
 * Move keyboard focus into the provider's main input element.
 * For contenteditable inputs the caret is also placed at the end of any
 * existing content. Returns true when an input was found and focused.
 */
function focusProviderInput() {
  const input = findProviderInput();
  if (!input) {
    return false;
  }
  input.focus();
  if (input.isContentEditable) {
    // Build a range collapsed to the end of the content and select it.
    const range = document.createRange();
    range.selectNodeContents(input);
    range.collapse(false); // collapse to end
    const selection = window.getSelection();
    selection.removeAllRanges();
    selection.addRange(range);
  }
  return true;
}
/**
 * Check if the page currently has focus.
 * @returns {boolean} result of document.hasFocus()
 */
function checkPageFocus() {
  return document.hasFocus();
}
// Bridge between the service worker and this page: answer focus queries
// ('checkFocus') and focus-acquisition requests ('takeFocus').
chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
  switch (message.action) {
    case 'checkFocus':
      sendResponse({ hasFocus: checkPageFocus() });
      break;
    case 'takeFocus':
      sendResponse({ success: focusProviderInput() });
      break;
  }
  return true; // Keep channel open for async response
});
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/gemini-history-extractor.js | JavaScript | // Gemini Conversation History Extractor
// Extracts current conversation from Gemini DOM and saves to extension
//
// IMPORTANT: Requires conversation-extractor-utils.js to be loaded first
(function() {
'use strict';
console.log('[Gemini Extractor] Script loaded');
// Import shared utilities from the global namespace
// (provided by conversation-extractor-utils.js, which must load first).
const {
  extractMarkdownFromElement,
  formatMessagesAsText,
  generateConversationId,
  checkForDuplicate,
  showDuplicateWarning,
  showNotification,
  setupKeyboardShortcut,
  observeUrlChanges
} = window.ConversationExtractorUtils;
// Share button selector for language detection.
// Gemini doesn't have a text-based share button; null makes the language
// detector fall back to the document language.
const SHARE_BUTTON_SELECTOR = null;
// The injected "Save" <button>, or null while it is not in the DOM.
let saveButton = null;
// Initialize after page loads
if (document.readyState === 'loading') {
  document.addEventListener('DOMContentLoaded', init);
} else {
  init();
}
// Entry point: only runs on conversation pages, then waits for Gemini's
// SPA to render before injecting the save button.
function init() {
  console.log('[Gemini Extractor] Initializing...');
  console.log('[Gemini Extractor] In iframe?', window !== window.top);
  console.log('[Gemini Extractor] URL:', window.location.href);
  // Only run on conversation pages (not homepage)
  if (!window.location.href.includes('https://gemini.google.com/app/')) {
    console.log('[Gemini Extractor] Not on conversation page, skipping');
    return;
  }
  // Wait a bit for Gemini to fully render (SPA content arrives late)
  setTimeout(() => {
    console.log('[Gemini Extractor] Attempting to insert save button...');
    insertSaveButton();
    observeForModeButton();
  }, 2000);
}
// Create save button matching Gemini's referral button style.
function createSaveButton() {
  // Detect provider's UI language and get matching Save button text
  // (SHARE_BUTTON_SELECTOR is null, so this falls back to document language).
  const { text, tooltip } = window.LanguageDetector.getSaveButtonText(SHARE_BUTTON_SELECTOR);
  const button = document.createElement('button');
  button.id = 'insidebar-save-conversation';
  // Class list copied from Gemini's own referral button so the injected
  // button inherits the native Material look.
  button.className = 'mdc-button mat-mdc-button-base gds-referral-button mdc-button--unelevated mat-mdc-unelevated-button mat-unthemed';
  button.setAttribute('mat-flat-button', '');
  button.setAttribute('data-test-id', 'insidebar-save-button');
  button.type = 'button';
  button.title = tooltip;
  // Create button structure matching Gemini's referral button
  // (ripple + label + focus indicator + touch target spans).
  button.innerHTML = `
<span class="mat-mdc-button-persistent-ripple mdc-button__ripple"></span>
<span class="mdc-button__label">
<span data-test-id="save-label" class="gds-label-m">${text}</span>
</span>
<span class="mat-focus-indicator"></span>
<span class="mat-mdc-button-touch-target"></span>
`;
  button.addEventListener('click', handleSaveClick);
  return button;
}
// Insert save button after the referral button in buttons-container.
// Safe to call repeatedly (the MutationObserver does so on every mutation):
// it bails out when the button already exists or preconditions aren't met.
function insertSaveButton() {
  // Check if button already exists
  if (document.getElementById('insidebar-save-conversation')) {
    console.log('[Gemini Extractor] Save button already exists');
    return;
  }
  // Only insert on conversation pages
  if (!window.location.href.includes('https://gemini.google.com/app/')) {
    console.log('[Gemini Extractor] Not on conversation page');
    return;
  }
  // Find referral button container; it may not be rendered yet, in which
  // case the observer will retry on the next DOM mutation.
  const referralContainer = document.querySelector('.buttons-container.referral');
  console.log('[Gemini Extractor] Looking for referral button container...');
  console.log('[Gemini Extractor] Referral container found?', !!referralContainer);
  if (!referralContainer) {
    console.log('[Gemini Extractor] Referral container not found yet, will retry');
    return;
  }
  // Check if conversation exists
  const hasConversation = detectConversation();
  console.log('[Gemini Extractor] Has conversation?', hasConversation);
  if (!hasConversation) {
    console.log('[Gemini Extractor] No conversation detected, skipping button insertion');
    return;
  }
  // Create wrapper div matching referral container structure
  const buttonWrapper = document.createElement('div');
  buttonWrapper.className = 'buttons-container ng-star-inserted';
  buttonWrapper.setAttribute('data-test-id', 'insidebar-save-container');
  saveButton = createSaveButton();
  buttonWrapper.appendChild(saveButton);
  // Insert after referral container
  referralContainer.parentElement.insertBefore(buttonWrapper, referralContainer.nextSibling);
  console.log('[Gemini Extractor] Save button inserted after referral button');
}
// A conversation exists when at least one message node is on the page.
function detectConversation() {
  return getMessages().length > 0;
}
// Keep the save button in sync with Gemini's SPA: re-insert it after
// re-renders and drop it when the conversation (or the page) goes away.
function observeForModeButton() {
  const onConversationPage = () =>
    window.location.href.includes('https://gemini.google.com/app/');
  const observer = new MutationObserver(() => {
    insertSaveButton();
    const wrapper = document.querySelector('[data-test-id="insidebar-save-container"]');
    if (wrapper && (!detectConversation() || !onConversationPage())) {
      wrapper.remove();
      saveButton = null;
    }
  });
  // Observe the entire document for changes.
  observer.observe(document.body, { childList: true, subtree: true });
}
// Extract the conversation title from the sidebar, trying progressively
// weaker strategies: URL-matched sidebar entry -> '.selected' sidebar
// entry -> title derived from the conversation ID -> generic default.
function getConversationTitle() {
  // Priority 1: Extract conversation ID from URL and find matching sidebar element
  const urlMatch = window.location.pathname.match(/\/app\/([^\/]+)/);
  if (urlMatch) {
    const conversationId = urlMatch[1];
    // Try to find a conversation element with matching ID.
    // Gemini might use data attributes or href patterns.
    const matchingConversation = document.querySelector(`[data-test-id="conversation"][href*="${conversationId}"]`) ||
    document.querySelector(`a[href*="/app/${conversationId}"]`);
    if (matchingConversation) {
      const titleDiv = matchingConversation.querySelector('.conversation-title');
      if (titleDiv) {
        const title = titleDiv.textContent.trim();
        if (title && title.length > 0) {
          console.log('[Gemini Extractor] Found title from URL-matched conversation:', title);
          return title;
        }
      }
    }
    // Fallback: the currently selected sidebar entry (.selected class)
    const selectedConversation = document.querySelector('[data-test-id="conversation"].selected');
    if (selectedConversation) {
      const titleDiv = selectedConversation.querySelector('.conversation-title');
      if (titleDiv) {
        const title = titleDiv.textContent.trim();
        if (title && title.length > 0) {
          console.log('[Gemini Extractor] Found title from selected conversation (class fallback):', title);
          return title;
        }
      }
    }
    // Ultimate fallback: derive a short title from the conversation ID
    console.log('[Gemini Extractor] Falling back to URL-based title');
    return `Gemini Conversation ${conversationId.substring(0, 8)}`;
  }
  // No URL match - use default
  console.log('[Gemini Extractor] No conversation ID in URL, using default');
  return 'Untitled Gemini Conversation';
}
// Collect all chat turns in document order. A turn that fails to extract
// is logged and skipped rather than aborting the whole conversation.
function getMessages() {
  const messages = [];
  // Gemini renders each turn inside a <message-content> element.
  const containers = document.querySelectorAll('message-content');
  console.log('[Gemini Extractor] Found message containers:', containers.length);
  for (const container of containers) {
    try {
      const message = extractMessageFromContainer(container);
      if (message) messages.push(message);
    } catch (error) {
      console.warn('[Gemini Extractor] Error extracting message:', error);
    }
  }
  return messages;
}
// Convert one <message-content> node into a {role, content} record,
// or null when it holds no extractable text.
function extractMessageFromContainer(container) {
  // A nested <user-query> marks a user turn; <model-response> an assistant turn.
  const role = container.querySelector('user-query')
    ? 'user'
    : container.querySelector('model-response')
      ? 'assistant'
      : 'unknown';
  // Prefer the dedicated content wrapper; fall back to the container itself.
  const contentElement =
    container.querySelector('.query-content, .model-response-text, [class*="message-content"]') ||
    container;
  const content = extractMarkdownFromElement(contentElement);
  if (!content.trim()) return null;
  return { role, content: content.trim() };
}
// NOTE: Markdown extraction and formatting live in conversation-extractor-utils.js.
// Assemble the full conversation payload for the background script.
// Throws when no messages could be extracted.
function extractConversation() {
  try {
    const title = getConversationTitle();
    const messages = getMessages();
    if (!messages || messages.length === 0) {
      throw new Error('No messages found in conversation');
    }
    return {
      title,
      content: formatMessagesAsText(messages),
      messages,
      timestamp: Date.now(),
      url: window.location.href,
      provider: 'Gemini'
    };
  } catch (error) {
    console.error('[Gemini Extractor] Error extracting conversation:', error);
    throw error;
  }
}
// Handle save button click: extract the conversation, deduplicate against
// previously saved copies, and forward it to the background script.
// @param {Event} [e] - click event (absent when invoked via keyboard shortcut)
async function handleSaveClick(e) {
  if (e) {
    e.preventDefault();
    e.stopPropagation();
  }
  console.log('[Gemini Extractor] Save button clicked');
  if (!saveButton) return;
  // Check if chrome API is available (it disappears when the extension is reloaded)
  if (typeof chrome === 'undefined' || !chrome.runtime) {
    console.error('[Gemini Extractor] Chrome extension API not available');
    showNotification('Extension API not available. Try reloading the page.', 'error');
    return;
  }
  // Capture a stable reference: the MutationObserver may remove the button
  // and null out `saveButton` while the async save is in flight, which
  // previously made the sendMessage callback throw on `saveButton.disabled`.
  const button = saveButton;
  // Guard labelSpan: if the label span is missing, fall back to no-op label
  // updates instead of crashing on a null dereference.
  const labelSpan = button.querySelector('[data-test-id="save-label"]');
  const originalText = labelSpan ? labelSpan.textContent : '';
  const setLabel = (value) => { if (labelSpan) labelSpan.textContent = value; };
  const restoreButton = () => {
    button.disabled = false;
    setLabel(originalText);
  };
  // Disable button during save
  button.disabled = true;
  setLabel('Saving...');
  try {
    const conversation = extractConversation();
    console.log('[Gemini Extractor] Extracted conversation:', {
      title: conversation.title,
      messageCount: conversation.messages.length,
      contentLength: conversation.content.length,
      url: conversation.url,
      provider: conversation.provider
    });
    // Generate conversation ID for deduplication
    const conversationId = generateConversationId(conversation.url, conversation.title);
    conversation.conversationId = conversationId;
    // Check for duplicates
    const duplicateCheck = await checkForDuplicate(conversationId);
    if (duplicateCheck.isDuplicate) {
      // Compare content to decide whether to save
      const existingContent = (duplicateCheck.existingConversation.content || '').trim();
      const newContent = (conversation.content || '').trim();
      if (existingContent === newContent) {
        // Content identical - silently skip save
        restoreButton();
        return;
      }
      // Content changed - automatically overwrite, keeping the original timestamp
      conversation.overwriteId = duplicateCheck.existingConversation.id;
      conversation.timestamp = duplicateCheck.existingConversation.timestamp;
    }
    // Send to background script
    chrome.runtime.sendMessage({
      action: 'saveConversationFromPage',
      payload: conversation
    }, (response) => {
      if (chrome.runtime.lastError) {
        console.error('[Gemini Extractor] Chrome runtime error:', chrome.runtime.lastError);
        const errorMsg = chrome.runtime.lastError.message;
        // Provide user-friendly message for context invalidation
        if (errorMsg.includes('Extension context invalidated')) {
          showNotification('Extension was reloaded. Please reload this page and try saving again.', 'error');
        } else {
          showNotification('Failed to save: ' + errorMsg, 'error');
        }
        restoreButton();
        return;
      }
      if (response && response.success) {
        console.log('[Gemini Extractor] Conversation saved successfully');
        // Success notification is shown in the sidebar, not here.
      } else {
        const errorMsg = response?.error || 'Unknown error';
        showNotification('Failed to save: ' + errorMsg, 'error');
      }
      // Re-enable button
      restoreButton();
    });
  } catch (error) {
    console.error('[Gemini Extractor] Error during extraction:', error);
    showNotification('Failed to extract conversation: ' + error.message, 'error');
    restoreButton();
  }
}
// Setup keyboard shortcut (Ctrl+Shift+S or Cmd+Shift+S); the second
// argument gates the shortcut on a conversation being present.
setupKeyboardShortcut(() => {
  if (window.location.href.includes('https://gemini.google.com/app/')) {
    handleSaveClick();
  }
}, detectConversation);
// Listen for URL changes (Gemini is a SPA)
observeUrlChanges((url) => {
  console.log('[Gemini Extractor] URL changed to:', url);
  // Remove button container if leaving conversation page
  if (!url.includes('https://gemini.google.com/app/')) {
    const existingContainer = document.querySelector('[data-test-id="insidebar-save-container"]');
    if (existingContainer) {
      existingContainer.remove();
      saveButton = null;
    }
  } else {
    // Try to insert button on conversation page (give the SPA a moment to render)
    setTimeout(() => insertSaveButton(), 1000);
  }
});
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/gemini-save-button.css | CSS | /* Gemini Save Button Styles */
/* Wrapper injected around the save button. */
[data-test-id="insidebar-save-container"] {
  /* Match referral container spacing */
  display: flex;
  align-items: center;
}
#insidebar-save-conversation {
  /* Button matches Gemini's referral button style */
  font-family: inherit;
  cursor: pointer;
  user-select: none;
}
/* Dimmed while a save is in flight. */
#insidebar-save-conversation:disabled {
  opacity: 0.5;
  cursor: not-allowed;
}
/* Notification animations (used by the shared notification helper). */
@keyframes slideIn {
  from {
    transform: translateX(100%);
    opacity: 0;
  }
  to {
    transform: translateX(0);
    opacity: 1;
  }
}
@keyframes slideOut {
  from {
    transform: translateX(0);
    opacity: 1;
  }
  to {
    transform: translateX(100%);
    opacity: 0;
  }
}
.insidebar-notification {
  transition: all 0.3s ease;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/google-history-extractor.js | JavaScript | // Google AI Mode Conversation History Extractor
// Extracts current conversation from Google AI Mode DOM and saves to extension
//
// IMPORTANT: Requires conversation-extractor-utils.js to be loaded first
(function() {
'use strict';
console.log('[Google Extractor] Script loaded');
// Import shared utilities from the global namespace
// (provided by conversation-extractor-utils.js, which must load first).
const {
  extractMarkdownFromElement,
  formatMessagesAsText,
  generateConversationId,
  checkForDuplicate,
  showDuplicateWarning,
  showNotification,
  setupKeyboardShortcut,
  observeUrlChanges
} = window.ConversationExtractorUtils;
// Share button selector for language detection.
// Google AI Mode doesn't have a text-based share button; null makes the
// language detector fall back to the document language.
const SHARE_BUTTON_SELECTOR = null;
// The injected "Save" <button>, or null while it is not in the DOM.
let saveButton = null;
// Initialize after page loads
if (document.readyState === 'loading') {
  document.addEventListener('DOMContentLoaded', init);
} else {
  init();
}
// Entry point: only runs in AI Mode (udm=50 query parameter), then waits
// for the page to render before injecting the save button.
function init() {
  console.log('[Google Extractor] Initializing...');
  console.log('[Google Extractor] In iframe?', window !== window.top);
  console.log('[Google Extractor] URL:', window.location.href);
  // Only run on Google AI Mode pages (udm=50 parameter)
  if (!window.location.search.includes('udm=50')) {
    console.log('[Google Extractor] Not on AI Mode page, skipping');
    return;
  }
  // Wait a bit for Google AI Mode to fully render
  setTimeout(() => {
    console.log('[Google Extractor] Attempting to insert save button...');
    insertSaveButton();
    observeForButtons();
  }, 2000);
}
// Create save button with a download icon matching Google's style.
function createSaveButton() {
  // Detect provider's UI language and get matching Save button tooltip
  // (SHARE_BUTTON_SELECTOR is null, so this falls back to document language).
  const { tooltip } = window.LanguageDetector.getSaveButtonText(SHARE_BUTTON_SELECTOR);
  const button = document.createElement('button');
  button.id = 'insidebar-google-save-conversation';
  // Class copied from Google AI Mode's own toolbar buttons.
  button.className = 'UTNPFf';
  button.setAttribute('data-test-id', 'insidebar-google-save-button');
  button.type = 'button';
  button.title = tooltip;
  button.setAttribute('aria-label', tooltip);
  // Create button structure with a Material Symbols download-circle icon.
  button.innerHTML = `
<div class="juBd7">
<svg xmlns="http://www.w3.org/2000/svg" height="28px" viewBox="0 -960 960 960" width="28px" fill="#434343"><path d="M290-290h380v-60H290v60Zm190-123.85L626.15-560 584-602.15l-74 72.77V-710h-60v180.62l-74-72.77L333.85-560 480-413.85Zm.07 313.85q-78.84 0-148.21-29.92t-120.68-81.21q-51.31-51.29-81.25-120.63Q100-401.1 100-479.93q0-78.84 29.92-148.21t81.21-120.68q51.29-51.31 120.63-81.25Q401.1-860 479.93-860q78.84 0 148.21 29.92t120.68 81.21q51.31 51.29 81.25 120.63Q860-558.9 860-480.07q0 78.84-29.92 148.21t-81.21 120.68q-51.29 51.31-120.63 81.25Q558.9-100 480.07-100Zm-.07-60q134 0 227-93t93-227q0-134-93-227t-227-93q-134 0-227 93t-93 227q0 134 93 227t227 93Zm0-320Z"/></svg>
</div>
`;
  button.addEventListener('click', handleSaveClick);
  return button;
}
// Insert save button after the "Open AI Mode history" button.
// Safe to call repeatedly; bails out when the button already exists or
// preconditions aren't met (observer will retry on the next mutation).
function insertSaveButton() {
  // Check if button already exists
  if (document.getElementById('insidebar-google-save-conversation')) {
    console.log('[Google Extractor] Save button already exists');
    return;
  }
  // Only insert on AI Mode pages
  if (!window.location.search.includes('udm=50')) {
    console.log('[Google Extractor] Not on AI Mode page');
    return;
  }
  // Find the OEwhSe container with the history button
  const buttonContainer = document.querySelector('.OEwhSe');
  console.log('[Google Extractor] Looking for button container...');
  console.log('[Google Extractor] Button container found?', !!buttonContainer);
  if (!buttonContainer) {
    console.log('[Google Extractor] Button container not found yet, will retry');
    return;
  }
  // Check if conversation exists
  const hasConversation = detectConversation();
  console.log('[Google Extractor] Has conversation?', hasConversation);
  if (!hasConversation) {
    console.log('[Google Extractor] No conversation detected, skipping button insertion');
    return;
  }
  saveButton = createSaveButton();
  // Insert after the second button (assumed to be the history button —
  // position is based on observed DOM order, not a stable attribute).
  const historyButton = buttonContainer.querySelectorAll('button')[1];
  if (historyButton && historyButton.nextSibling) {
    buttonContainer.insertBefore(saveButton, historyButton.nextSibling);
  } else if (historyButton) {
    historyButton.parentElement.appendChild(saveButton);
  } else {
    // Fallback: just append to container
    buttonContainer.appendChild(saveButton);
  }
  console.log('[Google Extractor] Save button inserted after history button');
}
// True when the URL carries a 'q' query parameter or message nodes are
// present in the DOM.
function detectConversation() {
  const query = new URLSearchParams(window.location.search).get('q');
  const hasQuery = typeof query === 'string' && query.length > 0;
  // getMessages() is always invoked so its diagnostic logging still fires.
  const messages = getMessages();
  return hasQuery || messages.length > 0;
}
// Keep the save button in sync with the page: re-insert it after
// re-renders and drop it when the conversation or AI Mode page goes away.
function observeForButtons() {
  const onAiModePage = () => window.location.search.includes('udm=50');
  const observer = new MutationObserver(() => {
    insertSaveButton();
    const existing = document.getElementById('insidebar-google-save-conversation');
    if (existing && (!detectConversation() || !onAiModePage())) {
      existing.remove();
      saveButton = null;
    }
  });
  // Observe the entire document for changes.
  observer.observe(document.body, { childList: true, subtree: true });
}
// Derive a title from the search query (truncated to 50 characters with
// an ellipsis) or fall back to a generic label.
function getConversationTitle() {
  const query = new URLSearchParams(window.location.search).get('q');
  if (query && query.length > 0) {
    const title = query.substring(0, 50);
    console.log('[Google Extractor] Found title from query:', title);
    return query.length > 50 ? title + '...' : title;
  }
  console.log('[Google Extractor] Using default title');
  return 'Google AI Conversation';
}
// Extract all messages from the conversation.
// NOTE(review): the Google AI Mode DOM is undocumented and changes often;
// these selectors are best-effort and may need adjustment.
function getMessages() {
  const messages = [];
  // Try a list of candidate selectors, most specific first; the first
  // selector that matches anything wins.
  const possibleSelectors = [
    'div[data-processed="true"]', // Google AI Mode primary selector
    '[role="article"]',
    '[data-message-author]',
    '.message-container',
    '.conversation-turn'
  ];
  let messageContainers = [];
  for (const selector of possibleSelectors) {
    messageContainers = document.querySelectorAll(selector);
    if (messageContainers.length > 0) {
      console.log('[Google Extractor] Found messages using selector:', selector);
      break;
    }
  }
  console.log('[Google Extractor] Found message containers:', messageContainers.length);
  messageContainers.forEach(container => {
    try {
      const message = extractMessageFromContainer(container);
      if (message) {
        messages.push(message);
      }
    } catch (error) {
      console.warn('[Google Extractor] Error extracting message:', error);
    }
  });
  // If no messages found via containers, fall back to the user's query
  // from the URL so at least the prompt is saved.
  if (messages.length === 0) {
    const urlParams = new URLSearchParams(window.location.search);
    const query = urlParams.get('q');
    if (query) {
      messages.push({
        role: 'user',
        content: query
      });
    }
  }
  return messages;
}
// Extract a single {role, content} message from its container, or null
// when no text could be extracted.
// NOTE(review): role detection is heuristic — when no explicit author
// attribute/class exists, it assumes turns strictly alternate
// user/assistant, which may mislabel messages; verify against live DOM.
function extractMessageFromContainer(container) {
  let role = 'unknown';
  // Google AI Mode specific: containers tagged data-processed="true"
  if (container.hasAttribute('data-processed')) {
    // Heuristic: alternate between user and assistant;
    // first message is typically the user query.
    const allMessages = document.querySelectorAll('div[data-processed="true"]');
    const index = Array.from(allMessages).indexOf(container);
    role = index % 2 === 0 ? 'user' : 'assistant';
  }
  // Try to detect role from attributes or classes
  else if (container.hasAttribute('data-message-author')) {
    const author = container.getAttribute('data-message-author');
    role = author === 'user' ? 'user' : 'assistant';
  } else if (container.classList.contains('user-message')) {
    role = 'user';
  } else if (container.classList.contains('assistant-message') || container.classList.contains('ai-message')) {
    role = 'assistant';
  } else {
    // Last-resort heuristic: alternate by position among [role="article"] nodes.
    const allMessages = document.querySelectorAll('[role="article"]');
    const index = Array.from(allMessages).indexOf(container);
    role = index % 2 === 0 ? 'user' : 'assistant';
  }
  // Extract markdown from the content
  const content = extractMarkdownFromElement(container);
  if (!content.trim()) return null;
  return {
    role,
    content: content.trim()
  };
}
// Assemble the full conversation payload for the background script.
// Throws when no messages could be extracted.
function extractConversation() {
  try {
    const title = getConversationTitle();
    const messages = getMessages();
    if (!messages || messages.length === 0) {
      throw new Error('No messages found in conversation');
    }
    return {
      title,
      content: formatMessagesAsText(messages),
      messages,
      timestamp: Date.now(),
      url: window.location.href,
      provider: 'Google'
    };
  } catch (error) {
    console.error('[Google Extractor] Error extracting conversation:', error);
    throw error;
  }
}
// Handle save button click: extract the conversation, deduplicate against
// previously saved copies, and forward it to the background script.
// @param {Event} [e] - click event (absent when invoked via keyboard shortcut)
async function handleSaveClick(e) {
  if (e) {
    e.preventDefault();
    e.stopPropagation();
  }
  console.log('[Google Extractor] Save button clicked');
  if (!saveButton) return;
  // Check if chrome API is available (it disappears when the extension is reloaded)
  if (typeof chrome === 'undefined' || !chrome.runtime) {
    console.error('[Google Extractor] Chrome extension API not available');
    showNotification('Extension API not available. Try reloading the page.', 'error');
    return;
  }
  // Capture a stable reference: the MutationObserver may remove the button
  // and null out `saveButton` while the async save is in flight, which
  // previously made the sendMessage callback throw on `saveButton.disabled`.
  const button = saveButton;
  const originalTitle = button.title;
  const restoreButton = () => {
    button.disabled = false;
    button.title = originalTitle;
  };
  // Disable button during save
  button.disabled = true;
  button.title = 'Saving...';
  try {
    const conversation = extractConversation();
    console.log('[Google Extractor] Extracted conversation:', {
      title: conversation.title,
      messageCount: conversation.messages.length,
      contentLength: conversation.content.length,
      url: conversation.url,
      provider: conversation.provider
    });
    // Generate conversation ID for deduplication
    const conversationId = generateConversationId(conversation.url, conversation.title);
    conversation.conversationId = conversationId;
    // Check for duplicates
    const duplicateCheck = await checkForDuplicate(conversationId);
    if (duplicateCheck.isDuplicate) {
      // Compare content to decide whether to save
      const existingContent = (duplicateCheck.existingConversation.content || '').trim();
      const newContent = (conversation.content || '').trim();
      if (existingContent === newContent) {
        // Content identical - silently skip save
        restoreButton();
        return;
      }
      // Content changed - automatically overwrite, keeping the original timestamp
      conversation.overwriteId = duplicateCheck.existingConversation.id;
      conversation.timestamp = duplicateCheck.existingConversation.timestamp;
    }
    // Send to background script
    chrome.runtime.sendMessage({
      action: 'saveConversationFromPage',
      payload: conversation
    }, (response) => {
      if (chrome.runtime.lastError) {
        console.error('[Google Extractor] Chrome runtime error:', chrome.runtime.lastError);
        const errorMsg = chrome.runtime.lastError.message;
        // Provide user-friendly message for context invalidation
        if (errorMsg.includes('Extension context invalidated')) {
          showNotification('Extension was reloaded. Please reload this page and try saving again.', 'error');
        } else {
          showNotification('Failed to save: ' + errorMsg, 'error');
        }
        restoreButton();
        return;
      }
      if (response && response.success) {
        console.log('[Google Extractor] Conversation saved successfully');
        // Success notification is shown in the sidebar, not here.
      } else {
        const errorMsg = response?.error || 'Unknown error';
        showNotification('Failed to save: ' + errorMsg, 'error');
      }
      // Re-enable button
      restoreButton();
    });
  } catch (error) {
    console.error('[Google Extractor] Error during extraction:', error);
    showNotification('Failed to extract conversation: ' + error.message, 'error');
    restoreButton();
  }
}
// Setup keyboard shortcut (Ctrl+Shift+S or Cmd+Shift+S); the second
// argument gates the shortcut on a conversation being present.
setupKeyboardShortcut(() => {
  if (window.location.search.includes('udm=50')) {
    handleSaveClick();
  }
}, detectConversation);
// Listen for URL changes (Google AI is likely a SPA)
observeUrlChanges((url) => {
  console.log('[Google Extractor] URL changed to:', url);
  // Remove button if leaving AI Mode page
  if (!url.includes('udm=50')) {
    const existingButton = document.getElementById('insidebar-google-save-conversation');
    if (existingButton) {
      existingButton.remove();
      saveButton = null;
    }
  } else {
    // Try to insert button on AI Mode page (give the page a moment to render)
    setTimeout(() => insertSaveButton(), 1000);
  }
});
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/google-save-button.css | CSS | /* Google AI Mode Save Button Styles */
/* Wrapper injected around the save button (matches OEwhSe container spacing). */
[data-test-id="insidebar-google-save-container"] {
  display: inline-flex;
  align-items: center;
  margin-left: 8px;
}
#insidebar-google-save-conversation {
  /* Round icon button matching Google AI Mode's toolbar style */
  font-family: inherit;
  cursor: pointer;
  user-select: none;
  background: transparent;
  border: none;
  padding: 8px;
  border-radius: 50%;
  display: flex;
  align-items: center;
  justify-content: center;
  transition: background-color 0.2s;
}
#insidebar-google-save-conversation:hover {
  background-color: rgba(0, 0, 0, 0.04);
}
#insidebar-google-save-conversation:active {
  background-color: rgba(0, 0, 0, 0.08);
}
/* Dimmed while a save is in flight. */
#insidebar-google-save-conversation:disabled {
  opacity: 0.5;
  cursor: not-allowed;
}
#insidebar-google-save-conversation svg {
  display: block;
}
/* Notification animations (used by the shared notification helper). */
@keyframes slideIn {
  from {
    transform: translateX(100%);
    opacity: 0;
  }
  to {
    transform: translateX(0);
    opacity: 1;
  }
}
@keyframes slideOut {
  from {
    transform: translateX(0);
    opacity: 1;
  }
  to {
    transform: translateX(100%);
    opacity: 0;
  }
}
.insidebar-notification {
  transition: all 0.3s ease;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/grok-history-extractor.js | JavaScript | // Grok Conversation History Extractor
// Extracts current conversation from Grok DOM and saves to extension
//
// IMPORTANT: Requires conversation-extractor-utils.js and language-detector.js to be loaded first
(function() {
'use strict';
console.log('[Grok Extractor] Script loaded');
// Import shared utilities from the global namespace
// (provided by conversation-extractor-utils.js, which must load first).
const {
  extractMarkdownFromElement,
  formatMessagesAsText,
  generateConversationId,
  checkForDuplicate,
  showDuplicateWarning,
  showNotification,
  setupKeyboardShortcut,
  observeUrlChanges
} = window.ConversationExtractorUtils;
// Locate Grok's Share button regardless of UI language by matching each
// candidate's label against known translations of "Share".
function findShareButton() {
  // Note: order matters — longer strings come first so e.g. '共有する'
  // (Japanese verb form) wins over the shorter noun form '共有'.
  const shareTexts = [
    '共有する', // Japanese (verb form)
    'Поделиться', // Russian
    'Compartir', // Spanish
    'Partager', // French
    'Condividi', // Italian
    'Teilen', // German
    'Share', // English
    '共享', // Chinese
    '分享', // Chinese
    '共有', // Japanese (noun)
    '공유' // Korean
  ];
  const candidates = document.querySelectorAll('button.rounded-full');
  for (const candidate of candidates) {
    const label = candidate.textContent?.trim();
    if (label && shareTexts.some((shareText) => label.includes(shareText))) {
      return candidate;
    }
  }
  return null;
}
// The injected "Save" <button>, or null while it is not in the DOM.
let saveButton = null;
// Initialize after page loads
if (document.readyState === 'loading') {
  document.addEventListener('DOMContentLoaded', init);
} else {
  init();
}
// Entry point: only runs on Grok conversation pages, then waits for the
// SPA to render before injecting the save button.
function init() {
  console.log('[Grok Extractor] Initializing...');
  console.log('[Grok Extractor] In iframe?', window !== window.top);
  console.log('[Grok Extractor] URL:', window.location.href);
  // Only run on conversation pages (not homepage)
  if (!window.location.href.includes('https://grok.com/c/')) {
    console.log('[Grok Extractor] Not on conversation page, skipping');
    return;
  }
  // Wait a bit for Grok to fully render (SPA content arrives late)
  setTimeout(() => {
    console.log('[Grok Extractor] Attempting to insert save button...');
    insertSaveButton();
    observeForShareButton();
  }, 2000);
}
// Create save button matching Grok's UI with SVG icon.
// The label and tooltip language are inferred by reading Grok's own Share
// button text, so the Save button always matches the rest of the page.
function createSaveButton() {
  // Find Share button to detect language
  const shareButton = findShareButton();
  let lang = 'en'; // default
  if (shareButton) {
    const shareText = shareButton.textContent?.trim();
    // Detect language from Share button text
    // Note: Order matters - check longer strings first to avoid partial matches
    // (string-keyed object entries iterate in insertion order, so the verb
    // form '共有する' is tested before the noun form '共有').
    const langMap = {
      '共有する': 'ja', // Japanese (verb form) - check before noun form
      'Поделиться': 'ru',
      'Compartir': 'es',
      'Partager': 'fr',
      'Condividi': 'it',
      'Teilen': 'de',
      'Share': 'en',
      '共享': 'zh_CN',
      '分享': 'zh_CN',
      '共有': 'ja', // Japanese (noun form)
      '공유': 'ko'
    };
    for (const [text, detectedLang] of Object.entries(langMap)) {
      if (shareText && shareText.includes(text)) {
        lang = detectedLang;
        break;
      }
    }
  }
  // Get Save button text for detected language
  const saveTexts = {
    'en': 'Save',
    'zh_CN': '保存',
    'zh_TW': '保存',
    'ja': '保存',
    'ko': '저장',
    'ru': 'Сохранить',
    'es': 'Guardar',
    'fr': 'Enregistrer',
    'de': 'Speichern',
    'it': 'Salva'
  };
  // Tooltip translations, keyed the same way as saveTexts.
  const tooltips = {
    'en': 'Save this conversation to insidebar.ai',
    'zh_CN': '保存此对话到 insidebar.ai',
    'zh_TW': '保存此對話到 insidebar.ai',
    'ja': 'この会話を insidebar.ai に保存',
    'ko': '이 대화를 insidebar.ai에 저장',
    'ru': 'Сохранить этот разговор в insidebar.ai',
    'es': 'Guardar esta conversación en insidebar.ai',
    'fr': 'Enregistrer cette conversation dans insidebar.ai',
    'de': 'Dieses Gespräch in insidebar.ai speichern',
    'it': 'Salva questa conversazione su insidebar.ai'
  };
  // Fall back to English for any language the maps do not cover.
  const text = saveTexts[lang] || saveTexts['en'];
  const tooltip = tooltips[lang] || tooltips['en'];
  console.log('[Grok Extractor] Creating Save button in language:', lang);
  // Clone Grok's own pill-button styling so the Save button blends in.
  const button = document.createElement('button');
  button.id = 'insidebar-save-conversation';
  button.className = 'inline-flex items-center justify-center gap-2 whitespace-nowrap font-medium cursor-pointer focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:opacity-60 disabled:cursor-not-allowed transition-colors duration-100 [&_svg]:shrink-0 select-none border border-border-l2 text-fg-primary hover:bg-button-ghost-hover [&_svg]:hover:text-fg-primary disabled:hover:bg-transparent h-10 px-3.5 py-1.5 text-sm rounded-full';
  button.type = 'button';
  button.setAttribute('aria-label', text);
  button.setAttribute('data-state', 'closed');
  button.title = tooltip;
  // Create button structure with SVG icon + text
  button.innerHTML = `
<span style="opacity: 1; transform: none;">
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg" class="stroke-[2]" stroke-width="2">
<path d="M2.66820931,12.6663 L2.66820931,12.5003 C2.66820931,12.1331 2.96598,11.8353 3.33325,11.8353 C3.70052,11.8353 3.99829,12.1331 3.99829,12.5003 L3.99829,12.6663 C3.99829,13.3772 3.9992,13.8707 4.03052,14.2542 C4.0612,14.6298 4.11803,14.8413 4.19849,14.9993 L4.2688,15.1263 C4.44511,15.4137 4.69813,15.6481 5.00024,15.8021 L5.13013,15.8577 C5.2739,15.9092 5.46341,15.947 5.74536,15.97 C6.12888,16.0014 6.62221,16.0013 7.33325,16.0013 L12.6663,16.0013 C13.3771,16.0013 13.8707,16.0014 14.2542,15.97 C14.6295,15.9394 14.8413,15.8825 14.9993,15.8021 L15.1262,15.7308 C15.4136,15.5545 15.6481,15.3014 15.802,14.9993 L15.8577,14.8695 C15.9091,14.7257 15.9469,14.536 15.97,14.2542 C16.0013,13.8707 16.0012,13.3772 16.0012,12.6663 L16.0012,12.5003 C16.0012,12.1332 16.2991,11.8355 16.6663,11.8353 C17.0335,11.8353 17.3313006,12.1331 17.3313006,12.5003 L17.3313006,12.6663 C17.3313006,13.3553 17.3319,13.9124 17.2952,14.3626 C17.2624,14.7636 17.1974,15.1247 17.053,15.4613 L16.9866,15.6038 C16.7211,16.1248 16.3172,16.5605 15.8215,16.8646 L15.6038,16.9866 C15.227,17.1786 14.8206,17.2578 14.3625,17.2952 C13.9123,17.332 13.3553,17.3314006 12.6663,17.3314006 L7.33325,17.3314006 C6.64416,17.3314006 6.0872,17.332 5.63696,17.2952 C5.23642,17.2625 4.87552,17.1982 4.53931,17.054 L4.39673,16.9866 C3.87561,16.7211 3.43911,16.3174 3.13501,15.8216 L3.01294,15.6038 C2.82097,15.2271 2.74177,14.8206 2.70435,14.3626 C2.66758,13.9124 2.66820931,13.3553 2.66820931,12.6663 Z M9.33521,3.33339 L9.33521,10.89489 L7.13696,8.69665 C6.87732,8.43701 6.45625,8.43712 6.19653,8.69665 C5.93684,8.95635 5.93684,9.37738 6.19653,9.63708 L9.52954,12.97106 L9.6311,13.05407 C9.73949,13.12627 9.86809,13.1654 10.0002,13.1654 C10.1763,13.1654 10.3454,13.0955 10.47,12.97106 L13.804,9.63708 C14.0633,9.37741 14.0634,8.95625 13.804,8.69665 C13.5443,8.43695 13.1222,8.43695 12.8625,8.69665 L10.6653,10.89392 L10.6653,3.33339 C10.6651,2.96639 10.3673,2.66849 10.0002,2.66829 C9.63308,2.66829 9.33538,2.96629 9.33521,3.33339 Z" 
fill="currentColor" fill-rule="nonzero"></path>
</svg>
</span>
<span class="font-semibold" data-test-id="save-label">${text}</span>
`;
  button.addEventListener('click', handleSaveClick);
  return button;
}
// Inject the Save button immediately after Grok's native Share button.
// No-ops (and logs why) when the button is already present, when we are not
// on a conversation page, or when the Share button has not rendered yet.
function insertSaveButton() {
  // A previous pass may already have injected the button.
  if (document.getElementById('insidebar-save-conversation')) {
    console.log('[Grok Extractor] Save button already exists');
    return;
  }
  // Conversation pages only — the homepage has no Share button to anchor to.
  if (!window.location.href.includes('https://grok.com/c/')) {
    console.log('[Grok Extractor] Not on conversation page');
    return;
  }
  // Language-agnostic lookup of the Share button used as insertion anchor.
  const anchor = findShareButton();
  console.log('[Grok Extractor] Looking for share button...');
  console.log('[Grok Extractor] Share button found?', !!anchor);
  if (!anchor) {
    console.log('[Grok Extractor] Share button not found yet, will retry');
    return;
  }
  const hasConversation = detectConversation();
  console.log('[Grok Extractor] Has conversation?', hasConversation);
  if (!hasConversation) {
    // Message detection can fail right after load; the Share button's
    // presence is treated as sufficient evidence, so insert anyway.
    console.log('[Grok Extractor] No conversation detected via messages, but share button exists');
    console.log('[Grok Extractor] Inserting button anyway - messages may load later');
  }
  saveButton = createSaveButton();
  anchor.parentElement.insertBefore(saveButton, anchor.nextSibling);
  console.log('[Grok Extractor] Save button inserted after share button');
}
// A conversation is considered present when at least one message can be
// extracted from Grok's DOM.
function detectConversation() {
  const found = getMessages();
  return !!(found && found.length > 0);
}
// Keep the Save button in sync with Grok's SPA DOM: (re)insert it after
// re-renders, and tear it down when the conversation disappears or the user
// navigates away from a conversation URL.
function observeForShareButton() {
  const observer = new MutationObserver(() => {
    insertSaveButton();
    const existing = document.getElementById('insidebar-save-conversation');
    if (!existing) {
      return;
    }
    if (!detectConversation() || !window.location.href.includes('https://grok.com/c/')) {
      existing.remove();
      saveButton = null;
    }
  });
  // Watch the whole document — Grok re-renders large subtrees on navigation.
  observer.observe(document.body, { childList: true, subtree: true });
}
// Derive a title for the conversation.
// Preference order:
//   1. The first user message, cleaned and truncated to 50 characters.
//   2. Fallback: a generic title built from the conversation ID in the URL.
// Always returns a non-empty string.
function getConversationTitle() {
  // Extract conversation ID from URL for the fallback title.
  const urlMatch = window.location.pathname.match(/\/c\/([^\/]+)/);
  const conversationId = urlMatch ? urlMatch[1] : 'unknown';
  // Use first user message as title
  try {
    const messages = getMessages();
    if (messages && messages.length > 0) {
      const firstUserMessage = messages.find(m => m.role === 'user');
      if (firstUserMessage && firstUserMessage.content) {
        // Strip any "User: " prefix and collapse newlines before truncating.
        const cleaned = firstUserMessage.content
          .replace(/^User:\s*/i, '')
          .replace(/\n+/g, ' ')
          .trim();
        const truncated = cleaned.substring(0, 50).trim();
        if (truncated.length > 0) {
          // Append an ellipsis only when the content was actually cut off.
          // (The previous `truncated.length === 50` check misfired on
          // exactly-50-character messages and missed truncations whose
          // trailing whitespace was trimmed away.)
          const title = cleaned.length > 50 ? truncated + '...' : truncated;
          console.log('[Grok Extractor] Using first user message as title:', title);
          return title;
        }
      }
    }
  } catch (error) {
    console.warn('[Grok Extractor] Error extracting title from first message:', error);
  }
  // Fallback: URL-based title (only if message extraction yielded nothing).
  console.log('[Grok Extractor] Falling back to URL-based title');
  return `Grok Conversation ${conversationId.substring(0, 8)}`;
}
// Collect every message on the page. Selectors are tried from most to least
// specific because Grok's DOM structure changes between releases.
function getMessages() {
  const results = [];
  // Primary: explicit conversation-turn test ids.
  let containers = document.querySelectorAll('[data-testid^="conversation-turn-"]');
  // Fallback 1: class-name hints.
  if (containers.length === 0) {
    containers = document.querySelectorAll('[class*="conversation-turn"], [class*="message-"], article[class*="group"]');
  }
  // Fallback 2: generic article/turn nodes inside the main content area.
  if (containers.length === 0) {
    const mainContent = document.querySelector('main, [role="main"], .chat-container');
    if (mainContent) {
      containers = mainContent.querySelectorAll('article, [class*="turn"], [data-message]');
    }
  }
  console.log('[Grok Extractor] Found message containers:', containers.length);
  console.log('[Grok Extractor] Sample container classes:', containers[0]?.className);
  // Extract each container independently so one bad node cannot abort the rest.
  for (const container of containers) {
    try {
      const message = extractMessageFromContainer(container);
      if (message) {
        results.push(message);
      }
    } catch (error) {
      console.warn('[Grok Extractor] Error extracting message:', error);
    }
  }
  return results;
}
// Extract a single message from its container.
// Role detection tries, in order: data-testid, the data-message-author-role
// attribute, descendant role markers, class names, and finally the
// container's position among its siblings. Returns {role, content} or null
// when no text could be extracted.
function extractMessageFromContainer(container) {
  // Determine role based on multiple indicators
  let role = 'unknown';
  // Try data-testid
  const testId = container.getAttribute('data-testid');
  if (testId) {
    if (testId.includes('user')) {
      role = 'user';
    } else if (testId.includes('assistant') || testId.includes('grok')) {
      role = 'assistant';
    }
  }
  // Try data-message-author-role attribute
  if (role === 'unknown') {
    const authorRole = container.getAttribute('data-message-author-role');
    if (authorRole) {
      role = authorRole;
    }
  }
  // Try to detect from content structure
  if (role === 'unknown') {
    const hasUserIndicator = container.querySelector('[data-message-author-role="user"], [class*="user-message"]');
    const hasAssistantIndicator = container.querySelector('[data-message-author-role="assistant"], [class*="assistant-message"], [class*="grok-message"]');
    if (hasUserIndicator) {
      role = 'user';
    } else if (hasAssistantIndicator) {
      role = 'assistant';
    }
  }
  // Try class-based detection
  if (role === 'unknown') {
    const className = container.className || '';
    if (className.includes('user')) {
      role = 'user';
    } else if (className.includes('assistant') || className.includes('grok')) {
      role = 'assistant';
    }
  }
  // Try to detect based on position (alternating pattern: user, assistant, user, assistant)
  // NOTE(review): this fallback assumes a strict user/assistant alternation
  // starting with a user message, and that only message containers are
  // siblings — confirm against Grok's actual DOM.
  if (role === 'unknown') {
    // Get all message containers
    const allMessages = Array.from(container.parentElement?.children || []);
    const index = allMessages.indexOf(container);
    // Assume odd indices are user, even are assistant (or vice versa)
    role = index % 2 === 0 ? 'user' : 'assistant';
  }
  // Get message content; fall back to the container itself when no dedicated
  // content wrapper is found (the || makes contentElement always truthy).
  const contentElement = container.querySelector('[data-message-content], .markdown, [class*="message-content"], [class*="prose"]') || container;
  if (!contentElement) return null;
  // Extract markdown from the content
  const content = extractMarkdownFromElement(contentElement);
  if (!content.trim()) return null;
  console.log('[Grok Extractor] Extracted message:', { role, contentLength: content.length });
  return {
    role,
    content: content.trim()
  };
}
// NOTE: Markdown extraction and formatting functions moved to conversation-extractor-utils.js
// Build the full conversation payload handed to the background script.
// Throws when no messages could be extracted so the caller can surface the
// failure to the user.
function extractConversation() {
  try {
    const title = getConversationTitle();
    const messages = getMessages();
    if (!messages || messages.length === 0) {
      throw new Error('No messages found in conversation');
    }
    return {
      title,
      content: formatMessagesAsText(messages),
      messages,
      timestamp: Date.now(),
      url: window.location.href,
      provider: 'Grok'
    };
  } catch (error) {
    // Log here for diagnostics, then rethrow so the click handler can react.
    console.error('[Grok Extractor] Error extracting conversation:', error);
    throw error;
  }
}
// Handle save button click.
// Full pipeline: extract the conversation from the DOM, deduplicate against
// previously saved copies, then hand the payload to the background script.
// The button is re-enabled and its label restored on every exit path.
async function handleSaveClick(e) {
  if (e) {
    e.preventDefault();
    e.stopPropagation();
  }
  console.log('[Grok Extractor] Save button clicked');
  if (!saveButton) return;
  // Check if chrome API is available
  if (typeof chrome === 'undefined' || !chrome.runtime) {
    console.error('[Grok Extractor] Chrome extension API not available');
    showNotification('Extension API not available. Try reloading the page.', 'error');
    return;
  }
  // Disable button during save
  saveButton.disabled = true;
  // NOTE(review): assumes the label span from createSaveButton is still
  // present; this would throw if the button markup changes — confirm.
  const labelSpan = saveButton.querySelector('[data-test-id="save-label"]');
  const originalText = labelSpan.textContent;
  labelSpan.textContent = 'Saving...';
  try {
    const conversation = extractConversation();
    console.log('[Grok Extractor] Extracted conversation:', {
      title: conversation.title,
      messageCount: conversation.messages.length,
      contentLength: conversation.content.length,
      url: conversation.url,
      provider: conversation.provider
    });
    // Generate conversation ID for deduplication
    const conversationId = generateConversationId(conversation.url, conversation.title);
    conversation.conversationId = conversationId;
    // Check for duplicates
    const duplicateCheck = await checkForDuplicate(conversationId);
    if (duplicateCheck.isDuplicate) {
      // Compare content to decide whether to save
      const existingContent = (duplicateCheck.existingConversation.content || '').trim();
      const newContent = (conversation.content || '').trim();
      if (existingContent === newContent) {
        // Content identical - silently skip save
        saveButton.disabled = false;
        labelSpan.textContent = originalText;
        return;
      }
      // Content changed - automatically overwrite with original timestamp
      // (keeping the first-saved timestamp preserves the saved-list ordering).
      conversation.overwriteId = duplicateCheck.existingConversation.id;
      conversation.timestamp = duplicateCheck.existingConversation.timestamp;
    }
    // Send to background script
    chrome.runtime.sendMessage({
      action: 'saveConversationFromPage',
      payload: conversation
    }, (response) => {
      if (chrome.runtime.lastError) {
        console.error('[Grok Extractor] Chrome runtime error:', chrome.runtime.lastError);
        const errorMsg = chrome.runtime.lastError.message;
        // Provide user-friendly message for context invalidation
        if (errorMsg.includes('Extension context invalidated')) {
          showNotification('Extension was reloaded. Please reload this page and try saving again.', 'error');
        } else {
          showNotification('Failed to save: ' + errorMsg, 'error');
        }
        saveButton.disabled = false;
        labelSpan.textContent = originalText;
        return;
      }
      if (response && response.success) {
        console.log('[Grok Extractor] Conversation saved successfully');
        // Success notification now shown in sidebar
      } else {
        const errorMsg = response?.error || 'Unknown error';
        showNotification('Failed to save: ' + errorMsg, 'error');
      }
      // Re-enable button
      saveButton.disabled = false;
      labelSpan.textContent = originalText;
    });
  } catch (error) {
    // Extraction failed before anything was sent — restore the button state.
    console.error('[Grok Extractor] Error during extraction:', error);
    showNotification('Failed to extract conversation: ' + error.message, 'error');
    saveButton.disabled = false;
    labelSpan.textContent = originalText;
  }
}
// Setup keyboard shortcut (Ctrl+Shift+S or Cmd+Shift+S).
// Only triggers a save while a conversation page is open.
setupKeyboardShortcut(() => {
  if (window.location.href.includes('https://grok.com/c/')) {
    handleSaveClick();
  }
}, detectConversation);
// Listen for URL changes (Grok is a SPA): keep the Save button present only
// on conversation URLs.
observeUrlChanges((url) => {
  console.log('[Grok Extractor] URL changed to:', url);
  // Remove button if leaving conversation page
  if (!url.includes('https://grok.com/c/')) {
    const existingButton = document.getElementById('insidebar-save-conversation');
    if (existingButton) {
      existingButton.remove();
      saveButton = null;
    }
  } else {
    // Try to insert button on conversation page
    // (the delay gives the SPA time to render the Share button anchor).
    setTimeout(() => insertSaveButton(), 1000);
  }
});
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/grok-save-button.css | CSS | /* Grok Save Button Styles */
/* Base styles for the injected Save button. Visual details are mostly
   inherited from the provider's own utility classes; these are fallbacks. */
#insidebar-save-conversation {
/* Button matches Grok's style */
font-family: inherit;
cursor: pointer;
user-select: none;
}
/* Dim and lock the button while a save is in flight. */
#insidebar-save-conversation:disabled {
opacity: 0.6;
cursor: not-allowed;
}
/* Notification animations */
/* Slide the toast in from the right edge. */
@keyframes slideIn {
from {
transform: translateX(100%);
opacity: 0;
}
to {
transform: translateX(0);
opacity: 1;
}
}
/* Slide the toast back out to the right edge. */
@keyframes slideOut {
from {
transform: translateX(0);
opacity: 1;
}
to {
transform: translateX(100%);
opacity: 0;
}
}
/* Smooths position/opacity changes applied by the notification script. */
.insidebar-notification {
transition: all 0.3s ease;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/language-detector.js | JavaScript | /**
* Language Detector for Content Scripts
* Detects provider's UI language and provides matching text for our Save buttons
*
* NOTE: This file must be loaded BEFORE any *-history-extractor.js files in manifest.json
* It exports functions to window.LanguageDetector
*/
(function() {
'use strict';
// Create global namespace for language detector utilities
window.LanguageDetector = window.LanguageDetector || {};
/**
 * Share button text in different languages
 * Used to detect what language the provider is using
 * Note: Some providers use multiple variations for the same language
 * Detection compares the trimmed button label for exact equality against
 * these variants (see detectProviderLanguage below).
 */
const SHARE_BUTTON_TEXT = {
  'en': ['Share'],
  'zh_CN': ['分享', '共享'], // ChatGPT uses 共享, others use 分享
  'zh_TW': ['分享', '共享'],
  'ja': ['共有する', '共有'], // Grok uses 共有する (verb), others use 共有 (noun)
  'ko': ['공유'],
  'ru': ['Поделиться'],
  'es': ['Compartir'],
  'fr': ['Partager'],
  'de': ['Teilen'],
  'it': ['Condividi']
};
/**
 * Save button text in different languages
 * Matches the language detected from provider's UI
 */
const SAVE_BUTTON_TEXT = {
  'en': 'Save',
  'zh_CN': '保存',
  'zh_TW': '保存',
  'ja': '保存',
  'ko': '저장',
  'ru': 'Сохранить',
  'es': 'Guardar',
  'fr': 'Enregistrer',
  'de': 'Speichern',
  'it': 'Salva'
};
/**
 * Save button tooltip text in different languages
 * Keyed identically to SAVE_BUTTON_TEXT.
 */
const SAVE_TOOLTIP_TEXT = {
  'en': 'Save this conversation to insidebar.ai',
  'zh_CN': '保存此对话到 insidebar.ai',
  'zh_TW': '保存此對話到 insidebar.ai',
  'ja': 'この会話を insidebar.ai に保存',
  'ko': '이 대화를 insidebar.ai에 저장',
  'ru': 'Сохранить этот разговор в insidebar.ai',
  'es': 'Guardar esta conversación en insidebar.ai',
  'fr': 'Enregistrer cette conversation dans insidebar.ai',
  'de': 'Dieses Gespräch in insidebar.ai speichern',
  'it': 'Salva questa conversazione su insidebar.ai'
};
/**
* Detect provider's UI language by examining their Share button
*
* Strategy:
* 1. Find Share button using provided selector
* 2. Read its text content
* 3. Match against known translations
* 4. Fallback to document language if Share button not found
* 5. Final fallback to English
*
* @param {string|null} shareButtonSelector - CSS selector for provider's Share button
* @returns {string} Language code (e.g., 'en', 'zh_CN', 'ja')
*
* @example
* // ChatGPT
* const lang = window.LanguageDetector.detectProviderLanguage('[data-testid="share-chat-button"]');
*
* // No Share button (fallback to document lang)
* const lang = window.LanguageDetector.detectProviderLanguage(null);
*/
window.LanguageDetector.detectProviderLanguage = function(shareButtonSelector) {
  // Primary strategy: read the provider's Share button label and match the
  // trimmed text exactly against the known translations.
  if (shareButtonSelector) {
    try {
      const btn = document.querySelector(shareButtonSelector);
      const label = btn?.textContent?.trim();
      if (label) {
        for (const [lang, variants] of Object.entries(SHARE_BUTTON_TEXT)) {
          // Each language lists every label variant providers are known to use.
          if (variants.includes(label)) {
            console.debug('[Language Detector] Detected from Share button:', lang, 'text:', label);
            return lang;
          }
        }
        console.debug('[Language Detector] Share button text not recognized:', label);
      }
    } catch (error) {
      console.warn('[Language Detector] Error reading Share button:', error);
    }
  }
  // Fallback: the document's own lang attribute (then English).
  const docLang = detectFromDocumentLanguage();
  console.debug('[Language Detector] Using document language fallback:', docLang);
  return docLang;
};
/**
* Detect language from HTML lang attribute
* @private
*/
function detectFromDocumentLanguage() {
const htmlLang = document.documentElement.lang;
if (!htmlLang) {
console.debug('[Language Detector] No document language set, using English');
return 'en';
}
// Map common lang codes to our supported languages
const langLower = htmlLang.toLowerCase();
// Chinese variants
if (langLower.includes('zh-cn') || langLower.includes('zh-hans') || langLower === 'zh') {
return 'zh_CN';
}
if (langLower.includes('zh-tw') || langLower.includes('zh-hk') || langLower.includes('zh-hant')) {
return 'zh_TW';
}
// Other languages (match by prefix)
if (langLower.startsWith('ja')) return 'ja';
if (langLower.startsWith('ko')) return 'ko';
if (langLower.startsWith('ru')) return 'ru';
if (langLower.startsWith('es')) return 'es';
if (langLower.startsWith('fr')) return 'fr';
if (langLower.startsWith('de')) return 'de';
if (langLower.startsWith('it')) return 'it';
// Default to English
return 'en';
}
/**
* Get Save button text in the detected language
*
* This is the main function to use in history extractors
*
* @param {string|null} shareButtonSelector - CSS selector for provider's Share button
* @returns {Object} Object with text, tooltip, and detected language
* @returns {string} return.text - Save button text in detected language
* @returns {string} return.tooltip - Tooltip text in detected language
* @returns {string} return.lang - Detected language code
*
* @example
* // In ChatGPT history extractor
* const { text, tooltip, lang } = window.LanguageDetector.getSaveButtonText('[data-testid="share-chat-button"]');
* button.textContent = text; // "Save" or "保存" etc.
* button.title = tooltip; // Translated tooltip
* console.log('Using language:', lang); // "en" or "zh_CN" etc.
*/
window.LanguageDetector.getSaveButtonText = function(shareButtonSelector) {
  // Resolve the UI language once, then look up label and tooltip with an
  // English fallback for any unsupported language code.
  const lang = window.LanguageDetector.detectProviderLanguage(shareButtonSelector);
  const text = SAVE_BUTTON_TEXT[lang] || SAVE_BUTTON_TEXT['en'];
  const tooltip = SAVE_TOOLTIP_TEXT[lang] || SAVE_TOOLTIP_TEXT['en'];
  return { text, tooltip, lang };
};
// Expose language maps for direct use if needed
// (extractors normally go through getSaveButtonText instead).
window.LanguageDetector.SHARE_BUTTON_TEXT = SHARE_BUTTON_TEXT;
window.LanguageDetector.SAVE_BUTTON_TEXT = SAVE_BUTTON_TEXT;
window.LanguageDetector.SAVE_TOOLTIP_TEXT = SAVE_TOOLTIP_TEXT;
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/page-content-extractor.js | JavaScript | // Page Content Extractor
// Extracts clean page content using Mozilla Readability.js
// Used when context menu is invoked without text selection
//
// This content script runs on all pages and listens for extraction requests
// from the service worker (context menu handler)
(function() {
'use strict';
/**
 * Extract page content using Readability.js.
 * Falls back to raw-text extraction when the Readability global is missing,
 * fails to parse, or throws.
 * @returns {Object} {title, content, url}
 */
function extractPageContent() {
  const url = window.location.href;
  // Check if Readability is available (injected separately by the extension).
  if (typeof Readability === 'undefined') {
    console.warn('[Page Extractor] Readability.js not loaded, using fallback');
    return fallbackExtraction(url);
  }
  try {
    // Clone document for Readability (it modifies the DOM)
    const documentClone = document.cloneNode(true);
    // Create Readability instance
    const reader = new Readability(documentClone, {
      charThreshold: 500,
      classesToPreserve: []
    });
    // Parse article
    const article = reader.parse();
    if (!article) {
      console.warn('[Page Extractor] Readability failed to parse, using fallback');
      return fallbackExtraction(url);
    }
    // Convert HTML content to markdown-like format
    const markdownContent = htmlToMarkdown(article.content);
    return {
      title: article.title || document.title || 'Untitled Page',
      content: markdownContent,
      url: url
    };
  } catch (error) {
    console.error('[Page Extractor] Error using Readability:', error);
    return fallbackExtraction(url);
  }
}
/**
 * Last-resort extraction used when Readability is unavailable or fails.
 * Grabs the visible text of the first matching "main content" container,
 * falling back to the whole body.
 * @param {string} url - Page URL
 * @returns {Object} {title, content, url}
 */
function fallbackExtraction(url) {
  // Common main-content selectors, most specific first; 'body' always matches.
  const candidates = [
    'main',
    'article',
    '[role="main"]',
    '.main-content',
    '.content',
    '#content',
    'body'
  ];
  let region = null;
  for (const selector of candidates) {
    region = document.querySelector(selector);
    if (region) break;
  }
  const text = region ? region.innerText : document.body.innerText;
  return {
    title: document.title || 'Untitled Page',
    content: text.trim(),
    url: url
  };
}
/**
 * Convert an HTML string to a markdown-like plain-text form.
 * Parses the markup into a detached element and walks it recursively.
 * @param {string} html - HTML content
 * @returns {string} Markdown-formatted text
 */
function htmlToMarkdown(html) {
  const scratch = document.createElement('div');
  scratch.innerHTML = html;
  return processNode(scratch).trim();
}
/**
 * Process DOM node recursively to extract markdown.
 * Handles code blocks, inline code, headings, bold/italic, links, lists,
 * blockquotes, line breaks and paragraphs; any other element simply has its
 * children processed. Note: headings and blockquotes are flattened to plain
 * text via getTextContent, so inline markup inside them is dropped.
 * @param {Node} node - DOM node
 * @returns {string} Markdown text
 */
function processNode(node) {
  if (!node) return '';
  // Text node
  if (node.nodeType === Node.TEXT_NODE) {
    return node.textContent;
  }
  // Element node
  if (node.nodeType === Node.ELEMENT_NODE) {
    const tagName = node.tagName.toLowerCase();
    // Code blocks
    if (tagName === 'pre') {
      const code = node.querySelector('code');
      if (code) {
        // Language hint follows the conventional "language-xxx" class name.
        const lang = code.className.match(/language-(\w+)/)?.[1] || '';
        return `\n\`\`\`${lang}\n${code.textContent}\n\`\`\`\n\n`;
      }
      return `\n\`\`\`\n${node.textContent}\n\`\`\`\n\n`;
    }
    // Inline code
    if (tagName === 'code') {
      return `\`${node.textContent}\``;
    }
    // Headings
    if (tagName.match(/^h[1-6]$/)) {
      const level = tagName.charAt(1);
      const hashes = '#'.repeat(parseInt(level));
      return `\n${hashes} ${getTextContent(node)}\n\n`;
    }
    // Bold
    if (tagName === 'strong' || tagName === 'b') {
      return `**${getTextContent(node)}**`;
    }
    // Italic
    if (tagName === 'em' || tagName === 'i') {
      return `*${getTextContent(node)}*`;
    }
    // Links
    if (tagName === 'a') {
      const href = node.getAttribute('href') || '';
      const text = getTextContent(node);
      return `[${text}](${href})`;
    }
    // Lists (only direct <li> children are rendered; nesting is flattened)
    if (tagName === 'ul') {
      let list = '\n';
      Array.from(node.children).forEach(li => {
        if (li.tagName.toLowerCase() === 'li') {
          list += `- ${processNode(li).trim()}\n`;
        }
      });
      return list + '\n';
    }
    if (tagName === 'ol') {
      let list = '\n';
      Array.from(node.children).forEach((li, index) => {
        if (li.tagName.toLowerCase() === 'li') {
          list += `${index + 1}. ${processNode(li).trim()}\n`;
        }
      });
      return list + '\n';
    }
    // Blockquotes
    if (tagName === 'blockquote') {
      return `\n> ${getTextContent(node)}\n\n`;
    }
    // Line breaks
    if (tagName === 'br') {
      return '\n';
    }
    // Paragraphs
    if (tagName === 'p') {
      return `${processChildren(node)}\n\n`;
    }
    // Divs - just process children
    if (tagName === 'div') {
      return processChildren(node);
    }
    // Default: process children
    return processChildren(node);
  }
  // Comment nodes, etc., contribute nothing.
  return '';
}
/**
* Get plain text content from node
* @param {Node} node
* @returns {string}
*/
function getTextContent(node) {
return Array.from(node.childNodes)
.map(child => {
if (child.nodeType === Node.TEXT_NODE) {
return child.textContent;
}
return child.textContent || '';
})
.join('');
}
/**
 * Concatenate the markdown rendering of every child of a node.
 * @param {Node} node
 * @returns {string}
 */
function processChildren(node) {
  let out = '';
  for (const child of node.childNodes) {
    out += processNode(child);
  }
  return out;
}
/**
 * Format extracted content with title and source based on user preference.
 * @param {Object} extracted - {title, content, url}
 * @param {string} placement - 'beginning', 'end', or 'none' (unknown values
 *   behave like 'end')
 * @returns {string} Formatted content
 */
function formatContent(extracted, placement = 'end') {
  const titleLine = `[${extracted.title}]`;
  const sourceLine = `Source: ${extracted.url}`;
  if (placement === 'none') {
    // No URL - just title and content
    return `${titleLine}\n\n${extracted.content}`;
  }
  if (placement === 'beginning') {
    // URL at beginning (immediately after the title)
    return `${titleLine}\n${sourceLine}\n\n${extracted.content}`;
  }
  // Default: URL at end. Reuses sourceLine rather than rebuilding the
  // "Source: ..." string inline (the old duplication invited format drift).
  return `${titleLine}\n\n${extracted.content}\n\n${sourceLine}`;
}
// Listen for extraction requests from service worker
// (sent by the context-menu handler when no text is selected).
chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
  if (message.action === 'extractPageContent') {
    try {
      const extracted = extractPageContent();
      // Get user's source URL placement preference
      chrome.storage.sync.get({ sourceUrlPlacement: 'end' }, (settings) => {
        const formatted = formatContent(extracted, settings.sourceUrlPlacement);
        sendResponse({
          success: true,
          content: formatted,
          title: extracted.title,
          url: extracted.url
        });
      });
    } catch (error) {
      console.error('[Page Extractor] Error extracting content:', error);
      sendResponse({
        success: false,
        error: error.message
      });
    }
    return true; // Keep channel open for async response
  }
});
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/perplexity-history-extractor.js | JavaScript | // Perplexity Conversation History Extractor
// Extracts current conversation from Perplexity DOM and saves to extension
//
// IMPORTANT: Requires conversation-extractor-utils.js to be loaded first
(function() {
'use strict';
console.log('[Perplexity Extractor] Script loaded');
// Import shared utilities from global namespace
// (conversation-extractor-utils.js must be loaded first via manifest.json).
const {
  extractMarkdownFromElement,
  formatMessagesAsText,
  generateConversationId,
  checkForDuplicate,
  showDuplicateWarning,
  showNotification,
  setupKeyboardShortcut,
  observeUrlChanges
} = window.ConversationExtractorUtils;
// Share button selector for language detection
const SHARE_BUTTON_SELECTOR = 'button[data-testid="share-button"]';
// Reference to the injected Save button (null while it is absent from the DOM).
let saveButton = null;
// Initialize after page loads
if (document.readyState === 'loading') {
  document.addEventListener('DOMContentLoaded', init);
} else {
  // Document already parsed (content script injected late) — run immediately.
  init();
}
// Entry point: sets up the Save button, but only on Perplexity search pages.
function init() {
  console.log('[Perplexity Extractor] Initializing...');
  console.log('[Perplexity Extractor] In iframe?', window !== window.top);
  console.log('[Perplexity Extractor] URL:', window.location.href);
  // Skip the homepage and any other non-search URL.
  const onSearchPage = window.location.href.includes('https://www.perplexity.ai/search/');
  if (!onSearchPage) {
    console.log('[Perplexity Extractor] Not on search page, skipping');
    return;
  }
  // Let Perplexity finish its initial render before touching the DOM.
  const RENDER_DELAY_MS = 2000;
  setTimeout(() => {
    console.log('[Perplexity Extractor] Attempting to insert save button...');
    insertSaveButton();
    observeForShareButton();
  }, RENDER_DELAY_MS);
}
// Create save button matching Perplexity's button style.
// Label and tooltip language mirror Perplexity's own UI via LanguageDetector.
function createSaveButton() {
  // Detect provider's UI language and get matching Save button text
  const { text, tooltip } = window.LanguageDetector.getSaveButtonText(SHARE_BUTTON_SELECTOR);
  const button = document.createElement('button');
  button.id = 'insidebar-save-conversation';
  button.setAttribute('data-testid', 'save-button');
  button.type = 'button';
  // Class list copied from Perplexity's native buttons so the styling matches.
  button.className = 'bg-subtle text-foreground md:hover:text-quiet font-sans focus:outline-none outline-none outline-transparent transition duration-300 ease-out select-none items-center relative group/button font-semimedium justify-center text-center items-center rounded-lg cursor-pointer active:scale-[0.97] active:duration-150 active:ease-outExpo origin-center whitespace-nowrap inline-flex text-sm h-8 px-2.5';
  button.title = tooltip;
  // Create button structure matching share button
  button.innerHTML = `
<div class="flex items-center min-w-0 gap-two justify-center">
<div class="flex shrink-0 items-center justify-center size-4">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" color="currentColor" class="tabler-icon" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
<path d="M4 17v2a2 2 0 0 0 2 2h12a2 2 0 0 0 2 -2v-2 M7 11l5 5l5 -5 M12 4l0 12"></path>
</svg>
</div>
<div class="relative truncate text-center px-1 leading-loose -mb-px" data-label="save">${text}</div>
</div>
`;
  button.addEventListener('click', handleSaveClick);
  return button;
}
// Insert save button after share button.
// Unlike the Grok extractor, Perplexity wraps each toolbar button in a
// <span>, so the Save button gets its own wrapper span too.
function insertSaveButton() {
  // Check if button already exists
  if (document.getElementById('insidebar-save-conversation')) {
    console.log('[Perplexity Extractor] Save button already exists');
    return;
  }
  // Only insert on search pages
  if (!window.location.href.includes('https://www.perplexity.ai/search/')) {
    console.log('[Perplexity Extractor] Not on search page');
    return;
  }
  // Find share button
  const shareButton = document.querySelector('button[data-testid="share-button"]');
  console.log('[Perplexity Extractor] Looking for share button...');
  console.log('[Perplexity Extractor] Share button found?', !!shareButton);
  if (!shareButton) {
    console.log('[Perplexity Extractor] Share button not found yet, will retry');
    return;
  }
  // Check if conversation exists
  const hasConversation = detectConversation();
  console.log('[Perplexity Extractor] Has conversation?', hasConversation);
  // If share button exists, assume there's a conversation
  if (!hasConversation) {
    console.log('[Perplexity Extractor] No conversation detected via messages, but share button exists');
    console.log('[Perplexity Extractor] Inserting button anyway - messages may load later');
  }
  // Create and wrap both buttons
  // NOTE(review): assumes the Share button sits inside a <span> whose parent
  // accepts extra children — confirm against Perplexity's current DOM.
  const parentSpan = shareButton.parentElement;
  const grandparent = parentSpan.parentElement;
  // Create wrapper for save button (matching share button's span wrapper)
  const saveButtonWrapper = document.createElement('span');
  saveButton = createSaveButton();
  saveButtonWrapper.appendChild(saveButton);
  // Insert after share button's wrapper span
  grandparent.insertBefore(saveButtonWrapper, parentSpan.nextSibling);
  console.log('[Perplexity Extractor] Save button inserted after share button');
}
// Detect if there's a conversation on the page.
// Truthy when at least one message can be extracted from Perplexity's DOM.
function detectConversation() {
  const extracted = getMessages();
  return extracted && extracted.length > 0;
}
// Watch the whole document so the save button (re)appears as Perplexity
// re-renders, and is torn down when the user leaves a search thread.
function observeForShareButton() {
  const observer = new MutationObserver(() => {
    // (Re)insert the button whenever it is missing; insertSaveButton is
    // idempotent and handles all the preconditions itself.
    insertSaveButton();
    // Tear the button down if we navigated off a search page.
    const mounted = document.getElementById('insidebar-save-conversation');
    if (!mounted) {
      return;
    }
    const onSearchPage = window.location.href.includes('https://www.perplexity.ai/search/');
    if (!onSearchPage) {
      mounted.parentElement?.remove(); // Remove wrapper span
      saveButton = null;
    }
  });
  // Observe the entire document for changes
  observer.observe(document.body, { childList: true, subtree: true });
}
// Extract the conversation title for the current thread.
// Priority: sidebar link matched by URL slug → legacy data-testid link →
// short id derived from the URL → generic default.
function getConversationTitle() {
  // Read the non-empty title text of a thread anchor's first <span>, or null.
  const titleFromAnchor = (anchor) => {
    if (!anchor) {
      return null;
    }
    const span = anchor.querySelector('span');
    if (!span) {
      return null;
    }
    const text = span.textContent.trim();
    return text && text.length > 0 ? text : null;
  };

  const urlMatch = window.location.pathname.match(/\/search\/([^\/]+)/);
  if (!urlMatch) {
    // No conversation id present in the URL at all.
    console.log('[Perplexity Extractor] No conversation ID in URL, using default');
    return 'Untitled Perplexity Search';
  }

  const fullPath = urlMatch[1]; // e.g., "some-title-abc123def456"

  // Priority 1: sidebar thread link whose href contains this slug.
  const matchedTitle = titleFromAnchor(
    document.querySelector(`a[href*="/search/${fullPath}"]`)
  );
  if (matchedTitle) {
    console.log('[Perplexity Extractor] Found title from URL-matched thread:', matchedTitle);
    return matchedTitle;
  }

  // Priority 2: older markup that tagged the link with data-testid.
  const fallbackTitle = titleFromAnchor(
    document.querySelector('a[data-testid*="thread-title-"]')
  );
  if (fallbackTitle) {
    console.log('[Perplexity Extractor] Found title from current thread (data-testid fallback):', fallbackTitle);
    return fallbackTitle;
  }

  // Last resort: synthesize a title from the trailing id segment of the slug.
  console.log('[Perplexity Extractor] Falling back to URL-based title');
  const idMatch = fullPath.match(/-([^-]+)$/);
  const shortId = (idMatch ? idMatch[1] : fullPath).substring(0, 8);
  return `Perplexity Search ${shortId}`;
}
// Collect the conversation as an ordered list of {role, content} messages:
// the user query first (if present), then every assistant answer.
function getMessages() {
  const collected = [];

  // The user query lives in a Lexical editor text node.
  const queryNode = document.querySelector('[data-lexical-text="true"]');
  if (queryNode) {
    const query = queryNode.textContent.trim();
    if (query) {
      console.log('[Perplexity Extractor] Found user query:', query.substring(0, 50));
      collected.push({ role: 'user', content: query });
    }
  }

  // Assistant answers are rendered in div[id^="markdown-content-"] containers.
  const containers = document.querySelectorAll('div[id^="markdown-content-"]');
  console.log('[Perplexity Extractor] Found answer containers:', containers.length);
  for (const container of containers) {
    try {
      // Convert the rendered answer back into markdown text.
      const markdown = extractMarkdownFromElement(container);
      if (markdown && markdown.trim()) {
        console.log('[Perplexity Extractor] Extracted answer:', markdown.substring(0, 50));
        collected.push({ role: 'assistant', content: markdown.trim() });
      }
    } catch (error) {
      // One bad container shouldn't abort the rest of the extraction.
      console.warn('[Perplexity Extractor] Error extracting answer:', error);
    }
  }

  console.log('[Perplexity Extractor] Total messages extracted:', collected.length);
  return collected;
}
// NOTE: extractMessageFromContainer() function removed - no longer needed
// Message extraction now handled directly in getMessages() with specific selectors
// NOTE: Markdown extraction and formatting functions moved to conversation-extractor-utils.js
// Build the full conversation payload for saving.
// Throws when no messages can be extracted; the caller surfaces the error.
function extractConversation() {
  try {
    const title = getConversationTitle();
    const messages = getMessages();
    if (!messages || messages.length === 0) {
      throw new Error('No messages found in conversation');
    }
    return {
      title,
      content: formatMessagesAsText(messages),
      messages,
      timestamp: Date.now(),
      url: window.location.href,
      provider: 'Perplexity'
    };
  } catch (error) {
    console.error('[Perplexity Extractor] Error extracting conversation:', error);
    throw error;
  }
}
// Handle save button click.
//
// Extracts the current conversation, deduplicates it against previously
// saved copies (identical content is silently skipped; changed content
// overwrites the stored copy while keeping its original timestamp), then
// hands the payload to the background script. Also invoked without an
// event object by the keyboard shortcut.
async function handleSaveClick(e) {
  // e is absent when triggered via the keyboard shortcut.
  if (e) {
    e.preventDefault();
    e.stopPropagation();
  }
  console.log('[Perplexity Extractor] Save button clicked');
  if (!saveButton) return;
  // Check if chrome API is available — it disappears when the extension is
  // reloaded, which invalidates this content script's context.
  if (typeof chrome === 'undefined' || !chrome.runtime) {
    console.error('[Perplexity Extractor] Chrome extension API not available');
    showNotification('Extension API not available. Try reloading the page.', 'error');
    return;
  }
  // Disable button during save and show progress in the label.
  saveButton.disabled = true;
  const labelDiv = saveButton.querySelector('[data-label="save"]');
  const originalText = labelDiv.textContent;
  labelDiv.textContent = 'Saving...';
  try {
    const conversation = extractConversation();
    console.log('[Perplexity Extractor] Extracted conversation:', {
      title: conversation.title,
      messageCount: conversation.messages.length,
      contentLength: conversation.content.length,
      url: conversation.url,
      provider: conversation.provider
    });
    // Generate conversation ID for deduplication
    const conversationId = generateConversationId(conversation.url, conversation.title);
    conversation.conversationId = conversationId;
    // Check for duplicates
    const duplicateCheck = await checkForDuplicate(conversationId);
    if (duplicateCheck.isDuplicate) {
      // Compare content to decide whether to save
      const existingContent = (duplicateCheck.existingConversation.content || '').trim();
      const newContent = (conversation.content || '').trim();
      if (existingContent === newContent) {
        // Content identical - silently skip save, restore button state
        saveButton.disabled = false;
        labelDiv.textContent = originalText;
        return;
      }
      // Content changed - automatically overwrite with original timestamp
      conversation.overwriteId = duplicateCheck.existingConversation.id;
      conversation.timestamp = duplicateCheck.existingConversation.timestamp;
    }
    // Send to background script; completion is handled in the callback.
    chrome.runtime.sendMessage({
      action: 'saveConversationFromPage',
      payload: conversation
    }, (response) => {
      if (chrome.runtime.lastError) {
        console.error('[Perplexity Extractor] Chrome runtime error:', chrome.runtime.lastError);
        const errorMsg = chrome.runtime.lastError.message;
        // Provide user-friendly message for context invalidation
        if (errorMsg.includes('Extension context invalidated')) {
          showNotification('Extension was reloaded. Please reload this page and try saving again.', 'error');
        } else {
          showNotification('Failed to save: ' + errorMsg, 'error');
        }
        saveButton.disabled = false;
        labelDiv.textContent = originalText;
        return;
      }
      if (response && response.success) {
        console.log('[Perplexity Extractor] Conversation saved successfully');
        // Success notification now shown in sidebar
      } else {
        const errorMsg = response?.error || 'Unknown error';
        showNotification('Failed to save: ' + errorMsg, 'error');
      }
      // Re-enable button
      saveButton.disabled = false;
      labelDiv.textContent = originalText;
    });
  } catch (error) {
    // Extraction failed before anything was sent; restore button state.
    console.error('[Perplexity Extractor] Error during extraction:', error);
    showNotification('Failed to extract conversation: ' + error.message, 'error');
    saveButton.disabled = false;
    labelDiv.textContent = originalText;
  }
}
// Setup keyboard shortcut (Ctrl+Shift+S or Cmd+Shift+S).
// Only triggers a save while viewing a Perplexity search thread.
setupKeyboardShortcut(() => {
  if (window.location.href.includes('https://www.perplexity.ai/search/')) {
    handleSaveClick();
  }
}, detectConversation);
// Listen for URL changes (Perplexity is a SPA, so there are no full page
// loads to re-run this script on navigation).
observeUrlChanges((url) => {
  console.log('[Perplexity Extractor] URL changed to:', url);
  // Remove button if leaving search page
  if (!url.includes('https://www.perplexity.ai/search/')) {
    const existingButton = document.getElementById('insidebar-save-conversation');
    if (existingButton) {
      existingButton.parentElement?.remove(); // Remove wrapper span
      saveButton = null;
    }
  } else {
    // Try to insert button on search page; the delay gives the SPA time to
    // render the share button before insertSaveButton looks for it.
    setTimeout(() => insertSaveButton(), 1000);
  }
});
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/perplexity-save-button.css | CSS | /* Perplexity Save Button Styles */
/* Save button injected next to Perplexity's share button. Most visual
   styling comes from the utility classes copied from the share button;
   these rules only cover what those classes don't. */
#insidebar-save-conversation {
  /* Button matches Perplexity's style */
  font-family: inherit;
  cursor: pointer;
  user-select: none;
}
/* Dimmed, non-interactive look while a save is in flight. */
#insidebar-save-conversation:disabled {
  opacity: 0.6;
  cursor: not-allowed;
}
/* Notification animations: slide in from / out to the right edge. */
@keyframes slideIn {
  from {
    transform: translateX(100%);
    opacity: 0;
  }
  to {
    transform: translateX(0);
    opacity: 1;
  }
}
@keyframes slideOut {
  from {
    transform: translateX(0);
    opacity: 1;
  }
  to {
    transform: translateX(100%);
    opacity: 0;
  }
}
.insidebar-notification {
  transition: all 0.3s ease;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/text-injection-all-providers.js | JavaScript | // Text injection handler for all AI providers
// Self-contained script without module imports (for iframe compatibility)
(function() {
'use strict';
// Provider-specific selectors.
// Maps each supported AI chat provider to the CSS selectors used to locate
// its prompt input. Selectors are tried in order, so more specific/reliable
// ones come first and broader fallbacks last.
const PROVIDER_SELECTORS = {
  chatgpt: ['#prompt-textarea'],
  // Claude renders its prompt as a ProseMirror contenteditable.
  claude: [
    '.ProseMirror[role="textbox"]',
    '.ProseMirror[contenteditable="true"]',
    'div[contenteditable="true"].ProseMirror',
    'div[contenteditable="true"]'
  ],
  // Gemini uses a Quill editor.
  gemini: ['.ql-editor'],
  grok: ['textarea', '.tiptap', '.ProseMirror'],
  deepseek: ['textarea.ds-scroll-area'],
  google: ['textarea.ITIRGe', 'textarea[aria-label="Ask anything"]', 'textarea[maxlength="8192"]'],
  // Copilot uses textarea with id="userInput" or data-testid="composer-input"
  copilot: ['textarea#userInput', 'textarea[data-testid="composer-input"]', 'textarea[placeholder*="Message Copilot"]']
};
// Determine which AI provider page this script is running on, based on the
// current hostname (plus, for Google AI mode, the udm=50 query parameter).
// Returns a PROVIDER_SELECTORS key or null when the host is unrecognized.
function detectProvider() {
  const host = window.location.hostname;
  if (host.includes('chatgpt.com') || host.includes('openai.com')) return 'chatgpt';
  if (host.includes('claude.ai')) return 'claude';
  if (host.includes('gemini.google.com')) return 'gemini';
  if (host.includes('grok.com')) return 'grok';
  if (host.includes('deepseek.com')) return 'deepseek';
  if (host.includes('google.com') && window.location.search.includes('udm=50')) return 'google';
  // NOTE(review): hostname never contains a path, so the 'bing.com/chat'
  // test below can never match — confirm whether a pathname check was meant.
  if (host.includes('copilot.microsoft.com') || host.includes('bing.com/chat')) return 'copilot';
  return null;
}
// Resolve a CSS selector to an element, returning null for invalid input or
// selector syntax errors instead of throwing.
function findTextInputElement(selector) {
  const usable = typeof selector === 'string' && selector.length > 0;
  if (!usable) {
    return null;
  }
  try {
    return document.querySelector(selector);
  } catch (error) {
    console.error('Error finding element:', error);
    return null;
  }
}
// Inject (append) text into a chat input element.
//
// Supports form controls (textarea/input) and contenteditable elements.
// For form controls it goes through the element's native `value` setter so
// frameworks like React — which replace the property with their own
// accessor — still observe the change, then dispatches input/change events.
//
// BUG FIX: the old code always used HTMLTextAreaElement.prototype's value
// setter, which throws ("Illegal invocation") when the element is an
// <input>, even though <input> is explicitly accepted below. The prototype
// is now chosen to match the element, with a plain assignment fallback.
//
// @param {Element} element - target input element
// @param {string} text - non-blank text to append
// @returns {boolean} true when the text was injected
function injectTextIntoElement(element, text) {
  if (!element || !text || typeof text !== 'string' || text.trim() === '') {
    return false;
  }
  try {
    const isTextarea = element.tagName === 'TEXTAREA' || element.tagName === 'INPUT';
    const isContentEditable = element.isContentEditable || element.getAttribute('contenteditable') === 'true';
    if (!isTextarea && !isContentEditable) {
      console.warn('Element is not a textarea or contenteditable:', element);
      return false;
    }
    if (isTextarea) {
      // For textarea/input elements
      const currentValue = element.value || '';
      const newValue = currentValue + text;
      // For React - use the native setter to bypass React's control,
      // picked from the prototype that actually matches this element.
      const proto = element.tagName === 'INPUT'
        ? window.HTMLInputElement.prototype
        : window.HTMLTextAreaElement.prototype;
      const descriptor = Object.getOwnPropertyDescriptor(proto, 'value');
      if (descriptor && descriptor.set) {
        descriptor.set.call(element, newValue);
      } else {
        // Fallback when the native accessor is unavailable.
        element.value = newValue;
      }
      // Trigger multiple events to notify React/Vue/etc
      element.dispatchEvent(new Event('input', { bubbles: true }));
      element.dispatchEvent(new Event('change', { bubbles: true }));
      // Move cursor to end (without focusing to avoid cross-origin error)
      element.selectionStart = element.selectionEnd = element.value.length;
    } else {
      // For contenteditable elements
      const currentText = element.textContent || '';
      element.textContent = currentText + text;
      // Trigger input event
      element.dispatchEvent(new Event('input', { bubbles: true }));
      // Move cursor to end for contenteditable (without focusing)
      try {
        const range = document.createRange();
        const selection = window.getSelection();
        range.selectNodeContents(element);
        range.collapse(false); // Collapse to end
        selection.removeAllRanges();
        selection.addRange(range);
      } catch (e) {
        // Ignore selection errors in cross-origin context
      }
    }
    return true;
  } catch (error) {
    console.error('Error injecting text:', error);
    return false;
  }
}
// Handle text injection message.
//
// window "message" handler: validates an INJECT_TEXT payload, locates the
// current provider's input element via PROVIDER_SELECTORS, and appends the
// text to it. If the element isn't in the DOM yet, retries once after 1s;
// if the retry also fails, the message is dropped with an error log.
//
// NOTE(review): event.origin is not checked, so any page or frame able to
// postMessage to this window can trigger an injection — confirm whether the
// sender origin should be validated here.
function handleTextInjection(event) {
  // Validate event data structure
  if (!event || !event.data || typeof event.data !== 'object') {
    return;
  }
  // Only handle INJECT_TEXT messages
  if (event.data.type !== 'INJECT_TEXT') {
    return;
  }
  // Validate text payload
  const text = event.data.text;
  if (!text || typeof text !== 'string' || text.length === 0) {
    console.warn('[Text Injection] Invalid text payload');
    return;
  }
  // Sanity check: reject extremely large payloads (> 1MB)
  if (text.length > 1048576) {
    console.error('[Text Injection] Text payload too large:', text.length, 'bytes');
    return;
  }
  const provider = detectProvider();
  if (!provider) {
    console.warn('Unknown provider, cannot inject text');
    return;
  }
  const selectors = PROVIDER_SELECTORS[provider];
  if (!selectors) {
    console.warn('No selectors configured for provider:', provider);
    return;
  }
  // Try each selector until we find an element
  let element = null;
  for (const selector of selectors) {
    element = findTextInputElement(selector);
    if (element) break;
  }
  if (element) {
    const success = injectTextIntoElement(element, text);
    if (!success) {
      console.error(`[Text Injection] Failed to inject text into ${provider}`);
    }
  } else {
    // Retry after a short delay in case page is still loading
    setTimeout(() => {
      let retryElement = null;
      for (const selector of selectors) {
        retryElement = findTextInputElement(selector);
        if (retryElement) {
          break;
        }
      }
      if (retryElement) {
        injectTextIntoElement(retryElement, text);
      } else {
        console.error(`[Text Injection] ${provider} editor not found`);
      }
    }, 1000);
  }
}
// Listen for messages from sidebar. handleTextInjection validates the
// payload shape (but not event.origin) before acting on it.
window.addEventListener('message', handleTextInjection);
})();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/text-injection-chatgpt.js | JavaScript | // Text injection handler for ChatGPT
import { setupTextInjectionListener } from './text-injection-handler.js';
// ChatGPT uses #prompt-textarea. Registers the shared window "message"
// listener so INJECT_TEXT payloads land in ChatGPT's prompt input.
setupTextInjectionListener('#prompt-textarea', 'ChatGPT');
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/text-injection-claude.js | JavaScript | // Text injection handler for Claude
import { setupTextInjectionListener } from './text-injection-handler.js';
// Claude uses a .ProseMirror contenteditable with role="textbox". Registers
// the shared window "message" listener for INJECT_TEXT payloads.
setupTextInjectionListener('.ProseMirror[role="textbox"]', 'Claude');
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/text-injection-deepseek.js | JavaScript | // Text injection handler for DeepSeek
import { setupTextInjectionListener } from './text-injection-handler.js';
// DeepSeek uses a textarea with the .ds-scroll-area class. Registers the
// shared window "message" listener for INJECT_TEXT payloads.
setupTextInjectionListener('textarea.ds-scroll-area', 'DeepSeek');
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/text-injection-gemini.js | JavaScript | // Text injection handler for Gemini
import { setupTextInjectionListener } from './text-injection-handler.js';
// Gemini uses a Quill editor (.ql-editor). Registers the shared window
// "message" listener for INJECT_TEXT payloads.
setupTextInjectionListener('.ql-editor', 'Gemini');
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/text-injection-grok.js | JavaScript | // Text injection handler for Grok
import { setupTextInjectionListener } from './text-injection-handler.js';
// Grok's input can be a plain textarea, a TipTap editor, or a ProseMirror
// editor; the selectors are tried in that order by the shared handler.
setupTextInjectionListener(['textarea', '.tiptap', '.ProseMirror'], 'Grok');
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
content-scripts/text-injection-handler.js | JavaScript | // Common text injection handler for all AI providers
// Listens for postMessage from sidebar and injects text using the provided selector(s)
import { findTextInputElement, injectTextIntoElement } from '../modules/text-injector.js';
/**
 * Create a text injection handler for a specific provider.
 *
 * The returned function is a window "message" handler: on an INJECT_TEXT
 * message it tries each selector in order until an input element is found,
 * then appends the message text to it. If no element is found, it retries
 * once after 1s; if the retry also fails, the message is dropped silently.
 *
 * NOTE(review): event.origin is not validated, so any page or frame able to
 * postMessage to this window can trigger an injection — confirm whether the
 * sender origin should be checked here.
 *
 * @param {string|string[]} selectors - CSS selector(s) to find the input element
 * @param {string} providerName - Name of the provider for logging
 * @returns {Function} Event handler function
 */
export function createTextInjectionHandler(selectors, providerName) {
  // Normalize to an array so single selectors and lists share one code path.
  const selectorArray = Array.isArray(selectors) ? selectors : [selectors];
  return function handleTextInjection(event) {
    // Only handle INJECT_TEXT messages
    if (!event.data || event.data.type !== 'INJECT_TEXT' || !event.data.text) {
      return;
    }
    // Try each selector until we find an element
    let element = null;
    for (const selector of selectorArray) {
      element = findTextInputElement(selector);
      if (element) break;
    }
    if (element) {
      const success = injectTextIntoElement(element, event.data.text);
      if (success) {
        console.log(`Text injected into ${providerName} editor`);
      } else {
        console.error(`Failed to inject text into ${providerName}`);
      }
    } else {
      console.warn(`${providerName} editor not found, will retry...`);
      // Retry after a short delay in case page is still loading
      setTimeout(() => {
        let retryElement = null;
        for (const selector of selectorArray) {
          retryElement = findTextInputElement(selector);
          if (retryElement) break;
        }
        if (retryElement) {
          injectTextIntoElement(retryElement, event.data.text);
        }
      }, 1000);
    }
  };
}
/**
 * Register a window "message" listener that injects INJECT_TEXT payloads
 * into the provider's input element.
 * @param {string|string[]} selectors - CSS selector(s) to find the input element
 * @param {string} providerName - Name of the provider for logging
 */
export function setupTextInjectionListener(selectors, providerName) {
  window.addEventListener('message', createTextInjectionHandler(selectors, providerName));
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
data/prompt-libraries/transform-libraries.js | JavaScript | // Transform raw JSON libraries to unified schema for import
// Run with: node transform-libraries.js
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Derive a display title from a prompt template: {placeholder} tokens
// collapse to a single ellipsis character, surrounding whitespace is
// trimmed, and anything over 60 chars is cut to 57 chars plus "...".
function generateTitle(template) {
  const cleaned = template.replace(/\{[^}]+\}/g, '…').trim();
  if (cleaned.length <= 60) {
    return cleaned;
  }
  return cleaned.substring(0, 57) + '...';
}
// Flatten a source library ({categories: [{name, items}]}) into the
// unified prompt schema used by the importer.
function transformLibrary(sourceData) {
  return sourceData.categories.flatMap((category) =>
    category.items.map((item) => ({
      title: generateTitle(item.template),
      content: item.template,
      category: category.name,
      tags: item.tags || [], // Keep only original tags, no auto-added ones
      variables: item.variables || [],
      isFavorite: false,
      useCount: 0,
      lastUsed: null
    }))
  );
}
// Merge and deduplicate by title
function mergeLibraries(lib1, lib2) {
const merged = [...lib1, ...lib2];
const seen = new Set();
const unique = [];
for (const prompt of merged) {
// Deduplicate by title instead of externalId
const key = prompt.title.toLowerCase().trim();
if (!seen.has(key)) {
seen.add(key);
unique.push(prompt);
}
}
return unique;
}
// Main transformation.
//
// Reads the two source prompt libraries, converts them to the unified
// schema, merges/deduplicates them by title, and writes default-prompts.json
// next to this script. Exits with code 1 on any failure.
//
// Source paths default to the original hard-coded Downloads locations but
// can now be overridden on the command line:
//   node transform-libraries.js [ai-library.json] [research-library.json]
async function main() {
  try {
    console.log('Reading source files...');
    // Read source JSON files (CLI overrides first, else the legacy paths)
    const aiPath = process.argv[2] || '/Users/joker/Downloads/ai-meta-prompts-library.json';
    const researchPath = process.argv[3] || '/Users/joker/Downloads/research-meta-prompts-library.json';
    const aiData = JSON.parse(fs.readFileSync(aiPath, 'utf8'));
    const researchData = JSON.parse(fs.readFileSync(researchPath, 'utf8'));
    console.log(`AI Library: ${aiData.total_templates} templates`);
    console.log(`Research Library: ${researchData.total_templates} templates`);
    // Transform both libraries
    console.log('\nTransforming libraries...');
    const aiTransformed = transformLibrary(aiData);
    const researchTransformed = transformLibrary(researchData);
    console.log(`Transformed AI: ${aiTransformed.length} prompts`);
    console.log(`Transformed Research: ${researchTransformed.length} prompts`);
    // Merge and deduplicate
    const combined = mergeLibraries(aiTransformed, researchTransformed);
    console.log(`Combined (deduplicated): ${combined.length} prompts`);
    // Write single output file next to this script
    const outputDir = __dirname;
    fs.writeFileSync(
      path.join(outputDir, 'default-prompts.json'),
      JSON.stringify({
        version: '1.0',
        title: 'Default Prompt Library',
        description: 'Curated meta-prompts for AI workflows, research, coding, and analysis',
        count: combined.length,
        prompts: combined
      }, null, 2)
    );
    console.log('\n✓ Transformation complete!');
    console.log(`  - default-prompts.json (${combined.length} prompts)`);
  } catch (error) {
    console.error('Error:', error);
    process.exit(1);
  }
}
main();
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
libs/Readability.js | JavaScript | /*
* Copyright (c) 2010 Arc90 Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This code is heavily based on Arc90's readability.js (1.7.1) script
* available at: http://code.google.com/p/arc90labs-readability
*/
/**
 * Public constructor.
 *
 * Also accepts the legacy argument order (uri, doc, options): when the
 * first argument has a documentElement it is treated as the document and
 * options are read from the third argument instead.
 *
 * @param {HTMLDocument} doc The document to parse.
 * @param {Object} options The options object (debug, maxElemsToParse,
 *   nbTopCandidates, charThreshold, classesToPreserve, keepClasses,
 *   serializer, disableJSONLD, allowedVideoRegex, linkDensityModifier).
 */
function Readability(doc, options) {
  // In some older versions, people passed a URI as the first argument. Cope:
  if (options && options.documentElement) {
    doc = options;
    options = arguments[2];
  } else if (!doc || !doc.documentElement) {
    throw new Error(
      "First argument to Readability constructor should be a document object."
    );
  }
  options = options || {};
  this._doc = doc;
  // Set when the doc was produced by Mozilla's JSDOMParser; used elsewhere
  // to detect live node lists.
  this._docJSDOMParser = this._doc.firstChild.__JSDOMParser__;
  this._articleTitle = null;
  this._articleByline = null;
  this._articleDir = null;
  this._articleSiteName = null;
  this._attempts = [];
  this._metadata = {};
  // Configurable options
  this._debug = !!options.debug;
  this._maxElemsToParse =
    options.maxElemsToParse || this.DEFAULT_MAX_ELEMS_TO_PARSE;
  this._nbTopCandidates =
    options.nbTopCandidates || this.DEFAULT_N_TOP_CANDIDATES;
  this._charThreshold = options.charThreshold || this.DEFAULT_CHAR_THRESHOLD;
  this._classesToPreserve = this.CLASSES_TO_PRESERVE.concat(
    options.classesToPreserve || []
  );
  this._keepClasses = !!options.keepClasses;
  this._serializer =
    options.serializer ||
    function (el) {
      return el.innerHTML;
    };
  this._disableJSONLD = !!options.disableJSONLD;
  this._allowedVideoRegex = options.allowedVideoRegex || this.REGEXPS.videos;
  this._linkDensityModifier = options.linkDensityModifier || 0;
  // Start with all flags set
  this._flags =
    this.FLAG_STRIP_UNLIKELYS |
    this.FLAG_WEIGHT_CLASSES |
    this.FLAG_CLEAN_CONDITIONALLY;
  // Control whether log messages are sent to the console
  if (this._debug) {
    // Render a node as a short descriptive string for log output.
    let logNode = function (node) {
      if (node.nodeType == node.TEXT_NODE) {
        return `${node.nodeName} ("${node.textContent}")`;
      }
      let attrPairs = Array.from(node.attributes || [], function (attr) {
        return `${attr.name}="${attr.value}"`;
      }).join(" ");
      return `<${node.localName} ${attrPairs}>`;
    };
    this.log = function () {
      if (typeof console !== "undefined") {
        let args = Array.from(arguments, arg => {
          if (arg && arg.nodeType == this.ELEMENT_NODE) {
            return logNode(arg);
          }
          return arg;
        });
        args.unshift("Reader: (Readability)");
        // eslint-disable-next-line no-console
        console.log(...args);
      } else if (typeof dump !== "undefined") {
        /* global dump */
        var msg = Array.prototype.map
          .call(arguments, function (x) {
            return x && x.nodeName ? logNode(x) : x;
          })
          .join(" ");
        dump("Reader: (Readability) " + msg + "\n");
      }
    };
  } else {
    this.log = function () {};
  }
}
Readability.prototype = {
  // Bit flags controlling which cleaning passes run; all are set by the
  // constructor.
  FLAG_STRIP_UNLIKELYS: 0x1,
  FLAG_WEIGHT_CLASSES: 0x2,
  FLAG_CLEAN_CONDITIONALLY: 0x4,
  // https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeType
  ELEMENT_NODE: 1,
  TEXT_NODE: 3,
  // Max number of nodes supported by this parser. Default: 0 (no limit)
  DEFAULT_MAX_ELEMS_TO_PARSE: 0,
  // The number of top candidates to consider when analysing how
  // tight the competition is among candidates.
  DEFAULT_N_TOP_CANDIDATES: 5,
  // Element tags to score by default.
  DEFAULT_TAGS_TO_SCORE: "section,h2,h3,h4,h5,h6,p,td,pre"
    .toUpperCase()
    .split(","),
  // The default number of chars an article must have in order to return a result
  DEFAULT_CHAR_THRESHOLD: 500,
  // All of the regular expressions in use within readability.
  // Defined up here so we don't instantiate them repeatedly in loops.
  REGEXPS: {
    // NOTE: These two regular expressions are duplicated in
    // Readability-readerable.js. Please keep both copies in sync.
    unlikelyCandidates:
      /-ad-|ai2html|banner|breadcrumbs|combx|comment|community|cover-wrap|disqus|extra|footer|gdpr|header|legends|menu|related|remark|replies|rss|shoutbox|sidebar|skyscraper|social|sponsor|supplemental|ad-break|agegate|pagination|pager|popup|yom-remote/i,
    okMaybeItsACandidate:
      /and|article|body|column|content|main|mathjax|shadow/i,
    positive:
      /article|body|content|entry|hentry|h-entry|main|page|pagination|post|text|blog|story/i,
    negative:
      /-ad-|hidden|^hid$| hid$| hid |^hid |banner|combx|comment|com-|contact|footer|gdpr|masthead|media|meta|outbrain|promo|related|scroll|share|shoutbox|sidebar|skyscraper|sponsor|shopping|tags|widget/i,
    extraneous:
      /print|archive|comment|discuss|e[\-]?mail|share|reply|all|login|sign|single|utility/i,
    byline: /byline|author|dateline|writtenby|p-author/i,
    replaceFonts: /<(\/?)font[^>]*>/gi,
    normalize: /\s{2,}/g,
    videos:
      /\/\/(www\.)?((dailymotion|youtube|youtube-nocookie|player\.vimeo|v\.qq|bilibili|live.bilibili)\.com|(archive|upload\.wikimedia)\.org|player\.twitch\.tv)/i,
    shareElements: /(\b|_)(share|sharedaddy)(\b|_)/i,
    nextLink: /(next|weiter|continue|>([^\|]|$)|»([^\|]|$))/i,
    prevLink: /(prev|earl|old|new|<|«)/i,
    tokenize: /\W+/g,
    whitespace: /^\s*$/,
    hasContent: /\S$/,
    hashUrl: /^#.+/,
    srcsetUrl: /(\S+)(\s+[\d.]+[xw])?(\s*(?:,|$))/g,
    b64DataUrl: /^data:\s*([^\s;,]+)\s*;\s*base64\s*,/i,
    // Commas as used in Latin, Sindhi, Chinese and various other scripts.
    // see: https://en.wikipedia.org/wiki/Comma#Comma_variants
    commas: /\u002C|\u060C|\uFE50|\uFE10|\uFE11|\u2E41|\u2E34|\u2E32|\uFF0C/g,
    // See: https://schema.org/Article
    jsonLdArticleTypes:
      /^Article|AdvertiserContentArticle|NewsArticle|AnalysisNewsArticle|AskPublicNewsArticle|BackgroundNewsArticle|OpinionNewsArticle|ReportageNewsArticle|ReviewNewsArticle|Report|SatiricalArticle|ScholarlyArticle|MedicalScholarlyArticle|SocialMediaPosting|BlogPosting|LiveBlogPosting|DiscussionForumPosting|TechArticle|APIReference$/,
    // used to see if a node's content matches words commonly used for ad blocks or loading indicators
    adWords:
      /^(ad(vertising|vertisement)?|pub(licité)?|werb(ung)?|广告|Реклама|Anuncio)$/iu,
    loadingWords:
      /^((loading|正在加载|Загрузка|chargement|cargando)(…|\.\.\.)?)$/iu,
  },
  // ARIA roles whose subtrees are unlikely to contain article content.
  UNLIKELY_ROLES: [
    "menu",
    "menubar",
    "complementary",
    "navigation",
    "alert",
    "alertdialog",
    "dialog",
  ],
  // A <div> containing any of these elements is kept as a block container
  // rather than being converted to a <p>.
  DIV_TO_P_ELEMS: new Set([
    "BLOCKQUOTE",
    "DL",
    "DIV",
    "IMG",
    "OL",
    "P",
    "PRE",
    "TABLE",
    "UL",
  ]),
  ALTER_TO_DIV_EXCEPTIONS: ["DIV", "ARTICLE", "SECTION", "P", "OL", "UL"],
  // Presentational attributes stripped during cleanup.
  PRESENTATIONAL_ATTRIBUTES: [
    "align",
    "background",
    "bgcolor",
    "border",
    "cellpadding",
    "cellspacing",
    "frame",
    "hspace",
    "rules",
    "style",
    "valign",
    "vspace",
  ],
  DEPRECATED_SIZE_ATTRIBUTE_ELEMS: ["TABLE", "TH", "TD", "HR", "PRE"],
  // The commented out elements qualify as phrasing content but tend to be
  // removed by readability when put into paragraphs, so we ignore them here.
  PHRASING_ELEMS: [
    // "CANVAS", "IFRAME", "SVG", "VIDEO",
    "ABBR",
    "AUDIO",
    "B",
    "BDO",
    "BR",
    "BUTTON",
    "CITE",
    "CODE",
    "DATA",
    "DATALIST",
    "DFN",
    "EM",
    "EMBED",
    "I",
    "IMG",
    "INPUT",
    "KBD",
    "LABEL",
    "MARK",
    "MATH",
    "METER",
    "NOSCRIPT",
    "OBJECT",
    "OUTPUT",
    "PROGRESS",
    "Q",
    "RUBY",
    "SAMP",
    "SCRIPT",
    "SELECT",
    "SMALL",
    "SPAN",
    "STRONG",
    "SUB",
    "SUP",
    "TEXTAREA",
    "TIME",
    "VAR",
    "WBR",
  ],
  // These are the classes that readability sets itself.
  CLASSES_TO_PRESERVE: ["page"],
  // These are the list of HTML entities that need to be escaped.
  HTML_ESCAPE_MAP: {
    lt: "<",
    gt: ">",
    amp: "&",
    quot: '"',
    apos: "'",
  },
  /**
   * Run any post-process modifications to article content as necessary:
   * rewrite relative URIs to absolute, flatten pointlessly nested elements,
   * and (unless keepClasses was requested) strip class attributes.
   *
   * @param Element articleContent The extracted article root.
   * @return void
   **/
  _postProcessContent(articleContent) {
    // Readability cannot open relative uris so we convert them to absolute uris.
    this._fixRelativeUris(articleContent);
    this._simplifyNestedElements(articleContent);
    if (!this._keepClasses) {
      // Remove classes.
      this._cleanClasses(articleContent);
    }
  },
  /**
   * Iterates over a NodeList, calls `filterFn` for each node and removes node
   * if function returned `true`.
   *
   * If function is not passed, removes all the nodes in node list.
   *
   * Iterates in reverse so that removing a node doesn't shift the indices
   * of the nodes not yet visited.
   *
   * @param NodeList nodeList The nodes to operate on
   * @param Function filterFn the function to use as a filter
   * @return void
   */
  _removeNodes(nodeList, filterFn) {
    // Avoid ever operating on live node lists.
    if (this._docJSDOMParser && nodeList._isLiveNodeList) {
      throw new Error("Do not pass live node lists to _removeNodes");
    }
    for (var i = nodeList.length - 1; i >= 0; i--) {
      var node = nodeList[i];
      var parentNode = node.parentNode;
      if (parentNode) {
        if (!filterFn || filterFn.call(this, node, i, nodeList)) {
          parentNode.removeChild(node);
        }
      }
    }
  },
  /**
   * Iterates over a NodeList, and calls _setNodeTag for each node.
   *
   * @param NodeList nodeList The nodes to operate on
   * @param String newTagName the new tag name to use
   * @return void
   */
  _replaceNodeTags(nodeList, newTagName) {
    // Avoid ever operating on live node lists.
    if (this._docJSDOMParser && nodeList._isLiveNodeList) {
      throw new Error("Do not pass live node lists to _replaceNodeTags");
    }
    for (const node of nodeList) {
      this._setNodeTag(node, newTagName);
    }
  },
/**
 * Iterate over a NodeList, which doesn't natively fully implement the Array
 * interface.
 *
 * For convenience, the current object context is applied to the provided
 * iterate function.
 *
 * NOTE: delegating to Array.prototype.forEach matters — the spec reads the
 * length once up front, which callers rely on when iterating live
 * collections whose contents may be removed during the callback.
 *
 * @param NodeList nodeList The NodeList.
 * @param Function fn The iterate function.
 * @return void
 */
_forEachNode(nodeList, fn) {
  Array.prototype.forEach.call(nodeList, fn, this);
},
/**
 * Iterate over a NodeList, and return the first node that passes
 * the supplied test function.
 *
 * For convenience, the current object context is applied to the provided
 * test function.
 *
 * @param NodeList nodeList The NodeList.
 * @param Function fn The test function.
 * @return Node|undefined The first matching node, or undefined if none match.
 */
_findNode(nodeList, fn) {
  return Array.prototype.find.call(nodeList, fn, this);
},
/**
 * Iterate over a NodeList, return true if any of the provided iterate
 * function calls returns true, false otherwise. Short-circuits on the
 * first match.
 *
 * For convenience, the current object context is applied to the
 * provided iterate function.
 *
 * @param NodeList nodeList The NodeList.
 * @param Function fn The iterate function.
 * @return Boolean
 */
_someNode(nodeList, fn) {
  return Array.prototype.some.call(nodeList, fn, this);
},
/**
 * Iterate over a NodeList, return true if all of the provided iterate
 * function calls return true, false otherwise. Short-circuits on the
 * first failure; vacuously true for an empty list.
 *
 * For convenience, the current object context is applied to the
 * provided iterate function.
 *
 * @param NodeList nodeList The NodeList.
 * @param Function fn The iterate function.
 * @return Boolean
 */
_everyNode(nodeList, fn) {
  return Array.prototype.every.call(nodeList, fn, this);
},
_getAllNodesWithTag(node, tagNames) {
if (node.querySelectorAll) {
return node.querySelectorAll(tagNames.join(","));
}
return [].concat.apply(
[],
tagNames.map(function (tag) {
var collection = node.getElementsByTagName(tag);
return Array.isArray(collection) ? collection : Array.from(collection);
})
);
},
/**
* Removes the class="" attribute from every element in the given
* subtree, except those that match CLASSES_TO_PRESERVE and
* the classesToPreserve array from the options object.
*
* @param Element
* @return void
*/
_cleanClasses(node) {
var classesToPreserve = this._classesToPreserve;
var className = (node.getAttribute("class") || "")
.split(/\s+/)
.filter(cls => classesToPreserve.includes(cls))
.join(" ");
if (className) {
node.setAttribute("class", className);
} else {
node.removeAttribute("class");
}
for (node = node.firstElementChild; node; node = node.nextElementSibling) {
this._cleanClasses(node);
}
},
/**
* Tests whether a string is a URL or not.
*
* @param {string} str The string to test
* @return {boolean} true if str is a URL, false if not
*/
_isUrl(str) {
try {
new URL(str);
return true;
} catch {
return false;
}
},
/**
 * Converts each <a> and <img> uri in the given element to an absolute URI,
 * ignoring #ref URIs.
 *
 * Also rewrites javascript: links (useless once scripts are stripped) into
 * plain text or a <span>, and absolutizes src/poster/srcset on media
 * elements.
 *
 * @param Element articleContent The article root to rewrite in place.
 * @return void
 */
_fixRelativeUris(articleContent) {
  var baseURI = this._doc.baseURI;
  var documentURI = this._doc.documentURI;
  function toAbsoluteURI(uri) {
    // Leave hash links alone if the base URI matches the document URI:
    if (baseURI == documentURI && uri.charAt(0) == "#") {
      return uri;
    }
    // Otherwise, resolve against base URI:
    try {
      return new URL(uri, baseURI).href;
    } catch (ex) {
      // Something went wrong, just return the original:
    }
    return uri;
  }
  var links = this._getAllNodesWithTag(articleContent, ["a"]);
  this._forEachNode(links, function (link) {
    var href = link.getAttribute("href");
    if (href) {
      // Remove links with javascript: URIs, since
      // they won't work after scripts have been removed from the page.
      if (href.indexOf("javascript:") === 0) {
        // if the link only contains simple text content, it can be converted to a text node
        if (
          link.childNodes.length === 1 &&
          link.childNodes[0].nodeType === this.TEXT_NODE
        ) {
          var text = this._doc.createTextNode(link.textContent);
          link.parentNode.replaceChild(text, link);
        } else {
          // if the link has multiple children, they should all be preserved
          var container = this._doc.createElement("span");
          while (link.firstChild) {
            container.appendChild(link.firstChild);
          }
          link.parentNode.replaceChild(container, link);
        }
      } else {
        link.setAttribute("href", toAbsoluteURI(href));
      }
    }
  });
  var medias = this._getAllNodesWithTag(articleContent, [
    "img",
    "picture",
    "figure",
    "video",
    "audio",
    "source",
  ]);
  this._forEachNode(medias, function (media) {
    var src = media.getAttribute("src");
    var poster = media.getAttribute("poster");
    var srcset = media.getAttribute("srcset");
    if (src) {
      media.setAttribute("src", toAbsoluteURI(src));
    }
    if (poster) {
      media.setAttribute("poster", toAbsoluteURI(poster));
    }
    if (srcset) {
      // Rewrite each URL inside srcset, preserving the descriptor and
      // separator groups captured by the srcsetUrl regex.
      var newSrcset = srcset.replace(
        this.REGEXPS.srcsetUrl,
        function (_, p1, p2, p3) {
          return toAbsoluteURI(p1) + (p2 || "") + p3;
        }
      );
      media.setAttribute("srcset", newSrcset);
    }
  });
},
/**
 * Collapses redundant DIV/SECTION nesting in the article content: removes
 * such wrappers when they have no content at all, and replaces a wrapper
 * whose only child is another DIV/SECTION with that child (copying the
 * wrapper's attributes onto it). Elements whose id starts with
 * "readability" are containers we created ourselves and are left alone.
 *
 * @param Element articleContent Root of the subtree to simplify in place.
 * @return void
 */
_simplifyNestedElements(articleContent) {
  var node = articleContent;
  while (node) {
    if (
      node.parentNode &&
      ["DIV", "SECTION"].includes(node.tagName) &&
      !(node.id && node.id.startsWith("readability"))
    ) {
      if (this._isElementWithoutContent(node)) {
        // Empty wrapper: drop it and continue from the next node.
        node = this._removeAndGetNext(node);
        continue;
      } else if (
        this._hasSingleTagInsideElement(node, "DIV") ||
        this._hasSingleTagInsideElement(node, "SECTION")
      ) {
        // Single-child wrapper: hoist the child, merging attributes onto it.
        var child = node.children[0];
        for (var i = 0; i < node.attributes.length; i++) {
          child.setAttributeNode(node.attributes[i].cloneNode());
        }
        node.parentNode.replaceChild(child, node);
        node = child;
        continue;
      }
    }
    node = this._getNextNode(node);
  }
},
/**
 * Get the article title as an H1.
 *
 * Heuristics, in order: start from document.title; if it contains a
 * hierarchical separator (|, -, \, /, >, », dashes), drop the part after
 * the last separator (or before the first, if that leaves too few words);
 * else if it contains ": " and no H1/H2 matches it exactly, keep the part
 * after a colon; else if it's implausibly long/short, fall back to a lone
 * <h1>. Reverts to the original title whenever trimming leaves 4 or fewer
 * words (unless hierarchical separators justify the trim).
 *
 * @return string
 **/
_getArticleTitle() {
  var doc = this._doc;
  var curTitle = "";
  var origTitle = "";
  try {
    curTitle = origTitle = doc.title.trim();
    // If they had an element with id "title" in their HTML
    if (typeof curTitle !== "string") {
      curTitle = origTitle = this._getInnerText(
        doc.getElementsByTagName("title")[0]
      );
    }
  } catch (e) {
    /* ignore exceptions setting the title. */
  }
  var titleHadHierarchicalSeparators = false;
  function wordCount(str) {
    return str.split(/\s+/).length;
  }
  // If there's a separator in the title, first remove the final part
  const titleSeparators = /\|\-–—\\\/>»/.source;
  if (new RegExp(`\\s[${titleSeparators}]\\s`).test(curTitle)) {
    titleHadHierarchicalSeparators = /\s[\\\/>»]\s/.test(curTitle);
    let allSeparators = Array.from(
      origTitle.matchAll(new RegExp(`\\s[${titleSeparators}]\\s`, "gi"))
    );
    // Keep everything before the last separator occurrence.
    curTitle = origTitle.substring(0, allSeparators.pop().index);
    // If the resulting title is too short, remove the first part instead:
    if (wordCount(curTitle) < 3) {
      curTitle = origTitle.replace(
        new RegExp(`^[^${titleSeparators}]*[${titleSeparators}]`, "gi"),
        ""
      );
    }
  } else if (curTitle.includes(": ")) {
    // Check if we have an heading containing this exact string, so we
    // could assume it's the full title.
    var headings = this._getAllNodesWithTag(doc, ["h1", "h2"]);
    var trimmedTitle = curTitle.trim();
    var match = this._someNode(headings, function (heading) {
      return heading.textContent.trim() === trimmedTitle;
    });
    // If we don't, let's extract the title out of the original title string.
    if (!match) {
      curTitle = origTitle.substring(origTitle.lastIndexOf(":") + 1);
      // If the title is now too short, try the first colon instead:
      if (wordCount(curTitle) < 3) {
        curTitle = origTitle.substring(origTitle.indexOf(":") + 1);
        // But if we have too many words before the colon there's something weird
        // with the titles and the H tags so let's just use the original title instead
      } else if (wordCount(origTitle.substr(0, origTitle.indexOf(":"))) > 5) {
        curTitle = origTitle;
      }
    }
  } else if (curTitle.length > 150 || curTitle.length < 15) {
    // Implausible title length: a single <h1> is likely the real title.
    var hOnes = doc.getElementsByTagName("h1");
    if (hOnes.length === 1) {
      curTitle = this._getInnerText(hOnes[0]);
    }
  }
  curTitle = curTitle.trim().replace(this.REGEXPS.normalize, " ");
  // If we now have 4 words or fewer as our title, and either no
  // 'hierarchical' separators (\, /, > or ») were found in the original
  // title or we decreased the number of words by more than 1 word, use
  // the original title.
  var curTitleWordCount = wordCount(curTitle);
  if (
    curTitleWordCount <= 4 &&
    (!titleHadHierarchicalSeparators ||
      curTitleWordCount !=
        wordCount(
          origTitle.replace(new RegExp(`\\s[${titleSeparators}]\\s`, "g"), "")
        ) -
          1)
  ) {
    curTitle = origTitle;
  }
  return curTitle;
},
/**
* Prepare the HTML document for readability to scrape it.
* This includes things like stripping javascript, CSS, and handling terrible markup.
*
* @return void
**/
_prepDocument() {
var doc = this._doc;
// Remove all style tags in head
this._removeNodes(this._getAllNodesWithTag(doc, ["style"]));
if (doc.body) {
this._replaceBrs(doc.body);
}
this._replaceNodeTags(this._getAllNodesWithTag(doc, ["font"]), "SPAN");
},
/**
* Finds the next node, starting from the given node, and ignoring
* whitespace in between. If the given node is an element, the same node is
* returned.
*/
_nextNode(node) {
var next = node;
while (
next &&
next.nodeType != this.ELEMENT_NODE &&
this.REGEXPS.whitespace.test(next.textContent)
) {
next = next.nextSibling;
}
return next;
},
/**
 * Replaces 2 or more successive <br> elements with a single <p>.
 * Whitespace between <br> elements are ignored. For example:
 * <div>foo<br>bar<br> <br><br>abc</div>
 * will become:
 * <div>foo<br>bar<p>abc</p></div>
 *
 * @param Element elem The subtree to transform in place.
 * @return void
 */
_replaceBrs(elem) {
  this._forEachNode(this._getAllNodesWithTag(elem, ["br"]), function (br) {
    var next = br.nextSibling;
    // Whether 2 or more <br> elements have been found and replaced with a
    // <p> block.
    var replaced = false;
    // If we find a <br> chain, remove the <br>s until we hit another node
    // or non-whitespace. This leaves behind the first <br> in the chain
    // (which will be replaced with a <p> later).
    while ((next = this._nextNode(next)) && next.tagName == "BR") {
      replaced = true;
      var brSibling = next.nextSibling;
      next.remove();
      next = brSibling;
    }
    // If we removed a <br> chain, replace the remaining <br> with a <p>. Add
    // all sibling nodes as children of the <p> until we hit another <br>
    // chain.
    if (replaced) {
      var p = this._doc.createElement("p");
      br.parentNode.replaceChild(p, br);
      next = p.nextSibling;
      while (next) {
        // If we've hit another <br><br>, we're done adding children to this <p>.
        if (next.tagName == "BR") {
          var nextElem = this._nextNode(next.nextSibling);
          if (nextElem && nextElem.tagName == "BR") {
            break;
          }
        }
        // A paragraph may only hold phrasing content; stop at anything else.
        if (!this._isPhrasingContent(next)) {
          break;
        }
        // Otherwise, make this node a child of the new <p>.
        var sibling = next.nextSibling;
        p.appendChild(next);
        next = sibling;
      }
      // Trim trailing whitespace nodes from the new paragraph.
      while (p.lastChild && this._isWhitespace(p.lastChild)) {
        p.lastChild.remove();
      }
      // <p> nested in <p> is invalid markup; demote the parent to a <div>.
      if (p.parentNode.tagName === "P") {
        this._setNodeTag(p.parentNode, "DIV");
      }
    }
  });
},
_setNodeTag(node, tag) {
this.log("_setNodeTag", node, tag);
if (this._docJSDOMParser) {
node.localName = tag.toLowerCase();
node.tagName = tag.toUpperCase();
return node;
}
var replacement = node.ownerDocument.createElement(tag);
while (node.firstChild) {
replacement.appendChild(node.firstChild);
}
node.parentNode.replaceChild(replacement, node);
if (node.readability) {
replacement.readability = node.readability;
}
for (var i = 0; i < node.attributes.length; i++) {
replacement.setAttributeNode(node.attributes[i].cloneNode());
}
return replacement;
},
/**
 * Prepare the article node for display. Clean out any inline styles,
 * iframes, forms, strip extraneous <p> tags, etc.
 *
 * @param Element articleContent The article root to clean in place.
 * @return void
 **/
_prepArticle(articleContent) {
  this._cleanStyles(articleContent);
  // Check for data tables before we continue, to avoid removing items in
  // those tables, which will often be isolated even though they're
  // visually linked to other content-ful elements (text, images, etc.).
  this._markDataTables(articleContent);
  this._fixLazyImages(articleContent);
  // Clean out junk from the article content
  this._cleanConditionally(articleContent, "form");
  this._cleanConditionally(articleContent, "fieldset");
  this._clean(articleContent, "object");
  this._clean(articleContent, "embed");
  this._clean(articleContent, "footer");
  this._clean(articleContent, "link");
  this._clean(articleContent, "aside");
  // Clean out elements with little content that have "share" in their
  // id/class combinations from within the top candidates; the top
  // candidates themselves are kept even if they mention "share".
  var shareElementThreshold = this.DEFAULT_CHAR_THRESHOLD;
  this._forEachNode(articleContent.children, function (topCandidate) {
    this._cleanMatchedNodes(topCandidate, function (node, matchString) {
      return (
        this.REGEXPS.shareElements.test(matchString) &&
        node.textContent.length < shareElementThreshold
      );
    });
  });
  this._clean(articleContent, "iframe");
  this._clean(articleContent, "input");
  this._clean(articleContent, "textarea");
  this._clean(articleContent, "select");
  this._clean(articleContent, "button");
  this._cleanHeaders(articleContent);
  // Do these last as the previous stuff may have removed junk
  // that will affect these
  this._cleanConditionally(articleContent, "table");
  this._cleanConditionally(articleContent, "ul");
  this._cleanConditionally(articleContent, "div");
  // replace H1 with H2 as H1 should be only title that is displayed separately
  this._replaceNodeTags(
    this._getAllNodesWithTag(articleContent, ["h1"]),
    "h2"
  );
  // Remove extra paragraphs
  this._removeNodes(
    this._getAllNodesWithTag(articleContent, ["p"]),
    function (paragraph) {
      // At this point, nasty iframes have been removed; only embedded video
      // ones remain.
      var contentElementCount = this._getAllNodesWithTag(paragraph, [
        "img",
        "embed",
        "object",
        "iframe",
      ]).length;
      // Drop paragraphs with neither embedded media nor text content.
      return (
        contentElementCount === 0 && !this._getInnerText(paragraph, false)
      );
    }
  );
  // A <br> immediately before a <p> is redundant once paragraphs exist.
  this._forEachNode(
    this._getAllNodesWithTag(articleContent, ["br"]),
    function (br) {
      var next = this._nextNode(br.nextSibling);
      if (next && next.tagName == "P") {
        br.remove();
      }
    }
  );
  // Remove single-cell tables
  this._forEachNode(
    this._getAllNodesWithTag(articleContent, ["table"]),
    function (table) {
      var tbody = this._hasSingleTagInsideElement(table, "TBODY")
        ? table.firstElementChild
        : table;
      if (this._hasSingleTagInsideElement(tbody, "TR")) {
        var row = tbody.firstElementChild;
        if (this._hasSingleTagInsideElement(row, "TD")) {
          var cell = row.firstElementChild;
          // A lone cell becomes a <p> if it holds only phrasing content,
          // otherwise a generic <div>.
          cell = this._setNodeTag(
            cell,
            this._everyNode(cell.childNodes, this._isPhrasingContent)
              ? "P"
              : "DIV"
          );
          table.parentNode.replaceChild(cell, table);
        }
      }
    }
  );
},
/**
* Initialize a node with the readability object. Also checks the
* className/id for special names to add to its score.
*
* @param Element
* @return void
**/
_initializeNode(node) {
node.readability = { contentScore: 0 };
switch (node.tagName) {
case "DIV":
node.readability.contentScore += 5;
break;
case "PRE":
case "TD":
case "BLOCKQUOTE":
node.readability.contentScore += 3;
break;
case "ADDRESS":
case "OL":
case "UL":
case "DL":
case "DD":
case "DT":
case "LI":
case "FORM":
node.readability.contentScore -= 3;
break;
case "H1":
case "H2":
case "H3":
case "H4":
case "H5":
case "H6":
case "TH":
node.readability.contentScore -= 5;
break;
}
node.readability.contentScore += this._getClassWeight(node);
},
_removeAndGetNext(node) {
var nextNode = this._getNextNode(node, true);
node.remove();
return nextNode;
},
/**
* Traverse the DOM from node to node, starting at the node passed in.
* Pass true for the second parameter to indicate this node itself
* (and its kids) are going away, and we want the next node over.
*
* Calling this in a loop will traverse the DOM depth-first.
*
* @param {Element} node
* @param {boolean} ignoreSelfAndKids
* @return {Element}
*/
_getNextNode(node, ignoreSelfAndKids) {
// First check for kids if those aren't being ignored
if (!ignoreSelfAndKids && node.firstElementChild) {
return node.firstElementChild;
}
// Then for siblings...
if (node.nextElementSibling) {
return node.nextElementSibling;
}
// And finally, move up the parent chain *and* find a sibling
// (because this is depth-first traversal, we will have already
// seen the parent nodes themselves).
do {
node = node.parentNode;
} while (node && !node.nextElementSibling);
return node && node.nextElementSibling;
},
// compares second text to first one
// 1 = same text, 0 = completely different text
// works the way that it splits both texts into words and then finds words that are unique in second text
// the result is given by the lower length of unique parts
_textSimilarity(textA, textB) {
var tokensA = textA
.toLowerCase()
.split(this.REGEXPS.tokenize)
.filter(Boolean);
var tokensB = textB
.toLowerCase()
.split(this.REGEXPS.tokenize)
.filter(Boolean);
if (!tokensA.length || !tokensB.length) {
return 0;
}
var uniqTokensB = tokensB.filter(token => !tokensA.includes(token));
var distanceB = uniqTokensB.join(" ").length / tokensB.join(" ").length;
return 1 - distanceB;
},
/**
* Checks whether an element node contains a valid byline
*
* @param node {Element}
* @param matchString {string}
* @return boolean
*/
_isValidByline(node, matchString) {
var rel = node.getAttribute("rel");
var itemprop = node.getAttribute("itemprop");
var bylineLength = node.textContent.trim().length;
return (
(rel === "author" ||
(itemprop && itemprop.includes("author")) ||
this.REGEXPS.byline.test(matchString)) &&
!!bylineLength &&
bylineLength < 100
);
},
_getNodeAncestors(node, maxDepth) {
maxDepth = maxDepth || 0;
var i = 0,
ancestors = [];
while (node.parentNode) {
ancestors.push(node.parentNode);
if (maxDepth && ++i === maxDepth) {
break;
}
node = node.parentNode;
}
return ancestors;
},
/***
* grabArticle - Using a variety of metrics (content score, classname, element types), find the content that is
* most likely to be the stuff a user wants to read. Then return it wrapped up in a div.
*
* @param page a document to run upon. Needs to be a full document, complete with body.
* @return Element
**/
/* eslint-disable-next-line complexity */
_grabArticle(page) {
this.log("**** grabArticle ****");
var doc = this._doc;
var isPaging = page !== null;
page = page ? page : this._doc.body;
// We can't grab an article if we don't have a page!
if (!page) {
this.log("No body found in document. Abort.");
return null;
}
var pageCacheHtml = page.innerHTML;
while (true) {
this.log("Starting grabArticle loop");
var stripUnlikelyCandidates = this._flagIsActive(
this.FLAG_STRIP_UNLIKELYS
);
// First, node prepping. Trash nodes that look cruddy (like ones with the
// class name "comment", etc), and turn divs into P tags where they have been
// used inappropriately (as in, where they contain no other block level elements.)
var elementsToScore = [];
var node = this._doc.documentElement;
let shouldRemoveTitleHeader = true;
while (node) {
if (node.tagName === "HTML") {
this._articleLang = node.getAttribute("lang");
}
var matchString = node.className + " " + node.id;
if (!this._isProbablyVisible(node)) {
this.log("Removing hidden node - " + matchString);
node = this._removeAndGetNext(node);
continue;
}
// User is not able to see elements applied with both "aria-modal = true" and "role = dialog"
if (
node.getAttribute("aria-modal") == "true" &&
node.getAttribute("role") == "dialog"
) {
node = this._removeAndGetNext(node);
continue;
}
// If we don't have a byline yet check to see if this node is a byline; if it is store the byline and remove the node.
if (
!this._articleByline &&
!this._metadata.byline &&
this._isValidByline(node, matchString)
) {
// Find child node matching [itemprop="name"] and use that if it exists for a more accurate author name byline
var endOfSearchMarkerNode = this._getNextNode(node, true);
var next = this._getNextNode(node);
var itemPropNameNode = null;
while (next && next != endOfSearchMarkerNode) {
var itemprop = next.getAttribute("itemprop");
if (itemprop && itemprop.includes("name")) {
itemPropNameNode = next;
break;
} else {
next = this._getNextNode(next);
}
}
this._articleByline = (itemPropNameNode ?? node).textContent.trim();
node = this._removeAndGetNext(node);
continue;
}
if (shouldRemoveTitleHeader && this._headerDuplicatesTitle(node)) {
this.log(
"Removing header: ",
node.textContent.trim(),
this._articleTitle.trim()
);
shouldRemoveTitleHeader = false;
node = this._removeAndGetNext(node);
continue;
}
// Remove unlikely candidates
if (stripUnlikelyCandidates) {
if (
this.REGEXPS.unlikelyCandidates.test(matchString) &&
!this.REGEXPS.okMaybeItsACandidate.test(matchString) &&
!this._hasAncestorTag(node, "table") &&
!this._hasAncestorTag(node, "code") &&
node.tagName !== "BODY" &&
node.tagName !== "A"
) {
this.log("Removing unlikely candidate - " + matchString);
node = this._removeAndGetNext(node);
continue;
}
if (this.UNLIKELY_ROLES.includes(node.getAttribute("role"))) {
this.log(
"Removing content with role " +
node.getAttribute("role") +
" - " +
matchString
);
node = this._removeAndGetNext(node);
continue;
}
}
// Remove DIV, SECTION, and HEADER nodes without any content(e.g. text, image, video, or iframe).
if (
(node.tagName === "DIV" ||
node.tagName === "SECTION" ||
node.tagName === "HEADER" ||
node.tagName === "H1" ||
node.tagName === "H2" ||
node.tagName === "H3" ||
node.tagName === "H4" ||
node.tagName === "H5" ||
node.tagName === "H6") &&
this._isElementWithoutContent(node)
) {
node = this._removeAndGetNext(node);
continue;
}
if (this.DEFAULT_TAGS_TO_SCORE.includes(node.tagName)) {
elementsToScore.push(node);
}
// Turn all divs that don't have children block level elements into p's
if (node.tagName === "DIV") {
// Put phrasing content into paragraphs.
var childNode = node.firstChild;
while (childNode) {
var nextSibling = childNode.nextSibling;
if (this._isPhrasingContent(childNode)) {
var fragment = doc.createDocumentFragment();
// Collect all consecutive phrasing content into a fragment.
do {
nextSibling = childNode.nextSibling;
fragment.appendChild(childNode);
childNode = nextSibling;
} while (childNode && this._isPhrasingContent(childNode));
// Trim leading and trailing whitespace from the fragment.
while (
fragment.firstChild &&
this._isWhitespace(fragment.firstChild)
) {
fragment.firstChild.remove();
}
while (
fragment.lastChild &&
this._isWhitespace(fragment.lastChild)
) {
fragment.lastChild.remove();
}
// If the fragment contains anything, wrap it in a paragraph and
// insert it before the next non-phrasing node.
if (fragment.firstChild) {
var p = doc.createElement("p");
p.appendChild(fragment);
node.insertBefore(p, nextSibling);
}
}
childNode = nextSibling;
}
// Sites like http://mobile.slate.com encloses each paragraph with a DIV
// element. DIVs with only a P element inside and no text content can be
// safely converted into plain P elements to avoid confusing the scoring
// algorithm with DIVs with are, in practice, paragraphs.
if (
this._hasSingleTagInsideElement(node, "P") &&
this._getLinkDensity(node) < 0.25
) {
var newNode = node.children[0];
node.parentNode.replaceChild(newNode, node);
node = newNode;
elementsToScore.push(node);
} else if (!this._hasChildBlockElement(node)) {
node = this._setNodeTag(node, "P");
elementsToScore.push(node);
}
}
node = this._getNextNode(node);
}
/**
* Loop through all paragraphs, and assign a score to them based on how content-y they look.
* Then add their score to their parent node.
*
* A score is determined by things like number of commas, class names, etc. Maybe eventually link density.
**/
var candidates = [];
this._forEachNode(elementsToScore, function (elementToScore) {
if (
!elementToScore.parentNode ||
typeof elementToScore.parentNode.tagName === "undefined"
) {
return;
}
// If this paragraph is less than 25 characters, don't even count it.
var innerText = this._getInnerText(elementToScore);
if (innerText.length < 25) {
return;
}
// Exclude nodes with no ancestor.
var ancestors = this._getNodeAncestors(elementToScore, 5);
if (ancestors.length === 0) {
return;
}
var contentScore = 0;
// Add a point for the paragraph itself as a base.
contentScore += 1;
// Add points for any commas within this paragraph.
contentScore += innerText.split(this.REGEXPS.commas).length;
// For every 100 characters in this paragraph, add another point. Up to 3 points.
contentScore += Math.min(Math.floor(innerText.length / 100), 3);
// Initialize and score ancestors.
this._forEachNode(ancestors, function (ancestor, level) {
if (
!ancestor.tagName ||
!ancestor.parentNode ||
typeof ancestor.parentNode.tagName === "undefined"
) {
return;
}
if (typeof ancestor.readability === "undefined") {
this._initializeNode(ancestor);
candidates.push(ancestor);
}
// Node score divider:
// - parent: 1 (no division)
// - grandparent: 2
// - great grandparent+: ancestor level * 3
if (level === 0) {
var scoreDivider = 1;
} else if (level === 1) {
scoreDivider = 2;
} else {
scoreDivider = level * 3;
}
ancestor.readability.contentScore += contentScore / scoreDivider;
});
});
// After we've calculated scores, loop through all of the possible
// candidate nodes we found and find the one with the highest score.
var topCandidates = [];
for (var c = 0, cl = candidates.length; c < cl; c += 1) {
var candidate = candidates[c];
// Scale the final candidates score based on link density. Good content
// should have a relatively small link density (5% or less) and be mostly
// unaffected by this operation.
var candidateScore =
candidate.readability.contentScore *
(1 - this._getLinkDensity(candidate));
candidate.readability.contentScore = candidateScore;
this.log("Candidate:", candidate, "with score " + candidateScore);
for (var t = 0; t < this._nbTopCandidates; t++) {
var aTopCandidate = topCandidates[t];
if (
!aTopCandidate ||
candidateScore > aTopCandidate.readability.contentScore
) {
topCandidates.splice(t, 0, candidate);
if (topCandidates.length > this._nbTopCandidates) {
topCandidates.pop();
}
break;
}
}
}
var topCandidate = topCandidates[0] || null;
var neededToCreateTopCandidate = false;
var parentOfTopCandidate;
// If we still have no top candidate, just use the body as a last resort.
// We also have to copy the body node so it is something we can modify.
if (topCandidate === null || topCandidate.tagName === "BODY") {
// Move all of the page's children into topCandidate
topCandidate = doc.createElement("DIV");
neededToCreateTopCandidate = true;
// Move everything (not just elements, also text nodes etc.) into the container
// so we even include text directly in the body:
while (page.firstChild) {
this.log("Moving child out:", page.firstChild);
topCandidate.appendChild(page.firstChild);
}
page.appendChild(topCandidate);
this._initializeNode(topCandidate);
} else if (topCandidate) {
// Find a better top candidate node if it contains (at least three) nodes which belong to `topCandidates` array
// and whose scores are quite closed with current `topCandidate` node.
var alternativeCandidateAncestors = [];
for (var i = 1; i < topCandidates.length; i++) {
if (
topCandidates[i].readability.contentScore /
topCandidate.readability.contentScore >=
0.75
) {
alternativeCandidateAncestors.push(
this._getNodeAncestors(topCandidates[i])
);
}
}
var MINIMUM_TOPCANDIDATES = 3;
if (alternativeCandidateAncestors.length >= MINIMUM_TOPCANDIDATES) {
parentOfTopCandidate = topCandidate.parentNode;
while (parentOfTopCandidate.tagName !== "BODY") {
var listsContainingThisAncestor = 0;
for (
var ancestorIndex = 0;
ancestorIndex < alternativeCandidateAncestors.length &&
listsContainingThisAncestor < MINIMUM_TOPCANDIDATES;
ancestorIndex++
) {
listsContainingThisAncestor += Number(
alternativeCandidateAncestors[ancestorIndex].includes(
parentOfTopCandidate
)
);
}
if (listsContainingThisAncestor >= MINIMUM_TOPCANDIDATES) {
topCandidate = parentOfTopCandidate;
break;
}
parentOfTopCandidate = parentOfTopCandidate.parentNode;
}
}
if (!topCandidate.readability) {
this._initializeNode(topCandidate);
}
// Because of our bonus system, parents of candidates might have scores
// themselves. They get half of the node. There won't be nodes with higher
// scores than our topCandidate, but if we see the score going *up* in the first
// few steps up the tree, that's a decent sign that there might be more content
// lurking in other places that we want to unify in. The sibling stuff
// below does some of that - but only if we've looked high enough up the DOM
// tree.
parentOfTopCandidate = topCandidate.parentNode;
var lastScore = topCandidate.readability.contentScore;
// The scores shouldn't get too low.
var scoreThreshold = lastScore / 3;
while (parentOfTopCandidate.tagName !== "BODY") {
if (!parentOfTopCandidate.readability) {
parentOfTopCandidate = parentOfTopCandidate.parentNode;
continue;
}
var parentScore = parentOfTopCandidate.readability.contentScore;
if (parentScore < scoreThreshold) {
break;
}
if (parentScore > lastScore) {
// Alright! We found a better parent to use.
topCandidate = parentOfTopCandidate;
break;
}
lastScore = parentOfTopCandidate.readability.contentScore;
parentOfTopCandidate = parentOfTopCandidate.parentNode;
}
// If the top candidate is the only child, use parent instead. This will help sibling
// joining logic when adjacent content is actually located in parent's sibling node.
parentOfTopCandidate = topCandidate.parentNode;
while (
parentOfTopCandidate.tagName != "BODY" &&
parentOfTopCandidate.children.length == 1
) {
topCandidate = parentOfTopCandidate;
parentOfTopCandidate = topCandidate.parentNode;
}
if (!topCandidate.readability) {
this._initializeNode(topCandidate);
}
}
// Now that we have the top candidate, look through its siblings for content
// that might also be related. Things like preambles, content split by ads
// that we removed, etc.
var articleContent = doc.createElement("DIV");
if (isPaging) {
articleContent.id = "readability-content";
}
var siblingScoreThreshold = Math.max(
10,
topCandidate.readability.contentScore * 0.2
);
// Keep potential top candidate's parent node to try to get text direction of it later.
parentOfTopCandidate = topCandidate.parentNode;
var siblings = parentOfTopCandidate.children;
for (var s = 0, sl = siblings.length; s < sl; s++) {
var sibling = siblings[s];
var append = false;
this.log(
"Looking at sibling node:",
sibling,
sibling.readability
? "with score " + sibling.readability.contentScore
: ""
);
this.log(
"Sibling has score",
sibling.readability ? sibling.readability.contentScore : "Unknown"
);
if (sibling === topCandidate) {
append = true;
} else {
var contentBonus = 0;
// Give a bonus if sibling nodes and top candidates have the example same classname
if (
sibling.className === topCandidate.className &&
topCandidate.className !== ""
) {
contentBonus += topCandidate.readability.contentScore * 0.2;
}
if (
sibling.readability &&
sibling.readability.contentScore + contentBonus >=
siblingScoreThreshold
) {
append = true;
} else if (sibling.nodeName === "P") {
var linkDensity = this._getLinkDensity(sibling);
var nodeContent = this._getInnerText(sibling);
var nodeLength = nodeContent.length;
if (nodeLength > 80 && linkDensity < 0.25) {
append = true;
} else if (
nodeLength < 80 &&
nodeLength > 0 &&
linkDensity === 0 &&
nodeContent.search(/\.( |$)/) !== -1
) {
append = true;
}
}
}
if (append) {
this.log("Appending node:", sibling);
if (!this.ALTER_TO_DIV_EXCEPTIONS.includes(sibling.nodeName)) {
// We have a node that isn't a common block level element, like a form or td tag.
// Turn it into a div so it doesn't get filtered out later by accident.
this.log("Altering sibling:", sibling, "to div.");
sibling = this._setNodeTag(sibling, "DIV");
}
articleContent.appendChild(sibling);
// Fetch children again to make it compatible
// with DOM parsers without live collection support.
siblings = parentOfTopCandidate.children;
// siblings is a reference to the children array, and
// sibling is removed from the array when we call appendChild().
// As a result, we must revisit this index since the nodes
// have been shifted.
s -= 1;
sl -= 1;
}
}
if (this._debug) {
this.log("Article content pre-prep: " + articleContent.innerHTML);
}
// So we have all of the content that we need. Now we clean it up for presentation.
this._prepArticle(articleContent);
if (this._debug) {
this.log("Article content post-prep: " + articleContent.innerHTML);
}
if (neededToCreateTopCandidate) {
// We already created a fake div thing, and there wouldn't have been any siblings left
// for the previous loop, so there's no point trying to create a new div, and then
// move all the children over. Just assign IDs and class names here. No need to append
// because that already happened anyway.
topCandidate.id = "readability-page-1";
topCandidate.className = "page";
} else {
var div = doc.createElement("DIV");
div.id = "readability-page-1";
div.className = "page";
while (articleContent.firstChild) {
div.appendChild(articleContent.firstChild);
}
articleContent.appendChild(div);
}
if (this._debug) {
this.log("Article content after paging: " + articleContent.innerHTML);
}
var parseSuccessful = true;
// Now that we've gone through the full algorithm, check to see if
// we got any meaningful content. If we didn't, we may need to re-run
// grabArticle with different flags set. This gives us a higher likelihood of
// finding the content, and the sieve approach gives us a higher likelihood of
// finding the -right- content.
var textLength = this._getInnerText(articleContent, true).length;
if (textLength < this._charThreshold) {
parseSuccessful = false;
// eslint-disable-next-line no-unsanitized/property
page.innerHTML = pageCacheHtml;
this._attempts.push({
articleContent,
textLength,
});
if (this._flagIsActive(this.FLAG_STRIP_UNLIKELYS)) {
this._removeFlag(this.FLAG_STRIP_UNLIKELYS);
} else if (this._flagIsActive(this.FLAG_WEIGHT_CLASSES)) {
this._removeFlag(this.FLAG_WEIGHT_CLASSES);
} else if (this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY)) {
this._removeFlag(this.FLAG_CLEAN_CONDITIONALLY);
} else {
// No luck after removing flags, just return the longest text we found during the different loops
this._attempts.sort(function (a, b) {
return b.textLength - a.textLength;
});
// But first check if we actually have something
if (!this._attempts[0].textLength) {
return null;
}
articleContent = this._attempts[0].articleContent;
parseSuccessful = true;
}
}
if (parseSuccessful) {
// Find out text direction from ancestors of final top candidate.
var ancestors = [parentOfTopCandidate, topCandidate].concat(
this._getNodeAncestors(parentOfTopCandidate)
);
this._someNode(ancestors, function (ancestor) {
if (!ancestor.tagName) {
return false;
}
var articleDir = ancestor.getAttribute("dir");
if (articleDir) {
this._articleDir = articleDir;
return true;
}
return false;
});
return articleContent;
}
}
},
/**
* Converts some of the common HTML entities in string to their corresponding characters.
*
* @param str {string} - a string to unescape.
* @return string without HTML entity.
*/
_unescapeHtmlEntities(str) {
if (!str) {
return str;
}
var htmlEscapeMap = this.HTML_ESCAPE_MAP;
return str
.replace(/&(quot|amp|apos|lt|gt);/g, function (_, tag) {
return htmlEscapeMap[tag];
})
.replace(/&#(?:x([0-9a-f]+)|([0-9]+));/gi, function (_, hex, numStr) {
var num = parseInt(hex || numStr, hex ? 16 : 10);
// these character references are replaced by a conforming HTML parser
if (num == 0 || num > 0x10ffff || (num >= 0xd800 && num <= 0xdfff)) {
num = 0xfffd;
}
return String.fromCodePoint(num);
});
},
/**
 * Try to extract metadata from JSON-LD object.
 * For now, only Schema.org objects of type Article or its subtypes are supported.
 *
 * @param {Document} doc - document whose <script type="application/ld+json">
 * elements are scanned.
 * @return Object with any metadata that could be extracted (possibly none)
 */
_getJSONLD(doc) {
  var scripts = this._getAllNodesWithTag(doc, ["script"]);
  var metadata;
  // Only the first JSON-LD script that yields a matching Article object
  // is used; once `metadata` is set, later scripts are skipped.
  this._forEachNode(scripts, function (jsonLdElement) {
    if (
      !metadata &&
      jsonLdElement.getAttribute("type") === "application/ld+json"
    ) {
      try {
        // Strip CDATA markers if present
        var content = jsonLdElement.textContent.replace(
          /^\s*<!\[CDATA\[|\]\]>\s*$/g,
          ""
        );
        var parsed = JSON.parse(content);
        // Top-level array: pick the first entry whose @type looks like an
        // Article subtype; give up on this script if none matches.
        if (Array.isArray(parsed)) {
          parsed = parsed.find(it => {
            return (
              it["@type"] &&
              it["@type"].match(this.REGEXPS.jsonLdArticleTypes)
            );
          });
          if (!parsed) {
            return;
          }
        }
        // Require a schema.org @context, given either as a plain string
        // or as an object carrying a string @vocab.
        var schemaDotOrgRegex = /^https?\:\/\/schema\.org\/?$/;
        var matches =
          (typeof parsed["@context"] === "string" &&
            parsed["@context"].match(schemaDotOrgRegex)) ||
          (typeof parsed["@context"] === "object" &&
            typeof parsed["@context"]["@vocab"] == "string" &&
            parsed["@context"]["@vocab"].match(schemaDotOrgRegex));
        if (!matches) {
          return;
        }
        // No direct @type: fall back to the first Article-like node
        // inside a @graph collection.
        if (!parsed["@type"] && Array.isArray(parsed["@graph"])) {
          parsed = parsed["@graph"].find(it => {
            return (it["@type"] || "").match(this.REGEXPS.jsonLdArticleTypes);
          });
        }
        if (
          !parsed ||
          !parsed["@type"] ||
          !parsed["@type"].match(this.REGEXPS.jsonLdArticleTypes)
        ) {
          return;
        }
        metadata = {};
        if (
          typeof parsed.name === "string" &&
          typeof parsed.headline === "string" &&
          parsed.name !== parsed.headline
        ) {
          // we have both name and headline element in the JSON-LD. They should both be the same but some websites like aktualne.cz
          // put their own name into "name" and the article title to "headline" which confuses Readability. So we try to check if either
          // "name" or "headline" closely matches the html title, and if so, use that one. If not, then we use "name" by default.
          var title = this._getArticleTitle();
          var nameMatches = this._textSimilarity(parsed.name, title) > 0.75;
          var headlineMatches =
            this._textSimilarity(parsed.headline, title) > 0.75;
          if (headlineMatches && !nameMatches) {
            metadata.title = parsed.headline;
          } else {
            metadata.title = parsed.name;
          }
        } else if (typeof parsed.name === "string") {
          metadata.title = parsed.name.trim();
        } else if (typeof parsed.headline === "string") {
          metadata.title = parsed.headline.trim();
        }
        // author may be a single object or an array of author objects.
        if (parsed.author) {
          if (typeof parsed.author.name === "string") {
            metadata.byline = parsed.author.name.trim();
          } else if (
            Array.isArray(parsed.author) &&
            parsed.author[0] &&
            typeof parsed.author[0].name === "string"
          ) {
            metadata.byline = parsed.author
              .filter(function (author) {
                return author && typeof author.name === "string";
              })
              .map(function (author) {
                return author.name.trim();
              })
              .join(", ");
          }
        }
        if (typeof parsed.description === "string") {
          metadata.excerpt = parsed.description.trim();
        }
        if (parsed.publisher && typeof parsed.publisher.name === "string") {
          metadata.siteName = parsed.publisher.name.trim();
        }
        if (typeof parsed.datePublished === "string") {
          metadata.datePublished = parsed.datePublished.trim();
        }
      } catch (err) {
        // Malformed JSON-LD: log it and keep scanning remaining scripts.
        this.log(err.message);
      }
    }
  });
  return metadata ? metadata : {};
},
/**
 * Attempts to get title, byline, excerpt, site name and published-time
 * metadata for the article from <meta> tags, with values already extracted
 * from JSON-LD taking precedence.
 *
 * @param {Object} jsonld — object containing any metadata that
 * could be extracted from JSON-LD object.
 *
 * @return Object with optional "title", "byline", "excerpt", "siteName"
 * and "publishedTime" properties.
 */
_getArticleMetadata(jsonld) {
  var metadata = {};
  var values = {};
  var metaElements = this._doc.getElementsByTagName("meta");
  // property is a space-separated list of values
  var propertyPattern =
    /\s*(article|dc|dcterm|og|twitter)\s*:\s*(author|creator|description|published_time|title|site_name)\s*/gi;
  // name is a single value
  var namePattern =
    /^\s*(?:(dc|dcterm|og|twitter|parsely|weibo:(article|webpage))\s*[-\.:]\s*)?(author|creator|pub-date|description|title|site_name)\s*$/i;
  // Collect every matching meta tag's content into `values`, keyed by the
  // normalized property/name (e.g. "og:title", "parsely-author").
  this._forEachNode(metaElements, function (element) {
    var elementName = element.getAttribute("name");
    var elementProperty = element.getAttribute("property");
    var content = element.getAttribute("content");
    if (!content) {
      return;
    }
    var matches = null;
    var name = null;
    if (elementProperty) {
      matches = elementProperty.match(propertyPattern);
      if (matches) {
        // Convert to lowercase, and remove any whitespace
        // so we can match below.
        name = matches[0].toLowerCase().replace(/\s/g, "");
        // multiple authors
        values[name] = content.trim();
      }
    }
    if (!matches && elementName && namePattern.test(elementName)) {
      name = elementName;
      if (content) {
        // Convert to lowercase, remove any whitespace, and convert dots
        // to colons so we can match below.
        name = name.toLowerCase().replace(/\s/g, "").replace(/\./g, ":");
        values[name] = content.trim();
      }
    }
  });
  // get title — JSON-LD wins, then the various meta vocabularies in order.
  metadata.title =
    jsonld.title ||
    values["dc:title"] ||
    values["dcterm:title"] ||
    values["og:title"] ||
    values["weibo:article:title"] ||
    values["weibo:webpage:title"] ||
    values.title ||
    values["twitter:title"] ||
    values["parsely-title"];
  if (!metadata.title) {
    metadata.title = this._getArticleTitle();
  }
  // article:author is only usable when it holds a name, not a profile URL.
  const articleAuthor =
    typeof values["article:author"] === "string" &&
    !this._isUrl(values["article:author"])
      ? values["article:author"]
      : undefined;
  // get author
  metadata.byline =
    jsonld.byline ||
    values["dc:creator"] ||
    values["dcterm:creator"] ||
    values.author ||
    values["parsely-author"] ||
    articleAuthor;
  // get description
  metadata.excerpt =
    jsonld.excerpt ||
    values["dc:description"] ||
    values["dcterm:description"] ||
    values["og:description"] ||
    values["weibo:article:description"] ||
    values["weibo:webpage:description"] ||
    values.description ||
    values["twitter:description"];
  // get site name
  metadata.siteName = jsonld.siteName || values["og:site_name"];
  // get article published time
  metadata.publishedTime =
    jsonld.datePublished ||
    values["article:published_time"] ||
    values["parsely-pub-date"] ||
    null;
  // in many sites the meta value is escaped with HTML entities,
  // so here we need to unescape it
  metadata.title = this._unescapeHtmlEntities(metadata.title);
  metadata.byline = this._unescapeHtmlEntities(metadata.byline);
  metadata.excerpt = this._unescapeHtmlEntities(metadata.excerpt);
  metadata.siteName = this._unescapeHtmlEntities(metadata.siteName);
  metadata.publishedTime = this._unescapeHtmlEntities(metadata.publishedTime);
  return metadata;
},
/**
* Check if node is image, or if node contains exactly only one image
* whether as a direct child or as its descendants.
*
* @param Element
**/
_isSingleImage(node) {
while (node) {
if (node.tagName === "IMG") {
return true;
}
if (node.children.length !== 1 || node.textContent.trim() !== "") {
return false;
}
node = node.children[0];
}
return false;
},
/**
 * Find all <noscript> that are located after <img> nodes, and which contain only one
 * <img> element. Replace the first image with the image from inside the <noscript> tag,
 * and remove the <noscript> tag. This improves the quality of the images we use on
 * some sites (e.g. Medium).
 *
 * @param {Document} doc - document mutated in place.
 **/
_unwrapNoscriptImages(doc) {
  // Find img without source or attributes that might contains image, and remove it.
  // This is done to prevent a placeholder img is replaced by img from noscript in next step.
  var imgs = Array.from(doc.getElementsByTagName("img"));
  this._forEachNode(imgs, function (img) {
    for (var i = 0; i < img.attributes.length; i++) {
      var attr = img.attributes[i];
      switch (attr.name) {
        case "src":
        case "srcset":
        case "data-src":
        case "data-srcset":
          // Any of these attributes means the img is a real image; keep it.
          return;
      }
      // Any attribute value that looks like an image URL also counts.
      if (/\.(jpg|jpeg|png|webp)/i.test(attr.value)) {
        return;
      }
    }
    img.remove();
  });
  // Next find noscript and try to extract its image
  var noscripts = Array.from(doc.getElementsByTagName("noscript"));
  this._forEachNode(noscripts, function (noscript) {
    // Parse content of noscript and make sure it only contains image
    if (!this._isSingleImage(noscript)) {
      return;
    }
    var tmp = doc.createElement("div");
    // We're running in the document context, and using unmodified
    // document contents, so doing this should be safe.
    // (Also we heavily discourage people from allowing script to
    // run at all in this document...)
    // eslint-disable-next-line no-unsanitized/property
    tmp.innerHTML = noscript.innerHTML;
    // If noscript has previous sibling and it only contains image,
    // replace it with noscript content. However we also keep old
    // attributes that might contains image.
    var prevElement = noscript.previousElementSibling;
    if (prevElement && this._isSingleImage(prevElement)) {
      var prevImg = prevElement;
      if (prevImg.tagName !== "IMG") {
        // The sibling wraps the image — dig out the actual <img>.
        prevImg = prevElement.getElementsByTagName("img")[0];
      }
      var newImg = tmp.getElementsByTagName("img")[0];
      // Copy image-bearing attributes from the placeholder onto the
      // noscript image, preserving clashes under a "data-old-" prefix.
      for (var i = 0; i < prevImg.attributes.length; i++) {
        var attr = prevImg.attributes[i];
        if (attr.value === "") {
          continue;
        }
        if (
          attr.name === "src" ||
          attr.name === "srcset" ||
          /\.(jpg|jpeg|png|webp)/i.test(attr.value)
        ) {
          if (newImg.getAttribute(attr.name) === attr.value) {
            continue;
          }
          var attrName = attr.name;
          if (newImg.hasAttribute(attrName)) {
            attrName = "data-old-" + attrName;
          }
          newImg.setAttribute(attrName, attr.value);
        }
      }
      // Swap the placeholder sibling for the image extracted from <noscript>.
      noscript.parentNode.replaceChild(tmp.firstElementChild, prevElement);
    }
  });
},
/**
* Removes script tags from the document.
*
* @param Element
**/
_removeScripts(doc) {
this._removeNodes(this._getAllNodesWithTag(doc, ["script", "noscript"]));
},
/**
* Check if this node has only whitespace and a single element with given tag
* Returns false if the DIV node contains non-empty text nodes
* or if it contains no element with given tag or more than 1 element.
*
* @param Element
* @param string tag of child element
**/
_hasSingleTagInsideElement(element, tag) {
// There should be exactly 1 element child with given tag
if (element.children.length != 1 || element.children[0].tagName !== tag) {
return false;
}
// And there should be no text nodes with real content
return !this._someNode(element.childNodes, function (node) {
return (
node.nodeType === this.TEXT_NODE &&
this.REGEXPS.hasContent.test(node.textContent)
);
});
},
_isElementWithoutContent(node) {
return (
node.nodeType === this.ELEMENT_NODE &&
!node.textContent.trim().length &&
(!node.children.length ||
node.children.length ==
node.getElementsByTagName("br").length +
node.getElementsByTagName("hr").length)
);
},
/**
* Determine whether element has any children block level elements.
*
* @param Element
*/
_hasChildBlockElement(element) {
return this._someNode(element.childNodes, function (node) {
return (
this.DIV_TO_P_ELEMS.has(node.tagName) ||
this._hasChildBlockElement(node)
);
});
},
/***
* Determine if a node qualifies as phrasing content.
* https://developer.mozilla.org/en-US/docs/Web/Guide/HTML/Content_categories#Phrasing_content
**/
_isPhrasingContent(node) {
return (
node.nodeType === this.TEXT_NODE ||
this.PHRASING_ELEMS.includes(node.tagName) ||
((node.tagName === "A" ||
node.tagName === "DEL" ||
node.tagName === "INS") &&
this._everyNode(node.childNodes, this._isPhrasingContent))
);
},
_isWhitespace(node) {
return (
(node.nodeType === this.TEXT_NODE &&
node.textContent.trim().length === 0) ||
(node.nodeType === this.ELEMENT_NODE && node.tagName === "BR")
);
},
/**
* Get the inner text of a node - cross browser compatibly.
* This also strips out any excess whitespace to be found.
*
* @param Element
* @param Boolean normalizeSpaces (default: true)
* @return string
**/
_getInnerText(e, normalizeSpaces) {
normalizeSpaces =
typeof normalizeSpaces === "undefined" ? true : normalizeSpaces;
var textContent = e.textContent.trim();
if (normalizeSpaces) {
return textContent.replace(this.REGEXPS.normalize, " ");
}
return textContent;
},
/**
* Get the number of times a string s appears in the node e.
*
* @param Element
* @param string - what to split on. Default is ","
* @return number (integer)
**/
_getCharCount(e, s) {
s = s || ",";
return this._getInnerText(e).split(s).length - 1;
},
/**
* Remove the style attribute on every e and under.
* TODO: Test if getElementsByTagName(*) is faster.
*
* @param Element
* @return void
**/
_cleanStyles(e) {
if (!e || e.tagName.toLowerCase() === "svg") {
return;
}
// Remove `style` and deprecated presentational attributes
for (var i = 0; i < this.PRESENTATIONAL_ATTRIBUTES.length; i++) {
e.removeAttribute(this.PRESENTATIONAL_ATTRIBUTES[i]);
}
if (this.DEPRECATED_SIZE_ATTRIBUTE_ELEMS.includes(e.tagName)) {
e.removeAttribute("width");
e.removeAttribute("height");
}
var cur = e.firstElementChild;
while (cur !== null) {
this._cleanStyles(cur);
cur = cur.nextElementSibling;
}
},
/**
* Get the density of links as a percentage of the content
* This is the amount of text that is inside a link divided by the total text in the node.
*
* @param Element
* @return number (float)
**/
_getLinkDensity(element) {
var textLength = this._getInnerText(element).length;
if (textLength === 0) {
return 0;
}
var linkLength = 0;
// XXX implement _reduceNodeList?
this._forEachNode(element.getElementsByTagName("a"), function (linkNode) {
var href = linkNode.getAttribute("href");
var coefficient = href && this.REGEXPS.hashUrl.test(href) ? 0.3 : 1;
linkLength += this._getInnerText(linkNode).length * coefficient;
});
return linkLength / textLength;
},
/**
* Get an elements class/id weight. Uses regular expressions to tell if this
* element looks good or bad.
*
* @param Element
* @return number (Integer)
**/
_getClassWeight(e) {
if (!this._flagIsActive(this.FLAG_WEIGHT_CLASSES)) {
return 0;
}
var weight = 0;
// Look for a special classname
if (typeof e.className === "string" && e.className !== "") {
if (this.REGEXPS.negative.test(e.className)) {
weight -= 25;
}
if (this.REGEXPS.positive.test(e.className)) {
weight += 25;
}
}
// Look for a special ID
if (typeof e.id === "string" && e.id !== "") {
if (this.REGEXPS.negative.test(e.id)) {
weight -= 25;
}
if (this.REGEXPS.positive.test(e.id)) {
weight += 25;
}
}
return weight;
},
/**
* Clean a node of all elements of type "tag".
* (Unless it's a youtube/vimeo video. People love movies.)
*
* @param Element
* @param string tag to clean
* @return void
**/
_clean(e, tag) {
var isEmbed = ["object", "embed", "iframe"].includes(tag);
this._removeNodes(this._getAllNodesWithTag(e, [tag]), function (element) {
// Allow youtube and vimeo videos through as people usually want to see those.
if (isEmbed) {
// First, check the elements attributes to see if any of them contain youtube or vimeo
for (var i = 0; i < element.attributes.length; i++) {
if (this._allowedVideoRegex.test(element.attributes[i].value)) {
return false;
}
}
// For embed with <object> tag, check inner HTML as well.
if (
element.tagName === "object" &&
this._allowedVideoRegex.test(element.innerHTML)
) {
return false;
}
}
return true;
});
},
/**
* Check if a given node has one of its ancestor tag name matching the
* provided one.
* @param HTMLElement node
* @param String tagName
* @param Number maxDepth
* @param Function filterFn a filter to invoke to determine whether this node 'counts'
* @return Boolean
*/
_hasAncestorTag(node, tagName, maxDepth, filterFn) {
maxDepth = maxDepth || 3;
tagName = tagName.toUpperCase();
var depth = 0;
while (node.parentNode) {
if (maxDepth > 0 && depth > maxDepth) {
return false;
}
if (
node.parentNode.tagName === tagName &&
(!filterFn || filterFn(node.parentNode))
) {
return true;
}
node = node.parentNode;
depth++;
}
return false;
},
/**
* Return an object indicating how many rows and columns this table has.
*/
_getRowAndColumnCount(table) {
var rows = 0;
var columns = 0;
var trs = table.getElementsByTagName("tr");
for (var i = 0; i < trs.length; i++) {
var rowspan = trs[i].getAttribute("rowspan") || 0;
if (rowspan) {
rowspan = parseInt(rowspan, 10);
}
rows += rowspan || 1;
// Now look for column-related info
var columnsInThisRow = 0;
var cells = trs[i].getElementsByTagName("td");
for (var j = 0; j < cells.length; j++) {
var colspan = cells[j].getAttribute("colspan") || 0;
if (colspan) {
colspan = parseInt(colspan, 10);
}
columnsInThisRow += colspan || 1;
}
columns = Math.max(columns, columnsInThisRow);
}
return { rows, columns };
},
/**
 * Look for 'data' (as opposed to 'layout') tables, for which we use
 * similar checks as
 * https://searchfox.org/mozilla-central/rev/f82d5c549f046cb64ce5602bfd894b7ae807c8f8/accessible/generic/TableAccessible.cpp#19
 *
 * Sets `_readabilityDataTable` (an expando boolean) on every <table>
 * under root; later cleanup passes read it to decide what to keep.
 *
 * @param Element root
 */
_markDataTables(root) {
  var tables = root.getElementsByTagName("table");
  for (var i = 0; i < tables.length; i++) {
    var table = tables[i];
    // role="presentation" explicitly marks a layout table.
    var role = table.getAttribute("role");
    if (role == "presentation") {
      table._readabilityDataTable = false;
      continue;
    }
    var datatable = table.getAttribute("datatable");
    if (datatable == "0") {
      table._readabilityDataTable = false;
      continue;
    }
    // A summary attribute is a strong data-table signal.
    var summary = table.getAttribute("summary");
    if (summary) {
      table._readabilityDataTable = true;
      continue;
    }
    // A non-empty <caption> is likewise a data-table signal.
    var caption = table.getElementsByTagName("caption")[0];
    if (caption && caption.childNodes.length) {
      table._readabilityDataTable = true;
      continue;
    }
    // If the table has a descendant with any of these tags, consider a data table:
    var dataTableDescendants = ["col", "colgroup", "tfoot", "thead", "th"];
    var descendantExists = function (tag) {
      return !!table.getElementsByTagName(tag)[0];
    };
    if (dataTableDescendants.some(descendantExists)) {
      this.log("Data table because found data-y descendant");
      table._readabilityDataTable = true;
      continue;
    }
    // Nested tables indicate a layout table:
    if (table.getElementsByTagName("table")[0]) {
      table._readabilityDataTable = false;
      continue;
    }
    var sizeInfo = this._getRowAndColumnCount(table);
    if (sizeInfo.columns == 1 || sizeInfo.rows == 1) {
      // single column/row tables are commonly used for page layout purposes.
      table._readabilityDataTable = false;
      continue;
    }
    if (sizeInfo.rows >= 10 || sizeInfo.columns > 4) {
      table._readabilityDataTable = true;
      continue;
    }
    // Now just go by size entirely:
    table._readabilityDataTable = sizeInfo.rows * sizeInfo.columns > 10;
  }
},
/**
 * Convert images and figures that have properties like data-src into
 * images that can be loaded without JS, by copying image-looking attribute
 * values into src/srcset.
 *
 * @param Element root
 */
_fixLazyImages(root) {
  this._forEachNode(
    this._getAllNodesWithTag(root, ["img", "picture", "figure"]),
    function (elem) {
      // In some sites (e.g. Kotaku), they put 1px square image as base64 data uri in the src attribute.
      // So, here we check if the data uri is too short, just might as well remove it.
      if (elem.src && this.REGEXPS.b64DataUrl.test(elem.src)) {
        // Make sure it's not SVG, because SVG can have a meaningful image in under 133 bytes.
        var parts = this.REGEXPS.b64DataUrl.exec(elem.src);
        if (parts[1] === "image/svg+xml") {
          return;
        }
        // Make sure this element has other attributes which contains image.
        // If it doesn't, then this src is important and shouldn't be removed.
        var srcCouldBeRemoved = false;
        for (var i = 0; i < elem.attributes.length; i++) {
          var attr = elem.attributes[i];
          if (attr.name === "src") {
            continue;
          }
          if (/\.(jpg|jpeg|png|webp)/i.test(attr.value)) {
            srcCouldBeRemoved = true;
            break;
          }
        }
        // Here we assume if image is less than 100 bytes (or 133 after encoded to base64)
        // it will be too small, therefore it might be placeholder image.
        if (srcCouldBeRemoved) {
          var b64starts = parts[0].length;
          var b64length = elem.src.length - b64starts;
          if (b64length < 133) {
            elem.removeAttribute("src");
          }
        }
      }
      // also check for "null" to work around https://github.com/jsdom/jsdom/issues/2580
      // Elements that already have a usable src/srcset and don't look
      // lazy-loaded are left alone.
      if (
        (elem.src || (elem.srcset && elem.srcset != "null")) &&
        !elem.className.toLowerCase().includes("lazy")
      ) {
        return;
      }
      // Scan remaining attributes for image-URL-shaped values and promote
      // them to src/srcset. (`attr` here reuses the function-scoped var
      // hoisted from the loop above.)
      for (var j = 0; j < elem.attributes.length; j++) {
        attr = elem.attributes[j];
        if (
          attr.name === "src" ||
          attr.name === "srcset" ||
          attr.name === "alt"
        ) {
          continue;
        }
        var copyTo = null;
        // "foo.jpg 2x"-style values belong in srcset; bare URLs in src.
        if (/\.(jpg|jpeg|png|webp)\s+\d/.test(attr.value)) {
          copyTo = "srcset";
        } else if (/^\s*\S+\.(jpg|jpeg|png|webp)\S*\s*$/.test(attr.value)) {
          copyTo = "src";
        }
        if (copyTo) {
          //if this is an img or picture, set the attribute directly
          if (elem.tagName === "IMG" || elem.tagName === "PICTURE") {
            elem.setAttribute(copyTo, attr.value);
          } else if (
            elem.tagName === "FIGURE" &&
            !this._getAllNodesWithTag(elem, ["img", "picture"]).length
          ) {
            //if the item is a <figure> that does not contain an image or picture, create one and place it inside the figure
            //see the nytimes-3 testcase for an example
            var img = this._doc.createElement("img");
            img.setAttribute(copyTo, attr.value);
            elem.appendChild(img);
          }
        }
      }
    }
  );
},
_getTextDensity(e, tags) {
var textLength = this._getInnerText(e, true).length;
if (textLength === 0) {
return 0;
}
var childrenLength = 0;
var children = this._getAllNodesWithTag(e, tags);
this._forEachNode(
children,
child => (childrenLength += this._getInnerText(child, true).length)
);
return childrenLength / textLength;
},
/**
 * Clean an element of all tags of type "tag" if they look fishy.
 * "Fishy" is an algorithm based on content length, classnames, link density, number of images & embeds, etc.
 *
 * @param Element e - container cleaned in place.
 * @param string tag - tag name to consider ("table", "ul", "div", ...).
 * @return void
 **/
_cleanConditionally(e, tag) {
  if (!this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY)) {
    return;
  }
  // Gather counts for other typical elements embedded within.
  // Traverse backwards so we can remove nodes at the same time
  // without effecting the traversal.
  //
  // TODO: Consider taking into account original contentScore here.
  this._removeNodes(this._getAllNodesWithTag(e, [tag]), function (node) {
    // First check if this node IS data table, in which case don't remove it.
    var isDataTable = function (t) {
      return t._readabilityDataTable;
    };
    var isList = tag === "ul" || tag === "ol";
    if (!isList) {
      // A non-list node counts as list-like when its text is almost
      // entirely made up of nested <ul>/<ol> content.
      var listLength = 0;
      var listNodes = this._getAllNodesWithTag(node, ["ul", "ol"]);
      this._forEachNode(
        listNodes,
        list => (listLength += this._getInnerText(list).length)
      );
      isList = listLength / this._getInnerText(node).length > 0.9;
    }
    if (tag === "table" && isDataTable(node)) {
      return false;
    }
    // Next check if we're inside a data table, in which case don't remove it as well.
    if (this._hasAncestorTag(node, "table", -1, isDataTable)) {
      return false;
    }
    // Never strip content that lives inside a code sample.
    if (this._hasAncestorTag(node, "code")) {
      return false;
    }
    // keep element if it has a data tables
    if (
      [...node.getElementsByTagName("table")].some(
        tbl => tbl._readabilityDataTable
      )
    ) {
      return false;
    }
    var weight = this._getClassWeight(node);
    this.log("Cleaning Conditionally", node);
    var contentScore = 0;
    if (weight + contentScore < 0) {
      return true;
    }
    if (this._getCharCount(node, ",") < 10) {
      // If there are not very many commas, and the number of
      // non-paragraph elements is more than paragraphs or other
      // ominous signs, remove the element.
      var p = node.getElementsByTagName("p").length;
      var img = node.getElementsByTagName("img").length;
      // NOTE(review): the -100 offset means only nodes with far more than
      // 100 list items can trip the li-vs-p check below — inherited heuristic.
      var li = node.getElementsByTagName("li").length - 100;
      var input = node.getElementsByTagName("input").length;
      var headingDensity = this._getTextDensity(node, [
        "h1",
        "h2",
        "h3",
        "h4",
        "h5",
        "h6",
      ]);
      var embedCount = 0;
      var embeds = this._getAllNodesWithTag(node, [
        "object",
        "embed",
        "iframe",
      ]);
      for (var i = 0; i < embeds.length; i++) {
        // If this embed has attribute that matches video regex, don't delete it.
        for (var j = 0; j < embeds[i].attributes.length; j++) {
          if (this._allowedVideoRegex.test(embeds[i].attributes[j].value)) {
            return false;
          }
        }
        // For embed with <object> tag, check inner HTML as well.
        if (
          embeds[i].tagName === "object" &&
          this._allowedVideoRegex.test(embeds[i].innerHTML)
        ) {
          return false;
        }
        embedCount++;
      }
      var innerText = this._getInnerText(node);
      // toss any node whose inner text contains nothing but suspicious words
      if (
        this.REGEXPS.adWords.test(innerText) ||
        this.REGEXPS.loadingWords.test(innerText)
      ) {
        return true;
      }
      var contentLength = innerText.length;
      var linkDensity = this._getLinkDensity(node);
      var textishTags = ["SPAN", "LI", "TD"].concat(
        Array.from(this.DIV_TO_P_ELEMS)
      );
      var textDensity = this._getTextDensity(node, textishTags);
      var isFigureChild = this._hasAncestorTag(node, "figure");
      // apply shadiness checks, then check for exceptions
      const shouldRemoveNode = () => {
        const errs = [];
        if (!isFigureChild && img > 1 && p / img < 0.5) {
          errs.push(`Bad p to img ratio (img=${img}, p=${p})`);
        }
        if (!isList && li > p) {
          errs.push(`Too many li's outside of a list. (li=${li} > p=${p})`);
        }
        if (input > Math.floor(p / 3)) {
          errs.push(`Too many inputs per p. (input=${input}, p=${p})`);
        }
        if (
          !isList &&
          !isFigureChild &&
          headingDensity < 0.9 &&
          contentLength < 25 &&
          (img === 0 || img > 2) &&
          linkDensity > 0
        ) {
          errs.push(
            `Suspiciously short. (headingDensity=${headingDensity}, img=${img}, linkDensity=${linkDensity})`
          );
        }
        if (
          !isList &&
          weight < 25 &&
          linkDensity > 0.2 + this._linkDensityModifier
        ) {
          errs.push(
            `Low weight and a little linky. (linkDensity=${linkDensity})`
          );
        }
        if (weight >= 25 && linkDensity > 0.5 + this._linkDensityModifier) {
          errs.push(
            `High weight and mostly links. (linkDensity=${linkDensity})`
          );
        }
        if ((embedCount === 1 && contentLength < 75) || embedCount > 1) {
          errs.push(
            `Suspicious embed. (embedCount=${embedCount}, contentLength=${contentLength})`
          );
        }
        if (img === 0 && textDensity === 0) {
          errs.push(
            `No useful content. (img=${img}, textDensity=${textDensity})`
          );
        }
        if (errs.length) {
          this.log("Checks failed", errs);
          return true;
        }
        return false;
      };
      var haveToRemove = shouldRemoveNode();
      // Allow simple lists of images to remain in pages
      if (isList && haveToRemove) {
        for (var x = 0; x < node.children.length; x++) {
          let child = node.children[x];
          // Don't filter in lists with li's that contain more than one child
          if (child.children.length > 1) {
            return haveToRemove;
          }
        }
        let li_count = node.getElementsByTagName("li").length;
        // Only allow the list to remain if every li contains an image
        if (img == li_count) {
          return false;
        }
      }
      return haveToRemove;
    }
    return false;
  });
},
/**
* Clean out elements that match the specified conditions
*
* @param Element
* @param Function determines whether a node should be removed
* @return void
**/
_cleanMatchedNodes(e, filter) {
var endOfSearchMarkerNode = this._getNextNode(e, true);
var next = this._getNextNode(e);
while (next && next != endOfSearchMarkerNode) {
if (filter.call(this, next, next.className + " " + next.id)) {
next = this._removeAndGetNext(next);
} else {
next = this._getNextNode(next);
}
}
},
/**
* Clean out spurious headers from an Element.
*
* @param Element
* @return void
**/
_cleanHeaders(e) {
let headingNodes = this._getAllNodesWithTag(e, ["h1", "h2"]);
this._removeNodes(headingNodes, function (node) {
let shouldRemove = this._getClassWeight(node) < 0;
if (shouldRemove) {
this.log("Removing header with low class weight:", node);
}
return shouldRemove;
});
},
/**
* Check if this node is an H1 or H2 element whose content is mostly
* the same as the article title.
*
* @param Element the node to check.
* @return boolean indicating whether this is a title-like header.
*/
_headerDuplicatesTitle(node) {
if (node.tagName != "H1" && node.tagName != "H2") {
return false;
}
var heading = this._getInnerText(node, false);
this.log("Evaluating similarity of header:", heading, this._articleTitle);
return this._textSimilarity(this._articleTitle, heading) > 0.75;
},
_flagIsActive(flag) {
return (this._flags & flag) > 0;
},
_removeFlag(flag) {
this._flags = this._flags & ~flag;
},
_isProbablyVisible(node) {
// Have to null-check node.style and node.className.includes to deal with SVG and MathML nodes.
return (
(!node.style || node.style.display != "none") &&
(!node.style || node.style.visibility != "hidden") &&
!node.hasAttribute("hidden") &&
//check for "fallback-image" so that wikimedia math images are displayed
(!node.hasAttribute("aria-hidden") ||
node.getAttribute("aria-hidden") != "true" ||
(node.className &&
node.className.includes &&
node.className.includes("fallback-image")))
);
},
/**
 * Runs readability.
 *
 * Workflow:
 *  1. Prep the document by removing script tags, css, etc.
 *  2. Build readability's DOM tree.
 *  3. Grab the article content from the current dom tree.
 *  4. Replace the current DOM tree with the new one.
 *  5. Read peacefully.
 *
 * @return Object with title/byline/dir/lang/content/textContent/length/
 * excerpt/siteName/publishedTime, or null when no article was found.
 **/
parse() {
  // Avoid parsing too large documents, as per configuration option
  if (this._maxElemsToParse > 0) {
    var numTags = this._doc.getElementsByTagName("*").length;
    if (numTags > this._maxElemsToParse) {
      throw new Error(
        "Aborting parsing document; " + numTags + " elements found"
      );
    }
  }
  // Unwrap image from noscript
  this._unwrapNoscriptImages(this._doc);
  // Extract JSON-LD metadata before removing scripts
  var jsonLd = this._disableJSONLD ? {} : this._getJSONLD(this._doc);
  // Remove script tags from the document.
  this._removeScripts(this._doc);
  this._prepDocument();
  var metadata = this._getArticleMetadata(jsonLd);
  this._metadata = metadata;
  this._articleTitle = metadata.title;
  var articleContent = this._grabArticle();
  if (!articleContent) {
    // No plausible article content found.
    return null;
  }
  this.log("Grabbed: " + articleContent.innerHTML);
  this._postProcessContent(articleContent);
  // If we haven't found an excerpt in the article's metadata, use the article's
  // first paragraph as the excerpt. This is used for displaying a preview of
  // the article's content.
  if (!metadata.excerpt) {
    var paragraphs = articleContent.getElementsByTagName("p");
    if (paragraphs.length) {
      metadata.excerpt = paragraphs[0].textContent.trim();
    }
  }
  var textContent = articleContent.textContent;
  return {
    title: this._articleTitle,
    byline: metadata.byline || this._articleByline,
    dir: this._articleDir,
    lang: this._articleLang,
    content: this._serializer(articleContent),
    textContent,
    length: textContent.length,
    excerpt: metadata.excerpt,
    siteName: metadata.siteName || this._articleSiteName,
    publishedTime: metadata.publishedTime,
  };
},
};
// CommonJS export guard: expose Readability when loaded under Node/bundlers.
// In plain browser scripts `module` is undefined and this branch is skipped.
if (typeof module === "object") {
  /* eslint-disable-next-line no-redeclare */
  /* global module */
  module.exports = Readability;
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/history-manager.js | JavaScript | // Chat History Manager with IndexedDB operations
// Handles CRUD operations for saved conversations
import { initPromptDB } from './prompt-manager.js';
const DB_NAME = 'SmarterPanelDB'; // shared database, opened via prompt-manager.js
const CONVERSATIONS_STORE = 'conversations';
// Validation limits enforced by sanitizeString()/validateConversationData().
const MAX_TITLE_LENGTH = 200;
const MAX_CONTENT_LENGTH = 100000; // Longer for conversations
const MAX_NOTES_LENGTH = 5000;
const MAX_TAG_LENGTH = 30;
const MAX_TAGS_COUNT = 20;
// Cached IndexedDB connection; reset to null when found closed (see ensureDb).
let db = null;
// Retry policy for transient IndexedDB failures (exponential backoff).
const MAX_IDB_ATTEMPTS = 3;
const RETRY_DELAY_BASE_MS = 100;
// True when an IndexedDB error represents exhausted storage quota.
// Matches the modern error name and the legacy numeric code (22).
function isQuotaExceeded(error) {
  return Boolean(error) && (error.name === 'QuotaExceededError' || error.code === 22);
}
// Build the user-facing error surfaced when the storage quota is exhausted.
function buildQuotaError() {
  const message = 'Storage quota exceeded. Delete old conversations to free space.';
  return new Error(message);
}
// Ensure the cached IndexedDB connection is open, (re)opening it if needed.
async function ensureDb() {
  if (db !== null) {
    try {
      // Touching objectStoreNames throws once the connection is closed.
      void db.objectStoreNames;
      return;
    } catch (_) {
      db = null;
    }
  }
  db = await initPromptDB();
}
// Input sanitization helpers
// Coerce input to a trimmed string capped at maxLength; non-strings become ''.
function sanitizeString(str, maxLength) {
  if (typeof str !== 'string') {
    return '';
  }
  const trimmed = str.trim();
  return trimmed.length > maxLength ? trimmed.slice(0, maxLength) : trimmed;
}
// Validate conversation input before saving.
// Returns an array of human-readable error strings (empty when valid).
// NOTE: title/notes/content are auto-truncated by sanitizeString(), so only
// presence of content and the tag count are validated here.
function validateConversationData(data) {
  const errors = [];
  // Use a typeof check (matching validatePromptData in prompt-manager.js):
  // the old truthiness check threw a TypeError at .trim() when content was a
  // truthy non-string (e.g. a number) instead of reporting a validation error.
  if (typeof data.content !== 'string' || data.content.trim().length === 0) {
    errors.push('Conversation content is required');
  }
  if (data.tags && data.tags.length > MAX_TAGS_COUNT) {
    errors.push(`Maximum ${MAX_TAGS_COUNT} tags allowed`);
  }
  return errors;
}
// Generate searchable text from conversation
function generateSearchText(conversation) {
const parts = [
conversation.title,
conversation.content,
conversation.provider,
conversation.notes || '',
...conversation.tags
];
return parts.join(' ').toLowerCase();
}
// Generate auto title from content (first line or truncated content)
// Derive a display title from conversation content: the trimmed first line,
// ellipsized at maxLength; falls back to a default for empty content.
export function generateAutoTitle(content, maxLength = 60) {
  const [rawFirstLine] = content.split('\n');
  const firstLine = rawFirstLine.trim();
  if (firstLine.length > maxLength) {
    return `${firstLine.slice(0, maxLength - 3)}...`;
  }
  return firstLine.length > 0 ? firstLine : 'Untitled Conversation';
}
// Save new conversation
// Save a new conversation, or — when overwriteId points at an existing
// record — update it in place. All fields are validated and sanitized.
// Returns the stored record (including its IndexedDB id).
export async function saveConversation(conversationData) {
  await ensureDb();
  // Validate input
  const validationErrors = validateConversationData(conversationData);
  if (validationErrors.length > 0) {
    throw new Error(validationErrors.join(', '));
  }
  // Check if we should overwrite an existing conversation
  if (conversationData.overwriteId) {
    const existingConversation = await getConversation(conversationData.overwriteId);
    if (existingConversation) {
      const updates = {
        title: sanitizeString(conversationData.title || generateAutoTitle(conversationData.content), MAX_TITLE_LENGTH),
        content: sanitizeString(conversationData.content, MAX_CONTENT_LENGTH),
        provider: sanitizeString(conversationData.provider || 'unknown', 20),
        tags: Array.isArray(conversationData.tags)
          ? conversationData.tags.slice(0, MAX_TAGS_COUNT).map(tag => sanitizeString(tag, MAX_TAG_LENGTH)).filter(t => t)
          : [],
        notes: sanitizeString(conversationData.notes || '', MAX_NOTES_LENGTH),
        conversationId: sanitizeString(conversationData.conversationId || '', 200),
        url: sanitizeString(conversationData.url || '', 500),
        modifiedAt: Date.now()
      };
      // BUGFIX: only include timestamp when the caller actually supplied one.
      // The old code always spread `timestamp: conversationData.timestamp`,
      // and spreading a property whose value is undefined clobbered the
      // existing record's timestamp with undefined.
      if (conversationData.timestamp !== undefined) {
        updates.timestamp = conversationData.timestamp;
      }
      return await updateConversation(conversationData.overwriteId, updates);
    }
  }
  // Sanitize and prepare a fresh conversation record
  const now = Date.now();
  const conversation = {
    title: sanitizeString(conversationData.title || generateAutoTitle(conversationData.content), MAX_TITLE_LENGTH),
    content: sanitizeString(conversationData.content, MAX_CONTENT_LENGTH),
    provider: sanitizeString(conversationData.provider || 'unknown', 20),
    timestamp: conversationData.timestamp || now,
    tags: Array.isArray(conversationData.tags)
      ? conversationData.tags.slice(0, MAX_TAGS_COUNT).map(tag => sanitizeString(tag, MAX_TAG_LENGTH)).filter(t => t)
      : [],
    isFavorite: Boolean(conversationData.isFavorite),
    notes: sanitizeString(conversationData.notes || '', MAX_NOTES_LENGTH),
    conversationId: sanitizeString(conversationData.conversationId || '', 200),
    url: sanitizeString(conversationData.url || '', 500),
    modifiedAt: now,
    searchText: '' // Set below, once all searchable fields are sanitized
  };
  conversation.searchText = generateSearchText(conversation);
  return runWithRetry(() => {
    const transaction = db.transaction([CONVERSATIONS_STORE], 'readwrite');
    const store = transaction.objectStore(CONVERSATIONS_STORE);
    const request = store.add(conversation);
    // add() resolves with the auto-generated primary key.
    return wrapRequest(request, resolveValue => ({ ...conversation, id: resolveValue }));
  });
}
// Get conversation by ID
// Fetch a single conversation by primary key; resolves undefined when absent.
export async function getConversation(id) {
  await ensureDb();
  return runWithRetry(() => {
    const tx = db.transaction([CONVERSATIONS_STORE], 'readonly');
    const request = tx.objectStore(CONVERSATIONS_STORE).get(id);
    return wrapRequest(request, result => result);
  });
}
// Get all conversations
// Fetch every stored conversation; resolves [] when the store is empty.
export async function getAllConversations() {
  await ensureDb();
  return runWithRetry(() => {
    const tx = db.transaction([CONVERSATIONS_STORE], 'readonly');
    const request = tx.objectStore(CONVERSATIONS_STORE).getAll();
    return wrapRequest(request, result => result || []);
  });
}
// Update existing conversation
// Merge `updates` into an existing conversation and persist it.
// Rejects when no record with the given id exists; resolves the merged record.
export async function updateConversation(id, updates) {
  await ensureDb();
  return runWithRetry(() => new Promise((resolve, reject) => {
    const transaction = db.transaction([CONVERSATIONS_STORE], 'readwrite');
    const store = transaction.objectStore(CONVERSATIONS_STORE);
    const getRequest = store.get(id);
    getRequest.onsuccess = () => {
      const conversation = getRequest.result;
      if (!conversation) {
        reject(new Error(`Conversation with id ${id} not found`));
        return;
      }
      const updatedConversation = { ...conversation, ...updates, id, modifiedAt: Date.now() };
      // BUGFIX: regenerate searchText whenever a searchable field is present
      // in the patch. Use `in` rather than truthiness — the old check skipped
      // falsy values, so clearing notes/title to '' left stale searchText.
      const searchableFields = ['title', 'content', 'tags', 'notes', 'provider'];
      if (searchableFields.some(field => field in updates)) {
        updatedConversation.searchText = generateSearchText(updatedConversation);
      }
      const putRequest = store.put(updatedConversation);
      wrapRequest(putRequest, () => updatedConversation).then(resolve).catch(reject);
    };
    getRequest.onerror = () => reject(getRequest.error);
  }));
}
// Delete conversation
// Delete a conversation by primary key; resolves true on success.
export async function deleteConversation(id) {
  await ensureDb();
  return runWithRetry(() => {
    const tx = db.transaction([CONVERSATIONS_STORE], 'readwrite');
    const request = tx.objectStore(CONVERSATIONS_STORE).delete(id);
    return wrapRequest(request, () => true);
  });
}
// Search conversations with operator / field-query support, streaming over an
// IndexedDB cursor so the whole store is never materialized at once.
// Results are sorted by relevance score (desc), ties broken by newest first.
export async function searchConversations(searchText) {
  await ensureDb();
  // Parse search query for operators and field-specific searches
  const searchOptions = parseSearchQuery(searchText);
  return runWithRetry(() => new Promise((resolve, reject) => {
    const transaction = db.transaction([CONVERSATIONS_STORE], 'readonly');
    const store = transaction.objectStore(CONVERSATIONS_STORE);
    // Optimize by using indexes where possible
    let cursorSource;
    // A provider: filter lets us scan only that provider's records via the
    // 'provider' index instead of the whole store.
    if (searchOptions.fieldSearches.provider.length > 0) {
      const providerValue = searchOptions.fieldSearches.provider[0];
      const index = store.index('provider');
      cursorSource = index.openCursor(IDBKeyRange.only(providerValue));
    } else {
      // Use primary cursor for general search
      cursorSource = store.openCursor();
    }
    const results = [];
    cursorSource.onsuccess = (event) => {
      const cursor = event.target.result;
      if (cursor) {
        const conv = cursor.value;
        // Apply filters incrementally
        if (matchesSearchCriteria(conv, searchOptions)) {
          // Calculate relevance score and insert in sorted position
          const score = calculateRelevanceScore(conv, searchOptions);
          // Linear scan (not binary search) for the insertion point; keeps
          // `results` ordered by score desc, then by newer timestamp.
          let insertIndex = results.length;
          for (let i = 0; i < results.length; i++) {
            const existingScore = results[i]._relevanceScore;
            if (score > existingScore ||
              (score === existingScore && conv.timestamp > results[i].timestamp)) {
              insertIndex = i;
              break;
            }
          }
          results.splice(insertIndex, 0, { ...conv, _relevanceScore: score });
        }
        cursor.continue();
      } else {
        // Cursor exhausted: strip the internal score field before returning.
        const cleanedResults = results.map(({ _relevanceScore, ...conv }) => conv);
        resolve(cleanedResults);
      }
    };
    cursorSource.onerror = () => reject(cursorSource.error);
  }));
}
// Parse search query to extract operators and field filters
// Parse a raw query into structured search options: quoted exact phrases,
// -excluded terms, field:value filters (title/content/tag/provider),
// OR/AND operators, and plain terms. All text is lowercased.
function parseSearchQuery(searchText) {
  const options = {
    terms: [],
    exactPhrases: [],
    excludeTerms: [],
    fieldSearches: {
      title: [],
      content: [],
      tag: [],
      provider: []
    },
    operator: 'AND' // default operator
  };
  let remaining = searchText;
  // Extract exact phrases (quoted strings)
  const exactPhraseRegex = /"([^"]+)"/g;
  let match;
  while ((match = exactPhraseRegex.exec(searchText)) !== null) {
    options.exactPhrases.push(match[1].toLowerCase());
    remaining = remaining.replace(match[0], ' ');
  }
  // Split remaining text into tokens
  const tokens = remaining.split(/\s+/).filter(t => t.trim());
  for (const token of tokens) {
    const lower = token.toLowerCase();
    // Field-specific search (field:value). Split on the FIRST colon only:
    // String.prototype.split(':', 2) truncates the value at the second colon
    // (e.g. "provider:chat:gpt" lost ":gpt"), so use indexOf/slice instead.
    const sep = lower.indexOf(':');
    if (sep !== -1) {
      const field = lower.slice(0, sep);
      const value = lower.slice(sep + 1);
      if (value && ['title', 'content', 'tag', 'provider'].includes(field)) {
        options.fieldSearches[field].push(value);
        continue;
      }
    }
    // Check for exclude operator
    if (lower.startsWith('-') || lower === 'not') {
      if (lower.startsWith('-') && lower.length > 1) {
        options.excludeTerms.push(lower.substring(1));
      }
      continue;
    }
    // Check for OR operator
    if (lower === 'or') {
      options.operator = 'OR';
      continue;
    }
    // Check for AND operator (explicit)
    if (lower === 'and') {
      options.operator = 'AND';
      continue;
    }
    // Regular search term
    if (lower) {
      options.terms.push(lower);
    }
  }
  return options;
}
// Check if conversation matches search criteria
// Decide whether a conversation satisfies the parsed search options:
// no excluded term may appear, every exact phrase must appear, every
// populated field filter must hit, and plain terms combine per operator
// (AND: all must match, OR: at least one). Substring and fuzzy matches count.
function matchesSearchCriteria(conv, options) {
  const { terms, exactPhrases, excludeTerms, fieldSearches, operator } = options;
  // Any excluded term present → reject immediately.
  if (excludeTerms.some(term => conv.searchText.includes(term))) {
    return false;
  }
  // Every quoted phrase must appear verbatim.
  if (!exactPhrases.every(phrase => conv.searchText.includes(phrase))) {
    return false;
  }
  // Each field filter with values must match at least one of them.
  for (const [field, values] of Object.entries(fieldSearches)) {
    if (values.length === 0) {
      continue;
    }
    const fieldText = getFieldText(conv, field);
    const fieldHit = values.some(
      value => fieldText.includes(value) || fuzzyMatch(fieldText, value)
    );
    if (!fieldHit) {
      return false;
    }
  }
  // Plain terms: OR needs one hit, AND needs all.
  if (terms.length === 0) {
    return true;
  }
  const termHits = term =>
    conv.searchText.includes(term) || fuzzyMatch(conv.searchText, term);
  return operator === 'OR' ? terms.some(termHits) : terms.every(termHits);
}
// Get field-specific text for searching
// Extract the lowercase text of one searchable field; '' for unknown fields.
function getFieldText(conv, field) {
  if (field === 'title') {
    return conv.title.toLowerCase();
  }
  if (field === 'content') {
    return conv.content.toLowerCase();
  }
  if (field === 'tag') {
    return conv.tags.join(' ').toLowerCase();
  }
  if (field === 'provider') {
    return conv.provider.toLowerCase();
  }
  return '';
}
// Fuzzy matching for typo tolerance (Levenshtein distance ≤ 2)
// Typo-tolerant match: true when any whitespace-separated word of `text` is
// within Levenshtein distance 2 of `term`. Disabled for terms of <= 4 chars,
// where distance-2 matches would be mostly noise.
function fuzzyMatch(text, term) {
  if (term.length <= 4) {
    return false;
  }
  return text.split(/\s+/).some(word => levenshteinDistance(word, term) <= 2);
}
// Calculate Levenshtein distance between two strings
// Levenshtein edit distance (insertions, deletions, substitutions) between
// two strings. Classic DP, keeping only the previous row (O(len2) space).
function levenshteinDistance(str1, str2) {
  const len1 = str1.length;
  const len2 = str2.length;
  // Row for the empty prefix of str1: distance is just the prefix length.
  let previousRow = Array.from({ length: len2 + 1 }, (_, j) => j);
  for (let i = 1; i <= len1; i++) {
    const currentRow = [i];
    for (let j = 1; j <= len2; j++) {
      const substitutionCost = str1[i - 1] === str2[j - 1] ? 0 : 1;
      currentRow[j] = Math.min(
        previousRow[j] + 1, // deletion
        currentRow[j - 1] + 1, // insertion
        previousRow[j - 1] + substitutionCost // substitution
      );
    }
    previousRow = currentRow;
  }
  return previousRow[len2];
}
// Calculate relevance score for ranking
// Rank a matching conversation: per-term weights of title 10 / tag 5 /
// notes 3 / content 1, +5 per field filter used, +8 per exact phrase,
// plus a small recency bonus (<7 days: +3, <30 days: +1).
function calculateRelevanceScore(conv, options) {
  const { terms, exactPhrases, fieldSearches } = options;
  let score = 0;
  // Lowercase each field once instead of per-term.
  const titleText = conv.title.toLowerCase();
  const tagText = conv.tags.join(' ').toLowerCase();
  const notesText = conv.notes ? conv.notes.toLowerCase() : null;
  const contentText = conv.content.toLowerCase();
  for (const term of [...terms, ...exactPhrases]) {
    if (titleText.includes(term)) {
      score += 10; // title hits weigh the most
    }
    if (tagText.includes(term)) {
      score += 5;
    }
    if (notesText && notesText.includes(term)) {
      score += 3;
    }
    if (contentText.includes(term)) {
      score += 1; // content hits weigh the least
    }
  }
  // Flat bonus for each field-specific filter the user employed.
  for (const values of Object.values(fieldSearches)) {
    if (values.length > 0) {
      score += 5;
    }
  }
  score += exactPhrases.length * 8;
  // Recency bonus: newer conversations rank slightly higher.
  const daysSinceCreation = (Date.now() - conv.timestamp) / (1000 * 60 * 60 * 24);
  if (daysSinceCreation < 7) {
    score += 3;
  } else if (daysSinceCreation < 30) {
    score += 1;
  }
  return score;
}
// Filter by provider
// Fetch conversations for one provider via the 'provider' index.
// Falls back to all conversations when provider is missing or not a string.
export async function getConversationsByProvider(provider) {
  await ensureDb();
  if (typeof provider !== 'string' || provider === '') {
    return getAllConversations();
  }
  return runWithRetry(() => {
    const tx = db.transaction([CONVERSATIONS_STORE], 'readonly');
    const providerIndex = tx.objectStore(CONVERSATIONS_STORE).index('provider');
    const request = providerIndex.getAll(provider);
    return wrapRequest(request, result => result || []);
  });
}
// Get favorite conversations.
// BUGFIX: isFavorite is stored as a boolean (see saveConversation), and
// booleans are NOT valid IndexedDB keys — such records never appear in the
// 'isFavorite' index, so the old index.getAll(1) query always returned [].
// Scan the store and filter in memory instead.
export async function getFavoriteConversations() {
  await ensureDb();
  return runWithRetry(() => {
    const transaction = db.transaction([CONVERSATIONS_STORE], 'readonly');
    const store = transaction.objectStore(CONVERSATIONS_STORE);
    const request = store.getAll();
    return wrapRequest(request, value => (value || []).filter(c => Boolean(c.isFavorite)));
  });
}
// Toggle favorite status
// Flip a conversation's favorite flag; rejects when the id is unknown.
export async function toggleConversationFavorite(id) {
  const conversation = await getConversation(id);
  if (!conversation) {
    throw new Error(`Conversation ${id} not found`);
  }
  const isFavorite = !conversation.isFavorite;
  return await updateConversation(id, { isFavorite });
}
// Get conversations by date range
// Fetch conversations whose timestamp falls within [startDate, endDate]
// (inclusive, epoch-millisecond bounds).
export async function getConversationsByDateRange(startDate, endDate) {
  await ensureDb();
  const allConversations = await getAllConversations();
  const inRange = conv => conv.timestamp >= startDate && conv.timestamp <= endDate;
  return allConversations.filter(inRange);
}
// Get all tags used in conversations
// Collect every distinct tag used across all conversations, sorted.
export async function getAllConversationTags() {
  const conversations = await getAllConversations();
  const uniqueTags = new Set(conversations.flatMap(c => c.tags));
  return [...uniqueTags].sort();
}
// Check for duplicate conversation by conversationId
// Look up a stored conversation by its source conversationId (used for
// duplicate detection). Resolves null when absent or when the id is falsy.
export async function findConversationByConversationId(conversationId) {
  if (!conversationId) {
    return null;
  }
  await ensureDb();
  return runWithRetry(() => {
    const tx = db.transaction([CONVERSATIONS_STORE], 'readonly');
    const idIndex = tx.objectStore(CONVERSATIONS_STORE).index('conversationId');
    const request = idIndex.get(conversationId);
    return wrapRequest(request, result => result || null);
  });
}
// Export conversations as JSON
// Build a versioned JSON-serializable export of every conversation.
export async function exportConversations() {
  const conversations = await getAllConversations();
  return {
    version: '1.0',
    exportDate: new Date().toISOString(),
    conversations
  };
}
// Import conversations from JSON
// Import conversations from an export payload.
// mergeStrategy 'overwrite' saves every record; 'skip' skips records whose
// title matches an existing conversation with a timestamp within one minute.
// Returns { imported, skipped, errors }.
export async function importConversations(data, mergeStrategy = 'skip') {
  if (!data || !data.conversations || !Array.isArray(data.conversations)) {
    throw new Error('Invalid import data format');
  }
  const results = {
    imported: 0,
    skipped: 0,
    errors: []
  };
  // PERF: load the existing records once — the old code re-fetched the whole
  // store on every iteration (O(n*m)). Newly saved records are appended to
  // this list so duplicates within the import batch are still detected.
  const existing = mergeStrategy === 'skip' ? await getAllConversations() : [];
  for (const conversationData of data.conversations) {
    try {
      // Remove id to let IndexedDB assign new ones
      const { id, ...conversationWithoutId } = conversationData;
      if (mergeStrategy === 'overwrite') {
        await saveConversation(conversationWithoutId);
        results.imported++;
      } else if (mergeStrategy === 'skip') {
        // Duplicate = same title and timestamp within 1 minute
        const isDuplicate = existing.some(c =>
          c.title === conversationData.title &&
          Math.abs(c.timestamp - conversationData.timestamp) < 60000
        );
        if (isDuplicate) {
          results.skipped++;
        } else {
          const saved = await saveConversation(conversationWithoutId);
          existing.push(saved);
          results.imported++;
        }
      }
    } catch (error) {
      results.errors.push({ conversation: conversationData.title, error: error.message });
    }
  }
  return results;
}
// Clear all conversations
// Remove every conversation from the store; resolves true on success.
export async function clearAllConversations() {
  await ensureDb();
  return runWithRetry(() => {
    const tx = db.transaction([CONVERSATIONS_STORE], 'readwrite');
    const request = tx.objectStore(CONVERSATIONS_STORE).clear();
    return wrapRequest(request, () => true);
  });
}
// Get statistics
// Aggregate store statistics: totals, favorite count, per-provider counts,
// and oldest/newest timestamps (null when the store is empty).
export async function getConversationStats() {
  const conversations = await getAllConversations();
  const byProvider = {};
  for (const conv of conversations) {
    byProvider[conv.provider] = (byProvider[conv.provider] || 0) + 1;
  }
  const timestamps = conversations.map(c => c.timestamp);
  return {
    total: conversations.length,
    favorites: conversations.filter(c => c.isFavorite).length,
    byProvider,
    oldestTimestamp: timestamps.length > 0 ? Math.min(...timestamps) : null,
    newestTimestamp: timestamps.length > 0 ? Math.max(...timestamps) : null
  };
}
// Helper functions for IndexedDB operations
// Run an IndexedDB operation with retry semantics (see handleIdbError for the
// backoff policy). The operation may fail synchronously — e.g. db.transaction
// throwing on a closed connection — or asynchronously via a rejected promise;
// both failure paths funnel into the same handler.
function runWithRetry(operation, attempt = 1) {
  return new Promise((resolve, reject) => {
    try {
      const result = operation();
      // Promise.resolve tolerates operations that return plain values.
      Promise.resolve(result).then(resolve).catch((error) => {
        handleIdbError(error, operation, attempt, resolve, reject);
      });
    } catch (error) {
      // Synchronous throw from the operation itself.
      handleIdbError(error, operation, attempt, resolve, reject);
    }
  });
}
// Decide how a failed IndexedDB operation proceeds: quota errors fail fast
// with a friendly message, other errors retry with exponential backoff
// (100ms, 200ms, ...) up to MAX_IDB_ATTEMPTS, then reject with the raw error.
function handleIdbError(error, operation, attempt, resolve, reject) {
  if (isQuotaExceeded(error)) {
    // Quota exhaustion is permanent — retrying cannot help.
    reject(buildQuotaError());
    return;
  }
  if (attempt >= MAX_IDB_ATTEMPTS) {
    reject(error);
    return;
  }
  const delay = RETRY_DELAY_BASE_MS * 2 ** (attempt - 1);
  setTimeout(() => {
    runWithRetry(operation, attempt + 1).then(resolve).catch(reject);
  }, delay);
}
// Promisify an IDBRequest. On success the result is passed through `mapper`
// (when provided); on error, quota failures are replaced with the friendly
// quota error, everything else rejects with the raw request error.
function wrapRequest(request, mapper) {
  return new Promise((resolve, reject) => {
    request.onsuccess = () => {
      const raw = request.result;
      resolve(typeof mapper === 'function' ? mapper(raw) : raw);
    };
    request.onerror = () => {
      reject(isQuotaExceeded(request.error) ? buildQuotaError() : request.error);
    };
  });
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/html-utils.js | JavaScript | /**
* HTML utility functions for safe template rendering
*/
/**
* Escapes HTML special characters to prevent XSS
* @param {string} text - Text to escape
* @returns {string} - Escaped HTML-safe text
*/
/**
 * Escapes HTML special characters to prevent XSS by round-tripping the text
 * through a detached DOM element (textContent assignment escapes, innerHTML
 * reads back the entity-encoded form).
 * @param {string} text - Text to escape (null/undefined become '')
 * @returns {string} - Escaped HTML-safe text
 */
export function escapeHtml(text) {
  if (text == null) {
    return '';
  }
  const scratch = document.createElement('div');
  scratch.textContent = String(text);
  return scratch.innerHTML;
}
/**
* Tagged template literal for safe HTML rendering with automatic escaping
* Values are automatically escaped unless explicitly marked as safe
*
* @example
* const name = '<script>alert("xss")</script>';
* const safeHtml = html`<div>Hello ${name}</div>`;
* // Result: <div>Hello <script>alert("xss")</script></div>
*
* @param {TemplateStringsArray} strings - Template string parts
* @param {...any} values - Values to interpolate
* @returns {string} - Safe HTML string
*/
/**
 * Tagged template literal for safe HTML rendering. Interpolated values are
 * escaped via escapeHtml unless wrapped with unsafeHtml(); null/undefined
 * values render as nothing.
 * @param {TemplateStringsArray} strings - Template string parts
 * @param {...any} values - Values to interpolate
 * @returns {string} - Safe HTML string
 */
export function html(strings, ...values) {
  let out = '';
  for (let i = 0; i < strings.length; i++) {
    out += strings[i];
    const value = values[i];
    if (value === undefined || value === null) {
      continue;
    }
    if (value && value.__isSafeHtml) {
      // Explicitly trusted fragment — inserted verbatim.
      out += value.html;
    } else {
      out += escapeHtml(String(value));
    }
  }
  return out;
}
/**
* Marks a string as safe HTML (won't be escaped)
* USE WITH EXTREME CAUTION - only for trusted, already-sanitized HTML
*
* @param {string} htmlString - Pre-sanitized HTML string
* @returns {{__isSafeHtml: boolean, html: string}} - Safe HTML object
*/
/**
 * Marks a string as safe HTML so the html`` tag inserts it without escaping.
 * USE WITH EXTREME CAUTION — only for trusted, already-sanitized HTML.
 * @param {string} htmlString - Pre-sanitized HTML string
 * @returns {{__isSafeHtml: boolean, html: string}} - Safe HTML marker object
 */
export function unsafeHtml(htmlString) {
  const marker = {
    __isSafeHtml: true,
    html: htmlString,
  };
  return marker;
}
/**
* Renders an array using a template function, escaping each item
* @param {Array} items - Array of items to render
* @param {Function} templateFn - Template function for each item
* @param {string} separator - Separator between items (default: '')
* @returns {string} - Rendered HTML
*/
/**
 * Renders an array of items through a template function and joins the parts.
 * NOTE: items are passed to templateFn as-is — nothing is escaped here; the
 * template function (e.g. using the html`` tag) is responsible for escaping.
 * @param {Array} items - Items to render (non-arrays yield '')
 * @param {Function} templateFn - Template for each item (receives item, index, array)
 * @param {string} separator - Separator between items (default: '')
 * @returns {string} - Rendered HTML
 */
export function renderList(items, templateFn, separator = '') {
  if (!Array.isArray(items)) {
    return '';
  }
  const parts = items.map(templateFn);
  return parts.join(separator);
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/i18n.js | JavaScript | // T074: Internationalization (i18n) utility module
// Provides helper functions for Chrome i18n API
// Translation cache for a user-selected language override, populated by
// initializeLanguage(); null until initialized (t() then falls back to
// Chrome's native i18n). currentLocale records which locale the cache holds.
let translationCache = null;
let currentLocale = null;
/**
* Load translations from a specific locale
* @param {string} locale - Locale code (e.g., 'en', 'zh_CN', 'zh_TW')
* @returns {Promise<Object>} Translation messages object
*/
/**
 * Load the messages.json bundle for one locale from the extension package.
 * @param {string} locale - Locale code (e.g. 'en', 'zh_CN', 'zh_TW')
 * @returns {Promise<Object|null>} Messages object, or null when loading fails
 */
async function loadTranslations(locale) {
  try {
    const url = chrome.runtime.getURL(`_locales/${locale}/messages.json`);
    const response = await fetch(url);
    return await response.json();
  } catch (error) {
    console.warn(`Failed to load translations for locale: ${locale}`, error);
    return null;
  }
}
/**
* Initialize i18n with user's preferred language
* @param {string} preferredLocale - User's preferred locale from settings
*/
/**
 * Initialize the translation cache with the user's preferred language.
 * Resolution order: explicit argument → chrome.storage.sync 'language' →
 * browser UI language. Falls back to English if the chosen bundle fails.
 * @param {string} preferredLocale - Preferred locale from settings, optional
 */
export async function initializeLanguage(preferredLocale = null) {
  let locale = preferredLocale;
  if (!locale) {
    try {
      const stored = await chrome.storage.sync.get({ language: null });
      locale = stored.language;
    } catch (error) {
      // Storage unavailable — fall through to the browser default below.
    }
  }
  if (!locale) {
    locale = getBrowserLocale();
  }
  translationCache = await loadTranslations(locale);
  currentLocale = locale;
  // Last resort: English bundle, unless that is what just failed.
  if (!translationCache && locale !== 'en') {
    translationCache = await loadTranslations('en');
    currentLocale = 'en';
  }
}
/**
* Get browser's locale in our supported format
* @returns {string} Locale code
*/
/**
 * Map the browser UI language onto one of the supported locales.
 * Chinese variants resolve to zh_TW (TW/HK/Traditional) or zh_CN;
 * everything else resolves to English.
 * @returns {string} Locale code ('en' | 'zh_CN' | 'zh_TW')
 */
function getBrowserLocale() {
  const browserLang = chrome.i18n.getUILanguage();
  if (!browserLang.startsWith('zh')) {
    return 'en';
  }
  const isTraditional =
    browserLang.includes('TW') ||
    browserLang.includes('HK') ||
    browserLang.includes('Hant');
  return isTraditional ? 'zh_TW' : 'zh_CN';
}
/**
 * Get translated message with substitutions support.
 *
 * Looks up `key` in the custom translation cache first (populated by
 * initializeLanguage); falls back to Chrome's native i18n, and finally to
 * the key itself when no translation exists anywhere.
 *
 * @param {string} key - Message key from messages.json
 * @param {string|string[]} substitutions - Optional substitution values
 * @returns {string} Translated message
 */
export function t(key, substitutions = null) {
  // If we have custom translations loaded, use them
  if (translationCache && translationCache[key]) {
    let message = translationCache[key].message || '';
    // Handle substitutions
    if (substitutions) {
      const subs = Array.isArray(substitutions) ? substitutions : [substitutions];
      subs.forEach((sub, index) => {
        // Chrome-style positional placeholder: $1, $2, ...
        const placeholder = `$${index + 1}`;
        message = message.replace(new RegExp(`\\$${index + 1}`, 'g'), sub);
        // Also resolve named $PLACEHOLDER$ forms whose declared `content`
        // maps to this positional placeholder (messages.json convention).
        if (translationCache[key].placeholders) {
          Object.entries(translationCache[key].placeholders).forEach(([name, config]) => {
            if (config.content === placeholder) {
              message = message.replace(new RegExp(`\\$${name.toUpperCase()}\\$`, 'g'), sub);
            }
          });
        }
      });
    }
    return message;
  }
  // Fallback to Chrome's native i18n
  return chrome.i18n.getMessage(key, substitutions) || key;
}
/**
* Translate all elements with data-i18n attribute
* @param {HTMLElement} root - Root element to search (default: document)
*/
/**
 * Translate all elements carrying data-i18n* attributes under `root`.
 * Supported attributes: data-i18n (textContent), data-i18n-html (innerHTML),
 * data-i18n-placeholder, data-i18n-title, data-i18n-aria (aria-label).
 * @param {HTMLElement} root - Root element to search (default: document)
 */
export function translatePage(root = document) {
  // Resolve each attribute's key through t() and apply it via its setter.
  const applyTranslations = (attr, setter) => {
    root.querySelectorAll(`[${attr}]`).forEach(element => {
      const key = element.getAttribute(attr);
      if (key) {
        setter(element, t(key));
      }
    });
  };
  applyTranslations('data-i18n', (el, msg) => { el.textContent = msg; });
  // innerHTML is intentional here: some translations contain markup.
  applyTranslations('data-i18n-html', (el, msg) => { el.innerHTML = msg; });
  applyTranslations('data-i18n-placeholder', (el, msg) => { el.placeholder = msg; });
  applyTranslations('data-i18n-title', (el, msg) => { el.title = msg; });
  applyTranslations('data-i18n-aria', (el, msg) => { el.setAttribute('aria-label', msg); });
}
/**
* Get current UI language
* @returns {string} Language code (e.g., 'en', 'zh_CN', 'zh_TW')
*/
/**
 * Get the browser's current UI language.
 * @returns {string} Language code reported by chrome.i18n (e.g. 'en')
 */
export function getCurrentLanguage() {
  const uiLanguage = chrome.i18n.getUILanguage();
  return uiLanguage;
}
/**
* Initialize i18n for a page
* Call this on DOMContentLoaded
*/
/**
 * Initialize i18n for a page: translate immediately when the DOM is ready,
 * otherwise defer until DOMContentLoaded.
 */
export function initI18n() {
  const ready = document.readyState !== 'loading';
  if (ready) {
    translatePage();
  } else {
    document.addEventListener('DOMContentLoaded', () => translatePage());
  }
}
// Export as default for convenience
// Default export bundling the public i18n API for `import i18n from …` use;
// the same functions are also available as named exports.
export default {
  t,
  translatePage,
  getCurrentLanguage,
  initI18n,
  initializeLanguage
};
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/messaging.js | JavaScript | const DEFAULT_MESSAGE_TIMEOUT_MS = 2000;
// Send a chrome.runtime message.
// With expectResponse=true (default) the promise resolves with the response
// or rejects after `timeout` ms / on chrome.runtime.lastError.
// With expectResponse=false it resolves immediately (fire-and-forget).
export function sendMessageWithTimeout(message, options = {}) {
  const { timeout = DEFAULT_MESSAGE_TIMEOUT_MS, expectResponse = true } = options;
  return new Promise((resolve, reject) => {
    let completed = false;
    const timer = expectResponse
      ? setTimeout(() => {
          if (!completed) {
            completed = true;
            reject(new Error(`Message timeout${message?.action ? `: ${message.action}` : ''}`));
          }
        }, timeout)
      : null;
    try {
      chrome.runtime.sendMessage(message, (response) => {
        if (!expectResponse) {
          // BUGFIX: read lastError so Chrome doesn't log
          // "Unchecked runtime.lastError" for fire-and-forget sends.
          // The promise was already resolved synchronously below, so do not
          // resolve again here (the old code double-resolved).
          void chrome.runtime.lastError;
          return;
        }
        if (completed) {
          return; // the timeout won the race
        }
        completed = true;
        if (timer) clearTimeout(timer);
        const lastError = chrome.runtime.lastError;
        if (lastError) {
          reject(new Error(lastError.message));
          return;
        }
        resolve(response);
      });
      if (!expectResponse) {
        // Fire-and-forget: resolve without waiting for the callback.
        completed = true;
        resolve(undefined);
      }
    } catch (error) {
      // sendMessage itself threw (e.g. extension context invalidated).
      if (!completed) {
        if (timer) clearTimeout(timer);
        reject(error);
      }
    }
  });
}
// Fire-and-forget send: resolves immediately, ignoring any response.
export function notifyMessage(message) {
  return sendMessageWithTimeout(message, { expectResponse: false });
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/prompt-manager.js | JavaScript | // T028: Prompt Manager with IndexedDB operations
// Handles CRUD operations for prompts in the Prompt Library
const DB_NAME = 'SmarterPanelDB';
const DB_VERSION = 4; // Upgraded to add modifiedAt field for conversations
const PROMPTS_STORE = 'prompts';
const CONVERSATIONS_STORE = 'conversations'; // also consumed by history-manager.js
// T069: Input validation limits enforced by sanitizeString()/validatePromptData().
const MAX_TITLE_LENGTH = 200;
const MAX_CONTENT_LENGTH = 50000;
const MAX_CATEGORY_LENGTH = 50;
const MAX_TAG_LENGTH = 30;
const MAX_TAGS_COUNT = 20;
// Cached IndexedDB connection; reset to null when found closed (see ensureDb).
let db = null;
// Retry policy for transient IndexedDB failures (exponential backoff).
const MAX_IDB_ATTEMPTS = 3;
const RETRY_DELAY_BASE_MS = 100;
// Detect IndexedDB quota exhaustion: modern name or legacy numeric code 22.
function isQuotaExceeded(error) {
  if (!error) {
    return false;
  }
  const byName = error.name === 'QuotaExceededError';
  const byLegacyCode = error.code === 22;
  return byName || byLegacyCode;
}
// User-facing error raised when prompt storage quota is exhausted.
function buildQuotaError() {
  return new Error(
    'Storage quota exceeded. Delete unused prompts to free space.'
  );
}
// Ensure the module-level db connection is open; initPromptDB() assigns the
// module-level `db` itself, so no assignment is needed here.
async function ensureDb() {
  if (db !== null) {
    try {
      // Accessing objectStoreNames throws once the connection is closed.
      void db.objectStoreNames;
      return;
    } catch (_) {
      db = null;
    }
  }
  await initPromptDB();
}
// T069: Input sanitization helpers
// T069: coerce input to a trimmed, length-capped string; non-strings → ''.
function sanitizeString(str, maxLength) {
  return typeof str === 'string' ? str.trim().slice(0, maxLength) : '';
}
// Validate prompt input before saving; returns an array of human-readable
// error strings (empty when the data is acceptable).
function validatePromptData(promptData) {
  const errors = [];
  const { content, title, category, tags } = promptData;
  if (typeof content !== 'string' || content.trim().length === 0) {
    errors.push('Prompt content is required');
  }
  if (content && content.length > MAX_CONTENT_LENGTH) {
    errors.push(`Prompt content must be less than ${MAX_CONTENT_LENGTH} characters`);
  }
  if (title && title.length > MAX_TITLE_LENGTH) {
    errors.push(`Title must be less than ${MAX_TITLE_LENGTH} characters`);
  }
  if (category && category.length > MAX_CATEGORY_LENGTH) {
    errors.push(`Category must be less than ${MAX_CATEGORY_LENGTH} characters`);
  }
  if (tags && tags.length > MAX_TAGS_COUNT) {
    errors.push(`Maximum ${MAX_TAGS_COUNT} tags allowed`);
  }
  return errors;
}
// T029: Initialize IndexedDB
// T029: Open (and, when needed, migrate) the shared IndexedDB database.
// Upgrades run incrementally against event.oldVersion so a user can jump
// from any prior version straight to DB_VERSION. Resolves with the open
// connection, which is also cached in the module-level `db`.
export async function initPromptDB() {
  return new Promise((resolve, reject) => {
    const request = indexedDB.open(DB_NAME, DB_VERSION);
    request.onerror = () => reject(request.error);
    request.onsuccess = () => {
      db = request.result;
      // Drop the cache when the browser closes the connection so ensureDb
      // reopens it on next use.
      db.onclose = () => {
        db = null;
      };
      resolve(db);
    };
    request.onupgradeneeded = (event) => {
      const db = event.target.result;
      const oldVersion = event.oldVersion;
      // Create prompts object store (version 1)
      if (oldVersion < 1) {
        const promptsStore = db.createObjectStore(PROMPTS_STORE, {
          keyPath: 'id',
          autoIncrement: true
        });
        // Create indexes for efficient querying
        promptsStore.createIndex('title', 'title', { unique: false });
        promptsStore.createIndex('category', 'category', { unique: false });
        promptsStore.createIndex('tags', 'tags', { unique: false, multiEntry: true });
        promptsStore.createIndex('createdAt', 'createdAt', { unique: false });
        promptsStore.createIndex('lastUsed', 'lastUsed', { unique: false });
        promptsStore.createIndex('isFavorite', 'isFavorite', { unique: false });
      }
      // Create conversations object store (version 2)
      if (oldVersion < 2) {
        const conversationsStore = db.createObjectStore(CONVERSATIONS_STORE, {
          keyPath: 'id',
          autoIncrement: true
        });
        // Create indexes for efficient querying
        conversationsStore.createIndex('provider', 'provider', { unique: false });
        conversationsStore.createIndex('timestamp', 'timestamp', { unique: false });
        conversationsStore.createIndex('tags', 'tags', { unique: false, multiEntry: true });
        conversationsStore.createIndex('isFavorite', 'isFavorite', { unique: false });
        conversationsStore.createIndex('searchText', 'searchText', { unique: false });
      }
      // Add conversationId index (version 3). Existing stores must be reached
      // through the versionchange transaction, not createObjectStore.
      if (oldVersion < 3) {
        const transaction = event.target.transaction;
        const conversationsStore = transaction.objectStore(CONVERSATIONS_STORE);
        // Add index for conversationId to enable efficient duplicate checking
        conversationsStore.createIndex('conversationId', 'conversationId', { unique: false });
      }
      // Version 4 added the modifiedAt record field; IndexedDB is schemaless
      // per-record, so no structural migration is required here.
    };
  });
}
// T030 & T069: Save new prompt with validation
// Validates and sanitizes the payload, then adds it to the prompts store.
// Resolves with the stored record including its auto-assigned id.
// Throws if validation fails (all messages joined into one Error).
export async function savePrompt(promptData) {
  await ensureDb();
  const problems = validatePromptData(promptData);
  if (problems.length > 0) {
    throw new Error(problems.join(', '));
  }
  // Normalize tags: cap the count, sanitize each, and drop any that trim to ''.
  const tags = Array.isArray(promptData.tags)
    ? promptData.tags
        .slice(0, MAX_TAGS_COUNT)
        .map((tag) => sanitizeString(tag, MAX_TAG_LENGTH))
        .filter((tag) => tag)
    : [];
  const prompt = {
    title: sanitizeString(promptData.title || 'Untitled Prompt', MAX_TITLE_LENGTH),
    content: sanitizeString(promptData.content, MAX_CONTENT_LENGTH),
    category: sanitizeString(promptData.category || 'General', MAX_CATEGORY_LENGTH),
    tags,
    variables: Array.isArray(promptData.variables) ? promptData.variables : [],
    isFavorite: Boolean(promptData.isFavorite),
    createdAt: promptData.createdAt || Date.now(),
    lastUsed: promptData.lastUsed || null,
    useCount: promptData.useCount || 0
  };
  return runWithRetry(() => {
    const store = db.transaction([PROMPTS_STORE], 'readwrite').objectStore(PROMPTS_STORE);
    return wrapRequest(store.add(prompt), (newId) => ({ ...prompt, id: newId }));
  });
}
// T031: Get prompt by ID
// Resolves with the stored record, or undefined when the id is unknown.
export async function getPrompt(id) {
  await ensureDb();
  return runWithRetry(() => {
    const store = db.transaction([PROMPTS_STORE], 'readonly').objectStore(PROMPTS_STORE);
    return wrapRequest(store.get(id), (record) => record);
  });
}
// T032: Get all prompts
// Resolves with every record in the prompts store (empty array when none).
export async function getAllPrompts() {
  await ensureDb();
  return runWithRetry(() => {
    const store = db.transaction([PROMPTS_STORE], 'readonly').objectStore(PROMPTS_STORE);
    return wrapRequest(store.getAll(), (records) => records || []);
  });
}
// T033: Update existing prompt
// Shallow-merges `updates` over the stored record (id is always preserved)
// inside a single readwrite transaction. Rejects when the id does not exist.
export async function updatePrompt(id, updates) {
  await ensureDb();
  return runWithRetry(() => new Promise((resolve, reject) => {
    const store = db.transaction([PROMPTS_STORE], 'readwrite').objectStore(PROMPTS_STORE);
    const readReq = store.get(id);
    readReq.onerror = () => reject(readReq.error);
    readReq.onsuccess = () => {
      const existing = readReq.result;
      if (!existing) {
        reject(new Error(`Prompt with id ${id} not found`));
        return;
      }
      const merged = { ...existing, ...updates, id };
      wrapRequest(store.put(merged), () => merged).then(resolve, reject);
    };
  }));
}
// T034: Delete prompt
// Removes the record with the given id; resolves true whether or not it existed
// (IDBObjectStore.delete succeeds for missing keys).
export async function deletePrompt(id) {
  await ensureDb();
  return runWithRetry(() => {
    const store = db.transaction([PROMPTS_STORE], 'readwrite').objectStore(PROMPTS_STORE);
    return wrapRequest(store.delete(id), () => true);
  });
}
// Run an IndexedDB operation, delegating failures (thrown or rejected) to
// handleIdbError, which retries with backoff or rejects. `operation` may
// return a plain value or a promise.
function runWithRetry(operation, attempt = 1) {
  return new Promise((resolve, reject) => {
    const onFailure = (error) => handleIdbError(error, operation, attempt, resolve, reject);
    try {
      // Promise.resolve flattens both plain values and already-wrapped promises.
      Promise.resolve(operation()).then(resolve).catch(onFailure);
    } catch (error) {
      onFailure(error);
    }
  });
}
// Decide what to do with a failed IndexedDB operation:
// quota errors reject immediately (retrying cannot help), other errors retry
// with exponential backoff until MAX_IDB_ATTEMPTS is exhausted.
function handleIdbError(error, operation, attempt, resolve, reject) {
  if (isQuotaExceeded(error)) {
    reject(buildQuotaError());
  } else if (attempt >= MAX_IDB_ATTEMPTS) {
    reject(error);
  } else {
    // Backoff doubles each attempt: base, 2*base, 4*base, ...
    const backoff = RETRY_DELAY_BASE_MS * 2 ** (attempt - 1);
    setTimeout(() => {
      runWithRetry(operation, attempt + 1).then(resolve, reject);
    }, backoff);
  }
}
// Adapt an IDBRequest into a promise. On success the result is passed through
// `mapper` when one is supplied; on failure quota errors are normalized via
// buildQuotaError(), everything else rejects with the raw request error.
function wrapRequest(request, mapper) {
  return new Promise((resolve, reject) => {
    request.onsuccess = () => {
      resolve(typeof mapper === 'function' ? mapper(request.result) : request.result);
    };
    request.onerror = () => {
      const err = request.error;
      reject(isQuotaExceeded(err) ? buildQuotaError() : err);
    };
  });
}
// T035: Search prompts by text (title or content)
// Case-insensitive substring match against title, content, or any tag.
// Loads all prompts into memory — acceptable for a personal prompt library.
export async function searchPrompts(searchText) {
  await ensureDb();
  const needle = searchText.toLowerCase();
  const matches = (prompt) =>
    prompt.title.toLowerCase().includes(needle) ||
    prompt.content.toLowerCase().includes(needle) ||
    prompt.tags.some((tag) => tag.toLowerCase().includes(needle));
  const allPrompts = await getAllPrompts();
  return allPrompts.filter(matches);
}
// T036: Filter prompts by category
// Uses the 'category' index for the lookup; a missing or non-string category
// is treated as "no filter" and returns every prompt.
export async function getPromptsByCategory(category) {
  await ensureDb();
  if (!category || typeof category !== 'string') {
    return getAllPrompts();
  }
  return runWithRetry(() => {
    const index = db
      .transaction([PROMPTS_STORE], 'readonly')
      .objectStore(PROMPTS_STORE)
      .index('category');
    return wrapRequest(index.getAll(category), (records) => records || []);
  });
}
// T037: Get favorite prompts
// Filters in memory rather than via the 'isFavorite' index because boolean
// index queries are unreliable across browsers (per original author's note).
export async function getFavoritePrompts() {
  await ensureDb();
  const allPrompts = await getAllPrompts();
  const favorites = [];
  for (const prompt of allPrompts) {
    if (prompt.isFavorite === true) {
      favorites.push(prompt);
    }
  }
  return favorites;
}
// T038: Toggle favorite status
// Flips isFavorite on the stored prompt; rejects when the id is unknown.
export async function toggleFavorite(id) {
  const existing = await getPrompt(id);
  if (!existing) {
    throw new Error(`Prompt ${id} not found`);
  }
  const isFavorite = !existing.isFavorite;
  return updatePrompt(id, { isFavorite });
}
// T039: Record prompt usage
// Stamps lastUsed with the current time and bumps useCount by one.
// Rejects when the id is unknown.
export async function recordPromptUsage(id) {
  const existing = await getPrompt(id);
  if (!existing) {
    throw new Error(`Prompt ${id} not found`);
  }
  const useCount = (existing.useCount || 0) + 1;
  return updatePrompt(id, { lastUsed: Date.now(), useCount });
}
// T040: Get all categories
// Returns the distinct categories across all prompts, sorted alphabetically.
export async function getAllCategories() {
  const prompts = await getAllPrompts();
  const unique = new Set();
  for (const prompt of prompts) {
    unique.add(prompt.category);
  }
  return [...unique].sort();
}
// T041: Get all tags
// Returns the distinct tags across all prompts, sorted alphabetically.
export async function getAllTags() {
  const prompts = await getAllPrompts();
  const unique = new Set(prompts.flatMap((prompt) => prompt.tags));
  return [...unique].sort();
}
// T042: Export all prompts as JSON
// Wraps the full prompt list in a versioned, timestamped envelope suitable
// for importPrompts().
export async function exportPrompts() {
  const prompts = await getAllPrompts();
  return {
    version: '1.0',
    exportDate: new Date().toISOString(),
    prompts
  };
}
// T043: Import prompts from JSON
// Imports every prompt from an exportPrompts()-style envelope.
//   mergeStrategy 'overwrite': always save (new ids are assigned; duplicates accumulate).
//   mergeStrategy 'skip' (default): skip prompts whose title already matches an
//   existing prompt via searchPrompts().
// Returns { imported, skipped, errors } totals; per-prompt failures are
// collected in `errors` rather than aborting the batch.
// Throws on a structurally invalid envelope.
export async function importPrompts(data, mergeStrategy = 'skip') {
  if (!data || !data.prompts || !Array.isArray(data.prompts)) {
    throw new Error('Invalid import data format');
  }
  // BUG FIX: unknown strategies previously matched neither branch, so every
  // prompt was silently dropped without being counted. Normalize anything
  // other than 'overwrite' to the default 'skip' behavior.
  const strategy = mergeStrategy === 'overwrite' ? 'overwrite' : 'skip';
  const results = {
    imported: 0,
    skipped: 0,
    errors: []
  };
  for (const promptData of data.prompts) {
    try {
      // Remove id to let IndexedDB assign new ones
      const { id, ...promptWithoutId } = promptData;
      if (strategy === 'overwrite') {
        await savePrompt(promptWithoutId);
        results.imported++;
        continue;
      }
      // 'skip': only import when no existing prompt shares the title.
      // Guard against a missing title, which would crash searchPrompts().
      const existing = typeof promptData.title === 'string' && promptData.title
        ? await searchPrompts(promptData.title)
        : [];
      if (existing.length === 0) {
        await savePrompt(promptWithoutId);
        results.imported++;
      } else {
        results.skipped++;
      }
    } catch (error) {
      results.errors.push({ prompt: promptData.title, error: error.message });
    }
  }
  return results;
}
// T044: Clear all prompts (with confirmation)
// Empties the prompts store entirely. Callers are expected to confirm with
// the user first; this function itself does not prompt.
export async function clearAllPrompts() {
  await ensureDb();
  return runWithRetry(() => {
    const store = db.transaction([PROMPTS_STORE], 'readwrite').objectStore(PROMPTS_STORE);
    return wrapRequest(store.clear(), () => true);
  });
}
// T071: Get recently used prompts (ordered by lastUsed DESC)
// Prompts that were never used (lastUsed null/undefined) are excluded.
export async function getRecentlyUsedPrompts(limit = 5) {
  const prompts = await getAllPrompts();
  // `!= null` deliberately catches both null and undefined.
  const used = prompts.filter((prompt) => prompt.lastUsed != null);
  used.sort((a, b) => b.lastUsed - a.lastUsed);
  return used.slice(0, limit);
}
// T072: Get top favorites (ordered by useCount DESC, favorites only)
export async function getTopFavorites(limit = 5) {
  const favorites = await getFavoritePrompts();
  const ranked = [...favorites].sort(
    (a, b) => (b.useCount || 0) - (a.useCount || 0)
  );
  return ranked.slice(0, limit);
}
// Import default library with title-based deduplication
// Saves each library prompt unless a prompt with the same (case-insensitive,
// trimmed) title already exists — either in storage or earlier in this batch.
// Returns { imported, skipped, errors }; per-prompt failures do not abort the run.
export async function importDefaultLibrary(libraryData) {
  if (!libraryData || !libraryData.prompts || !Array.isArray(libraryData.prompts)) {
    throw new Error('Invalid library data format');
  }
  const results = {
    imported: 0,
    skipped: 0,
    errors: []
  };
  // Seed the dedupe set from everything already stored.
  const seenTitles = new Set();
  for (const existing of await getAllPrompts()) {
    seenTitles.add(existing.title.toLowerCase().trim());
  }
  for (const promptData of libraryData.prompts) {
    try {
      const titleKey = promptData.title.toLowerCase().trim();
      if (seenTitles.has(titleKey)) {
        results.skipped++;
        continue;
      }
      await savePrompt(promptData);
      // Record the title so duplicates later in this batch are skipped too.
      seenTitles.add(titleKey);
      results.imported++;
    } catch (error) {
      results.errors.push({
        prompt: promptData.title,
        error: error.message
      });
    }
  }
  return results;
}
// Initialize DB on module load so the first data call does not pay the open cost.
// Failures are only logged, never rethrown — presumably ensureDb() reopens the
// connection on demand later (its body is elsewhere in this file; verify).
initPromptDB().catch(console.error);
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/providers.js | JavaScript | export const PROVIDERS = [
  // Static registry of sidebar AI providers. Each entry carries a stable id,
  // display name, base URL, light/dark icon paths, and a default enabled flag;
  // per-user enablement is resolved separately in getEnabledProviders().
  {
    id: 'chatgpt',
    name: 'ChatGPT',
    url: 'https://chatgpt.com',
    icon: '/icons/providers/chatgpt.png',
    iconDark: '/icons/providers/dark/chatgpt.png',
    enabled: true
  },
  {
    id: 'claude',
    name: 'Claude',
    url: 'https://claude.ai',
    icon: '/icons/providers/claude.png',
    iconDark: '/icons/providers/dark/claude.png',
    enabled: true
  },
  {
    id: 'gemini',
    name: 'Gemini',
    url: 'https://gemini.google.com',
    icon: '/icons/providers/gemini.png',
    iconDark: '/icons/providers/dark/gemini.png',
    enabled: true
  },
  {
    id: 'google',
    name: 'Google',
    url: 'https://www.google.com/search?udm=50',
    icon: '/icons/providers/google.png',
    iconDark: '/icons/providers/dark/google.png',
    enabled: true
  },
  {
    id: 'grok',
    name: 'Grok',
    url: 'https://grok.com',
    icon: '/icons/providers/grok.png',
    iconDark: '/icons/providers/dark/grok.png',
    enabled: true
  },
  {
    id: 'copilot',
    name: 'Microsoft Copilot',
    url: 'https://copilot.microsoft.com',
    icon: '/icons/providers/copilot.png',
    iconDark: '/icons/providers/dark/copilot.png',
    enabled: true
  },
  {
    id: 'deepseek',
    name: 'DeepSeek',
    url: 'https://chat.deepseek.com',
    icon: '/icons/providers/deepseek.png',
    iconDark: '/icons/providers/dark/deepseek.png',
    enabled: true
  }
];
// Look up a provider by its stable id.
// Returns undefined when no provider matches (same contract as Array.find).
export function getProviderById(id) {
  for (const provider of PROVIDERS) {
    if (provider.id === id) {
      return provider;
    }
  }
  return undefined;
}
// Async variant of getProviderById that resolves null (not undefined) for an
// unknown id. Currently no per-provider settings are merged in despite the
// name — the registry entry is returned as-is.
export async function getProviderByIdWithSettings(id) {
  const match = PROVIDERS.find((provider) => provider.id === id);
  return match ?? null;
}
// Return the providers the user has enabled, preserving registry order.
// Falls back to all seven known provider ids when nothing is stored yet.
export async function getEnabledProviders() {
  const { enabledProviders } = await chrome.storage.sync.get({
    enabledProviders: ['chatgpt', 'claude', 'gemini', 'google', 'grok', 'copilot', 'deepseek']
  });
  const enabled = new Set(enabledProviders);
  return PROVIDERS.filter((provider) => enabled.has(provider.id));
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
modules/settings.js | JavaScript | const DEFAULT_SETTINGS = {
  // Canonical defaults: chrome.storage.*.get(DEFAULT_SETTINGS) fills any
  // missing stored keys from this object, and importSettings() treats these
  // keys as the whitelist of recognized setting names.
  enabledProviders: ['chatgpt', 'claude', 'gemini', 'grok', 'deepseek'],
  defaultProvider: 'chatgpt',
  lastSelectedProvider: 'chatgpt',
  rememberLastProvider: true, // When true, sidebar opens last selected provider; when false, always opens default provider
  theme: 'auto',
  keyboardShortcutEnabled: true,
  enterKeyBehavior: {
    enabled: true,
    preset: 'swapped', // 'default', 'swapped', 'slack', 'discord', 'custom'
    newlineKey: 'Enter',
    newlineModifiers: { shift: false, ctrl: false, alt: false, meta: false },
    sendKey: 'Enter',
    sendModifiers: { shift: true, ctrl: false, alt: false, meta: false }
  }
};
// Load all settings, merged over DEFAULT_SETTINGS.
// Prefers the synced storage area; falls back to device-local storage when
// sync is unavailable (e.g. not signed in or API error).
export async function getSettings() {
  try {
    return await chrome.storage.sync.get(DEFAULT_SETTINGS);
  } catch (error) {
    console.warn('chrome.storage.sync unavailable, using local', error);
    return await chrome.storage.local.get(DEFAULT_SETTINGS);
  }
}
// Convenience accessor for a single setting value by key.
export async function getSetting(key) {
  const all = await getSettings();
  return all[key];
}
// Persist a single key/value pair, preferring synced storage and falling back
// to device-local storage when sync fails.
export async function saveSetting(key, value) {
  const patch = { [key]: value };
  try {
    await chrome.storage.sync.set(patch);
    return;
  } catch (error) {
    console.warn('chrome.storage.sync unavailable, using local', error);
  }
  await chrome.storage.local.set(patch);
}
// Persist a batch of settings, preferring synced storage and falling back to
// device-local storage when sync fails.
export async function saveSettings(settings) {
  try {
    await chrome.storage.sync.set(settings);
    return;
  } catch (error) {
    console.warn('chrome.storage.sync unavailable, using local', error);
  }
  await chrome.storage.local.set(settings);
}
// Wipe all stored settings and reseed with DEFAULT_SETTINGS.
// Tries the synced area first; on failure, resets the local area instead.
export async function resetSettings() {
  const wipeAndSeed = async (area) => {
    await area.clear();
    await area.set(DEFAULT_SETTINGS);
  };
  try {
    await wipeAndSeed(chrome.storage.sync);
  } catch (error) {
    console.warn('chrome.storage.sync unavailable, using local', error);
    await wipeAndSeed(chrome.storage.local);
  }
}
// Export the full, default-merged settings object (same shape getSettings returns).
export async function exportSettings() {
  return getSettings();
}
// Import a settings object, keeping only keys recognized in DEFAULT_SETTINGS.
// Unrecognized keys are reported in `skipped`/`errors` rather than stored.
// Returns { success, imported, skipped, errors } describing what happened.
export async function importSettings(settings) {
  const validKeys = new Set(Object.keys(DEFAULT_SETTINGS));
  const imported = {};
  const skipped = [];
  const errors = {};
  for (const [key, value] of Object.entries(settings)) {
    if (validKeys.has(key)) {
      imported[key] = value;
    } else {
      skipped.push(key);
      errors[key] = 'Setting key not recognized';
    }
  }
  await saveSettings(imported);
  return {
    success: true,
    imported: Object.keys(imported),
    skipped,
    errors
  };
}
| xiaolai/insidebar-ai | 216 | A browser extension for Chrome/Edge | JavaScript | xiaolai | xiaolai | inblockchain |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.