file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
scripts/bin/inula-scripts.js | JavaScript | #!/usr/bin/env node
const { join } = require('path')
const { existsSync } = require('fs')
const { sync } = require('@umijs/utils/compiled/cross-spawn')
const chalk = require('@umijs/utils/compiled/chalk').default
const assert = require('assert')

// CLI entry: `inula-scripts <name> [...args]` runs `scripts/<name>.ts` via tsx.
const argv = process.argv.slice(2)
const [name, ...throughArgs] = argv
// Validate the script name before touching the filesystem. Previously a
// missing name crashed with `TypeError: Cannot read properties of undefined
// (reading 'startsWith')` instead of a readable assertion message.
assert(
  name && !name.startsWith('.'),
  `Usage: inula-scripts <name> [...args]`
)
const scriptsPath = join(__dirname, `../${name}.ts`)
assert(
  existsSync(scriptsPath),
  `Executed script '${chalk.red(name)}' does not exist`
)
console.log(chalk.cyan(`inula-scripts: ${name}\n`))
// current dir path may contain spaces
// https://github.com/umijs/umi/issues/9865
const scriptPathAsStr = JSON.stringify(scriptsPath)
const spawn = sync(
  'tsx',
  [scriptPathAsStr, ...throughArgs],
  {
    env: process.env,
    cwd: process.cwd(),
    stdio: 'inherit',
    shell: true
  }
)
// `status` is null when the child was killed by a signal — treat as failure.
if (spawn.status !== 0) {
  console.log(chalk.red(`inula-scripts: ${name} execute fail`))
  process.exit(1)
}
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/bootstrap.ts | TypeScript | import 'zx/globals';
import { PATHS, SCRIPTS } from './.internal/constants';
import { setExcludeFolder } from './.internal/utils';
(async () => {
  // Scaffold every directory under `packages/` that is missing its
  // boilerplate (package.json, README, tsconfig, .fatherrc, src/index.ts).
  const root = PATHS.ROOT;
  const pkgDir = path.join(root, 'packages');
  const pkgs = await fs.readdir(pkgDir);
  for (const pkg of pkgs) {
    // Skip dotfiles and anything that is not a directory.
    if (pkg.charAt(0) === '.') continue;
    if (!(await fs.stat(path.join(pkgDir, pkg))).isDirectory()) continue;
    await bootstrapPkg({
      pkgDir,
      pkg,
      // --force re-generates files even when package.json already exists
      force: argv.force,
    });
  }
  // npm package name: the main `inula` package is unscoped, others are @aluni/*.
  function getName(pkgName: string) {
    if (['inula'].includes(pkgName)) {
      return pkgName;
    } else {
      return `@aluni/${pkgName}`;
    }
  }
  // All packages share the single version tracked in the lerna config.
  function getVersion() {
    return require(PATHS.LERNA_CONFIG).version;
  }
  // Write the boilerplate files for one package directory.
  async function bootstrapPkg(opts: any) {
    const pkgDir = path.join(opts.pkgDir, opts.pkg);
    if (!opts.force && fs.existsSync(path.join(pkgDir, 'package.json'))) {
      console.log(`${opts.pkg} exists`);
    } else {
      const name = getName(opts.pkg);
      // package.json — merge generated defaults with fields preserved from a
      // pre-existing package.json (relevant when --force is used).
      const pkgPkgJSONPath = path.join(pkgDir, 'package.json');
      const hasPkgJSON = fs.existsSync(pkgPkgJSONPath);
      const pkgPkgJSON = hasPkgJSON ? require(pkgPkgJSONPath) : {};
      fs.writeJSONSync(
        pkgPkgJSONPath,
        Object.assign(
          {
            name,
            version: getVersion(),
            description: name,
            main: 'dist/index.js',
            types: 'dist/index.d.ts',
            files: ['dist'],
            scripts: {
              build: SCRIPTS.BUILD,
              'build:deps': SCRIPTS.BUNDLE_DEPS,
              dev: SCRIPTS.DEV,
            },
            repository: {
              type: 'git',
              url: 'https://gitee.com/congxiaochen/inula',
            },
            authors: [
              'xiaohuoni <xiaohuoni@gmail.com> (https://github.com/xiaohuoni)',
            ],
            license: 'MIT',
            bugs: 'https://gitee.com/congxiaochen/inula/issues',
            homepage: `https://gitee.com/congxiaochen/inula/tree/master/packages/${opts.pkg}#readme`,
            publishConfig: {
              access: 'public',
            },
          },
          {
            // Fields copied from the existing package.json win over defaults.
            ...(hasPkgJSON
              ? {
                  authors: pkgPkgJSON.authors,
                  bin: pkgPkgJSON.bin,
                  files: pkgPkgJSON.files,
                  scripts: pkgPkgJSON.scripts,
                  description: pkgPkgJSON.description,
                  dependencies: pkgPkgJSON.dependencies,
                  devDependencies: pkgPkgJSON.devDependencies,
                  compiledConfig: pkgPkgJSON.compiledConfig,
                }
              : {}),
          },
        ),
        { spaces: ' ' },
      );
      // Do not overwrite an existing README
      if (!fs.existsSync(path.join(pkgDir, 'README.md'))) {
        // README.md
        await fs.writeFile(
          path.join(pkgDir, 'README.md'),
          `# ${name}\n\nSee our website [inula](https://inula.cn) for more information.`,
          'utf-8',
        );
      }
      // tsconfig.json
      await fs.writeFile(
        path.join(pkgDir, 'tsconfig.json'),
        `{
"extends": "../../tsconfig.base.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src"]
}\n`,
        'utf-8',
      );
      // .fatherrc.ts
      await fs.writeFile(
        path.join(pkgDir, '.fatherrc.ts'),
        `import { defineConfig } from 'father';
export default defineConfig({
extends: '../../.fatherrc.base.ts',
});\n`,
        'utf-8',
      );
      // src/index.ts
      const srcDir = path.join(pkgDir, 'src');
      if (!fs.existsSync(srcDir)) {
        await $`mkdir ${srcDir}`;
      }
      if (!fs.existsSync(path.join(pkgDir, 'src', 'index.ts'))) {
        await fs.writeFile(
          path.join(pkgDir, 'src', 'index.ts'),
          `
export default () => {
return '${name}';
};\n`.trimStart(),
          'utf-8',
        );
      }
      // set excludeFolder for webstorm
      setExcludeFolder({ pkg: opts.pkg, cwd: root });
      console.log(chalk.green(`${opts.pkg} bootstrapped`));
    }
  }
})();
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/bundleDeps.ts | TypeScript | import { readWantedLockfile } from '@pnpm/lockfile-file';
// @ts-ignore
import ncc from '@vercel/ncc';
import { Package } from 'dts-packer';
import resolve from 'resolve';
import 'zx/globals';
import { PATHS } from './.internal/constants';
// @ts-ignore
// import { Package } from '/Users/chencheng/code/github.com/sorrycc/dts-packer/dist/Package.js';
/**
 * Bundle a single dependency (an npm package via `opts.pkgName`, or a local
 * entry file via `opts.file`) into `<base>/<target>` with ncc, then emit its
 * LICENSE / trimmed package.json and pack its .d.ts files.
 *
 * opts (read dynamically):
 * - pkgName / file: what to bundle (exactly one is used)
 * - base / target: package root and output dir (usually `compiled/<name>`)
 * - webpackExternals / dtsExternals: names left unbundled
 * - clean / minify / dtsOnly / noDts / isDependency: behavior switches
 */
export async function buildDep(opts: any) {
  console.log(chalk.green(`Build dep ${opts.pkgName || opts.file}`));
  const nodeModulesPath = path.join(opts.base, 'node_modules');
  const target = path.join(opts.base, opts.target);
  if (opts.clean) {
    fs.removeSync(target);
  }
  // Resolve the bundle entry: package main (with special cases) or local file.
  let entry;
  if (opts.pkgName) {
    let resolvePath = opts.pkgName;
    // mini-css-extract-plugin breaks when its dist/cjs default entry is used
    if (opts.pkgName === 'mini-css-extract-plugin') {
      resolvePath = 'mini-css-extract-plugin/dist/index';
    }
    entry = resolve.sync(resolvePath, {
      basedir: nodeModulesPath,
    });
  } else {
    entry = path.join(opts.base, opts.file);
  }
  if (!opts.dtsOnly) {
    if (opts.isDependency) {
      // Kept as a real runtime dependency: emit a thin CJS re-export shim
      // instead of bundling the package's code.
      fs.ensureDirSync(target);
      fs.writeFileSync(
        path.join(target, 'index.js'),
        `
const exported = require("${opts.pkgName}");
Object.keys(exported).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (key in exports && exports[key] === exported[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function get() {
return exported[key];
}
});
});
`.trim() + '\n',
        'utf-8',
      );
    } else {
      const filesToCopy: string[] = [];
      // The webpack bundle must include webpack itself, so drop the external.
      if (opts.file === './bundles/webpack/bundle') {
        delete opts.webpackExternals['webpack'];
      }
      // babel pre rewrite
      if (opts.file === './bundles/babel/bundle') {
        // See https://github.com/umijs/umi/issues/10356
        // The inherited `browserslist` config is dynamic loaded
        const babelCorePkg = require.resolve('@babel/core/package.json', {
          paths: [path.join(PATHS.PACKAGES, './bundler-utils')],
        });
        // And need overrides a consistent version of `browserslist` in `packages.json#pnpm.overrides`
        const browserslistPkg = require.resolve('browserslist/package.json', {
          paths: [path.dirname(babelCorePkg)],
        });
        const nodePartFile = path.join(
          path.dirname(browserslistPkg),
          'node.js',
        );
        const originContent = fs.readFileSync(nodePartFile, 'utf-8');
        // https://github.com/browserslist/browserslist/blob/fc5fc088c640466df62a6b6c86154b19be3de821/node.js#L176
        // Hide the dynamic require from ncc so it stays a runtime require.
        fs.writeFileSync(
          nodePartFile,
          originContent.replace(
            /require\(require\.resolve/g,
            'eval("require")(require.resolve',
          ),
          'utf-8',
        );
      }
      let { code, assets } = await ncc(entry, {
        externals: opts.webpackExternals,
        minify: !!opts.minify,
        target: 'es5',
        assetBuilds: false,
        // Keep certain runtime-loaded helper files out of the bundle: record
        // them for manual copying and emit a relative path literal instead.
        customEmit(filePath: string, { id }: any) {
          if (
            (opts.file === './bundles/webpack/bundle' &&
              filePath.endsWith('.runtime.js')) ||
            (opts.pkgName === 'terser-webpack-plugin' &&
              filePath.endsWith('./utils') &&
              id.endsWith('terser-webpack-plugin/dist/index.js')) ||
            (opts.pkgName === 'css-minimizer-webpack-plugin' &&
              filePath.endsWith('./utils') &&
              id.endsWith('css-minimizer-webpack-plugin/dist/index.js'))
          ) {
            filesToCopy.push(
              resolve.sync(filePath, {
                basedir: path.dirname(id),
              }),
            );
            return `'./${path.basename(filePath)}'`;
          }
        },
      });
      // assets
      console.log('filesToCopy', filesToCopy);
      for (const key of Object.keys(assets)) {
        const asset = assets[key];
        const data = asset.source;
        const filePath = path.join(target, key);
        fs.ensureDirSync(path.dirname(filePath));
        fs.writeFileSync(path.join(target, key), data);
      }
      // filesToCopy — rewrite external requires inside the copied helpers so
      // they point at the compiled/externalized module paths.
      for (const fileToCopy of filesToCopy) {
        let content = fs.readFileSync(fileToCopy, 'utf-8');
        for (const key of Object.keys(opts.webpackExternals)) {
          content = content.replace(
            new RegExp(`require\\\(['"]${key}['"]\\\)`, 'gm'),
            `require('${opts.webpackExternals[key]}')`,
          );
          content = content.replace(
            new RegExp(`require\\\(['"]${key}/package(\.json)?['"]\\\)`, 'gm'),
            `require('${opts.webpackExternals[key]}/package.json')`,
          );
        }
        fs.writeFileSync(
          path.join(target, path.basename(fileToCopy)),
          content,
          'utf-8',
        );
      }
      // entry code
      fs.ensureDirSync(target);
      // node 14 support for chalk
      // Strip the `node:` scheme from requires so bundles run on Node 14.
      if (
        [
          'chalk',
          'pkg-up',
          'execa',
          'globby',
          'os-locale',
          'gzip-size',
          'prettier',
          'copy-webpack-plugin',
          'zx',
          '@vitejs/plugin-legacy',
          '@vitejs/plugin-vue',
          '@clack/prompts',
        ].includes(opts.pkgName)
      ) {
        code = code.replace(/require\("node:/g, 'require("');
      }
      // in production, we have the global all `core-js` polyfill (feature/polyfill.ts)
      // don't need the polyfill added by vite
      // https://github.com/vitejs/vite/blob/d953536aae448e2bea0f3a7cb3c0062b16d45597/packages/plugin-legacy/src/index.ts#L257
      if (opts.pkgName === '@vitejs/plugin-legacy') {
        code = code.replace(
          'await detectPolyfills(`Promise.resolve(); Promise.all();`',
          'await (()=>{})(`Promise.resolve(); Promise.all();`',
        );
      }
      // Guard: a remaining `node:` require would crash on Node 14; a few
      // packages are known-safe and skipped.
      if (
        code.includes('"node:') &&
        opts.pkgName && // skip local file bundle like babel/bundle.js
        opts.pkgName !== 'stylelint-declaration-block-no-ignored-properties' &&
        opts.pkgName !== 'vite' &&
        opts.pkgName !== 'https-proxy-agent' &&
        opts.pkgName !== 'socks-proxy-agent'
      ) {
        throw new Error(`${opts.pkgName} has "node:"`);
      }
      // patch less resolve path to umi compiled path
      if (opts.pkgName === 'vite') {
        code = code.replace(
          'loadPreprocessor("less"',
          'loadPreprocessor("@umijs/bundler-utils/compiled/less"',
        );
      }
      fs.writeFileSync(path.join(target, 'index.js'), code, 'utf-8');
      // patch
      if (opts.pkgName === 'mini-css-extract-plugin') {
        fs.copySync(
          path.join(nodeModulesPath, opts.pkgName, 'dist', 'hmr'),
          path.join(target, 'hmr'),
        );
        fs.copyFileSync(
          path.join(nodeModulesPath, opts.pkgName, 'dist', 'utils.js'),
          path.join(target, 'utils.js'),
        );
        fs.copyFileSync(
          path.join(
            nodeModulesPath,
            opts.pkgName,
            'dist',
            'loader-options.json',
          ),
          path.join(target, 'loader-options.json'),
        );
      }
      if (opts.pkgName === 'fork-ts-checker-webpack-plugin') {
        fs.removeSync(path.join(target, 'typescript.js'));
      }
      // for bundler-webpack
      if (opts.pkgName === 'webpack') {
        fs.writeFileSync(
          path.join(opts.base, 'compiled/express.d.ts'),
          `import e = require('@umijs/bundler-utils/compiled/express');\nexport = e;`,
          'utf-8',
        );
      }
      // validate babel dynamic dep version
      if (opts.file === './bundles/babel/bundle') {
        const pkg = require(path.join(opts.base, 'package.json'));
        // NOTE(review): this .then() is not awaited — the version check runs
        // after buildDep resolves; its throw becomes an unhandled rejection.
        readWantedLockfile(PATHS.ROOT, {
          ignoreIncompatible: true,
        }).then((lockfile) => {
          const unicodePkgName = 'regenerate-unicode-properties';
          const [, unicodeParentPkg] = Object.entries(lockfile!.packages!).find(
            ([name]) => name.startsWith('/regexpu-core/'),
          )!;
          if (
            unicodeParentPkg.dependencies![unicodePkgName] !==
            pkg.dependencies[unicodePkgName]
          ) {
            throw new Error(`regenerate-unicode-properties is outdated, please update it to ${
              unicodeParentPkg.dependencies![unicodePkgName]
            } in bundler-utils/package.json before update compiled files!
ref: https://github.com/umijs/umi/pull/7972`);
          }
        });
      }
    }
  }
  // license & package.json
  if (opts.pkgName) {
    if (opts.isDependency) {
      // Runtime dependency: simply re-export the package's own types.
      fs.ensureDirSync(target);
      fs.writeFileSync(
        path.join(target, 'index.d.ts'),
        `export * from '${opts.pkgName}';\n`,
        'utf-8',
      );
    } else {
      fs.ensureDirSync(target);
      const pkgRoot = path.dirname(
        resolve.sync(`${opts.pkgName}/package.json`, {
          basedir: opts.base,
        }),
      );
      if (fs.existsSync(path.join(pkgRoot, 'LICENSE'))) {
        fs.writeFileSync(
          path.join(target, 'LICENSE'),
          fs.readFileSync(path.join(pkgRoot, 'LICENSE'), 'utf-8'),
          'utf-8',
        );
      }
      // Minimal package.json for the compiled copy — identity/typing fields only.
      const { name, author, license, types, typing, typings, version } =
        JSON.parse(
          fs.readFileSync(path.join(pkgRoot, 'package.json'), 'utf-8'),
        );
      fs.writeJSONSync(path.join(target, 'package.json'), {
        ...{},
        ...{ name },
        ...{ version },
        ...(author ? { author } : undefined),
        ...(license ? { license } : undefined),
        ...(types ? { types } : undefined),
        ...(typing ? { typing } : undefined),
        ...(typings ? { typings } : undefined),
      });
      // dts
      if (opts.noDts) {
        console.log(chalk.yellow(`Do not build dts for ${opts.pkgName}`));
      } else {
        // dts-packer copies the package's type files into `target` (side
        // effect of construction).
        new Package({
          cwd: opts.base,
          name: opts.pkgName,
          typesRoot: target,
          externals: opts.dtsExternals,
        });
        // patch
        if (opts.pkgName === 'webpack-5-chain') {
          const filePath = path.join(target, 'types/index.d.ts');
          fs.writeFileSync(
            filePath,
            fs
              .readFileSync(filePath, 'utf-8')
              .replace(
                `} from 'webpack';`,
                `} from '@umijs/bunder-webpack/compiled/webpack';`,
              ),
            'utf-8',
          );
        }
        if (opts.pkgName === 'lodash') {
          // TODO
          // fs.copySync()
        }
        // for bundler-utils
        if (opts.pkgName === 'less') {
          const dtsPath = path.join(opts.base, 'compiled/less/index.d.ts');
          fs.writeFileSync(
            dtsPath,
            fs
              .readFileSync(dtsPath, 'utf-8')
              .replace(
                'declare module "less"',
                'declare module "@umijs/bundler-utils/compiled/less"',
              ),
            'utf-8',
          );
        }
      }
    }
  }
  // copy files in packages
  if (opts.file && !opts.dtsOnly) {
    const packagesDir = path.join(
      opts.base,
      path.dirname(opts.file),
      'packages',
    );
    if (fs.existsSync(packagesDir)) {
      const files = fs.readdirSync(packagesDir);
      files.forEach((file) => {
        if (file.charAt(0) === '.') return;
        if (!fs.statSync(path.join(packagesDir, file)).isFile()) return;
        fs.copyFileSync(path.join(packagesDir, file), path.join(target, file));
      });
    }
  }
}
/**
 * Compile and bundle the dependencies declared in the `compiledConfig`
 * field of the current package's package.json.
 */
(async () => {
  const base = process.cwd();
  const pkg = fs.readJSONSync(path.join(base, 'package.json'));
  const pkgDeps = pkg.dependencies || {};
  const {
    deps,
    externals = {},
    noMinify = [],
    extraDtsDeps = [],
    extraDtsExternals = [],
    excludeDtsDeps = [],
  } = pkg.compiledConfig;
  const webpackExternals: Record<string, string> = {};
  const dtsExternals = [...extraDtsDeps, ...extraDtsExternals];
  Object.keys(externals).forEach((name) => {
    const val = externals[name];
    // '$$LOCAL' means "externalize to this package's own compiled copy".
    if (val === '$$LOCAL') {
      dtsExternals.push(name);
      webpackExternals[name] = `${pkg.name}/compiled/${name}`;
    } else {
      webpackExternals[name] = val;
    }
  });
  // --dep builds a single dep; --extra-dts-only builds only the dts-only list.
  for (const dep of argv.dep
    ? [argv.dep]
    : argv['extra-dts-only']
    ? extraDtsDeps
    : deps.concat(extraDtsDeps)) {
    // Entries starting with '.' are local files, not package names.
    const isDep = dep.charAt(0) !== '.';
    await buildDep({
      ...(isDep ? { pkgName: dep } : { file: dep }),
      target: `compiled/${isDep ? dep : path.basename(path.dirname(dep))}`,
      base,
      webpackExternals,
      dtsExternals,
      clean: argv.clean,
      minify: !noMinify.includes(dep),
      dtsOnly: extraDtsDeps.includes(dep),
      noDts: excludeDtsDeps.includes(dep),
      isDependency: dep in pkgDeps,
    });
  }
})();
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/changeset.ts | TypeScript | import { getPackages } from '@manypkg/get-packages';
import 'zx/globals';
const root = path.join(__dirname, '../');
const changesetConfig = path.join(__dirname, '../.changeset/config.json');
const getWorkspaces = async () => getPackages(root);
/**
 * Refresh the `.changeset/config.json` ignore list with every private
 * package in the workspace, then launch the interactive `changeset` CLI.
 */
const change = async () => {
  const ws = await getWorkspaces();
  // Private packages (apps/examples) must never receive changesets.
  const appNames: string[] = [];
  ws.packages.forEach((submodule) => {
    const isPrivate = submodule.packageJson?.private;
    if (isPrivate) {
      appNames.push(submodule.packageJson.name);
    }
  });
  const config = await fs.readJson(changesetConfig, { encoding: 'utf-8' });
  config.ignore = appNames;
  await fs.writeFile(changesetConfig, `${JSON.stringify(config, null, 2)}\n`, {
    encoding: 'utf-8',
  });
  console.log(
    chalk.green(`[changeset-config]: refresh config ignore list complete`),
  );
  // Await the CLI so a failure rejects `change()` instead of floating.
  await $`changeset`;
};
// Surface async failures with a non-zero exit code; previously the returned
// promise was dropped and rejections went unhandled.
change().catch((e) => {
  console.error(e);
  process.exit(1);
});
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/checkPackageFiles.ts | TypeScript | import { glob, lodash, logger } from '@umijs/utils';
import { isMatch } from 'matcher';
import 'zx/globals';
import { PATHS, SCRIPTS } from './.internal/constants';
import { eachPkg, getPkgs } from './.internal/utils';
// Directory entries that are allowed to be absent from package.json `files`.
const COMMON_IGNORES = [
  // default included
  'bin',
  // deps
  'node_modules',
  // for test
  'fixtures',
  'examples',
  'scripts',
  // source
  'src',
  'bundles',
  // doc
  '*.md',
  // config files
  'tsconfig*.json',
  '*.config.js',
  'package.json',
  'typings.d.ts',
  // extra
  'devToolApp',
];
// check packages/*
let missingDetected = false;
eachPkg(getPkgs(), ({ pkgJson, dir, name, pkgPath }) => {
  /**
   * check `files` missing
   */
  const files = fs.readdirSync(dir).filter((f) => {
    return !isMatch(f, COMMON_IGNORES) && !f.startsWith('.');
  });
  const missingAddFiles = files.filter((f) => !isMatch(f, pkgJson.files));
  if (missingAddFiles.length > 0) {
    logger.error('Checking package:', name);
    logger.error(
      ` "${missingAddFiles.join(
        ', ',
      )}" missing in the package.json files field`,
    );
    missingDetected = true;
  }
  /**
   * check jest `test` script exist
   */
  const testFiles = glob.sync(`${path.join(dir)}/src/**/*.test.ts`);
  const oldPkgJson = lodash.cloneDeep(pkgJson);
  // Keep the `test` script in sync with whether the package has tests.
  if (testFiles.length) {
    pkgJson.scripts.test = SCRIPTS.TEST_TURBO;
  } else {
    delete pkgJson.scripts.test;
  }
  pkgJson.scripts['build:deps'] = SCRIPTS.BUNDLE_DEPS;
  // Only rewrite package.json when something actually changed.
  if (!lodash.isEqual(oldPkgJson, pkgJson)) {
    fs.writeFileSync(pkgPath, `${JSON.stringify(pkgJson, null, 2)}\n`, 'utf-8');
  }
});
if (missingDetected) {
  process.exit(1);
} else {
  logger.ready(`Check packages files success`);
}
// check examples/*
const EXAMPLE_DIR = PATHS.EXAMPLES;
eachPkg(
  getPkgs({ base: EXAMPLE_DIR }),
  ({ name, pkgJson, pkgPath }) => {
    /**
     * check example `package.json` includes required fields
     */
    logger.info(`Checking ${chalk.blue('example')}:`, name);
    const oldPkgJson = lodash.cloneDeep(pkgJson);
    // Examples must be named @example/<dir> and marked private.
    const expectName = `@example/${name}`;
    if (pkgJson.name !== expectName) {
      pkgJson.name = expectName;
      logger.warn(
        chalk.yellow(`Change '${name}' example name to '${expectName}'`),
      );
    }
    if (pkgJson.private !== true) {
      pkgJson.private = true;
      logger.warn(chalk.yellow(`Set '${name}' example as private package`));
    }
    if (!lodash.isEqual(pkgJson, oldPkgJson)) {
      fs.writeFileSync(
        pkgPath,
        `${JSON.stringify(pkgJson, null, 2)}\n`,
        'utf-8',
      );
    }
  },
  {
    base: EXAMPLE_DIR,
  },
);
logger.ready(`Check examples success`);
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/cypress.ts | TypeScript | import { spawnSync } from './.internal/utils';
// Install the cypress binary, then run the suite. When CYPRESS_RECORD_KEY is
// present in the environment the run is recorded to the Cypress dashboard.
const CYPRESS_RECORD_KEY = process.env.CYPRESS_RECORD_KEY as string;
spawnSync('npx cypress install', {});
const runCommand = CYPRESS_RECORD_KEY
  ? `npx cypress run --record --key ${CYPRESS_RECORD_KEY}`
  : 'npx cypress run';
spawnSync(runCommand, {});
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/father.ts | TypeScript | import { spawnSync } from './.internal/utils';
(async () => {
  // Thin wrapper around the `father` CLI. Production builds get `--quiet`
  // appended to suppress noisy per-file output.
  const cliArgs = process.argv.slice(2);
  if (cliArgs.includes('build')) {
    cliArgs.push('--quiet');
  }
  const command = `father ${cliArgs.join(' ')}`;
  spawnSync(command, { cwd: process.cwd() });
})();
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/ghpage.ts | TypeScript | import { PATHS } from './.internal/constants';
import { spawnSync } from './.internal/utils';
// @ts-ignore
import ghpages from 'gh-pages';
(async () => {
  // Build the docs site, forwarding any extra CLI args to the build script.
  const args = process.argv.slice(2);
  const command = `pnpm run build:docs ${args.join(' ')}`;
  spawnSync(command, { cwd: PATHS.ROOT });
  // Push the generated `dist` folder to the `ghpage` branch.
  ghpages.publish(
    'dist',
    {
      branch: 'ghpage',
      repo: 'https://gitee.com/congxiaochen/inula.git',
    },
    function (err: any) {
      // gh-pages invokes the callback on success too; previously `undefined`
      // was logged unconditionally and failures left the exit code at 0.
      if (err) {
        console.error(err);
        process.exit(1);
      }
    },
  );
})();
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/postinstall.ts | TypeScript | import { readFileSync, writeFileSync } from 'fs';
import { join } from 'path';
import { PATHS } from './.internal/constants';
// ref: https://github.com/isaacs/node-graceful-fs/commit/e61a20a052b838f420b98195c232a824a6ac04ee
const GRACEFUL_FS_TO_REPLACE = `if(j.uid<0)j.uid+=4294967296;if(j.gid<0)j.gid+=4294967296;`;
const replaces = [
// j is undefined when accessing .uid property
[
'[node-graceful-fs]',
GRACEFUL_FS_TO_REPLACE,
`/*${GRACEFUL_FS_TO_REPLACE}*//*PATCHED*/`,
],
// when bundle pure esm package e.g. chalk@5
// ncc will set esm to true, which will cause the error
[
'[esm]',
'esm=entry.endsWith(".mjs")||!entry.endsWith(".cjs")&&hasTypeModule(entry)',
'esm=false/*PATCHED*/',
],
];
console.log('patch ncc');
const path = join(
PATHS.ROOT,
'./node_modules/@vercel/ncc/dist/ncc/index.js.cache.js',
);
const content = readFileSync(path, 'utf-8');
let ret = content;
for (const replace of replaces) {
if (ret.includes(replace[2])) {
console.log(`${replace[0]} already patched`);
} else {
console.log(`${replace[0]} patching`);
ret = ret.replace(replace[1], replace[2]);
}
}
writeFileSync(path, ret, 'utf-8');
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/prettier-plugin/index.js | JavaScript | const { parsers } = require('prettier-plugin-organize-imports');
// Wrap a prettier parser so `transform` can mutate the AST in place right
// after parsing. The raw source text is exposed to the transform via
// `options.text`.
function createParser(original, transform) {
  const parse = (text, parsers, options) => {
    const ast = original.parse(text, parsers, options);
    transform(ast, { ...options, text });
    return ast;
  };
  return { ...original, parse };
}
// https://lihautan.com/manipulating-ast-with-javascript/
// Depth-first AST walk. `callbackMap` is either a single function invoked for
// every node, or an object keyed by node type. A callback returning `false`
// prunes the walk below that node.
function visit(ast, callbackMap) {
  function walk(node, parent, key, index) {
    if (typeof callbackMap === 'function') {
      if (callbackMap(node, parent, key, index) === false) return;
    } else if (node.type in callbackMap) {
      if (callbackMap[node.type](node, parent, key, index) === false) return;
    }
    // Recurse into array children (skipping null holes) and any nested value
    // that looks like a node (has a string `type`).
    Object.keys(node).forEach((childKey, keyIndex) => {
      const value = node[childKey];
      if (Array.isArray(value)) {
        value.forEach((element, elementIndex) => {
          if (element !== null) {
            walk(element, node, childKey, elementIndex);
          }
        });
      } else if (typeof value?.type === 'string') {
        walk(value, node, childKey, keyIndex);
      }
    });
  }
  walk(ast);
}
// Sort object-literal properties and TS type-literal members alphabetically,
// but only in files that opt in with a `// sort-object-keys` comment.
function transformJavaScript(ast, options) {
  if (!options.text.includes('// sort-object-keys')) return;
  // Compare only when both entries have plain identifier keys. Anything else
  // (spread elements, computed/string keys, index signatures) keeps its
  // relative order — previously `a.key.type` threw a TypeError on members
  // without a `key` (e.g. `{ ...rest }`).
  const byIdentifierName = (a, b) => {
    const aKey = a && a.key;
    const bKey = b && b.key;
    if (
      aKey &&
      bKey &&
      aKey.type === 'Identifier' &&
      bKey.type === 'Identifier'
    ) {
      return aKey.name.localeCompare(bKey.name);
    }
    return 0;
  };
  visit(ast, {
    ObjectExpression(node) {
      node.properties.sort(byIdentifierName);
    },
    TSTypeLiteral(node) {
      node.members.sort(byIdentifierName);
    },
  });
}
// Re-export all organize-imports parsers, wrapping the TypeScript parser so
// the key-sorting transform runs after import organization.
exports.parsers = {
  ...parsers,
  typescript: createParser(parsers.typescript, transformJavaScript),
};
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/prettier-plugin/index.test.js | JavaScript | const prettier = require('prettier');
// Smoke test: feed a snippet with unsorted imports plus the
// `// sort-object-keys` marker through prettier using this plugin, then print
// the result (imports should be organized and object keys sorted).
const code = `
import 'b';
import 'a';
// sort-object-keys
const foo = { b, a};
`;
const ret = prettier.format(code, {
  parser: 'typescript',
  plugins: [require.resolve('./')]
});
console.log(ret);
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/publish.ts | TypeScript | import { logger } from '@umijs/utils';
import getGitRepoInfo from 'git-repo-info';
import 'zx/globals';
import { getPkgs } from './.internal/utils';
(async () => {
  const { branch } = getGitRepoInfo();
  logger.info(`branch: ${branch}`);
  const pkgs = getPkgs();
  logger.info(`pkgs: ${pkgs.join(', ')}`);
  // pnpm publish
  logger.event('pnpm publish');
  $.verbose = false;
  // Publish the main `inula` package last, after all scoped packages.
  const innerPkgs = pkgs.filter((pkg) => !['inula'].includes(pkg));
  // check 2fa config
  let otpArg: string[] = [];
  if (
    (await $`npm profile get "two-factor auth"`).toString().includes('writes')
  ) {
    let code = '';
    do {
      // get otp from user
      code = await question('This operation requires a one-time password: ');
      // generate arg for zx command
      // why use array? https://github.com/google/zx/blob/main/docs/quotes.md
      otpArg = ['--otp', code];
    } while (code.length !== 6);
  }
  //
  let tag = 'latest';
  await Promise.all(
    innerPkgs.map(async (pkg) => {
      await $`cd packages/${pkg} && pnpm publish --no-git-checks --tag ${tag} ${otpArg}`;
      logger.info(`+ ${pkg}`);
    }),
  );
  await $`cd packages/inula && pnpm publish --no-git-checks --tag ${tag} ${otpArg}`;
  logger.info(`+ inula`);
  $.verbose = true;
})();
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/release.ts | TypeScript | import { logger } from '@umijs/utils';
import { existsSync } from 'fs';
import getGitRepoInfo from 'git-repo-info';
import { join } from 'path';
import rimraf from 'rimraf';
import 'zx/globals';
import { PATHS } from './.internal/constants';
import { assert, eachPkg, getPkgs } from './.internal/utils';
(async () => {
  // Full release pipeline: preflight checks → clean → build → version bump →
  // commit/tag/push → publish to npm.
  const { branch } = getGitRepoInfo();
  logger.info(`branch: ${branch}`);
  const pkgs = getPkgs();
  logger.info(`pkgs: ${pkgs.join(', ')}`);
  // check git status
  logger.event('check git status');
  // NOTE(review): a truthy length means the tree is NOT clean — the variable
  // name is inverted relative to its value.
  const isGitClean = (await $`git status --porcelain`).stdout.trim().length;
  assert(!isGitClean, 'git status is not clean');
  // check git remote update
  logger.event('check git remote update');
  await $`git fetch`;
  const gitStatus = (await $`git status --short --branch`).stdout.trim();
  assert(!gitStatus.includes('behind'), `git status is behind remote`);
  // check npm registry
  logger.event('check npm registry');
  const registry = (await $`npm config get registry`).stdout.trim();
  assert(
    registry === 'https://registry.npmjs.org/',
    'npm registry is not https://registry.npmjs.org/',
  );
  // check package changed
  logger.event('check package changed');
  const changed = (await $`lerna changed --loglevel error`).stdout.trim();
  assert(changed, `no package is changed`);
  // check npm ownership
  logger.event('check npm ownership');
  const whoami = (await $`npm whoami`).stdout.trim();
  try {
    await Promise.all(
      ['inula'].map(async (pkg) => {
        const owners = (await $`npm owner ls ${pkg}`).stdout
          .trim()
          .split('\n')
          .map((line) => {
            return line.split(' ')[0];
          });
        assert(owners.includes(whoami), `${pkg} is not owned by ${whoami}`);
      }),
    );
  } catch (e: any) {
    // only throw ownership error
    if (e.message.includes('is not owned by')) {
      throw e;
    }
  }
  // check package.json
  logger.event('check package.json info');
  await $`npm run check:packageFiles`;
  // clean
  logger.event('clean');
  eachPkg(pkgs, ({ dir, name }) => {
    logger.info(`clean dist of ${name}`);
    rimraf.sync(join(dir, 'dist'));
  });
  // build packages
  logger.event('build packages');
  await $`npm run build:release`;
  // bump version
  logger.event('bump version');
  await $`lerna version --exact --no-commit-hooks --no-git-tag-version --no-push --loglevel error`;
  const version = require(PATHS.LERNA_CONFIG).version;
  // Pre-releases publish under the `next` dist-tag, canary builds under `canary`.
  let tag = 'latest';
  if (
    version.includes('-alpha.') ||
    version.includes('-beta.') ||
    version.includes('-rc.')
  ) {
    tag = 'next';
  }
  if (version.includes('-canary.')) tag = 'canary';
  // update example versions
  logger.event('update example versions');
  const examplesDir = PATHS.EXAMPLES;
  const examples = fs.readdirSync(examplesDir).filter((dir) => {
    return (
      !dir.startsWith('.') && existsSync(join(examplesDir, dir, 'package.json'))
    );
  });
  examples.forEach((example) => {
    const pkg = require(join(examplesDir, example, 'package.json'));
    pkg.scripts ||= {};
    pkg.scripts['start'] = 'npm run dev';
    // Examples are unversioned; dropping `version` keeps lerna away from them.
    delete pkg.version;
    fs.writeFileSync(
      join(examplesDir, example, 'package.json'),
      `${JSON.stringify(pkg, null, 2)}\n`,
    );
  });
  // // update pnpm lockfile
  // logger.event('update pnpm lockfile');
  // $.verbose = false;
  // await $`pnpm i`;
  // $.verbose = true;
  // commit
  logger.event('commit');
  await $`git commit --all --message "release: ${version}"`;
  // git tag
  if (tag !== 'canary') {
    logger.event('git tag');
    await $`git tag v${version}`;
  }
  // git push
  logger.event('git push');
  await $`git push origin ${branch} --tags`;
  // pnpm publish
  logger.event('pnpm publish');
  $.verbose = false;
  // Publish `inula` last, after all scoped packages.
  const innerPkgs = pkgs.filter((pkg) => !['inula'].includes(pkg));
  // check 2fa config
  let otpArg: string[] = [];
  if (
    (await $`npm profile get "two-factor auth"`).toString().includes('writes')
  ) {
    let code = '';
    do {
      // get otp from user
      code = await question('This operation requires a one-time password: ');
      // generate arg for zx command
      // why use array? https://github.com/google/zx/blob/main/docs/quotes.md
      otpArg = ['--otp', code];
    } while (code.length !== 6);
  }
  await Promise.all(
    innerPkgs.map(async (pkg) => {
      await $`cd packages/${pkg} && pnpm publish --no-git-checks --tag ${tag} ${otpArg}`;
      logger.info(`+ ${pkg}`);
    }),
  );
  await $`cd packages/inula && pnpm publish --no-git-checks --tag ${tag} ${otpArg}`;
  logger.info(`+ inula`);
  $.verbose = true;
})();
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/releasePackage.ts | TypeScript | import assert from 'assert';
import getGitRepoInfo from 'git-repo-info';
import 'zx/globals';
(async () => {
  // Release a single non-workspace package: build → `npm version patch` →
  // publish → commit/tag/push.
  const { branch } = getGitRepoInfo();
  const pkgName = argv.pkg;
  assert(pkgName, `pkg name is required, specify with --pkg=xxx`);
  // --pkg is a path relative to the repo root (e.g. `packages/foo`).
  const pkgPath = path.join(__dirname, '..', pkgName);
  assert(fs.existsSync(pkgPath), `pkg ${pkgName} not exists`);
  try {
    await $`cd ${pkgPath} && npm run build`;
  } catch (e) {
    // Some packages use a `ui:build` script instead of plain `build`.
    await $`cd ${pkgPath} && npm run ui:build`;
  }
  await $`cd ${pkgPath} && npm version patch`;
  const newVersion = require(path.join(pkgPath, 'package.json')).version;
  await $`cd ${pkgPath} && pnpm publish --no-git-checks`;
  // commit and tag and push
  await $`git commit -am "release: ${pkgName}@${newVersion}"`;
  await $`git tag ${pkgName}@${newVersion}`;
  await $`git push origin ${branch} --tags`;
})().catch((e) => {
  console.error(e);
  process.exit(1);
});
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/setupWebStorm.ts | TypeScript | import { PATHS } from './.internal/constants';
import { eachPkg, getPkgs, setExcludeFolder } from './.internal/utils';
// Mark generated/output folders as excluded in WebStorm project settings for
// every package and example, so the IDE does not index build artifacts.
const cwd = process.cwd();
eachPkg(getPkgs(), ({ name }) => {
  setExcludeFolder({ pkg: name, cwd });
});
eachPkg(
  getPkgs({ base: PATHS.EXAMPLES }),
  ({ name }) => {
    // Examples additionally exclude the generated .umi temp directories.
    setExcludeFolder({
      pkg: name,
      cwd,
      dirName: 'examples',
      folders: ['.umi', 'src/.umi'],
    });
  },
  {
    base: PATHS.EXAMPLES,
  },
);
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/syncTnpm.ts | TypeScript | import { logger } from '@umijs/utils';
import 'zx/globals';
import { PATHS } from './.internal/constants';
import { getPkgs } from './.internal/utils';
(async () => {
  const pkgs = getPkgs();
  logger.info(`pkgs: ${pkgs.join(', ')}`);
  // sync tnpm — ask the tnpm mirror to pull the freshly published versions.
  logger.event('sync tnpm');
  $.verbose = false;
  await Promise.all(
    pkgs.map(async (pkg) => {
      // Use the published npm name from each package.json, not the dir name.
      const { name } = require(path.join(PATHS.PACKAGES, pkg, 'package.json'));
      logger.info(`sync ${name}`);
      await $`tnpm sync ${name}`;
    }),
  );
  $.verbose = true;
})();
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/testCodemod.ts | TypeScript | import { logger, rimraf } from '@umijs/utils';
import assert from 'assert';
import 'zx/globals';
// Fixture workspace: `origin` is the pristine input, `tmp` is a scratch copy
// the codemod is allowed to mutate.
const fixtureDir = path.join(__dirname, '../codemod/fixtures');
const tmpDir = path.join(fixtureDir, 'tmp');
(async () => {
  // 1. copy fixtures/origin > fixtures/tmp
  logger.info('copy fixtures/origin > fixtures/tmp');
  rimraf.sync(tmpDir);
  fs.copySync(path.join(fixtureDir, 'origin'), tmpDir);
  // 2. run codemod script
  logger.info('run codemod script');
  // enable color for kleur
  // ref: https://github.com/lukeed/kleur/blob/86a7db8/index.mjs#L5
  process.env.FORCE_COLOR = '1';
  await $`npm run codemod:run`;
  // 3. assert the codemod rewrote the expected files
  assert(
    getContent('src/useRouteMatch.ts').includes(`useMatch as useRouteMatch`),
    `src/useRouteMatch.ts`,
  );
  assert(
    getContent('.eslintrc.js').includes(`require.resolve('umi/eslint')`) &&
      getContent('.eslintrc.js').includes(
        `"@typescript-eslint/naming-convention": 0`,
      ),
    `.eslintrc.js`,
  );
})();
// Read a file from the scratch fixture copy as UTF-8 text.
function getContent(filePath: string) {
  return fs.readFileSync(path.join(tmpDir, filePath), 'utf-8');
}
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/turbo.ts | TypeScript | import { PATHS } from './.internal/constants';
import { spawnSync } from './.internal/utils';
(async () => {
  // Wrapper around `turbo run` that injects default --filter/--cache-dir args.
  const args = process.argv.slice(2);
  // no cache
  if (args.includes('--no-cache')) {
    args.unshift('--force');
  }
  // Recognize both `--flag value` and `--flag=value` forms; previously the
  // `--filter=<pkg>` / `--cache-dir=<dir>` forms were missed by `includes`
  // and the defaults were wrongly prepended on top of the user's choice.
  const hasFlag = (flag: string) =>
    args.some((a) => a === flag || a.startsWith(`${flag}=`));
  // filter
  if (!hasFlag('--filter')) {
    // Tips: should use double quotes, single quotes are not valid on windows.
    args.unshift('--filter', `"./packages/*"`);
  }
  // turbo cache
  if (!hasFlag('--cache-dir')) {
    args.unshift('--cache-dir', `".turbo"`);
  }
  const command = `turbo run ${args.join(' ')}`;
  spawnSync(command, { cwd: PATHS.ROOT });
})();
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/typings.d.ts | TypeScript | // keep
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
scripts/verifyCommit.ts | TypeScript | import 'zx/globals';
// Git commit-msg hook: validate the message against the conventional-commit
// format and reject with guidance when it does not match.
const msgPath = process.argv[2];
// Invoked without a message file (e.g. run manually) — nothing to verify.
if (!msgPath) process.exit();
const msg = removeComment(fs.readFileSync(msgPath, 'utf-8').trim());
// Allowed shape: optional "revert: " prefix, a known type, an optional
// "(scope)", then ": " and a subject of 1-50 characters.
const commitRE =
  /^(revert: )?(feat|fix|docs|style|refactor|perf|test|workflow|build|ci|chore|types|wip|release|dep|example|Merge)(\(.+\))?: .{1,50}/;
if (!commitRE.test(msg)) {
  console.log();
  console.error(
    ` ${chalk.bgRed.white(' ERROR ')} ${chalk.red(
      `invalid commit message format.`,
    )}\n\n` +
      chalk.red(
        ` Proper commit message format is required for automated changelog generation. Examples:\n\n`,
      ) +
      ` ${chalk.green(`feat(bundler-webpack): add 'comments' option`)}\n` +
      ` ${chalk.green(`fix(core): handle events on blur (close #28)`)}\n\n` +
      chalk.red(` See .github/commit-convention.md for more details.\n`),
  );
  process.exit(1);
}
/**
 * Strip git comment lines (lines starting with '#') from a commit message,
 * including each comment line's trailing newline characters.
 */
function removeComment(msg: string) {
  const commentLine = /^#.*[\n\r]*/gm;
  return msg.replace(commentLine, '');
}
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
theme.config.ts | TypeScript | // @ts-ignore
import InulaLogo from './logo.png';
// Docs-site theme configuration: title, logo, search hotkeys and navigation.
export default {
  title: 'Inula',
  description: '关注业务需求,以开发体验为主,集成 openInula 全生态',
  logo: InulaLogo,
  github: 'https://gitee.com/congxiaochen/inula',
  // Keyboard shortcut that focuses the site search, per platform.
  searchHotKey: {
    macos: '⌘+k',
    windows: 'ctrl+k',
  },
  navs: [
    {
      // "Docs" dropdown grouping the documentation sections.
      path: '/docs',
      title: '文档',
      type: 'nav',
      children: [
        {
          title: '快速开始',
          children: ['getting-started'],
        },
        {
          title: '目录结构',
          children: ['directory-structure'],
        },
        {
          title: '配置',
          children: ['config'],
        },
        {
          title: '插件集成',
          children: [
            'x',
            'request',
            'intl',
            'antd',
            'pro-layout',
            'openapi',
            'aigc',
          ],
        },
      ],
    },
    {
      // External link to the openInula homepage.
      title: 'openInula',
      type: 'link',
      path: 'https://openinula.net/',
    },
  ],
};
| xiaohuoni/inula | 0 | TypeScript | xiaohuoni | 聪小陈 | 浮光陈氏 | |
app.py | Python | from openai import OpenAI
import streamlit as st
import edge_tts
import asyncio
import os
from datetime import datetime
import requests
from dotenv import load_dotenv
from typing import Literal
# Allowed DALL-E 3 output resolutions (width x height).
ImageSize = Literal["1024x1024", "1792x1024", "1024x1792"]
# Load OPENAI_API_KEY (and any other settings) from a local .env, if present.
load_dotenv()
# Create necessary directories if they don't exist
os.makedirs("./audios", exist_ok=True)  # generated narration audio (mp3)
os.makedirs("./texts", exist_ok=True)  # saved story text files
os.makedirs("./images", exist_ok=True)  # downloaded DALL-E images
# Function definitions
def generate_story(prompt):
    """Ask the chat model for a story based on `prompt`; return trimmed text."""
    system_message = {"role": "system", "content": "You are a children's story writer."}
    user_message = {
        "role": "user",
        "content": prompt + "\n\nRespond without further explanations or comments. ",
    }
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[system_message, user_message],
        temperature=0.8,
    )
    return completion.choices[0].message.content.strip()
def generate_images_with_dalle(story, image_prompt, size="1024x1024"):
    """Request one DALL-E 3 illustration for `story`; return the image URL."""
    combined_prompt = f"{image_prompt}\n\nStory:\n\n'{story}'"
    result = client.images.generate(
        model="dall-e-3",
        prompt=combined_prompt,
        size=size,
        quality="standard",
        n=1,
    )
    return result.data[0].url
async def generate_audio(story, voice):
    """Synthesize narration with edge-tts; save under ./audios and return the path."""
    output_path = f"./audios/audio-{st.session_state.timestamp}-{voice}.mp3"
    tts = edge_tts.Communicate(story, voice)
    await tts.save(output_path)
    return output_path
def generate_audio_sync(story, voice):
    """Synchronous wrapper around generate_audio() for Streamlit's sync flow."""
    return asyncio.run(generate_audio(story, voice))
def save_story(story):
    """Persist the story text under ./texts; return the file path."""
    filename = f"./texts/story-{st.session_state.timestamp}.txt"
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    # Explicit UTF-8 avoids the platform default encoding (e.g. cp1252 on
    # Windows) mangling any non-ASCII characters in the generated story.
    with open(filename, "w", encoding="utf-8") as f:
        f.write(story)
    return filename
def save_image(image_url):
    """Download the generated image and persist it under ./images.

    Raises requests.HTTPError if the download fails, instead of silently
    writing an error page into a .png file.
    """
    filename = f"./images/image-{st.session_state.timestamp}-v{st.session_state.image_version}.png"
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    # Timeout prevents the UI from hanging forever on a stalled download.
    response = requests.get(image_url, timeout=60)
    response.raise_for_status()
    with open(filename, "wb") as f:
        f.write(response.content)
    return filename
def load_api_key_from_env():
    """Return OPENAI_API_KEY from the environment, re-reading .env first."""
    load_dotenv()
    return os.getenv("OPENAI_API_KEY")
def generate_story_content():
    """Generate a new story, save it, and reset per-story version counters."""
    # The timestamp ties the story, image, and audio artifacts together on disk.
    st.session_state.timestamp = datetime.now().strftime("%Y%m%d.%H%M%S")
    st.session_state.story = generate_story(story_prompt)
    save_story(st.session_state.story)  # called for its side effect only
    st.session_state.image_version = 1
    st.session_state.audio_version = 1
def generate_image_content():
    """Generate (or regenerate) the story illustration via DALL-E."""
    if st.session_state.story:
        st.session_state.image_url = generate_images_with_dalle(
            st.session_state.story,
            image_prompt,
            size=image_sizes[selected_image_size]
        )
        save_image(st.session_state.image_url)  # persist to ./images
        st.session_state.image_version += 1
    else:
        st.write("Please generate a story first.")
def generate_audio_content():
    """Generate (or regenerate) narration audio for the current story."""
    if st.session_state.story:
        if st.session_state.audio_file:
            # Drop the previous take; tolerate it having been deleted already
            # (e.g. manually, or by an external cleanup).
            try:
                os.remove(st.session_state.audio_file)
            except FileNotFoundError:
                pass
        st.session_state.audio_file = generate_audio_sync(st.session_state.story, selected_voice)
        # Remember what this audio was generated from so regeneration can be
        # skipped when neither the voice nor the story has changed.
        st.session_state.last_voice = selected_voice
        st.session_state.last_story = st.session_state.story
        st.session_state.audio_version += 1
    else:
        st.write("Please generate a story first.")
# Streamlit web app interface
st.title("Toddler Picture Story Generator")
# Streamlit widget for API key input
api_key_source = st.radio("Choose OpenAI API Key source:", ("Load from .env", "Enter manually"))
if api_key_source == "Load from .env":
    api_key = load_api_key_from_env()
    if not api_key:
        # Fall back to manual entry when the .env file has no key.
        st.error("No API key found in .env file. Please enter it manually.")
        api_key = st.text_input("Enter your OpenAI API key:", type="password")
else:
    api_key = st.text_input("Enter your OpenAI API key:", type="password")
# Initialize OpenAI client with the API key
if api_key:
    client = OpenAI(api_key=api_key)
else:
    st.error("Please provide a valid OpenAI API key to proceed.")
# Text input for story prompt
story_idea = st.text_area("Please input the Story Idea, keywords or short sentence:", height=50)
# Text area to display and modify prompt template
default_story_prompt = f"""Create a simple story of about 100 words in American English, based on the following ideas:\n\n```\n{story_idea}\n```\n\n Make sure the story suitable for 2-3 year-old toddlers. Always try to use simple, basic, short words, short sentences, and preferably has some rhyming lines."""
story_prompt = st.text_area("Modify the story prompt if needed:", value=default_story_prompt, height=210)
# Text area to display and modify image prompt template
default_image_prompt = """Generate an image based on the following story. The style should be simple and playful, cartoonish, with soft, warm colors, and minimalistic details. The image should be suitable for a 2-year-old child, with clear, easy-to-recognize elements. Ensure that the scene evokes warmth, friendliness, and is rich in visual storytelling, but not overly complex. The composition should be balanced and visually engaging, with a focus on creating a comforting and imaginative atmosphere for storytelling. However, please don't generate any text on the image."""
image_prompt = st.text_area("Modify the image prompt if needed:", value=default_image_prompt, height=150)
# Image size selection
# Maps a human-readable label to the DALL-E size string it represents.
image_sizes = {
    "Square (1024x1024)": "1024x1024",
    "Landscape (1792x1024)": "1792x1024",
    "Portrait (1024x1792)": "1024x1792"
}
selected_image_size = st.selectbox("Select image size:", list(image_sizes.keys()))
# Edge-TTS neural voices offered for narration.
# Note: the original list contained "en-US-GuyNeural" twice; deduplicated.
voices = [
    "en-US-AnaNeural",
    "en-US-AriaNeural",
    "en-US-AvaNeural",
    "en-US-EmmaNeural",
    "en-US-JennyNeural",
    "en-US-MichelleNeural",
    "en-US-GuyNeural",
    "en-US-AndrewNeural",
    "en-US-BrianNeural",
    "en-US-ChristopherNeural",
    "en-US-EricNeural",
    "en-US-RogerNeural",
    "en-US-SteffanNeural"
]
# Dropdown list for voice selection
selected_voice = st.selectbox("Select a voice for the audio:", voices)
# Initialize session state variables
# (Streamlit reruns the script on each interaction; session_state keeps the
# generated artifacts alive across reruns.)
if 'story' not in st.session_state:
    st.session_state.story = ""
if 'image_url' not in st.session_state:
    st.session_state.image_url = ""
if 'audio_file' not in st.session_state:
    st.session_state.audio_file = ""
if 'timestamp' not in st.session_state:
    st.session_state.timestamp = ""
if 'last_voice' not in st.session_state:
    st.session_state.last_voice = ""
if 'last_story' not in st.session_state:
    st.session_state.last_story = ""
if 'image_version' not in st.session_state:
    st.session_state.image_version = 1
if 'audio_version' not in st.session_state:
    st.session_state.audio_version = 1
# Button to generate all components
if st.button('Generate Story, Image, and Audio'):
    generate_story_content()
    generate_image_content()
    generate_audio_content()
# Display story in a text area with auto-adjusting height
if st.session_state.story:
    # Rough height heuristic: ~25px per line of story text.
    story_lines = st.session_state.story.count('\n') + 1
    st.text_area("Story:", value=st.session_state.story, height=story_lines * 25, key="story_display", max_chars=None)
# Display image with a frame
if st.session_state.image_url:
    st.image(st.session_state.image_url, use_column_width=True)
if st.button('Regenerate Image'):
    generate_image_content()
    if st.session_state.image_url:
        st.image(st.session_state.image_url, use_column_width=True)
# Audio player
if st.session_state.audio_file:
    st.audio(st.session_state.audio_file)
if st.button('Regenerate Audio'):
    if st.session_state.story:
        # Only re-synthesize when the voice or the story actually changed.
        if selected_voice != st.session_state.last_voice or st.session_state.audio_file == "" or st.session_state.story != st.session_state.last_story:
            generate_audio_content()
        else:
            st.write("No need to regenerate audio. Voice hasn't changed and story is the same.")
    else:
        st.write("Please generate a story first.")
| xiaolai/Toddler-Picture-Story-Generator | 42 | A streamlit app, to generate picture books for toddlers. | Python | xiaolai | xiaolai | inblockchain |
eslint.config.js | JavaScript | import eslint from '@eslint/js';
import tseslint from '@typescript-eslint/eslint-plugin';
import tsparser from '@typescript-eslint/parser';
import globals from 'globals';
// Flat ESLint config: strict type-aware linting for src/, relaxed rules for
// tests, and generated/vendor folders excluded entirely.
export default [
  eslint.configs.recommended,
  {
    files: ['src/**/*.ts'],
    ignores: ['src/**/*.test.ts', 'src/__tests__/**'],
    languageOptions: {
      parser: tsparser,
      parserOptions: {
        ecmaVersion: 'latest',
        sourceType: 'module',
        project: './tsconfig.json',
      },
      globals: {
        ...globals.node,
      },
    },
    plugins: {
      '@typescript-eslint': tseslint,
    },
    rules: {
      // TypeScript specific rules
      '@typescript-eslint/no-explicit-any': 'warn',
      '@typescript-eslint/no-unused-vars': ['warn', {
        argsIgnorePattern: '^_',
        varsIgnorePattern: '^_',
        caughtErrorsIgnorePattern: '^_'
      }],
      'no-unused-vars': 'off', // Use TypeScript's rule instead
      '@typescript-eslint/explicit-function-return-type': 'off',
      '@typescript-eslint/no-non-null-assertion': 'warn',
      // General code quality
      'no-console': 'off', // We use console for logging
      'prefer-const': 'warn',
      'no-var': 'error',
      'eqeqeq': ['error', 'always'],
      'curly': ['error', 'all'],
      // Security
      'no-eval': 'error',
      'no-implied-eval': 'error',
      'no-new-func': 'error',
    },
  },
  {
    // Test files don't need strict type checking
    files: ['src/**/*.test.ts', 'src/__tests__/**/*.ts'],
    languageOptions: {
      parser: tsparser,
      parserOptions: {
        ecmaVersion: 'latest',
        sourceType: 'module',
      },
      globals: {
        ...globals.node,
        ...globals.jest,
      },
    },
    rules: {
      '@typescript-eslint/no-explicit-any': 'off',
      'no-unused-vars': 'off',
      '@typescript-eslint/no-unused-vars': 'off',
    },
  },
  {
    ignores: ['dist/**', 'node_modules/**', 'docs/**', 'scripts/**', '*.js', '*.mjs'],
  },
];
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
jest.config.js | JavaScript | export default {
  preset: 'ts-jest/presets/default-esm',
  testEnvironment: 'node',
  extensionsToTreatAsEsm: ['.ts'],
  // Run tests sequentially to avoid SQLite database locking issues
  maxWorkers: 1,
  // Strip the '.js' suffix from relative ESM imports so ts-jest can resolve
  // the corresponding '.ts' source files.
  moduleNameMapper: {
    '^(\\.{1,2}/.*)\\.js$': '$1',
  },
  transform: {
    '^.+\\.tsx?$': [
      'ts-jest',
      {
        useESM: true,
      },
    ],
  },
  testMatch: [
    '**/__tests__/**/*.test.ts',
    '**/?(*.)+(spec|test).ts'
  ],
  collectCoverageFrom: [
    'src/**/*.ts',
    '!src/**/*.d.ts',
    '!src/**/*.test.ts',
    '!src/**/*.spec.ts',
  ],
  coverageDirectory: 'coverage',
  coverageReporters: ['text', 'lcov', 'html'],
  // Minimum acceptable coverage; CI fails below these percentages.
  coverageThreshold: {
    global: {
      branches: 70,
      functions: 70,
      lines: 70,
      statements: 70,
    },
  },
  setupFilesAfterEach: undefined, // (not used)
  setupFilesAfterEnv: ['<rootDir>/src/__tests__/setup.ts'],
  // Increase timeout for tests that involve embedding initialization
  testTimeout: 30000,
};
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
scripts/changelog-check.sh | Shell | #!/bin/bash
# Changelog checker - Verifies CHANGELOG.md is up to date for releases
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Get the current version from package.json
CURRENT_VERSION=$(node -p "require('./package.json').version")

# Version with its dots escaped, for use in the sed regex addresses below
# (unescaped dots are regex wildcards and could match unrelated text).
VERSION_RE=$(printf '%s' "$CURRENT_VERSION" | sed 's/[.]/\\./g')

# Check if CHANGELOG.md exists
if [ ! -f "CHANGELOG.md" ]; then
    echo -e "${RED}❌ CHANGELOG.md not found${NC}"
    exit 1
fi

# Check if current version is documented in CHANGELOG.
# -F matches the version as a literal string rather than a regex.
if ! grep -qF "[$CURRENT_VERSION]" CHANGELOG.md; then
    echo -e "${YELLOW}⚠️  Warning: Version $CURRENT_VERSION not found in CHANGELOG.md${NC}"
    echo ""
    echo "Please add an entry for version $CURRENT_VERSION to CHANGELOG.md"
    echo ""
    echo "Template:"
    echo "----------------------------------------"
    echo "## [$CURRENT_VERSION] - $(date +%Y-%m-%d)"
    echo ""
    echo "### Added"
    echo "- New feature description"
    echo ""
    echo "### Changed"
    echo "- Changed functionality description"
    echo ""
    echo "### Fixed"
    echo "- Bug fix description"
    echo "----------------------------------------"
    echo ""
    exit 1
fi

# Check if [Unreleased] section has content
UNRELEASED_CONTENT=$(sed -n '/## \[Unreleased\]/,/## \[[0-9]/p' CHANGELOG.md | grep -v "^## " | grep -v "^$" | wc -l)

if [ "$UNRELEASED_CONTENT" -gt 0 ]; then
    echo -e "${YELLOW}⚠️  [Unreleased] section has content${NC}"
    echo ""
    echo "Remember to move unreleased changes to the versioned section before releasing"
    echo ""
fi

# Success
echo -e "${GREEN}✅ CHANGELOG.md looks good for version $CURRENT_VERSION${NC}"

# Show recent entries (quoted expansion; escaped dots match literally)
echo ""
echo "Recent entries:"
echo "----------------------------------------"
sed -n "/## \[$VERSION_RE\]/,/^## /p" CHANGELOG.md | head -20
echo "----------------------------------------"
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
scripts/check-node.js | JavaScript | #!/usr/bin/env node
/* eslint-env node */
/* eslint-disable no-console */
// Refuse to run on Node.js older than 20: parse the major version from
// process.versions.node and exit non-zero with upgrade instructions.
const rawVersion = process.versions.node || "0.0.0";
const major = Number(rawVersion.split(".")[0] || 0);
const minMajor = 20;
// NaN guard covers a malformed version string; both cases are fatal.
if (Number.isNaN(major) || major < minMajor) {
  console.error("❌ cccmemory requires Node.js 20 or later.");
  console.error(` Detected Node.js ${rawVersion}.`);
  console.error(" Please upgrade Node.js and reinstall:");
  console.error(" - nvm install 22 && nvm use 22");
  console.error(" - npm install -g cccmemory");
  process.exit(1);
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
scripts/dev-config.js | JavaScript | #!/usr/bin/env node
/**
* Generate MCP configuration for local development testing.
*
* Usage: npm run dev:config
*
* This outputs the JSON configuration to add to your Claude Code settings
* for testing the local build instead of the published npm package.
*/
import { resolve, dirname } from "path";
import { fileURLToPath } from "url";
// Derive the repo root and the built server entry point from this script's
// own location (scripts/ -> repo root -> dist/index.js).
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const projectRoot = resolve(__dirname, "..");
const entryPoint = resolve(projectRoot, "dist", "index.js");
// MCP server entry that points Claude Code at the local build.
const config = {
  "cccmemory": {
    command: "node",
    args: [entryPoint],
  },
};
console.log(`
╔══════════════════════════════════════════════════════════════════╗
║           Local Development MCP Configuration                     ║
╚══════════════════════════════════════════════════════════════════╝

Add this to your Claude Code MCP settings (~/.claude.json or VS Code settings):

${JSON.stringify({ mcpServers: config }, null, 2)}

Or just the server entry:

${JSON.stringify(config, null, 2)}

────────────────────────────────────────────────────────────────────
Entry point: ${entryPoint}

Steps to test local changes:
  1. Make your code changes
  2. Run: npm run build
  3. Restart Claude Code (Cmd+Shift+P > "Developer: Reload Window" in VS Code)
  4. Test your changes

To switch back to published version, use:
  {
    "cccmemory": {
      "command": "npx",
      "args": ["-y", "cccmemory"]
    }
  }
────────────────────────────────────────────────────────────────────
`);
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
scripts/mcp-smoke-test.js | JavaScript | #!/usr/bin/env node
/**
* Smoke test the local MCP server via stdio using the SDK client.
*
* Usage:
* node scripts/mcp-smoke-test.js [--full] [--dangerous] [--all] [--project /path]
*/
import process from "node:process";
import { resolve, join } from "node:path";
import { existsSync, readdirSync, statSync } from "node:fs";
import { homedir } from "node:os";
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
const args = process.argv.slice(2);
process.env.NODE_ENV = process.env.NODE_ENV || "test";
process.env.CCCMEMORY_DISABLE_AUTO_INDEX =
process.env.CCCMEMORY_DISABLE_AUTO_INDEX || "1";
const full = args.includes("--full");
const dangerous = args.includes("--dangerous");
const all = args.includes("--all");
const projectFlagIndex = args.indexOf("--project");
const projectPath = projectFlagIndex >= 0 ? args[projectFlagIndex + 1] : process.cwd();
const entryPoint = resolve(process.cwd(), "dist", "index.js");
const transport = new StdioClientTransport({
command: "node",
args: [entryPoint, "--server"],
stderr: "inherit",
env: {
...process.env,
NODE_ENV: "test",
CCCMEMORY_DISABLE_AUTO_INDEX: "1",
EMBEDDING_PROVIDER: process.env.EMBEDDING_PROVIDER || "transformers",
},
});
const client = new Client({ name: "cccmemory-smoke", version: "1.0.0" }, { capabilities: {} });
const results = [];
/**
 * Extract and JSON-parse the first text payload from an MCP tool result.
 * Returns null when there is no text payload or it is not valid JSON.
 */
function parseToolResult(result) {
  const payload = result?.content?.[0]?.text;
  if (!payload) {
    return null;
  }
  try {
    return JSON.parse(payload);
  } catch {
    return null;
  }
}
/**
 * Invoke a single MCP tool, record pass/fail in `results`, and log the
 * outcome. Never throws; failures are reported via the returned `ok` flag.
 */
async function runTool(name, params = {}) {
  try {
    const response = await client.callTool({ name, arguments: params });
    const parsed = parseToolResult(response);
    const ok = !response.isError;
    results.push({ name, ok });
    if (ok) {
      console.error(`✓ ${name}`);
    } else {
      console.error(`✗ ${name}:`, response.content ?? response.error ?? "Unknown error");
    }
    return { ok, parsed };
  } catch (error) {
    results.push({ name, ok: false });
    console.error(`✗ ${name}:`, error instanceof Error ? error.message : String(error));
    return { ok: false, parsed: null };
  }
}
/**
 * Locate the first folder under ~/.claude/projects that contains at least
 * one .jsonl transcript; return its absolute path, or null if none exists.
 */
function findSourceFolderWithJsonl() {
  const projectsDir = join(homedir(), ".claude", "projects");
  if (!existsSync(projectsDir)) {
    return null;
  }
  for (const entry of readdirSync(projectsDir)) {
    const folderPath = join(projectsDir, entry);
    // Skip anything that cannot be stat'ed or is not a directory.
    let isDir = false;
    try {
      isDir = statSync(folderPath).isDirectory();
    } catch {
      continue;
    }
    if (!isDir) {
      continue;
    }
    // Skip unreadable directories rather than aborting the scan.
    let files;
    try {
      files = readdirSync(folderPath);
    } catch {
      continue;
    }
    const hasTranscript = files.some((file) => file.endsWith(".jsonl"));
    if (hasTranscript) {
      return folderPath;
    }
  }
  return null;
}
/**
 * Drive the smoke test: connect to the server, enumerate tools, invoke each
 * selected tool with canned arguments, then exit 0/1 based on the results.
 *
 * Fix: the original checked `if (!params)` before `if (params === null)`;
 * since null is falsy, the "skipped (missing prerequisites)" branch was
 * unreachable and skipped tools were misreported as "has no test params".
 */
async function main() {
  await client.connect(transport);
  const toolList = await client.listTools();
  const toolNames = toolList.tools.map((tool) => tool.name);
  console.error(`Found ${toolNames.length} tools`);
  // Seed values reused by several tool invocations below.
  const listResult = await runTool("list_recent_sessions", { limit: 3, project_path: projectPath });
  const recentSessionId = listResult.parsed?.sessions?.[0]?.session_id;
  const memoryKey = `__mcp_smoke_${Date.now()}`;
  const sourceFolder = findSourceFolderWithJsonl();
  const migrationTarget = sourceFolder ? `${sourceFolder}-migrate-test` : null;
  // Canned arguments for every tool; a null entry means the tool's
  // prerequisites are missing and it must be skipped.
  const toolParams = {
    index_conversations: {
      project_path: projectPath,
      session_id: recentSessionId,
      include_thinking: false,
      enable_git: false,
      exclude_mcp_conversations: "self-only"
    },
    search_conversations: { query: "memory", limit: 3 },
    search_project_conversations: {
      query: "memory",
      project_path: projectPath,
      limit: 3,
      include_claude_code: true,
      include_codex: true,
    },
    get_decisions: { query: "decision", limit: 3 },
    check_before_modify: { file_path: "README.md" },
    get_file_evolution: { file_path: "README.md", limit: 3 },
    link_commits_to_conversations: { query: "merge", limit: 3 },
    search_mistakes: { query: "error", limit: 3 },
    get_requirements: { component: "database" },
    get_tool_history: { limit: 1, include_content: false },
    find_similar_sessions: { query: "indexing", limit: 3 },
    recall_and_apply: { query: "indexing", limit: 2 },
    generate_documentation: { project_path: projectPath, scope: "architecture" },
    discover_old_conversations: { current_project_path: projectPath },
    migrate_project: sourceFolder && migrationTarget ? {
      source_folder: sourceFolder,
      old_project_path: projectPath,
      new_project_path: `${projectPath}-migrate-test`,
      dry_run: true,
      mode: "migrate"
    } : null,
    forget_by_topic: { keywords: ["__mcp_smoke__"], confirm: false, project_path: projectPath },
    search_by_file: { file_path: "README.md", limit: 2 },
    list_recent_sessions: { limit: 3, project_path: projectPath },
    get_latest_session_summary: {
      project_path: projectPath,
      source_type: "all",
      limit_messages: 10,
      include_tools: true,
      include_errors: true,
    },
    index_all_projects: {
      include_codex: true,
      include_claude_code: true,
      incremental: true
    },
    search_all_conversations: { query: "memory", limit: 3 },
    get_all_decisions: { query: "decision", limit: 3 },
    search_all_mistakes: { query: "error", limit: 3 },
    remember: {
      key: memoryKey,
      value: "smoke-test-value",
      context: "mcp smoke test",
      tags: ["smoke-test"],
      project_path: projectPath,
    },
    recall: { key: memoryKey, project_path: projectPath },
    recall_relevant: { query: "smoke test", limit: 3, project_path: projectPath },
    list_memory: { tags: ["smoke-test"], limit: 5, project_path: projectPath },
    forget: { key: memoryKey, project_path: projectPath },
    prepare_handoff: { project_path: projectPath },
    resume_from_handoff: null,
    list_handoffs: { limit: 3, project_path: projectPath },
    get_startup_context: { query: "indexing", max_tokens: 500, project_path: projectPath },
    inject_relevant_context: { message: "working on indexing", max_tokens: 500, project_path: projectPath },
  };
  const shouldRunAll = all || full || dangerous;
  // Safe default subset: read-only tools that cannot mutate the index.
  const defaultTools = [
    "search_project_conversations",
    "search_conversations",
    "list_recent_sessions",
    "get_tool_history",
    "get_decisions",
    "search_mistakes",
    "check_before_modify",
    "get_file_evolution",
  ];
  const toolsToRun = shouldRunAll ? toolNames : defaultTools;
  // Captured from prepare_handoff so resume_from_handoff can be exercised.
  let handoffId = null;
  for (const toolName of toolsToRun) {
    if (!toolNames.includes(toolName)) {
      console.error(`✗ ${toolName} not registered in MCP server`);
      results.push({ name: toolName, ok: false });
      continue;
    }
    // resume_from_handoff depends on the id produced by prepare_handoff.
    if (toolName === "resume_from_handoff") {
      if (!handoffId) {
        console.error("✗ resume_from_handoff skipped (no handoff id)");
        results.push({ name: toolName, ok: false });
        continue;
      }
      await runTool(toolName, { handoff_id: handoffId, project_path: projectPath });
      continue;
    }
    const params = toolParams[toolName];
    // Check null (deliberate skip) before the generic falsy check, so the
    // "missing prerequisites" message is actually reachable.
    if (params === null) {
      console.error(`✗ ${toolName} skipped (missing prerequisites)`);
      results.push({ name: toolName, ok: false });
      continue;
    }
    if (!params) {
      console.error(`✗ ${toolName} has no test params`);
      results.push({ name: toolName, ok: false });
      continue;
    }
    const result = await runTool(toolName, params);
    if (toolName === "prepare_handoff") {
      handoffId = result.parsed?.handoff?.id ?? null;
    }
  }
  const failed = results.filter((r) => !r.ok);
  if (failed.length > 0) {
    console.error(`\nSmoke test finished with ${failed.length} failure(s).`);
    process.exit(1);
  }
  console.error("\nSmoke test passed.");
  process.exit(0);
}
// Top-level entry: any unexpected failure exits non-zero with a message.
main().catch((error) => {
  console.error("Smoke test failed:", error instanceof Error ? error.message : String(error));
  process.exit(1);
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
scripts/postinstall.js | JavaScript | #!/usr/bin/env node
/**
* Post-install script to automatically configure cccmemory
* in Claude Code's global configuration (~/.claude.json)
*/
/* eslint-env node */
/* eslint-disable no-console */
import { readFileSync, writeFileSync, existsSync, copyFileSync } from 'fs';
import { join } from 'path';
import { homedir } from 'os';
const CLAUDE_CONFIG_PATH = join(homedir(), '.claude.json');
const SERVER_NAME = 'cccmemory';
const ABI_RECORD_PATH = join(process.cwd(), '.node-abi.json');
/**
 * Write the current Node version/ABI to .node-abi.json in the package dir
 * so later runs can detect an ABI mismatch after a Node upgrade.
 * Best-effort: a write failure is logged, never fatal.
 */
function recordNodeAbi() {
  const record = {
    nodeVersion: process.versions.node || 'unknown',
    modules: process.versions.modules || 'unknown',
    recordedAt: new Date().toISOString(),
  };
  try {
    const serialized = JSON.stringify(record, null, 2);
    writeFileSync(ABI_RECORD_PATH, serialized, 'utf-8');
  } catch (error) {
    console.log(`⚠️  Failed to record Node ABI: ${error.message}`);
  }
}
/**
 * Register cccmemory as an MCP server in ~/.claude.json.
 * Only acts on global npm installs; local installs get manual instructions.
 * Always records the Node ABI first so version mismatches can be detected.
 */
function postInstall() {
  recordNodeAbi();
  // Only run if this is a global installation
  if (process.env.npm_config_global !== 'true') {
    console.log('📦 Local installation detected - skipping global MCP configuration');
    console.log('   To configure manually, run: claude mcp add --scope user cccmemory');
    return;
  }
  console.log('🔧 Configuring cccmemory in Claude Code...');
  // Check if Claude Code config exists
  if (!existsSync(CLAUDE_CONFIG_PATH)) {
    console.log('⚠️  Claude Code configuration not found at ~/.claude.json');
    console.log('   Please install Claude Code first: https://claude.ai/download');
    console.log('   Then run: claude mcp add --scope user cccmemory cccmemory');
    return;
  }
  try {
    // Read current configuration
    const configContent = readFileSync(CLAUDE_CONFIG_PATH, 'utf-8');
    const config = JSON.parse(configContent);
    // Check if already configured (never overwrite an existing entry)
    if (config.mcpServers && config.mcpServers[SERVER_NAME]) {
      console.log('✓ cccmemory MCP server is already configured');
      console.log('  Current command:', config.mcpServers[SERVER_NAME].command);
      return;
    }
    // Create backup before mutating the user's config file
    const backupPath = `${CLAUDE_CONFIG_PATH}.backup.${Date.now()}`;
    copyFileSync(CLAUDE_CONFIG_PATH, backupPath);
    console.log(`📋 Created backup: ${backupPath}`);
    // Initialize mcpServers object if it doesn't exist
    if (!config.mcpServers) {
      config.mcpServers = {};
    }
    // Add our MCP server configuration
    config.mcpServers[SERVER_NAME] = {
      type: 'stdio',
      command: 'cccmemory',
      args: [],
      env: {}
    };
    // Write updated configuration
    writeFileSync(
      CLAUDE_CONFIG_PATH,
      JSON.stringify(config, null, 2),
      'utf-8'
    );
    console.log('✅ Successfully configured cccmemory MCP server!');
    console.log();
    console.log('🎉 Setup complete! You can now use these tools in Claude Code:');
    console.log('   • index_conversations - Index conversation history');
    console.log('   • search_conversations - Search past conversations');
    console.log('   • get_decisions - Find design decisions');
    console.log('   • check_before_modify - Check file context before editing');
    console.log('   • forget_by_topic - Selectively delete conversations');
    console.log('   • and 10 more tools...');
    console.log();
    console.log('📚 Documentation: https://github.com/xiaolai/cccmemory');
    console.log('🔍 List tools: /mcp (in Claude Code)');
  } catch (error) {
    // Never fail the npm install; fall back to printing manual instructions.
    console.error('❌ Failed to configure MCP server:', error.message);
    console.log();
    console.log('💡 Manual configuration:');
    console.log('   Add this to ~/.claude.json under "mcpServers":');
    console.log('   {');
    console.log('     "cccmemory": {');
    console.log('       "type": "stdio",');
    console.log('       "command": "cccmemory",');
    console.log('       "args": []');
    console.log('     }');
    console.log('   }');
  }
}
postInstall();
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/ConversationMemory.ts | TypeScript | /**
* Main Orchestrator - Coordinates all components for conversation memory indexing and retrieval.
*
* ConversationMemory is the primary interface for the cccmemory-mcp system.
* It orchestrates parsing, storage, extraction, and search of Claude Code conversation history.
*
* @example
* ```typescript
* const memory = new ConversationMemory();
* await memory.indexConversations({
* projectPath: '/path/to/project',
* enableGitIntegration: true
* });
* ```
*/
import { getSQLiteManager, SQLiteManager } from "./storage/SQLiteManager.js";
import { ConversationStorage } from "./storage/ConversationStorage.js";
import { ConversationParser, type ParseResult } from "./parsers/ConversationParser.js";
import { DecisionExtractor } from "./parsers/DecisionExtractor.js";
import { MistakeExtractor } from "./parsers/MistakeExtractor.js";
import { GitIntegrator } from "./parsers/GitIntegrator.js";
import { RequirementsExtractor } from "./parsers/RequirementsExtractor.js";
import { MethodologyExtractor } from "./parsers/MethodologyExtractor.js";
import { ResearchExtractor } from "./parsers/ResearchExtractor.js";
import { SolutionPatternExtractor } from "./parsers/SolutionPatternExtractor.js";
import { SemanticSearch } from "./search/SemanticSearch.js";
import { getWorktreeInfo } from "./utils/worktree.js";
/**
* Configuration options for indexing conversations.
*/
export interface IndexOptions {
  /** Absolute path to the project directory to index */
  projectPath: string;
  /** Optional: Index only a specific session ID instead of all sessions */
  sessionId?: string;
  /**
   * Whether to include thinking blocks in the index.
   * Thinking blocks can be large and are excluded by default.
   * @default false
   */
  includeThinking?: boolean;
  /**
   * Enable git integration to link commits to conversations.
   * Requires the project to be a git repository.
   * @default true
   */
  enableGitIntegration?: boolean;
  /**
   * Exclude MCP tool conversations from indexing.
   * - `false`: Index all conversations (default)
   * - `'self-only'`: Exclude only cccmemory MCP conversations (prevents self-referential loops)
   * - `'all-mcp'` or `true`: Exclude all MCP tool conversations
   * @default false
   */
  excludeMcpConversations?: boolean | 'self-only' | 'all-mcp';
  /**
   * List of specific MCP server names to exclude.
   * More granular than `excludeMcpConversations`.
   * @example ['cccmemory', 'filesystem']
   */
  excludeMcpServers?: string[];
  /**
   * Optional timestamp to skip unchanged conversation files.
   * Files with mtime older than this will be skipped.
   * Presumably milliseconds since the Unix epoch (`Ms` suffix) — confirm
   * against callers.
   */
  lastIndexedMs?: number;
}
/**
* Main orchestrator for conversation memory operations.
*
* Coordinates parsing, storage, extraction, and search across:
* - Conversation parsing from JSONL files
* - Decision, mistake, and requirement extraction
* - Git commit integration
* - Semantic search with embeddings
*/
export class ConversationMemory {
private sqliteManager: SQLiteManager;
private storage: ConversationStorage;
private parser: ConversationParser;
private decisionExtractor: DecisionExtractor;
private mistakeExtractor: MistakeExtractor;
private requirementsExtractor: RequirementsExtractor;
private methodologyExtractor: MethodologyExtractor;
private researchExtractor: ResearchExtractor;
private solutionPatternExtractor: SolutionPatternExtractor;
private semanticSearch: SemanticSearch;
  constructor() {
    // All components operate on the same SQLite database via the singleton manager.
    this.sqliteManager = getSQLiteManager();
    this.storage = new ConversationStorage(this.sqliteManager);
    // Enable caching by default for better performance
    // Cache up to 100 query results for 5 minutes
    this.storage.enableCache({ maxSize: 100, ttlMs: 300000 });
    // Parsing and extraction pipeline components.
    this.parser = new ConversationParser();
    this.decisionExtractor = new DecisionExtractor();
    this.mistakeExtractor = new MistakeExtractor();
    this.requirementsExtractor = new RequirementsExtractor();
    this.methodologyExtractor = new MethodologyExtractor();
    this.researchExtractor = new ResearchExtractor();
    this.solutionPatternExtractor = new SolutionPatternExtractor();
    // Semantic search runs against the same database handle.
    this.semanticSearch = new SemanticSearch(this.sqliteManager);
  }
/**
* Index conversations for a project.
*
* This is the main entry point for processing conversation history.
* It performs the following operations:
* 1. Parse conversation JSONL files from the project
* 2. Store conversations, messages, and tool interactions
* 3. Extract decisions, mistakes, and requirements
* 4. Link git commits (if enabled)
* 5. Generate semantic embeddings for search
*
* @param options - Configuration options for indexing
* @returns Result object containing:
* - `embeddings_generated`: Whether embeddings were successfully generated
* - `embedding_error`: Error message if embedding generation failed
* - `indexed_folders`: List of folders that were indexed
* - `database_path`: Path to the SQLite database
*
* @throws {Error} If project path doesn't exist or conversation files can't be parsed
*
* @example
* ```typescript
* const result = await memory.indexConversations({
* projectPath: '/Users/me/my-project',
* enableGitIntegration: true,
* excludeMcpConversations: 'self-only'
* });
*
* if (result.embeddings_generated) {
* console.error('Indexed folders:', result.indexed_folders);
* } else {
* console.warn('Embeddings failed:', result.embedding_error);
* }
* ```
*/
async indexConversations(options: IndexOptions): Promise<{
embeddings_generated: boolean;
embedding_error?: string;
indexed_folders?: string[];
database_path?: string;
}> {
const { canonicalPath, worktreePaths } = getWorktreeInfo(options.projectPath);
console.error("\n=== Indexing Conversations ===");
console.error(`Project: ${canonicalPath}`);
if (options.sessionId) {
console.error(`Session: ${options.sessionId} (single session mode)`);
} else {
console.error(`Mode: All sessions`);
}
if (worktreePaths.length > 1) {
console.error(`Worktrees: ${worktreePaths.join(", ")}`);
}
// Parse conversations
let parseResult = this.parser.parseProjects(
worktreePaths,
options.sessionId,
canonicalPath,
options.lastIndexedMs
);
// Filter MCP conversations if requested
if (options.excludeMcpConversations || options.excludeMcpServers) {
parseResult = this.filterMcpConversations(parseResult, options);
}
// Store basic entities (skip FTS rebuild for performance, will rebuild once at end)
const conversationIdMap = await this.storage.storeConversations(parseResult.conversations);
const messageIdMap = await this.storage.storeMessages(parseResult.messages, {
skipFtsRebuild: true,
conversationIdMap,
});
const toolUseIdMap = await this.storage.storeToolUses(parseResult.tool_uses, messageIdMap);
await this.storage.storeToolResults(parseResult.tool_results, messageIdMap, toolUseIdMap);
await this.storage.storeFileEdits(parseResult.file_edits, conversationIdMap, messageIdMap);
// Only store thinking blocks if explicitly enabled (default: false for privacy)
if (options.includeThinking === true) {
await this.storage.storeThinkingBlocks(parseResult.thinking_blocks, messageIdMap);
}
// Extract decisions
console.error("\n=== Extracting Decisions ===");
const decisions = this.decisionExtractor.extractDecisions(
parseResult.messages,
parseResult.thinking_blocks
);
const decisionIdMap = await this.storage.storeDecisions(decisions, {
skipFtsRebuild: true,
conversationIdMap,
messageIdMap,
});
// Rebuild FTS indexes once after all data is stored
this.storage.rebuildAllFts();
// Extract mistakes
console.error("\n=== Extracting Mistakes ===");
const mistakes = this.mistakeExtractor.extractMistakes(
parseResult.messages,
parseResult.tool_results
);
const mistakeIdMap = await this.storage.storeMistakes(mistakes, conversationIdMap, messageIdMap);
// Extract requirements and validations
console.error("\n=== Extracting Requirements ===");
const requirements = this.requirementsExtractor.extractRequirements(
parseResult.messages
);
await this.storage.storeRequirements(requirements, conversationIdMap, messageIdMap);
const validations = this.requirementsExtractor.extractValidations(
parseResult.tool_uses,
parseResult.tool_results,
parseResult.messages
);
await this.storage.storeValidations(validations, conversationIdMap);
// Extract methodologies (problem-solving approaches)
console.error("\n=== Extracting Methodologies ===");
const methodologies = this.methodologyExtractor.extractMethodologies(
parseResult.messages,
parseResult.tool_uses,
parseResult.tool_results
);
await this.storage.storeMethodologies(methodologies, conversationIdMap, messageIdMap);
// Extract research findings
console.error("\n=== Extracting Research Findings ===");
const findings = this.researchExtractor.extractFindings(
parseResult.messages,
parseResult.tool_uses,
parseResult.tool_results
);
await this.storage.storeResearchFindings(findings, conversationIdMap, messageIdMap);
// Extract solution patterns
console.error("\n=== Extracting Solution Patterns ===");
const patterns = this.solutionPatternExtractor.extractPatterns(
parseResult.messages,
parseResult.tool_uses,
parseResult.tool_results
);
await this.storage.storeSolutionPatterns(patterns, conversationIdMap, messageIdMap);
// Git integration
if (options.enableGitIntegration !== false) {
try {
console.error("\n=== Integrating Git History ===");
const gitIntegrator = new GitIntegrator(canonicalPath);
const commits = await gitIntegrator.linkCommitsToConversations(
parseResult.conversations,
parseResult.file_edits,
decisions
);
const projectId = this.storage.getProjectId(canonicalPath);
await this.storage.storeGitCommits(commits, projectId, conversationIdMap, messageIdMap);
console.error(`✓ Linked ${commits.length} git commits`);
} catch (error) {
console.error("⚠️ Git integration failed:", error);
console.error(" Conversations will be indexed without git commit links");
console.error(" This is normal if the project is not a git repository");
}
}
// Index for semantic search
console.error("\n=== Indexing for Semantic Search ===");
let embeddingError: string | undefined;
try {
const messagesForEmbedding = parseResult.messages
.map((msg) => {
const internalId = messageIdMap.get(msg.id);
if (!internalId || !msg.content) {
return null;
}
return { id: internalId, content: msg.content };
})
.filter((msg): msg is { id: number; content: string } => Boolean(msg));
const decisionsForEmbedding: Array<{
id: number;
decision_text: string;
rationale?: string;
context?: string | null;
}> = [];
for (const decision of decisions) {
const internalId = decisionIdMap.get(decision.id);
if (!internalId) {
continue;
}
decisionsForEmbedding.push({
id: internalId,
decision_text: decision.decision_text,
rationale: decision.rationale,
context: decision.context ?? null,
});
}
const mistakesForEmbedding: Array<{
id: number;
what_went_wrong: string;
correction?: string | null;
mistake_type: string;
}> = [];
for (const mistake of mistakes) {
const internalId = mistakeIdMap.get(mistake.id);
if (!internalId) {
continue;
}
mistakesForEmbedding.push({
id: internalId,
what_went_wrong: mistake.what_went_wrong,
correction: mistake.correction ?? null,
mistake_type: mistake.mistake_type,
});
}
await this.semanticSearch.indexMessages(messagesForEmbedding);
await this.semanticSearch.indexDecisions(decisionsForEmbedding);
await this.semanticSearch.indexMistakes(mistakesForEmbedding);
// Also index any decisions/mistakes in DB that are missing embeddings
// (catches items created before embeddings were available)
await this.semanticSearch.indexMissingDecisionEmbeddings();
await this.semanticSearch.indexMissingMistakeEmbeddings();
console.error("✓ Semantic indexing complete");
} catch (error) {
embeddingError = (error as Error).message;
console.error("⚠️ Semantic indexing failed:", error);
console.error(" Embeddings may not be available - falling back to full-text search");
console.error(" Install @xenova/transformers for semantic search: npm install @xenova/transformers");
// Don't throw - allow indexing to complete with FTS fallback
}
// Print stats
console.error("\n=== Indexing Complete ===");
const stats = this.storage.getStats();
console.error(`Conversations: ${stats.conversations.count}`);
console.error(`Messages: ${stats.messages.count}`);
console.error(`Decisions: ${stats.decisions.count}`);
console.error(`Mistakes: ${stats.mistakes.count}`);
console.error(`Git Commits: ${stats.git_commits.count}`);
// Return embedding status and indexing metadata
return {
embeddings_generated: !embeddingError,
embedding_error: embeddingError,
indexed_folders: parseResult.indexed_folders,
database_path: this.sqliteManager.getDbPath(),
};
}
/**
* Search conversations using natural language query.
*
* Uses semantic search with embeddings if available, otherwise falls back to full-text search.
*
* @param query - Natural language search query
* @param limit - Maximum number of results to return (default: 10)
* @returns Array of search results with messages, conversations, and similarity scores
*
* @example
* ```typescript
* const results = await memory.search('authentication bug fix', 5);
* results.forEach(r => {
* console.error(`${r.similarity}: ${r.snippet}`);
* });
* ```
*/
async search(query: string, limit: number = 10) {
return this.semanticSearch.searchConversations(query, limit);
}
/**
* Search for decisions using natural language query.
*
* Searches through extracted decisions to find relevant architectural choices and technical decisions.
*
* @param query - Natural language search query
* @param limit - Maximum number of results to return (default: 10)
* @returns Array of decision search results with similarity scores
*
* @example
* ```typescript
* const decisions = await memory.searchDecisions('database choice', 3);
* decisions.forEach(d => {
* console.error(`Decision: ${d.decision.decision_text}`);
* console.error(`Rationale: ${d.decision.rationale}`);
* });
* ```
*/
async searchDecisions(query: string, limit: number = 10) {
return this.semanticSearch.searchDecisions(query, limit);
}
/**
* Get the timeline of changes for a specific file.
*
* Returns all edits, commits, and related conversations for a file across its history.
*
* @param filePath - Path to the file (relative to project root)
* @returns Timeline of file changes with conversations and commits
*
* @example
* ```typescript
* const timeline = memory.getFileTimeline('src/index.ts');
* console.error(`${timeline.length} changes to this file`);
* ```
*/
getFileTimeline(filePath: string) {
return this.storage.getFileTimeline(filePath);
}
/**
* Get statistics about the indexed conversation data.
*
* @returns Object containing counts for conversations, messages, decisions, mistakes, and commits
*
* @example
* ```typescript
* const stats = memory.getStats();
* console.error(`Indexed ${stats.conversations.count} conversations`);
* console.error(`Extracted ${stats.decisions.count} decisions`);
* ```
*/
getStats() {
return this.storage.getStats();
}
/**
* Get the underlying storage instance for direct database access.
*
* Use with caution - prefer using the high-level methods when possible.
*
* @returns ConversationStorage instance
* @internal
*/
getStorage() {
return this.storage;
}
/**
* Get the semantic search instance for advanced search operations.
*
* @returns SemanticSearch instance
* @internal
*/
getSemanticSearch() {
return this.semanticSearch;
}
/**
* Filter MCP conversations from parse results.
*
* Implements the exclusion logic for MCP tool conversations to prevent
* self-referential loops and reduce noise in the index.
*
* Strategy: Filter at MESSAGE level, not conversation level.
* - Keep all conversations
* - Exclude only messages that invoke specified MCP tools and their responses
*/
private filterMcpConversations(result: ParseResult, options: IndexOptions): ParseResult {
// Determine which MCP servers to exclude
const serversToExclude = new Set<string>();
if (options.excludeMcpServers && options.excludeMcpServers.length > 0) {
// Explicit list of servers to exclude
options.excludeMcpServers.forEach(s => serversToExclude.add(s));
} else if (options.excludeMcpConversations === 'self-only') {
// Exclude only cccmemory server
serversToExclude.add('cccmemory');
} else if (options.excludeMcpConversations === 'all-mcp' || options.excludeMcpConversations === true) {
// Exclude all MCP tool uses - collect all server names from tool uses
for (const toolUse of result.tool_uses) {
if (toolUse.tool_name.startsWith('mcp__')) {
const parts = toolUse.tool_name.split('__');
if (parts.length >= 2) {
serversToExclude.add(parts[1]);
}
}
}
}
if (serversToExclude.size === 0) {
return result; // Nothing to filter
}
// Build set of excluded tool_use IDs (tools from excluded servers)
const excludedToolUseIds = new Set<string>();
for (const toolUse of result.tool_uses) {
if (toolUse.tool_name.startsWith('mcp__')) {
const parts = toolUse.tool_name.split('__');
if (parts.length >= 2 && serversToExclude.has(parts[1])) {
excludedToolUseIds.add(toolUse.id);
}
}
}
// Build set of excluded message IDs (messages containing excluded tool uses or their results)
const excludedMessageIds = new Set<string>();
// Exclude assistant messages that contain excluded tool uses
for (const toolUse of result.tool_uses) {
if (excludedToolUseIds.has(toolUse.id)) {
excludedMessageIds.add(toolUse.message_id);
}
}
// Exclude user messages that contain tool results for excluded tool uses
for (const toolResult of result.tool_results) {
if (excludedToolUseIds.has(toolResult.tool_use_id)) {
excludedMessageIds.add(toolResult.message_id);
}
}
if (excludedMessageIds.size > 0) {
console.error(`\n⚠️ Excluding ${excludedMessageIds.size} message(s) containing MCP tool calls from: ${Array.from(serversToExclude).join(', ')}`);
}
// Build set of remaining message IDs after filtering
const remainingMessageIds = new Set(
result.messages
.filter(m => !excludedMessageIds.has(m.id))
.map(m => m.id)
);
// Filter messages and related entities
// IMPORTANT: file_edits must also be filtered to avoid FK constraint violations
return {
conversations: result.conversations, // Keep ALL conversations
messages: result.messages.filter(m => !excludedMessageIds.has(m.id)),
tool_uses: result.tool_uses.filter(t => !excludedToolUseIds.has(t.id)),
tool_results: result.tool_results.filter(tr => !excludedToolUseIds.has(tr.tool_use_id)),
file_edits: result.file_edits.filter(fe => remainingMessageIds.has(fe.message_id)),
thinking_blocks: result.thinking_blocks.filter(tb => remainingMessageIds.has(tb.message_id)),
indexed_folders: result.indexed_folders, // Preserve folder metadata
};
}
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/edge-cases/comprehensive-edge-cases.test.ts | TypeScript | /**
* Comprehensive Edge Case Tests
*
* Tests for boundary conditions, special inputs, concurrent operations,
* and error recovery scenarios across all features.
*/
import Database from "better-sqlite3";
import { WorkingMemoryStore } from "../../memory/WorkingMemoryStore.js";
import { SessionHandoffStore } from "../../handoff/SessionHandoffStore.js";
import { ContextInjector } from "../../context/ContextInjector.js";
import { ConversationParser } from "../../parsers/ConversationParser.js";
import { DecisionExtractor } from "../../parsers/DecisionExtractor.js";
import { MistakeExtractor } from "../../parsers/MistakeExtractor.js";
import { RequirementsExtractor } from "../../parsers/RequirementsExtractor.js";
describe("Edge Cases - Comprehensive Tests", () => {
  // Shared fixtures: a fresh in-memory DB per test keeps every case isolated.
  let db: Database.Database;
  let memoryStore: WorkingMemoryStore;
  let handoffStore: SessionHandoffStore;
  let contextInjector: ContextInjector;
  beforeEach(() => {
    db = new Database(":memory:");
    // Create required schema
    // Minimal mirror of the production tables these stores touch, including
    // the FTS5 virtual table used by recallRelevant.
    db.exec(`
      CREATE TABLE IF NOT EXISTS working_memory (
        id TEXT PRIMARY KEY,
        key TEXT NOT NULL,
        value TEXT NOT NULL,
        context TEXT,
        tags TEXT,
        session_id TEXT,
        project_path TEXT NOT NULL,
        created_at INTEGER NOT NULL,
        updated_at INTEGER NOT NULL,
        expires_at INTEGER,
        embedding BLOB,
        UNIQUE(project_path, key)
      );
      CREATE INDEX IF NOT EXISTS idx_wm_project ON working_memory(project_path);
      CREATE INDEX IF NOT EXISTS idx_wm_project_key ON working_memory(project_path, key);
      CREATE VIRTUAL TABLE IF NOT EXISTS working_memory_fts USING fts5(
        id UNINDEXED,
        key,
        value,
        context
      );
      CREATE TABLE IF NOT EXISTS session_handoffs (
        id TEXT PRIMARY KEY,
        from_session_id TEXT NOT NULL,
        project_path TEXT NOT NULL,
        created_at INTEGER NOT NULL,
        handoff_data TEXT NOT NULL,
        resumed_by_session_id TEXT,
        resumed_at INTEGER
      );
      CREATE TABLE IF NOT EXISTS decisions (
        id TEXT PRIMARY KEY,
        message_id TEXT NOT NULL,
        decision_text TEXT NOT NULL,
        rationale TEXT,
        context TEXT,
        timestamp INTEGER NOT NULL
      );
      CREATE TABLE IF NOT EXISTS tool_uses (
        id TEXT PRIMARY KEY,
        message_id TEXT NOT NULL,
        tool_name TEXT NOT NULL,
        parameters TEXT,
        result TEXT,
        timestamp INTEGER NOT NULL
      );
      CREATE TABLE IF NOT EXISTS messages (
        id TEXT PRIMARY KEY,
        conversation_id TEXT NOT NULL,
        role TEXT NOT NULL,
        content TEXT,
        timestamp INTEGER NOT NULL
      );
      CREATE TABLE IF NOT EXISTS conversations (
        id TEXT PRIMARY KEY,
        session_id TEXT NOT NULL,
        project_path TEXT NOT NULL,
        created_at INTEGER NOT NULL
      );
    `);
    memoryStore = new WorkingMemoryStore(db);
    handoffStore = new SessionHandoffStore(db);
    contextInjector = new ContextInjector(db);
  });
  afterEach(() => {
    db.close();
  });
  // Round-trip tests: values with non-ASCII / control characters must come
  // back byte-identical from remember/recall.
  describe("Unicode and Special Characters", () => {
    const projectPath = "/test/project";
    it("should handle emoji in memory values", () => {
      const result = memoryStore.remember({
        key: "emoji_test",
        value: "Using 🚀 rocket science 🎉 for optimization",
        projectPath,
      });
      expect(result.value).toBe("Using 🚀 rocket science 🎉 for optimization");
      const recalled = memoryStore.recall("emoji_test", projectPath);
      expect(recalled?.value).toBe("Using 🚀 rocket science 🎉 for optimization");
    });
    it("should handle CJK characters in memory values", () => {
      memoryStore.remember({
        key: "cjk_test",
        value: "使用中文进行测试 日本語テスト 한국어 테스트",
        projectPath,
      });
      const recalled = memoryStore.recall("cjk_test", projectPath);
      expect(recalled?.value).toBe("使用中文进行测试 日本語テスト 한국어 테스트");
    });
    it("should handle special characters in keys", () => {
      const specialKeys = [
        "key-with-dashes",
        "key_with_underscores",
        "key.with.dots",
        "key:with:colons",
      ];
      for (const key of specialKeys) {
        memoryStore.remember({ key, value: "test", projectPath });
        const recalled = memoryStore.recall(key, projectPath);
        expect(recalled?.key).toBe(key);
      }
    });
    it("should handle newlines and tabs in values", () => {
      const multilineValue = `Line 1\nLine 2\n\tTabbed line\r\nWindows line`;
      memoryStore.remember({
        key: "multiline",
        value: multilineValue,
        projectPath,
      });
      const recalled = memoryStore.recall("multiline", projectPath);
      expect(recalled?.value).toBe(multilineValue);
    });
    it("should handle very long strings", () => {
      const longValue = "A".repeat(10000);
      memoryStore.remember({
        key: "long_value",
        value: longValue,
        projectPath,
      });
      const recalled = memoryStore.recall("long_value", projectPath);
      expect(recalled?.value.length).toBe(10000);
    });
  });
  // These cases verify the store uses parameterized statements: hostile
  // strings must be stored verbatim, never executed.
  describe("SQL Injection Prevention", () => {
    const projectPath = "/test/project";
    it("should handle SQL injection attempts in keys", () => {
      const dangerousKeys = [
        "test'; DROP TABLE working_memory; --",
        "test\"; DELETE FROM working_memory WHERE \"1\"=\"1",
        "'; INSERT INTO working_memory VALUES('hacked');--",
        "test OR 1=1",
      ];
      for (const key of dangerousKeys) {
        memoryStore.remember({ key, value: "test", projectPath });
        const recalled = memoryStore.recall(key, projectPath);
        expect(recalled?.key).toBe(key);
      }
      // Database should still have correct count
      const items = memoryStore.list(projectPath);
      expect(items.length).toBe(dangerousKeys.length);
    });
    it("should handle SQL injection attempts in values", () => {
      memoryStore.remember({
        key: "safe_key",
        value: "'; DROP TABLE working_memory; --",
        projectPath,
      });
      const recalled = memoryStore.recall("safe_key", projectPath);
      expect(recalled?.value).toBe("'; DROP TABLE working_memory; --");
    });
    it("should handle SQL injection in context and tags", () => {
      memoryStore.remember({
        key: "sql_test",
        value: "test",
        context: "'; DELETE FROM working_memory; --",
        tags: ["'; DROP TABLE; --", "normal_tag"],
        projectPath,
      });
      const recalled = memoryStore.recall("sql_test", projectPath);
      expect(recalled?.context).toContain("DELETE");
      expect(recalled?.tags).toContain("'; DROP TABLE; --");
    });
  });
  describe("Boundary Value Tests", () => {
    const projectPath = "/test/project";
    it("should handle empty string key", () => {
      // Empty key might be rejected or handled specially
      expect(() => {
        memoryStore.remember({ key: "", value: "test", projectPath });
      }).not.toThrow();
    });
    it("should handle empty string value", () => {
      memoryStore.remember({ key: "empty_value", value: "", projectPath });
      const recalled = memoryStore.recall("empty_value", projectPath);
      expect(recalled?.value).toBe("");
    });
    it("should handle TTL of 0 seconds", () => {
      memoryStore.remember({
        key: "zero_ttl",
        value: "test",
        ttl: 0,
        projectPath,
      });
      // Should still exist (0 means no expiration, not immediate expiration)
      // or immediately expire based on implementation
      const recalled = memoryStore.recall("zero_ttl", projectPath);
      // Either behavior is acceptable
      expect(recalled === null || recalled?.value === "test").toBe(true);
    });
    it("should handle negative TTL (already expired)", () => {
      memoryStore.remember({
        key: "negative_ttl",
        value: "test",
        ttl: -1,
        projectPath,
      });
      const recalled = memoryStore.recall("negative_ttl", projectPath);
      expect(recalled).toBeNull();
    });
    it("should handle very large TTL", () => {
      memoryStore.remember({
        key: "large_ttl",
        value: "test",
        // Divided by 1000 so the ms conversion cannot overflow the safe range
        ttl: Number.MAX_SAFE_INTEGER / 1000,
        projectPath,
      });
      const recalled = memoryStore.recall("large_ttl", projectPath);
      expect(recalled?.value).toBe("test");
    });
    it("should handle limit of 0 (no limit)", () => {
      memoryStore.remember({ key: "item1", value: "v1", projectPath });
      memoryStore.remember({ key: "item2", value: "v2", projectPath });
      // Limit of 0 means no limit - returns all items
      const items = memoryStore.list(projectPath, { limit: 0 });
      expect(items.length).toBe(2);
    });
    it("should handle offset larger than data size", () => {
      memoryStore.remember({ key: "item1", value: "v1", projectPath });
      const items = memoryStore.list(projectPath, { offset: 100 });
      expect(items.length).toBe(0);
    });
    it("should handle negative offset gracefully", () => {
      memoryStore.remember({ key: "item1", value: "v1", projectPath });
      // Negative offset should be treated as 0 or error
      expect(() => {
        memoryStore.list(projectPath, { offset: -1 });
      }).not.toThrow();
    });
  });
  // better-sqlite3 is synchronous, so "concurrency" here means rapid
  // sequential operations — checking upsert/read consistency under churn.
  describe("Concurrent Operations", () => {
    const projectPath = "/test/project";
    it("should handle rapid sequential writes to same key", () => {
      for (let i = 0; i < 100; i++) {
        memoryStore.remember({
          key: "rapid_write",
          value: `value_${i}`,
          projectPath,
        });
      }
      const recalled = memoryStore.recall("rapid_write", projectPath);
      expect(recalled?.value).toBe("value_99");
    });
    it("should handle many unique keys", () => {
      for (let i = 0; i < 1000; i++) {
        memoryStore.remember({
          key: `key_${i}`,
          value: `value_${i}`,
          projectPath,
        });
      }
      expect(memoryStore.count(projectPath)).toBe(1000);
    });
    it("should handle interleaved read/write operations", () => {
      for (let i = 0; i < 50; i++) {
        memoryStore.remember({
          key: `item_${i}`,
          value: `value_${i}`,
          projectPath,
        });
        if (i > 0) {
          const prev = memoryStore.recall(`item_${i - 1}`, projectPath);
          expect(prev?.value).toBe(`value_${i - 1}`);
        }
      }
    });
  });
  describe("Session Handoff Edge Cases", () => {
    const projectPath = "/test/project";
    it("should handle handoff with no data", () => {
      const handoff = handoffStore.prepareHandoff({
        sessionId: "session-1",
        projectPath,
        include: [],
      });
      expect(handoff.contextSummary).toBe("Empty handoff.");
      expect(handoff.decisions).toEqual([]);
      expect(handoff.workingMemory).toEqual([]);
    });
    it("should handle resuming already resumed handoff", () => {
      const original = handoffStore.prepareHandoff({
        sessionId: "session-1",
        projectPath,
      });
      // Resume once
      handoffStore.resumeFromHandoff({
        handoffId: original.id,
        projectPath,
        newSessionId: "session-2",
      });
      // Try to resume again - should work (allows re-resume)
      const secondResume = handoffStore.resumeFromHandoff({
        handoffId: original.id,
        projectPath,
        newSessionId: "session-3",
      });
      expect(secondResume).not.toBeNull();
    });
    it("should handle resuming with non-existent handoff ID", () => {
      const result = handoffStore.resumeFromHandoff({
        handoffId: "nonexistent-id",
        projectPath,
        newSessionId: "session-1",
      });
      expect(result).toBeNull();
    });
    it("should handle multiple handoffs from same session", async () => {
      const first = handoffStore.prepareHandoff({
        sessionId: "session-1",
        projectPath,
      });
      // Short pause so the two handoffs get distinguishable timestamps/ids
      await new Promise((resolve) => setTimeout(resolve, 10));
      const second = handoffStore.prepareHandoff({
        sessionId: "session-1",
        projectPath,
      });
      expect(first.id).not.toBe(second.id);
      const handoffs = handoffStore.listHandoffs(projectPath);
      expect(handoffs.length).toBe(2);
    });
  });
  describe("Context Injector Edge Cases", () => {
    const projectPath = "/test/project";
    it("should handle very small token budget", async () => {
      // Add many items
      for (let i = 0; i < 10; i++) {
        memoryStore.remember({
          key: `item_${i}`,
          value: "A moderately long value for testing purposes",
          projectPath,
        });
      }
      const context = await contextInjector.getRelevantContext({
        projectPath,
        maxTokens: 10,
        sources: ["memory"],
      });
      expect(context.tokenEstimate).toBeLessThanOrEqual(10);
    });
    it("should handle empty query", async () => {
      memoryStore.remember({
        key: "test",
        value: "test value",
        projectPath,
      });
      const context = await contextInjector.getRelevantContext({
        query: "",
        projectPath,
        sources: ["memory"],
      });
      // Should still return items based on other criteria
      expect(context.memory.length).toBeGreaterThanOrEqual(0);
    });
    it("should handle project with no data", async () => {
      const context = await contextInjector.getRelevantContext({
        projectPath: "/nonexistent/project",
        sources: ["memory", "decisions", "handoffs"],
      });
      expect(context.memory).toEqual([]);
      expect(context.decisions).toEqual([]);
      expect(context.handoff).toBeUndefined();
    });
  });
  describe("Parser Edge Cases", () => {
    let parser: ConversationParser;
    let decisionExtractor: DecisionExtractor;
    let mistakeExtractor: MistakeExtractor;
    let requirementsExtractor: RequirementsExtractor;
    beforeEach(() => {
      parser = new ConversationParser();
      decisionExtractor = new DecisionExtractor();
      mistakeExtractor = new MistakeExtractor();
      requirementsExtractor = new RequirementsExtractor();
    });
    it("should handle nonexistent project path", () => {
      const result = parser.parseProject("/nonexistent/path/123456789");
      expect(result.conversations).toEqual([]);
      expect(result.messages).toEqual([]);
    });
    it("should extract no decisions from empty messages", () => {
      const decisions = decisionExtractor.extractDecisions([], []);
      expect(decisions).toEqual([]);
    });
    it("should extract no mistakes from empty inputs", () => {
      const mistakes = mistakeExtractor.extractMistakes([], []);
      expect(mistakes).toEqual([]);
    });
    it("should extract no requirements from empty messages", () => {
      const requirements = requirementsExtractor.extractRequirements([]);
      expect(requirements).toEqual([]);
    });
    it("should handle messages with null content", () => {
      // Deliberately cast null/undefined through unknown to simulate
      // malformed JSONL records reaching the extractor.
      const messages = [
        {
          id: "1",
          conversation_id: "c1",
          role: "user",
          content: null as unknown as string,
          timestamp: Date.now(),
          message_type: "human",
          is_sidechain: false,
          metadata: {},
        },
        {
          id: "2",
          conversation_id: "c1",
          role: "assistant",
          content: undefined as unknown as string,
          timestamp: Date.now(),
          message_type: "assistant",
          is_sidechain: false,
          metadata: {},
        },
      ];
      // Should not throw
      expect(() => {
        decisionExtractor.extractDecisions(messages, []);
      }).not.toThrow();
    });
  });
  // FTS5 treats * OR AND NOT etc. as query syntax; the store must sanitize
  // or escape queries so they never raise a syntax error.
  describe("Memory FTS Edge Cases", () => {
    const projectPath = "/test/project";
    it("should handle FTS special characters", () => {
      memoryStore.remember({
        key: "fts_test",
        value: "Testing * OR AND NOT + - ~ ( ) quotes\"",
        projectPath,
      });
      // recallRelevant uses FTS
      const results = memoryStore.recallRelevant({
        query: "Testing",
        projectPath,
      });
      expect(results.length).toBeGreaterThan(0);
    });
    it("should handle empty FTS query", () => {
      memoryStore.remember({
        key: "test",
        value: "test value",
        projectPath,
      });
      const results = memoryStore.recallRelevant({
        query: "",
        projectPath,
      });
      // Empty query might return all items or none
      expect(Array.isArray(results)).toBe(true);
    });
    it("should handle query with only FTS operators", () => {
      memoryStore.remember({
        key: "test",
        value: "test value",
        projectPath,
      });
      const results = memoryStore.recallRelevant({
        query: "OR AND NOT",
        projectPath,
      });
      expect(Array.isArray(results)).toBe(true);
    });
  });
  describe("Project Path Edge Cases", () => {
    it("should handle project paths with spaces", () => {
      const projectPath = "/path/with spaces/project";
      memoryStore.remember({
        key: "test",
        value: "test value",
        projectPath,
      });
      const recalled = memoryStore.recall("test", projectPath);
      expect(recalled?.value).toBe("test value");
    });
    it("should handle project paths with special characters", () => {
      const projectPath = "/path-with_special.chars/project";
      memoryStore.remember({
        key: "test",
        value: "test value",
        projectPath,
      });
      const recalled = memoryStore.recall("test", projectPath);
      expect(recalled?.value).toBe("test value");
    });
    it("should handle Windows-style paths", () => {
      const projectPath = "C:\\Users\\test\\project";
      memoryStore.remember({
        key: "test",
        value: "test value",
        projectPath,
      });
      const recalled = memoryStore.recall("test", projectPath);
      expect(recalled?.value).toBe("test value");
    });
    it("should distinguish between similar project paths", () => {
      // Guards against prefix-matching bugs ("/project1" vs "/project1-copy")
      memoryStore.remember({
        key: "test",
        value: "value1",
        projectPath: "/project1",
      });
      memoryStore.remember({
        key: "test",
        value: "value2",
        projectPath: "/project1-copy",
      });
      const result1 = memoryStore.recall("test", "/project1");
      const result2 = memoryStore.recall("test", "/project1-copy");
      expect(result1?.value).toBe("value1");
      expect(result2?.value).toBe("value2");
    });
  });
  describe("Tag Handling Edge Cases", () => {
    const projectPath = "/test/project";
    it("should handle empty tags array", () => {
      memoryStore.remember({
        key: "no_tags",
        value: "test",
        tags: [],
        projectPath,
      });
      const recalled = memoryStore.recall("no_tags", projectPath);
      expect(recalled?.tags).toEqual([]);
    });
    it("should handle tags with special characters", () => {
      const specialTags = ["tag-with-dash", "tag_underscore", "tag.dot", "tag:colon"];
      memoryStore.remember({
        key: "special_tags",
        value: "test",
        tags: specialTags,
        projectPath,
      });
      const recalled = memoryStore.recall("special_tags", projectPath);
      expect(recalled?.tags).toEqual(specialTags);
    });
    it("should handle duplicate tags", () => {
      memoryStore.remember({
        key: "dup_tags",
        value: "test",
        tags: ["tag1", "tag1", "tag2"],
        projectPath,
      });
      const recalled = memoryStore.recall("dup_tags", projectPath);
      // May or may not deduplicate - just verify it doesn't break
      expect(recalled?.tags).toContain("tag1");
      expect(recalled?.tags).toContain("tag2");
    });
    it("should filter by non-existent tag", () => {
      memoryStore.remember({
        key: "item",
        value: "test",
        tags: ["existing"],
        projectPath,
      });
      const items = memoryStore.list(projectPath, { tags: ["nonexistent"] });
      expect(items.length).toBe(0);
    });
  });
  describe("Timestamp Edge Cases", () => {
    const projectPath = "/test/project";
    it("should handle items created at exact same time", () => {
      // Store multiple items as fast as possible
      for (let i = 0; i < 5; i++) {
        memoryStore.remember({
          key: `same_time_${i}`,
          value: `value_${i}`,
          projectPath,
        });
      }
      const items = memoryStore.list(projectPath);
      expect(items.length).toBe(5);
    });
    it("should preserve creation timestamp on update", () => {
      const original = memoryStore.remember({
        key: "timestamp_test",
        value: "original",
        projectPath,
      });
      // Wait briefly
      const createdAt = original.createdAt;
      // Update the item
      memoryStore.remember({
        key: "timestamp_test",
        value: "updated",
        projectPath,
      });
      const updated = memoryStore.recall("timestamp_test", projectPath);
      expect(updated?.createdAt).toBe(createdAt);
      expect(updated?.updatedAt).toBeGreaterThanOrEqual(createdAt);
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/helpers/test-helpers.ts | TypeScript | /**
* Test Helper Utilities
* Shared utilities for all test files
*/
import { rmSync } from 'fs';
import { tmpdir } from 'os';
import { join } from 'path';
import { getSQLiteManager } from '../../storage/SQLiteManager';
import type { SQLiteManager } from '../../storage/SQLiteManager';
/**
* Create isolated test database
*/
export function createTestDatabase(): SQLiteManager {
const testDbPath = join('/tmp', `test-db-${Date.now()}-${Math.random()}.db`);
return getSQLiteManager(testDbPath);
}
/**
 * Close the shared SQLite manager and optionally delete the database file.
 * All failures are swallowed — cleanup must never fail a test run.
 */
export function cleanupTestDatabase(dbPath?: string): void {
  try {
    getSQLiteManager().close();
    if (dbPath) rmSync(dbPath, { force: true });
  } catch {
    // Best-effort by design: ignore any close/delete error.
  }
}
/**
 * Normalize an object for snapshot testing by replacing dynamic values with
 * stable placeholders: timestamp-like keys -> "[TIMESTAMP]", id-like keys ->
 * "[ID]", absolute-path string values under path-like keys -> "[PATH]".
 *
 * Works on any JSON-serializable input; the walk is performed by the
 * JSON.stringify replacer, so nested objects and arrays are covered too.
 */
export function normalizeForSnapshot<T>(obj: T): T {
  const scrub = (key: string, value: unknown): unknown => {
    // Order matters: timestamp keys win over id/path keys.
    const looksLikeTimestamp =
      key.includes('timestamp') ||
      key.includes('time') ||
      key === 'created_at' ||
      key === 'updated_at';
    if (looksLikeTimestamp) return '[TIMESTAMP]';
    if (key === 'id' || key.endsWith('_id')) return '[ID]';
    // Only absolute string paths are masked; relative paths pass through.
    if (key.includes('path') && typeof value === 'string' && value.startsWith('/')) {
      return '[PATH]';
    }
    return value;
  };
  return JSON.parse(JSON.stringify(obj, scrub));
}
/**
 * Poll `condition` every `interval` ms until it returns true or `timeout` ms
 * have elapsed, in which case the returned promise rejects.
 *
 * @param condition  sync or async predicate; polling stops on first truthy result
 * @param timeout    overall budget in milliseconds (default 5000)
 * @param interval   delay between polls in milliseconds (default 100)
 * @throws Error when the deadline passes without the condition holding
 */
export async function waitFor(
  condition: () => boolean | Promise<boolean>,
  timeout: number = 5000,
  interval: number = 100
): Promise<void> {
  const deadline = Date.now() + timeout;
  while (Date.now() < deadline) {
    const satisfied = await condition();
    if (satisfied) return;
    await new Promise(resolve => setTimeout(resolve, interval));
  }
  throw new Error(`Timeout waiting for condition after ${timeout}ms`);
}
/**
 * Replace console.log/warn/error with jest mocks for the duration of a test.
 *
 * @returns a restore function that puts the original console methods back
 */
export function suppressConsole(): () => void {
  const saved = {
    log: console.log,
    warn: console.warn,
    error: console.error,
  };
  console.log = jest.fn();
  console.warn = jest.fn();
  console.error = jest.fn();
  return () => {
    console.log = saved.log;
    console.warn = saved.warn;
    console.error = saved.error;
  };
}
/**
 * Build a conversation fixture with sensible defaults; any field can be
 * overridden per test via `overrides` (overrides win over defaults).
 */
export function createTestConversation(overrides?: Partial<any>): any {
  const now = Date.now();
  const oneHourAgo = now - 3600000;
  const base = {
    id: 'test-conv-' + Math.random(),
    project_path: '/tmp/test-project',
    first_message_at: oneHourAgo,
    last_message_at: now,
    message_count: 10,
    git_branch: 'main',
    claude_version: '3.5',
    metadata: {},
    created_at: oneHourAgo,
    updated_at: now,
  };
  return { ...base, ...overrides };
}
/**
 * Build a message fixture with sensible defaults; any field can be
 * overridden per test via `overrides` (overrides win over defaults).
 */
export function createTestMessage(overrides?: Partial<any>): any {
  const base = {
    id: 'test-msg-' + Math.random(),
    conversation_id: 'test-conv-123',
    message_type: 'text',
    role: 'user',
    content: 'Test message content',
    timestamp: Date.now(),
    is_sidechain: false,
    metadata: {},
  };
  return { ...base, ...overrides };
}
/**
 * Assert that `table` contains exactly `expectedCount` rows.
 *
 * Relies on the jest global `expect`, so it is only callable inside a test.
 *
 * @param db            better-sqlite3-style handle exposing prepare().get()
 * @param table         table name; interpolated into the SQL text because
 *                      identifiers cannot be bound as parameters — pass only
 *                      trusted, test-controlled names
 * @param expectedCount exact row count expected
 */
export function assertTableCount(
  db: any,
  table: string,
  expectedCount: number
): void {
  const result = db
    .prepare(`SELECT COUNT(*) as count FROM ${table}`)
    .get() as { count: number };
  expect(result.count).toBe(expectedCount);
}
/**
 * Aggregate export so tests can import one `TestHelpers` object instead of
 * the individual named functions above.
 */
export const TestHelpers = {
  createTestDatabase,
  cleanupTestDatabase,
  normalizeForSnapshot,
  waitFor,
  suppressConsole,
  createTestConversation,
  createTestMessage,
  assertTableCount,
};
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/integration/end-to-end.test.ts | TypeScript | /**
* Integration tests - End-to-end workflows
*/
import { ConversationMemory } from '../../ConversationMemory.js';
import { resetSQLiteManager } from '../../storage/SQLiteManager.js';
import { mkdirSync, rmSync } from 'fs';
import { join } from 'path';
import { tmpdir } from 'os';
describe('End-to-End Integration', () => {
  let memory: ConversationMemory;
  let testDir: string;

  beforeEach(() => {
    memory = new ConversationMemory();
    // Create temporary test directory (unique per test via the timestamp)
    testDir = join(tmpdir(), `claude-memory-test-${Date.now()}`);
    mkdirSync(testDir, { recursive: true });
  });

  afterEach(() => {
    // Drop the shared SQLite singleton so the next test starts from scratch.
    resetSQLiteManager();
    // Clean up test directory
    try {
      rmSync(testDir, { recursive: true, force: true });
    } catch (_error) {
      // Ignore cleanup errors in tests
    }
  });

  describe('Conversation Indexing Workflow', () => {
    it('should handle empty project directory gracefully', async () => {
      // This test verifies the system handles missing conversation files
      const result = await memory.indexConversations({
        projectPath: testDir,
        includeThinking: false,
        enableGitIntegration: false,
      });
      // Should complete successfully even with no conversations
      // Note: embeddings_generated can be false on platforms where embedding
      // providers are unavailable (e.g., macOS ARM64 with ONNX issues)
      expect(typeof result.embeddings_generated).toBe('boolean');
      // If embeddings failed, there should be an error message
      if (!result.embeddings_generated) {
        expect(typeof result.embedding_error).toBe('string');
      }
    });

    it('should collect statistics after indexing', () => {
      // Only the shape of the stats object is pinned here, not the counts.
      const stats = memory.getStats();
      expect(stats).toHaveProperty('conversations');
      expect(stats).toHaveProperty('messages');
      expect(stats).toHaveProperty('decisions');
      expect(stats).toHaveProperty('mistakes');
      expect(stats).toHaveProperty('git_commits');
    });
  });

  // Skip search tests - embeddings are optional and may not work in test environment
  describe.skip('Search Workflow', () => {
    it('should return empty results for new database', async () => {
      const results = await memory.search('test query', 10);
      expect(Array.isArray(results)).toBe(true);
      expect(results.length).toBe(0);
    });

    it('should handle search with no results gracefully', async () => {
      const results = await memory.searchDecisions('nonexistent query', 10);
      expect(Array.isArray(results)).toBe(true);
      expect(results.length).toBe(0);
    });
  });

  describe('File Timeline Workflow', () => {
    it('should return empty timeline for non-indexed files', () => {
      // Timeline lookups for unknown files must return empty arrays, not throw.
      const timeline = memory.getFileTimeline('src/nonexistent.ts');
      expect(timeline).toHaveProperty('file_path');
      expect(timeline).toHaveProperty('edits');
      expect(timeline).toHaveProperty('commits');
      expect(timeline).toHaveProperty('decisions');
      expect(Array.isArray(timeline.edits)).toBe(true);
      expect(Array.isArray(timeline.commits)).toBe(true);
      expect(Array.isArray(timeline.decisions)).toBe(true);
    });

    it('should handle file paths with special characters', () => {
      // Should not throw or cause SQL errors
      expect(() => {
        memory.getFileTimeline('src/file%with_special"chars.ts');
      }).not.toThrow();
    });
  });

  describe('Component Integration', () => {
    it('should have working storage instance', () => {
      const storage = memory.getStorage();
      expect(storage).toBeDefined();
      const stats = storage.getStats();
      expect(stats).toHaveProperty('conversations');
    });

    it('should have working semantic search instance', () => {
      const search = memory.getSemanticSearch();
      expect(search).toBeDefined();
      const stats = search.getStats();
      expect(stats).toHaveProperty('total_embeddings');
      expect(stats).toHaveProperty('vec_enabled');
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/integration/migration.test.ts | TypeScript | /**
* Integration tests for end-to-end migration workflow
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { tmpdir } from "os";
import { join } from "path";
import { mkdirSync, writeFileSync, rmSync, existsSync, readFileSync } from "fs";
import { ProjectMigration } from "../../utils/ProjectMigration.js";
import { getSQLiteManager, resetSQLiteManager } from "../../storage/SQLiteManager.js";
// Insert a project row (canonical and display paths identical) and return
// its numeric rowid for use as a foreign key in conversation fixtures.
const insertProject = (db: ReturnType<ReturnType<typeof getSQLiteManager>["getDatabase"]>, projectPath: string) => {
  const timestamp = Date.now();
  const inserted = db
    .prepare(
      "INSERT INTO projects (canonical_path, display_path, created_at, updated_at) VALUES (?, ?, ?, ?)"
    )
    .run(projectPath, projectPath, timestamp, timestamp);
  return Number(inserted.lastInsertRowid);
};
// Insert a conversation row for a project and return its numeric rowid.
// lastMessageAt doubles as first_message_at to keep fixtures minimal;
// source_type is fixed to "claude-code" for all test data.
const insertConversation = (
  db: ReturnType<ReturnType<typeof getSQLiteManager>["getDatabase"]>,
  projectId: number,
  projectPath: string,
  externalId: string,
  lastMessageAt: number,
  messageCount = 1
) => {
  const now = Date.now();
  const result = db
    .prepare(
      `
      INSERT INTO conversations
      (project_id, project_path, source_type, external_id, first_message_at, last_message_at, message_count, created_at, updated_at)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
    `
    )
    .run(projectId, projectPath, "claude-code", externalId, lastMessageAt, lastMessageAt, messageCount, now, now);
  return Number(result.lastInsertRowid);
};
// Insert a minimal user message ('content' body, empty metadata) attached to
// an existing conversation row.
const insertMessage = (
  db: ReturnType<ReturnType<typeof getSQLiteManager>["getDatabase"]>,
  conversationId: number,
  externalId: string,
  timestamp: number
) => {
  db.prepare(
    `
    INSERT INTO messages
    (conversation_id, external_id, message_type, role, content, timestamp, metadata)
    VALUES (?, ?, 'user', 'user', 'content', ?, '{}')
  `
  ).run(conversationId, externalId, timestamp);
};
describe("Migration Integration", () => {
let testDir: string;
let projectsDir: string;
let migration: ProjectMigration;
beforeEach(() => {
testDir = join(tmpdir(), `migration-integration-${Date.now()}`);
projectsDir = join(testDir, ".claude", "projects");
mkdirSync(projectsDir, { recursive: true });
process.env.HOME = testDir;
process.env.USERPROFILE = testDir;
const db = getSQLiteManager();
migration = new ProjectMigration(db, projectsDir);
});
afterEach(() => {
resetSQLiteManager();
if (existsSync(testDir)) {
rmSync(testDir, { recursive: true, force: true });
}
});
it("should migrate full conversation history end-to-end", async () => {
// Setup: Create realistic source data
const sourceFolder = join(projectsDir, "-Users-test-myproject-old");
mkdirSync(sourceFolder, { recursive: true });
// Create 5 JSONL conversation files
const sessions = ['session1', 'session2', 'session3', 'session4', 'session5'];
sessions.forEach(session => {
const content = [
'{"type":"user","uuid":"u1","sessionId":"s1","timestamp":"2024-01-01T10:00:00Z","message":{"role":"user","content":"Hello"}}',
'{"type":"assistant","uuid":"a1","parentUuid":"u1","sessionId":"s1","timestamp":"2024-01-01T10:00:01Z","message":{"role":"assistant","content":"Hi there"}}'
].join('\n');
writeFileSync(join(sourceFolder, `${session}.jsonl`), content);
});
const targetFolder = join(projectsDir, "-Users-test-myproject-new");
const oldPath = "/Users/test/myproject-old";
const newPath = "/Users/test/myproject-new";
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, oldPath);
const conv1 = insertConversation(db, projectId, oldPath, "conv1", 2000, 10);
insertConversation(db, projectId, oldPath, "conv2", 4000, 15);
insertConversation(db, projectId, oldPath, "conv3", 6000, 20);
insertMessage(db, conv1, "m1", 1000);
insertMessage(db, conv1, "m2", 1001);
db.prepare(
`
INSERT INTO git_commits
(project_id, hash, message, author, timestamp, branch, metadata)
VALUES (?, ?, ?, ?, ?, ?, ?)
`
).run(projectId, "abc123", "Initial commit", "Test User", 1000, "main", "{}");
db.prepare(
`
INSERT INTO git_commits
(project_id, hash, message, author, timestamp, branch, metadata)
VALUES (?, ?, ?, ?, ?, ?, ?)
`
).run(projectId, "def456", "Add feature", "Test User", 3000, "main", "{}");
// Test: Execute full migration
const result = await migration.executeMigration(
sourceFolder,
targetFolder,
oldPath,
newPath,
false
);
// Verify: All files copied
expect(result.success).toBe(true);
expect(result.filesCopied).toBe(5);
sessions.forEach(session => {
expect(existsSync(join(targetFolder, `${session}.jsonl`))).toBe(true);
});
// Verify: Database updated
const projectRow = db
.prepare("SELECT canonical_path FROM projects WHERE id = ?")
.get(projectId) as { canonical_path: string };
expect(projectRow.canonical_path).toBe(newPath);
const conversations = db
.prepare("SELECT project_path FROM conversations WHERE project_id = ?")
.all(projectId) as Array<{ project_path: string }>;
expect(conversations).toHaveLength(3);
conversations.forEach(conv => {
expect(conv.project_path).toBe(newPath);
});
const messages = db
.prepare("SELECT COUNT(*) as count FROM messages")
.get() as { count: number };
expect(messages.count).toBe(2);
const commits = db
.prepare("SELECT COUNT(*) as count FROM git_commits WHERE project_id = ?")
.get(projectId) as { count: number };
expect(commits.count).toBe(2);
// Verify: Original preserved
expect(existsSync(join(sourceFolder, "session1.jsonl"))).toBe(true);
});
it("should handle legacy folder naming", async () => {
// Setup: Legacy folder with dots replaced by dashes
const legacyFolder = join(projectsDir, "-Users-test-my-project-com-old");
mkdirSync(legacyFolder, { recursive: true });
writeFileSync(join(legacyFolder, "session.jsonl"), '{}');
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, "/Users/test/my.project.com/old");
insertConversation(db, projectId, "/Users/test/my.project.com/old", "c1", 1000);
// Test: Discover should find legacy folder
const results = await migration.discoverOldFolders("/Users/test/my.project.com/new");
// Verify: Found despite naming difference
expect(results.length).toBeGreaterThan(0);
expect(results[0].folderName).toBe("-Users-test-my-project-com-old");
});
it("should work with empty target location", async () => {
// Setup: Source with data, target doesn't exist yet
const sourceFolder = join(projectsDir, "-Users-test-source");
mkdirSync(sourceFolder, { recursive: true });
writeFileSync(join(sourceFolder, "session.jsonl"), '{}');
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, "/old");
insertConversation(db, projectId, "/old", "c1", 1000);
const targetFolder = join(projectsDir, "-Users-test-target");
// Target doesn't exist yet
// Test: Should create target and migrate
await migration.executeMigration(sourceFolder, targetFolder, "/old", "/new", false);
// Verify: Target created with data
expect(existsSync(targetFolder)).toBe(true);
expect(existsSync(join(targetFolder, "session.jsonl"))).toBe(true);
});
it("should abort on conflicts", async () => {
// Setup: Both source and target have data
const sourceFolder = join(projectsDir, "-Users-test-source");
const targetFolder = join(projectsDir, "-Users-test-target");
mkdirSync(sourceFolder, { recursive: true });
mkdirSync(targetFolder, { recursive: true });
writeFileSync(join(sourceFolder, "source.jsonl"), '{}');
writeFileSync(join(targetFolder, "target.jsonl"), '{}');
// Test: Should detect conflict and abort
const validation = migration.validateMigration(sourceFolder, targetFolder);
expect(validation.valid).toBe(false);
// Verify: executeMigration should reject
await expect(
migration.executeMigration(sourceFolder, targetFolder, "/old", "/new", false)
).rejects.toThrow(/already has/i);
});
it("should preserve all data integrity after migration", async () => {
// Setup: Create data with specific content to verify
const sourceFolder = join(projectsDir, "-Users-test-source");
mkdirSync(sourceFolder, { recursive: true });
const jsonlContent = '{"type":"user","uuid":"unique123","sessionId":"s1","message":{"role":"user","content":"Test message"}}';
writeFileSync(join(sourceFolder, "session.jsonl"), jsonlContent);
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, "/old/path");
const convId = insertConversation(db, projectId, "/old/path", "c1", 12345);
db.prepare(
`
UPDATE conversations
SET metadata = ?
WHERE id = ?
`
).run('{"key":"value"}', convId);
insertMessage(db, convId, "m1", 12345);
const targetFolder = join(projectsDir, "-Users-test-target");
// Test: Migrate
await migration.executeMigration(sourceFolder, targetFolder, "/old/path", "/new/path", false);
// Verify: JSONL content exactly preserved
const copiedContent = readFileSync(join(targetFolder, "session.jsonl"), "utf-8");
expect(copiedContent).toBe(jsonlContent);
// Verify: Database content preserved (except project_path)
const conv = db.prepare("SELECT * FROM conversations WHERE id = ?").get(convId) as {
id: number;
project_path: string;
metadata: string;
};
expect(conv.project_path).toBe("/new/path"); // Updated
expect(conv.metadata).toBe('{"key":"value"}'); // Preserved
const msg = db
.prepare("SELECT * FROM messages WHERE external_id = 'm1' AND conversation_id = ?")
.get(convId) as {
id: number;
content: string;
timestamp: number;
};
expect(msg.content).toBe("content");
expect(msg.timestamp).toBe(12345);
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/regression/baseline-functionality.test.ts | TypeScript | /**
* Regression Test Suite - Baseline Functionality
*
* These tests capture the CURRENT behavior as the baseline.
* Any changes that break these tests require explicit review.
*
* Purpose:
* - Prevent unintended behavior changes
* - Document expected behavior
* - Catch regressions early
*/
import { ConversationMemory } from '../../ConversationMemory';
import { ToolHandlers } from '../../tools/ToolHandlers';
import { getSQLiteManager, resetSQLiteManager } from '../../storage/SQLiteManager';
import type { SQLiteManager } from '../../storage/SQLiteManager';
import { rmSync } from 'fs';
import { join } from 'path';
// Embedding-backed tests are gated off in CI and on macOS ARM64, where the
// ONNX runtime used by TransformersEmbeddings has known compatibility issues.
const isCI = !!process.env.CI || !!process.env.GITHUB_ACTIONS;
const isMacOSArm64 = process.platform === 'darwin' && process.arch === 'arm64';
const skipTransformers = isCI || isMacOSArm64;
describe('Regression Tests - Baseline Functionality', () => {
  let testDbPath: string;

  beforeEach(() => {
    // Use unique database for each test.
    // NOTE(review): '/tmp' is POSIX-only; the other suites use os.tmpdir() —
    // confirm whether Windows support matters for this suite.
    testDbPath = join('/tmp', `test-regression-${Date.now()}.db`);
  });

  afterEach(() => {
    // Cleanup test database; errors are swallowed so teardown never fails a test.
    try {
      resetSQLiteManager();
      rmSync(testDbPath, { force: true });
    } catch {
      // Ignore cleanup errors
    }
  });
  describe('ConversationMemory - Core Functions', () => {
    beforeEach(() => {
      // Fresh singleton per test so no state leaks between cases.
      resetSQLiteManager();
    });

    it('should handle empty project path gracefully', async () => {
      const memory = new ConversationMemory();
      // Empty directory should not throw
      const result = await memory.indexConversations({
        projectPath: '/tmp/nonexistent-test-dir',
        sessionId: undefined,
      });
      // Baseline behavior: returns result with embeddings info
      expect(typeof result.embeddings_generated).toBe('boolean');
      // Should have stats after indexing (may have existing data from project)
      const stats = memory.getStats();
      expect(typeof stats.conversations.count).toBe('number');
      expect(typeof stats.messages.count).toBe('number');
    });

    it('should return consistent stats structure', () => {
      const memory = new ConversationMemory();
      const stats = memory.getStats();
      // Baseline: stats always has this structure
      expect(stats).toHaveProperty('conversations');
      expect(stats).toHaveProperty('messages');
      expect(stats).toHaveProperty('decisions');
      expect(stats).toHaveProperty('mistakes');
      expect(typeof stats.conversations.count).toBe('number');
      expect(typeof stats.messages.count).toBe('number');
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('should handle search on empty database', async () => {
      const memory = new ConversationMemory();
      // Search with no indexed conversations
      const results = await memory.search('test query', 10);
      // Baseline: returns empty array, doesn't throw
      expect(Array.isArray(results)).toBe(true);
      expect(results.length).toBe(0);
    });
  });
  describe('ToolHandlers - All 15 Tools', () => {
    let db: SQLiteManager;
    let memory: ConversationMemory;
    let handlers: ToolHandlers;

    beforeEach(() => {
      // Fresh manager bound to this test's database file before each case.
      resetSQLiteManager();
      db = getSQLiteManager({ dbPath: testDbPath });
      memory = new ConversationMemory();
      handlers = new ToolHandlers(memory, db);
    });

    it('indexConversations - baseline response structure', async () => {
      const result = await handlers.indexConversations({
        project_path: '/tmp/nonexistent',
      });
      // Baseline structure
      expect(result).toHaveProperty('success');
      expect(result).toHaveProperty('project_path');
      expect(result).toHaveProperty('stats');
      expect(result).toHaveProperty('message');
      expect(typeof result.success).toBe('boolean');
      expect(typeof result.message).toBe('string');
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('searchConversations - baseline response structure', async () => {
      const result = await handlers.searchConversations({
        query: 'test query',
        limit: 10,
      });
      // Baseline structure
      expect(result).toHaveProperty('query');
      expect(result).toHaveProperty('results');
      expect(result).toHaveProperty('total_found');
      expect(result.query).toBe('test query');
      expect(Array.isArray(result.results)).toBe(true);
      expect(typeof result.total_found).toBe('number');
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('getDecisions - baseline response structure', async () => {
      const result = await handlers.getDecisions({
        query: 'database',
        limit: 10,
      });
      // Baseline structure
      expect(result).toHaveProperty('query');
      expect(result).toHaveProperty('decisions');
      expect(result).toHaveProperty('total_found');
      expect(Array.isArray(result.decisions)).toBe(true);
    });

    it('checkBeforeModify - baseline response structure', async () => {
      const result = await handlers.checkBeforeModify({
        file_path: 'src/test.ts',
      });
      // Baseline structure
      expect(result).toHaveProperty('file_path');
      expect(result).toHaveProperty('warning');
      expect(result).toHaveProperty('recent_changes');
      expect(result).toHaveProperty('related_decisions');
      expect(result).toHaveProperty('mistakes_to_avoid');
      expect(result.file_path).toBe('src/test.ts');
    });

    it('getFileEvolution - baseline response structure', async () => {
      const result = await handlers.getFileEvolution({
        file_path: 'src/test.ts',
        include_decisions: true,
        include_commits: true,
      });
      // Baseline structure
      expect(result).toHaveProperty('file_path');
      expect(result).toHaveProperty('total_edits');
      expect(result).toHaveProperty('timeline');
      expect(result).toHaveProperty('has_more');
      expect(Array.isArray(result.timeline)).toBe(true);
      expect(typeof result.has_more).toBe('boolean');
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('searchMistakes - baseline response structure', async () => {
      const result = await handlers.searchMistakes({
        query: 'test',
        limit: 10,
      });
      // Baseline structure
      expect(result).toHaveProperty('query');
      expect(result).toHaveProperty('mistakes');
      expect(result).toHaveProperty('total_found');
      expect(Array.isArray(result.mistakes)).toBe(true);
    });

    it('getRequirements - baseline response structure', async () => {
      const result = await handlers.getRequirements({
        component: 'test-component',
      });
      // Baseline structure
      expect(result).toHaveProperty('component');
      expect(result).toHaveProperty('requirements');
      expect(result).toHaveProperty('total_found');
      expect(Array.isArray(result.requirements)).toBe(true);
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('findSimilarSessions - baseline response structure', async () => {
      const result = await handlers.findSimilarSessions({
        query: 'authentication',
        limit: 5,
      });
      // Baseline structure
      expect(result).toHaveProperty('query');
      expect(result).toHaveProperty('sessions');
      expect(result).toHaveProperty('total_found');
      expect(Array.isArray(result.sessions)).toBe(true);
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('recallAndApply - baseline response structure', async () => {
      const result = await handlers.recallAndApply({
        query: 'authentication',
        context_types: ['conversations', 'decisions'],
        limit: 5,
      });
      // Baseline structure (new in v0.6.0)
      expect(result).toHaveProperty('query');
      expect(result).toHaveProperty('context_summary');
      expect(result).toHaveProperty('recalled_context');
      expect(result).toHaveProperty('application_suggestions');
      expect(result).toHaveProperty('total_items_found');
      expect(typeof result.context_summary).toBe('string');
      expect(Array.isArray(result.application_suggestions)).toBe(true);
      expect(typeof result.total_items_found).toBe('number');
    });

    it('discoverOldConversations - baseline response structure', async () => {
      const result = await handlers.discoverOldConversations({
        current_project_path: '/tmp/test-project',
      });
      // Baseline structure
      expect(result).toHaveProperty('success');
      expect(result).toHaveProperty('current_project_path');
      expect(result).toHaveProperty('candidates');
      expect(result).toHaveProperty('message');
      expect(Array.isArray(result.candidates)).toBe(true);
    });

    it.skip('migrateProject - dry run baseline behavior', async () => {
      // Skipped: Requires real source folder with conversation files
      // Migration validation intentionally rejects nonexistent paths
      // This is correct behavior - test would need actual test data
    });
  });
describe('Data Integrity - Critical Workflows', () => {
it('should maintain consistent behavior across index cycles', async () => {
const memory = new ConversationMemory();
const emptyPath = '/tmp/empty-test-' + Date.now();
// First index
await memory.indexConversations({
projectPath: emptyPath,
});
const stats1 = memory.getStats();
// Second index (should be idempotent)
await memory.indexConversations({
projectPath: emptyPath,
});
const stats2 = memory.getStats();
// Should produce same results
expect(stats1.conversations.count).toBe(stats2.conversations.count);
expect(stats1.messages.count).toBe(stats2.messages.count);
});
});
  describe('Edge Cases - Must Handle Gracefully', () => {
    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('should handle empty query in search', async () => {
      const memory = new ConversationMemory();
      // Empty query should still work
      const results = await memory.search('', 10);
      expect(Array.isArray(results)).toBe(true);
      // Empty query returns no results
      expect(results.length).toBe(0);
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('should handle very large limit in search', async () => {
      const memory = new ConversationMemory();
      // Large limit should not crash
      const results = await memory.search('test', 10000);
      expect(Array.isArray(results)).toBe(true);
      // Should not throw or hang
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('should handle special characters in queries', async () => {
      const memory = new ConversationMemory();
      // Special characters should not break search; the last entry doubles as a
      // SQL-injection smoke test (the literal must be treated as plain text).
      const queries = [
        'emoji 🎉 test',
        'unicode 中文 test',
        'symbols !@#$%^&*()',
        'sql injection\'; DROP TABLE conversations; --',
      ];
      for (const query of queries) {
        const results = await memory.search(query, 10);
        expect(Array.isArray(results)).toBe(true);
      }
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('should handle concurrent searches', async () => {
      const memory = new ConversationMemory();
      // Multiple concurrent searches should not conflict
      const promises = [
        memory.search('query1', 10),
        memory.search('query2', 10),
        memory.search('query3', 10),
      ];
      const results = await Promise.all(promises);
      expect(results).toHaveLength(3);
      expect(results.every(r => Array.isArray(r))).toBe(true);
    });
  });
  describe('Error Handling - Graceful Degradation', () => {
    it('should handle invalid project paths gracefully', async () => {
      const memory = new ConversationMemory();
      const invalidPaths = [
        '/invalid/path/that/does/not/exist',
        '',
        '/dev/null',
      ];
      for (const path of invalidPaths) {
        // Should not throw
        await memory.indexConversations({
          projectPath: path,
        });
        const stats = memory.getStats();
        expect(typeof stats.conversations.count).toBe('number');
      }
    });

    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('should handle missing tool handler arguments gracefully', async () => {
      resetSQLiteManager();
      const db = getSQLiteManager({ dbPath: testDbPath });
      const memory = new ConversationMemory();
      const handlers = new ToolHandlers(memory, db);
      // Missing required arguments should be handled gracefully
      // Note: Some handlers return default values instead of throwing
      const searchResult = await handlers.searchConversations({} as any);
      expect(searchResult).toHaveProperty('query');
      expect(searchResult).toHaveProperty('results');
      // checkBeforeModify, by contrast, is expected to reject without args.
      await expect(
        handlers.checkBeforeModify({} as any)
      ).rejects.toThrow();
    });
  });
  describe('Performance Baselines', () => {
    // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
    (skipTransformers ? it.skip : it)('search should complete in reasonable time', async () => {
      const memory = new ConversationMemory();
      const start = Date.now();
      await memory.search('test query', 10);
      const duration = Date.now() - start;
      // Should complete in under 5 seconds even on empty db
      expect(duration).toBeLessThan(5000);
    });

    it('indexConversations should complete in reasonable time', async () => {
      const memory = new ConversationMemory();
      const start = Date.now();
      await memory.indexConversations({
        projectPath: '/tmp/nonexistent',
      });
      const duration = Date.now() - start;
      // Should complete in reasonable time for empty directory
      // Note: First run may include embedding model initialization which can be slow
      // on some platforms (especially macOS ARM64 with ONNX runtime)
      expect(duration).toBeLessThan(30000);
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/setup.ts | TypeScript | /**
* Jest test setup file
* Runs before all tests
*/
import { jest, beforeEach, afterEach } from '@jest/globals';
import { rmSync } from 'fs';
import { join } from 'path';
import { tmpdir } from 'os';
import { resetSQLiteManager } from '../storage/SQLiteManager.js';
// Set test environment
process.env.NODE_ENV = 'test';

// Per-test database path; assigned fresh in beforeEach, removed in afterEach.
let testDbPath = '';

beforeEach(() => {
  // Unique temp-file path per test so suites never share SQLite state.
  testDbPath = join(
    tmpdir(),
    `cccmemory-test-${Date.now()}-${Math.random().toString(16).slice(2)}.db`
  );
  // Point the app at the per-test database, then drop any cached manager so
  // the next getSQLiteManager() call picks up the new path.
  process.env.CCCMEMORY_DB_PATH = testDbPath;
  resetSQLiteManager();
});

// Extend Jest matchers if needed
// expect.extend({ ... });

// Mock console methods to reduce noise in tests
global.console = {
  ...console,
  // Suppress console.log in tests unless explicitly needed
  log: jest.fn(),
  // Suppress error/warn to keep test output clean
  error: jest.fn(),
  warn: jest.fn(),
};

// Clean up after each test
afterEach(() => {
  jest.clearAllMocks();
  resetSQLiteManager();
  // Remove the database plus SQLite's WAL/SHM sidecar files.
  rmSync(testDbPath, { force: true });
  rmSync(`${testDbPath}-wal`, { force: true });
  rmSync(`${testDbPath}-shm`, { force: true });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/CachedConversationStorage.test.ts | TypeScript | /**
* Unit tests for ConversationStorage with caching
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { SQLiteManager } from "../../storage/SQLiteManager.js";
import { ConversationStorage } from "../../storage/ConversationStorage.js";
import type { Conversation, FileEdit, Message } from "../../parsers/ConversationParser.js";
import type { Decision } from "../../parsers/DecisionExtractor.js";
import type { GitCommit } from "../../parsers/GitIntegrator.js";
import * as fs from "fs";
import * as path from "path";
import * as os from "os";
describe("ConversationStorage with Caching", () => {
let dbPath: string;
let db: SQLiteManager;
let storage: ConversationStorage;
let conversationIdMap: Map<string, number>;
let messageIdMap: Map<string, number>;
beforeEach(async () => {
  // Create a unique temp database per test. Date.now() alone can collide
  // when parallel Jest workers start within the same millisecond, so a
  // random suffix is appended (same pattern as the global setup.ts path).
  const suffix = Math.random().toString(16).slice(2);
  dbPath = path.join(os.tmpdir(), `test-cached-storage-${Date.now()}-${suffix}.db`);
  db = new SQLiteManager({ dbPath });
  storage = new ConversationStorage(db);

  // Create test data - store in order of foreign-key dependencies:
  // conversations -> messages -> (file_edits, decisions, commits).
  const conversations: Conversation[] = [
    {
      id: "conv1",
      project_path: "/test/project",
      first_message_at: 1000,
      last_message_at: 2000,
      message_count: 10,
      git_branch: "main",
      claude_version: "3.5",
      metadata: {},
      created_at: 1000,
      updated_at: 2000,
    },
  ];

  // Store conversations first (foreign key dependency for messages)
  conversationIdMap = await storage.storeConversations(conversations);

  // Now store messages (foreign key dependency for file_edits)
  const messages: Message[] = [
    {
      id: "msg1",
      conversation_id: "conv1",
      message_type: "text",
      role: "user",
      content: "test message",
      timestamp: 1400,
      is_sidechain: false,
      metadata: {},
    },
  ];
  messageIdMap = await storage.storeMessages(messages, { conversationIdMap });

  const fileEdits: FileEdit[] = [
    {
      id: "edit1",
      conversation_id: "conv1",
      message_id: "msg1",
      file_path: "/test/file.ts",
      snapshot_timestamp: 1500,
      metadata: {},
    },
  ];

  const decisions: Decision[] = [
    {
      id: "dec1",
      conversation_id: "conv1",
      message_id: "msg1",
      decision_text: "Use TypeScript",
      rationale: "Better type safety",
      alternatives_considered: ["JavaScript"],
      rejected_reasons: { JavaScript: "No types" },
      context: "language",
      related_files: ["/test/file.ts"],
      related_commits: [],
      timestamp: 1500,
    },
  ];

  const commits: GitCommit[] = [
    {
      hash: "abc123",
      message: "Initial commit",
      author: "Test User",
      timestamp: 1600,
      files_changed: ["/test/file.ts"],
      conversation_id: "conv1",
      metadata: {},
    },
  ];

  // Note: conversations already stored above
  await storage.storeFileEdits(fileEdits, conversationIdMap, messageIdMap);
  await storage.storeDecisions(decisions, { conversationIdMap, messageIdMap });
  const projectId = storage.getProjectId("/test/project");
  await storage.storeGitCommits(commits, projectId, conversationIdMap, messageIdMap);
});
afterEach(() => {
  db.close();
  // Remove the database plus SQLite "-wal"/"-shm" sidecar files
  // (presumably present when the manager enables WAL mode — setup.ts
  // cleans the same trio for the shared manager). rmSync with force:true
  // tolerates missing paths, avoiding the existsSync check-then-act race.
  for (const ext of ["", "-wal", "-shm"]) {
    fs.rmSync(`${dbPath}${ext}`, { force: true });
  }
});
// Basic cache lifecycle: enable/disable toggling and stats visibility.
describe("Cache Integration", () => {
  it("should enable caching", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });
    expect(storage.isCacheEnabled()).toBe(true);
  });

  it("should disable caching", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });
    storage.disableCache();
    expect(storage.isCacheEnabled()).toBe(false);
  });

  it("should return cache statistics when enabled", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });
    const stats = storage.getCacheStats();
    expect(stats).toBeDefined();
    expect(stats).not.toBeNull();
    if (stats) {
      // A freshly enabled cache is empty and has recorded no traffic.
      expect(stats.size).toBe(0);
      expect(stats.maxSize).toBe(100);
      expect(stats.hits).toBe(0);
      expect(stats.misses).toBe(0);
    }
  });

  it("should return null stats when caching disabled", () => {
    const stats = storage.getCacheStats();
    expect(stats).toBeNull();
  });
});
// Read-through caching of getConversation: miss on first read, hit on
// repeat, and invalidation when the underlying row is rewritten.
describe("Cached getConversation", () => {
  it("should cache conversation lookups", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // First call - cache miss
    const conv1 = storage.getConversation("conv1");
    expect(conv1).not.toBeNull();
    expect(conv1?.id).toBe("conv1");

    const stats1 = storage.getCacheStats();
    expect(stats1?.misses).toBe(1);
    expect(stats1?.hits).toBe(0);

    // Second call - cache hit
    const conv2 = storage.getConversation("conv1");
    expect(conv2).toEqual(conv1);

    const stats2 = storage.getCacheStats();
    expect(stats2?.misses).toBe(1);
    expect(stats2?.hits).toBe(1);
  });

  it("should work without cache", () => {
    // Cache disabled by default
    const conv = storage.getConversation("conv1");
    expect(conv).not.toBeNull();
    expect(conv?.id).toBe("conv1");

    const stats = storage.getCacheStats();
    expect(stats).toBeNull();
  });

  it("should invalidate cache on update", async () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // Cache the conversation (fixture message_count is 10)
    const conv1 = storage.getConversation("conv1");
    expect(conv1?.message_count).toBe(10);

    // Update conversation
    await storage.storeConversations([
      {
        id: "conv1",
        project_path: "/test/project",
        first_message_at: 1000,
        last_message_at: 3000,
        message_count: 20, // Changed
        git_branch: "main",
        claude_version: "3.5",
        metadata: {},
        created_at: 1000,
        updated_at: 3000,
      },
    ]);

    // Should get fresh data, not the stale cached row
    const conv2 = storage.getConversation("conv1");
    expect(conv2?.message_count).toBe(20);
  });
});
// getFileTimeline fans out into three sub-queries (edits, decisions,
// commits), so one cold call produces four cache misses in total.
describe("Cached getFileTimeline", () => {
  it("should cache file timeline queries", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // First call - cache miss (timeline + edits + decisions + commits = 4 misses)
    const timeline1 = storage.getFileTimeline("/test/file.ts");
    expect(timeline1.edits.length).toBe(1);
    expect(timeline1.decisions.length).toBe(1);
    expect(timeline1.commits.length).toBe(1);

    const stats1 = storage.getCacheStats();
    expect(stats1?.misses).toBe(4); // timeline, edits, decisions, commits all miss

    // Second call - cache hit
    const timeline2 = storage.getFileTimeline("/test/file.ts");
    expect(timeline2).toEqual(timeline1);

    const stats2 = storage.getCacheStats();
    expect(stats2?.hits).toBe(1);
  });

  it("should invalidate timeline cache on file edit", async () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // Cache the timeline (fixture contains a single edit)
    const timeline1 = storage.getFileTimeline("/test/file.ts");
    expect(timeline1.edits.length).toBe(1);

    // Add message for new edit (file_edits has a FK on messages)
    const newMessageIdMap = await storage.storeMessages([
      {
        id: "msg2",
        conversation_id: "conv1",
        message_type: "text",
        role: "assistant",
        content: "edited file",
        timestamp: 1650,
        is_sidechain: false,
        metadata: {},
      },
    ], { conversationIdMap });

    // Merge the new message id into the existing map so the edit resolves
    const mergedMessageIdMap = new Map(messageIdMap);
    for (const [key, value] of newMessageIdMap) {
      mergedMessageIdMap.set(key, value);
    }

    // Add new file edit
    await storage.storeFileEdits([
      {
        id: "edit2",
        conversation_id: "conv1",
        message_id: "msg2",
        file_path: "/test/file.ts",
        snapshot_timestamp: 1700,
        metadata: {},
      },
    ], conversationIdMap, mergedMessageIdMap);

    // Should get fresh data reflecting the second edit
    const timeline2 = storage.getFileTimeline("/test/file.ts");
    expect(timeline2.edits.length).toBe(2);
  });
});
// Miss-then-hit behavior for the per-file edits query.
describe("Cached getFileEdits", () => {
  it("should cache file edits queries", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // First call - cache miss
    const edits1 = storage.getFileEdits("/test/file.ts");
    expect(edits1.length).toBe(1);

    const stats1 = storage.getCacheStats();
    expect(stats1?.misses).toBe(1);

    // Second call - cache hit
    const edits2 = storage.getFileEdits("/test/file.ts");
    expect(edits2).toEqual(edits1);

    const stats2 = storage.getCacheStats();
    expect(stats2?.hits).toBe(1);
  });
});
// Miss-then-hit behavior for the per-file decisions query.
describe("Cached getDecisionsForFile", () => {
  it("should cache decisions for file queries", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // First call - cache miss
    const decisions1 = storage.getDecisionsForFile("/test/file.ts");
    expect(decisions1.length).toBe(1);

    // Second call - cache hit
    const decisions2 = storage.getDecisionsForFile("/test/file.ts");
    expect(decisions2).toEqual(decisions1);

    const stats = storage.getCacheStats();
    expect(stats?.hits).toBe(1);
    expect(stats?.misses).toBe(1);
  });
});
// Miss-then-hit behavior for the per-file commits query.
describe("Cached getCommitsForFile", () => {
  it("should cache commits for file queries", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // First call - cache miss
    const commits1 = storage.getCommitsForFile("/test/file.ts");
    expect(commits1.length).toBe(1);

    // Second call - cache hit
    const commits2 = storage.getCommitsForFile("/test/file.ts");
    expect(commits2).toEqual(commits1);

    const stats = storage.getCacheStats();
    expect(stats?.hits).toBe(1);
    expect(stats?.misses).toBe(1);
  });
});
// Hit-rate accounting over repeated queries, and LRU-style eviction
// when the configured maxSize is exceeded.
describe("Cache Performance", () => {
  it("should improve performance on repeated queries", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // Warm up cache with 10 iterations of both query shapes
    for (let i = 0; i < 10; i++) {
      storage.getConversation("conv1");
      storage.getFileTimeline("/test/file.ts");
    }

    const stats = storage.getCacheStats();
    // First call of each method results in misses (conv + timeline with its 3 sub-calls = 5 misses)
    // Next 9 calls each = 18 hits (both methods cached)
    expect(stats?.misses).toBe(5); // conversation (1) + timeline (4: timeline, edits, decisions, commits)
    expect(stats?.hits).toBe(18);
    expect(stats?.hitRate).toBeCloseTo(0.78, 1); // 18 / (18 + 5) = 0.78
  });

  it("should respect cache size limits", () => {
    storage.enableCache({ maxSize: 2, ttlMs: 60000 });

    // Fill cache beyond capacity
    storage.getConversation("conv1"); // Entry 1
    storage.getFileTimeline("/test/file.ts"); // Entry 2
    storage.getFileEdits("/test/file.ts"); // Entry 3, evicts entry 1

    const stats = storage.getCacheStats();
    expect(stats?.size).toBeLessThanOrEqual(2);
    expect(stats?.evictions).toBeGreaterThan(0);
  });
});
// Write paths must evict stale entries; clearCache resets both entries
// and accumulated statistics.
describe("Cache Invalidation", () => {
  it("should invalidate all related caches on storeConversations", async () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // Cache some queries
    storage.getConversation("conv1");

    // Update conversation
    await storage.storeConversations([
      {
        id: "conv1",
        project_path: "/test/project",
        first_message_at: 1000,
        last_message_at: 4000,
        message_count: 30,
        git_branch: "main",
        claude_version: "3.5",
        metadata: {},
        created_at: 1000,
        updated_at: 4000,
      },
    ]);

    // Next query should fetch fresh data
    const conv = storage.getConversation("conv1");
    expect(conv?.message_count).toBe(30);
  });

  it("should clear cache on clearCache call", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // Cache some queries
    storage.getConversation("conv1");
    storage.getFileTimeline("/test/file.ts");

    const stats1 = storage.getCacheStats();
    expect(stats1?.size).toBeGreaterThan(0);

    // Clear cache
    storage.clearCache();

    const stats2 = storage.getCacheStats();
    expect(stats2?.size).toBe(0);
    expect(stats2?.hits).toBe(0); // Stats should be reset too
  });
});
// Re-calling enableCache swaps in a fresh cache instance: new limits
// take effect and previously cached entries are discarded.
describe("Cache Configuration", () => {
  it("should allow reconfiguring cache", () => {
    storage.enableCache({ maxSize: 10, ttlMs: 1000 });
    let stats = storage.getCacheStats();
    expect(stats?.maxSize).toBe(10);

    // Reconfigure
    storage.enableCache({ maxSize: 50, ttlMs: 5000 });
    stats = storage.getCacheStats();
    expect(stats?.maxSize).toBe(50);
  });

  it("should clear cache on reconfigure (new behavior)", () => {
    storage.enableCache({ maxSize: 100, ttlMs: 60000 });

    // Cache a query
    storage.getConversation("conv1");
    const stats1 = storage.getCacheStats();
    expect(stats1?.size).toBe(1);

    // Reconfigure - creates new cache instance (clears old entries)
    storage.enableCache({ maxSize: 100, ttlMs: 30000 });
    const stats2 = storage.getCacheStats();
    expect(stats2?.size).toBe(0); // New cache is empty

    // Next access will miss and repopulate cache
    storage.getConversation("conv1");
    const stats3 = storage.getCacheStats();
    expect(stats3?.misses).toBe(1);
    expect(stats3?.size).toBe(1);
  });
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/CodeAnalyzer.test.ts | TypeScript | import { describe, it, expect } from "@jest/globals";
import { mkdtempSync, mkdirSync, writeFileSync, rmSync } from "fs";
import { join } from "path";
import { tmpdir } from "os";
import { CodeAnalyzer } from "../../documentation/CodeAnalyzer.js";
describe("CodeAnalyzer", () => {
  it("collects code files and applies filters", async () => {
    // Scratch project layout: real sources plus folders that must be ignored.
    const root = mkdtempSync(join(tmpdir(), "cccmemory-code-"));
    try {
      for (const dir of ["src", join("node_modules", "dep"), "dist"]) {
        mkdirSync(join(root, dir), { recursive: true });
      }
      writeFileSync(join(root, "README.md"), "# Readme");
      writeFileSync(join(root, "src", "index.ts"), "export const value = 1;");
      writeFileSync(join(root, "src", "notes.txt"), "ignore");
      writeFileSync(join(root, "node_modules", "dep", "index.js"), "ignore");
      writeFileSync(join(root, "dist", "bundle.js"), "ignore");

      const analyzer = new CodeAnalyzer();

      // Unfiltered: only recognized files outside ignored directories survive.
      const unfiltered = await analyzer.analyze(root);
      const unfilteredPaths = unfiltered.files.map((file) => file.path).sort();
      expect(unfilteredPaths).toEqual(["README.md", "src/index.ts"]);

      // Path filter narrows the result to the requested subtree.
      const scoped = await analyzer.analyze(root, "src");
      const scopedPaths = scoped.files.map((file) => file.path);
      expect(scopedPaths).toEqual(["src/index.ts"]);
    } finally {
      rmSync(root, { recursive: true, force: true });
    }
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/CodexConversationParser.test.ts | TypeScript | /**
* Unit tests for CodexConversationParser
*/
import { CodexConversationParser } from "../../parsers/CodexConversationParser.js";
import { mkdirSync, writeFileSync, rmSync, existsSync } from "fs";
import { join } from "path";
import { tmpdir } from "os";
describe("CodexConversationParser", () => {
let testCodexPath: string;
let parser: CodexConversationParser;
beforeEach(() => {
  // Create a unique temporary directory for the test. Date.now() alone can
  // collide when parallel Jest workers start within the same millisecond,
  // so append a random suffix (same pattern as the global setup.ts path).
  const suffix = Math.random().toString(16).slice(2);
  testCodexPath = join(tmpdir(), `codex-test-${Date.now()}-${suffix}`);
  mkdirSync(testCodexPath, { recursive: true });
  parser = new CodexConversationParser();
});
afterEach(() => {
  // Cleanup temporary directory. rmSync with force:true is already a no-op
  // for missing paths, so the previous existsSync guard was redundant and
  // introduced a needless check-then-act race.
  rmSync(testCodexPath, { recursive: true, force: true });
});
// Exercises CodexConversationParser.parseSession against synthetic JSONL
// session files laid out in the real Codex directory shape:
// <root>/sessions/YYYY/MM/DD/rollout-<timestamp>-<uuid>.jsonl
describe("parseSession", () => {
  it("should throw error if sessions directory does not exist", () => {
    expect(() => parser.parseSession("/nonexistent/path")).toThrow(
      "Codex sessions directory not found"
    );
  });

  it("should return empty result for empty sessions directory", () => {
    const sessionsDir = join(testCodexPath, "sessions");
    mkdirSync(sessionsDir, { recursive: true });

    const result = parser.parseSession(testCodexPath);

    expect(result.conversations).toHaveLength(0);
    expect(result.messages).toHaveLength(0);
    expect(result.tool_uses).toHaveLength(0);
    expect(result.tool_results).toHaveLength(0);
  });

  it("should parse a simple Codex session file", () => {
    // Create session directory structure: sessions/2025/01/17/
    const sessionDir = join(testCodexPath, "sessions", "2025", "01", "17");
    mkdirSync(sessionDir, { recursive: true });

    // Create a session file with UUID format matching real Codex files
    const sessionId = "00000001-0000-0000-0000-000000000001";
    const sessionFile = join(sessionDir, `rollout-2025-01-17T10-00-00-${sessionId}.jsonl`);

    // First JSONL record: session metadata including git info
    const sessionMeta = {
      timestamp: "2025-01-17T10:00:00.000Z",
      type: "session_meta",
      payload: {
        id: sessionId,
        timestamp: "2025-01-17T10:00:00.000Z",
        cwd: "/test/project",
        originator: "cli",
        cli_version: "1.0.0",
        model_provider: "anthropic",
        git: {
          branch: "main",
          commit_hash: "abc123",
          repository_url: "https://github.com/test/repo",
        },
      },
    };

    // User message: plain string content
    const userMessage = {
      timestamp: "2025-01-17T10:00:01.000Z",
      type: "response_item",
      payload: {
        id: "msg-1",
        role: "user",
        content: "Hello, how are you?",
      },
    };

    // Assistant message: structured content-block array
    const assistantMessage = {
      timestamp: "2025-01-17T10:00:02.000Z",
      type: "response_item",
      payload: {
        id: "msg-2",
        role: "assistant",
        content: [
          {
            type: "text",
            text: "I am doing well!",
          },
        ],
      },
    };

    const sessionContent = [sessionMeta, userMessage, assistantMessage]
      .map((entry) => JSON.stringify(entry))
      .join("\n");

    writeFileSync(sessionFile, sessionContent);

    // Parse the session
    const result = parser.parseSession(testCodexPath);

    // Conversation metadata is taken from the session_meta record
    expect(result.conversations).toHaveLength(1);
    expect(result.conversations[0].id).toBe(sessionId);
    expect(result.conversations[0].project_path).toBe("/test/project");
    expect(result.conversations[0].source_type).toBe("codex");
    expect(result.conversations[0].git_branch).toBe("main");
    expect(result.conversations[0].message_count).toBe(2);

    expect(result.messages).toHaveLength(2);
    expect(result.messages[0].role).toBe("user");
    expect(result.messages[0].content).toBe("Hello, how are you?");
    expect(result.messages[1].role).toBe("assistant");
  });

  it("should extract tool uses from assistant messages", () => {
    const sessionDir = join(testCodexPath, "sessions", "2025", "01", "17");
    mkdirSync(sessionDir, { recursive: true });

    const sessionId = "00000002-0000-0000-0000-000000000002";
    const sessionFile = join(sessionDir, `rollout-2025-01-17T10-00-00-${sessionId}.jsonl`);

    const sessionMeta = {
      timestamp: "2025-01-17T10:00:00.000Z",
      type: "session_meta",
      payload: {
        id: sessionId,
        timestamp: "2025-01-17T10:00:00.000Z",
        cwd: "/test/project",
      },
    };

    // Assistant issues a tool_use block; the paired tool_result arrives
    // in a subsequent user-role record referencing the same tool id.
    const toolUseMessage = {
      timestamp: "2025-01-17T10:00:01.000Z",
      type: "response_item",
      payload: {
        id: "msg-1",
        role: "assistant",
        content: [
          {
            type: "tool_use",
            id: "tool-1",
            name: "bash",
            input: {
              command: "ls -la",
            },
          },
        ],
      },
    };

    const toolResultMessage = {
      timestamp: "2025-01-17T10:00:02.000Z",
      type: "response_item",
      payload: {
        id: "msg-2",
        role: "user",
        content: [
          {
            type: "tool_result",
            tool_use_id: "tool-1",
            content: "file1.txt\nfile2.txt",
            stdout: "file1.txt\nfile2.txt",
          },
        ],
      },
    };

    const sessionContent = [sessionMeta, toolUseMessage, toolResultMessage]
      .map((entry) => JSON.stringify(entry))
      .join("\n");

    writeFileSync(sessionFile, sessionContent);

    const result = parser.parseSession(testCodexPath);

    expect(result.tool_uses).toHaveLength(1);
    expect(result.tool_uses[0].tool_name).toBe("bash");
    expect(result.tool_uses[0].tool_input).toEqual({ command: "ls -la" });

    expect(result.tool_results).toHaveLength(1);
    expect(result.tool_results[0].tool_use_id).toBe("tool-1");
    expect(result.tool_results[0].content).toBe("file1.txt\nfile2.txt");
    expect(result.tool_results[0].stdout).toBe("file1.txt\nfile2.txt");
  });

  it("should extract thinking blocks", () => {
    const sessionDir = join(testCodexPath, "sessions", "2025", "01", "17");
    mkdirSync(sessionDir, { recursive: true });

    const sessionId = "00000003-0000-0000-0000-000000000003";
    const sessionFile = join(sessionDir, `rollout-2025-01-17T10-00-00-${sessionId}.jsonl`);

    const sessionMeta = {
      timestamp: "2025-01-17T10:00:00.000Z",
      type: "session_meta",
      payload: {
        id: sessionId,
        timestamp: "2025-01-17T10:00:00.000Z",
        cwd: "/test/project",
      },
    };

    // A thinking block alongside a visible text block in one message
    const thinkingMessage = {
      timestamp: "2025-01-17T10:00:01.000Z",
      type: "response_item",
      payload: {
        id: "msg-1",
        role: "assistant",
        content: [
          {
            type: "thinking",
            thinking: "Let me analyze this problem...",
            signature: "sha256:abc123",
          },
          {
            type: "text",
            text: "Based on my analysis...",
          },
        ],
      },
    };

    const sessionContent = [sessionMeta, thinkingMessage]
      .map((entry) => JSON.stringify(entry))
      .join("\n");

    writeFileSync(sessionFile, sessionContent);

    const result = parser.parseSession(testCodexPath);

    expect(result.thinking_blocks).toHaveLength(1);
    expect(result.thinking_blocks[0].thinking_content).toBe("Let me analyze this problem...");
    expect(result.thinking_blocks[0].signature).toBe("sha256:abc123");
  });

  it("should filter by session ID", () => {
    const sessionDir = join(testCodexPath, "sessions", "2025", "01", "17");
    mkdirSync(sessionDir, { recursive: true });

    // Create two session files with UUID format
    const sessionId1 = "00000004-0000-0000-0000-000000000001";
    const sessionId2 = "00000004-0000-0000-0000-000000000002";
    for (const sessionId of [sessionId1, sessionId2]) {
      const sessionFile = join(sessionDir, `rollout-2025-01-17T10-00-00-${sessionId}.jsonl`);
      const sessionMeta = {
        timestamp: "2025-01-17T10:00:00.000Z",
        type: "session_meta",
        payload: {
          id: sessionId,
          timestamp: "2025-01-17T10:00:00.000Z",
          cwd: "/test/project",
        },
      };
      writeFileSync(sessionFile, JSON.stringify(sessionMeta));
    }

    // Parse only session-1; the second file must be ignored
    const result = parser.parseSession(testCodexPath, sessionId1);

    expect(result.conversations).toHaveLength(1);
    expect(result.conversations[0].id).toBe(sessionId1);
  });

  it("should handle malformed JSONL lines gracefully", () => {
    const sessionDir = join(testCodexPath, "sessions", "2025", "01", "17");
    mkdirSync(sessionDir, { recursive: true });

    const sessionId = "00000005-0000-0000-0000-000000000005";
    const sessionFile = join(sessionDir, `rollout-2025-01-17T10-00-00-${sessionId}.jsonl`);

    const sessionMeta = {
      timestamp: "2025-01-17T10:00:00.000Z",
      type: "session_meta",
      payload: {
        id: sessionId,
        timestamp: "2025-01-17T10:00:00.000Z",
        cwd: "/test/project",
      },
    };

    // Include a malformed line between two valid records
    const sessionContent = [
      JSON.stringify(sessionMeta),
      "{ invalid json",
      JSON.stringify({
        timestamp: "2025-01-17T10:00:01.000Z",
        type: "response_item",
        payload: {
          id: "msg-1",
          role: "user",
          content: "Hello",
        },
      }),
    ].join("\n");

    writeFileSync(sessionFile, sessionContent);

    const result = parser.parseSession(testCodexPath);

    // Should parse session and valid message, skip malformed line
    expect(result.conversations).toHaveLength(1);
    expect(result.messages).toHaveLength(1);
  });

  it("should handle empty session files", () => {
    const sessionDir = join(testCodexPath, "sessions", "2025", "01", "17");
    mkdirSync(sessionDir, { recursive: true });

    // Zero-byte session file should be silently skipped
    const sessionFile = join(sessionDir, "rollout-2025-01-17T10-00-00-00000006-0000-0000-0000-000000000006.jsonl");
    writeFileSync(sessionFile, "");

    const result = parser.parseSession(testCodexPath);

    expect(result.conversations).toHaveLength(0);
    expect(result.messages).toHaveLength(0);
  });

  it("should skip files without session_meta", () => {
    const sessionDir = join(testCodexPath, "sessions", "2025", "01", "17");
    mkdirSync(sessionDir, { recursive: true });

    const sessionFile = join(sessionDir, "rollout-2025-01-17T10-00-00-00000007-0000-0000-0000-000000000007.jsonl");

    // File containing only a message record, no session_meta header
    const messageOnly = {
      timestamp: "2025-01-17T10:00:01.000Z",
      type: "response_item",
      payload: {
        id: "msg-1",
        role: "user",
        content: "Hello",
      },
    };

    writeFileSync(sessionFile, JSON.stringify(messageOnly));

    const result = parser.parseSession(testCodexPath);

    // Should skip session without metadata
    expect(result.conversations).toHaveLength(0);
    expect(result.messages).toHaveLength(0);
  });
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/ContextInjector.test.ts | TypeScript | /**
* Unit tests for ContextInjector
*/
import Database from "better-sqlite3";
import { ContextInjector } from "../../context/ContextInjector.js";
import { WorkingMemoryStore } from "../../memory/WorkingMemoryStore.js";
import { SessionHandoffStore } from "../../handoff/SessionHandoffStore.js";
describe("ContextInjector", () => {
let db: Database.Database;
let injector: ContextInjector;
let memoryStore: WorkingMemoryStore;
let handoffStore: SessionHandoffStore;
const projectPath = "/test/project";
beforeEach(() => {
  // Fresh in-memory database per test; the schema below is a minimal
  // mirror of the tables ContextInjector and the two stores read/write.
  db = new Database(":memory:");

  // Create required tables
  db.exec(`
    CREATE TABLE IF NOT EXISTS working_memory (
      id TEXT PRIMARY KEY,
      key TEXT NOT NULL,
      value TEXT NOT NULL,
      context TEXT,
      tags TEXT,
      session_id TEXT,
      project_path TEXT NOT NULL,
      created_at INTEGER NOT NULL,
      updated_at INTEGER NOT NULL,
      expires_at INTEGER,
      embedding BLOB,
      UNIQUE(project_path, key)
    );

    CREATE VIRTUAL TABLE IF NOT EXISTS working_memory_fts USING fts5(
      id UNINDEXED,
      key,
      value,
      context
    );

    CREATE TABLE IF NOT EXISTS session_handoffs (
      id TEXT PRIMARY KEY,
      from_session_id TEXT NOT NULL,
      project_path TEXT NOT NULL,
      created_at INTEGER NOT NULL,
      handoff_data TEXT NOT NULL,
      resumed_by_session_id TEXT,
      resumed_at INTEGER
    );

    CREATE TABLE IF NOT EXISTS decisions (
      id TEXT PRIMARY KEY,
      message_id TEXT NOT NULL,
      decision_text TEXT NOT NULL,
      rationale TEXT,
      context TEXT,
      timestamp INTEGER NOT NULL
    );

    CREATE TABLE IF NOT EXISTS tool_uses (
      id TEXT PRIMARY KEY,
      message_id TEXT NOT NULL,
      tool_name TEXT NOT NULL,
      parameters TEXT,
      result TEXT,
      timestamp INTEGER NOT NULL
    );

    CREATE TABLE IF NOT EXISTS messages (
      id TEXT PRIMARY KEY,
      conversation_id TEXT NOT NULL,
      role TEXT NOT NULL,
      content TEXT,
      timestamp INTEGER NOT NULL
    );

    CREATE TABLE IF NOT EXISTS conversations (
      id TEXT PRIMARY KEY,
      session_id TEXT NOT NULL,
      project_path TEXT NOT NULL,
      created_at INTEGER NOT NULL
    );
  `);

  // All three collaborators share the same database handle
  injector = new ContextInjector(db);
  memoryStore = new WorkingMemoryStore(db);
  handoffStore = new SessionHandoffStore(db);
});
afterEach(() => {
  // In-memory database: closing it discards all state between tests.
  db.close();
});
// Core context-assembly behavior: source selection, query filtering,
// token budgeting, and priority handling.
describe("getRelevantContext", () => {
  it("should return empty context when nothing is stored", async () => {
    const context = await injector.getRelevantContext({
      projectPath,
    });

    expect(context.decisions).toEqual([]);
    expect(context.memory).toEqual([]);
    expect(context.recentFiles).toEqual([]);
    expect(context.handoff).toBeUndefined();
  });

  it("should include working memory items", async () => {
    memoryStore.remember({
      key: "storage_choice",
      value: "Using SQLite",
      projectPath,
    });

    const context = await injector.getRelevantContext({
      projectPath,
      sources: ["memory"],
    });

    expect(context.memory.length).toBe(1);
    expect(context.memory[0].key).toBe("storage_choice");
  });

  it("should include handoff when available", async () => {
    const handoff = handoffStore.prepareHandoff({
      sessionId: "session-1",
      projectPath,
    });

    const context = await injector.getRelevantContext({
      projectPath,
      sources: ["handoffs"],
    });

    expect(context.handoff).toBeDefined();
    expect(context.handoff?.id).toBe(handoff.id);
  });

  it("should filter by query when provided", async () => {
    memoryStore.remember({
      key: "database",
      value: "Using PostgreSQL for production",
      projectPath,
    });
    memoryStore.remember({
      key: "testing",
      value: "Jest for unit tests",
      projectPath,
    });

    const context = await injector.getRelevantContext({
      query: "PostgreSQL",
      projectPath,
      sources: ["memory"],
    });

    // Semantic search should find the database-related item
    expect(context.memory.length).toBeGreaterThan(0);
  });

  it("should respect token budget", async () => {
    // Add many items so the 100-token budget cannot fit them all
    for (let i = 0; i < 20; i++) {
      memoryStore.remember({
        key: `item_${i}`,
        value: "A moderately long value that takes up some tokens " + i,
        projectPath,
      });
    }

    const context = await injector.getRelevantContext({
      projectPath,
      maxTokens: 100,
      sources: ["memory"],
    });

    // Should not include all 20 items due to token limit
    expect(context.memory.length).toBeLessThan(20);
    expect(context.tokenEstimate).toBeLessThanOrEqual(100);
  });

  it("should prioritize critical items", async () => {
    memoryStore.remember({
      key: "low_priority",
      value: "Regular information",
      tags: [],
      projectPath,
    });
    memoryStore.remember({
      key: "critical_info",
      value: "Very important decision",
      tags: ["critical"],
      projectPath,
    });

    const context = await injector.getRelevantContext({
      projectPath,
      maxTokens: 50, // Tight budget
      sources: ["memory"],
    });

    // Critical items should be prioritized even under a tight budget
    const hasCritical = context.memory.some(m => m.key === "critical_info");
    expect(hasCritical).toBe(true);
  });
});
// Markdown rendering of an assembled context object.
describe("formatForInjection", () => {
  it("should format context as markdown", async () => {
    memoryStore.remember({
      key: "storage",
      value: "SQLite",
      projectPath,
    });

    // Create handoff to be retrieved by context injector
    handoffStore.prepareHandoff({
      sessionId: "session-1",
      projectPath,
      include: ["memory"],
    });

    const context = await injector.getRelevantContext({
      projectPath,
      sources: ["handoffs", "memory"],
    });

    const formatted = injector.formatForInjection(context);

    expect(formatted).toContain("## Previous Session Context");
    expect(formatted).toContain("## Remembered Context");
    expect(formatted).toContain("**storage**");
  });

  it("should include section for each available source", async () => {
    // Add working memory
    memoryStore.remember({
      key: "test",
      value: "value",
      projectPath,
    });

    const context = await injector.getRelevantContext({
      projectPath,
      sources: ["memory"],
    });

    const formatted = injector.formatForInjection(context);
    expect(formatted).toContain("Remembered Context");
  });

  it("should return empty string for empty context", async () => {
    const context = await injector.getRelevantContext({
      projectPath,
    });

    const formatted = injector.formatForInjection(context);
    // Should be empty or minimal
    expect(formatted.trim().length).toBeLessThan(50);
  });
});
describe("token estimation", () => {
  it("should provide reasonable token estimates", async () => {
    const sampleValue = "This is a sample text for token estimation testing.";
    memoryStore.remember({
      key: "test",
      value: sampleValue,
      projectPath,
    });

    const result = await injector.getRelevantContext({
      projectPath,
      sources: ["memory"],
    });

    // The estimate is heuristic (roughly character count / 4);
    // only require that it is a positive number.
    expect(result.tokenEstimate).toBeGreaterThan(0);
  });
});
// The generated summary should mention where the context came from and
// how many memory items it includes.
describe("summary generation", () => {
  it("should generate descriptive summary", async () => {
    memoryStore.remember({
      key: "item1",
      value: "value1",
      projectPath,
    });
    memoryStore.remember({
      key: "item2",
      value: "value2",
      projectPath,
    });

    const context = await injector.getRelevantContext({
      projectPath,
      sources: ["memory"],
    });

    expect(context.summary).toContain(projectPath);
    expect(context.summary).toContain("memory item");
  });
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/ConversationParser.test.ts | TypeScript | /**
* Unit tests for ConversationParser with streaming support
*/
import { ConversationParser } from "../../parsers/ConversationParser.js";
import { mkdirSync, writeFileSync, rmSync, existsSync } from "fs";
import { join } from "path";
import { tmpdir } from "os";
describe("ConversationParser", () => {
let testProjectPath: string;
let testClaudePath: string;
let parser: ConversationParser;
beforeEach(() => {
  // Create unique temporary directories for the test. Date.now() alone can
  // collide when parallel Jest workers start in the same millisecond, so a
  // random suffix is appended (same pattern as the global setup.ts path).
  const unique = `${Date.now()}-${Math.random().toString(16).slice(2)}`;
  testProjectPath = join(tmpdir(), `test-project-${unique}`);
  testClaudePath = join(tmpdir(), `.claude-test-${unique}`);
  mkdirSync(testProjectPath, { recursive: true });
  mkdirSync(testClaudePath, { recursive: true });
  parser = new ConversationParser();
});
afterEach(() => {
  // Best-effort removal of both scratch trees created in beforeEach.
  [testProjectPath, testClaudePath]
    .filter((dir) => existsSync(dir))
    .forEach((dir) => rmSync(dir, { recursive: true, force: true }));
});
describe("parseFromFolder", () => {
it("should return empty result for non-existent folder", () => {
const result = parser.parseFromFolder("/nonexistent/path");
expect(result.conversations).toHaveLength(0);
expect(result.messages).toHaveLength(0);
});
it("should return empty result for folder without JSONL files", () => {
  // An existing but empty folder also parses to an empty result.
  const result = parser.parseFromFolder(testClaudePath);
  expect(result.conversations).toHaveLength(0);
  expect(result.messages).toHaveLength(0);
});
it("should parse a simple conversation file", () => {
  const sessionId = "test-session-001";
  const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);

  // Two-turn transcript: user string content, then assistant block-array
  // content linked back via parentUuid.
  const messages = [
    {
      type: "user",
      uuid: "msg-001",
      sessionId,
      timestamp: "2025-01-17T10:00:00.000Z",
      message: { role: "user", content: "Hello, Claude!" },
    },
    {
      type: "assistant",
      uuid: "msg-002",
      parentUuid: "msg-001",
      sessionId,
      timestamp: "2025-01-17T10:00:01.000Z",
      message: {
        role: "assistant",
        content: [{ type: "text", text: "Hello! How can I help?" }],
      },
    },
  ];

  writeFileSync(
    sessionFile,
    messages.map((m) => JSON.stringify(m)).join("\n")
  );

  const result = parser.parseFromFolder(testClaudePath, testProjectPath);

  // One conversation keyed by the JSONL filename's session id
  expect(result.conversations).toHaveLength(1);
  expect(result.conversations[0].id).toBe(sessionId);
  expect(result.conversations[0].project_path).toBe(testProjectPath);
  expect(result.conversations[0].message_count).toBe(2);

  expect(result.messages).toHaveLength(2);
  expect(result.messages[0].role).toBe("user");
  expect(result.messages[0].content).toBe("Hello, Claude!");
  expect(result.messages[1].role).toBe("assistant");
  expect(result.messages[1].content).toBe("Hello! How can I help?");
});
it("should extract tool uses and results", () => {
const sessionId = "test-session-tools";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const messages = [
{
type: "user",
uuid: "msg-001",
sessionId,
timestamp: "2025-01-17T10:00:00.000Z",
message: { role: "user", content: "List files" },
},
{
type: "assistant",
uuid: "msg-002",
sessionId,
timestamp: "2025-01-17T10:00:01.000Z",
message: {
role: "assistant",
content: [
{
type: "tool_use",
id: "tool-001",
name: "bash",
input: { command: "ls -la" },
},
],
},
},
{
type: "user",
uuid: "msg-003",
sessionId,
timestamp: "2025-01-17T10:00:02.000Z",
message: {
role: "user",
content: [
{
type: "tool_result",
tool_use_id: "tool-001",
content: "file1.txt\nfile2.txt",
},
],
},
toolUseResult: {
stdout: "file1.txt\nfile2.txt",
},
},
];
writeFileSync(
sessionFile,
messages.map((m) => JSON.stringify(m)).join("\n")
);
const result = parser.parseFromFolder(testClaudePath);
expect(result.tool_uses).toHaveLength(1);
expect(result.tool_uses[0].tool_name).toBe("bash");
expect(result.tool_uses[0].tool_input).toEqual({ command: "ls -la" });
expect(result.tool_results).toHaveLength(1);
expect(result.tool_results[0].tool_use_id).toBe("tool-001");
expect(result.tool_results[0].stdout).toBe("file1.txt\nfile2.txt");
});
it("should skip tool calls without valid ids or stored messages", () => {
const sessionId = "test-session-tool-safety";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const messages = [
// Valid tool use + result
{
type: "assistant",
uuid: "msg-001",
sessionId,
timestamp: "2025-01-17T10:00:01.000Z",
message: {
role: "assistant",
content: [
{ type: "tool_use", id: "tool-001", name: "bash", input: { command: "ls" } },
],
},
},
{
type: "user",
uuid: "msg-002",
sessionId,
timestamp: "2025-01-17T10:00:02.000Z",
message: {
role: "user",
content: [{ type: "tool_result", tool_use_id: "tool-001", content: "ok" }],
},
},
// Tool use missing id (should be skipped)
{
type: "assistant",
uuid: "msg-003",
sessionId,
timestamp: "2025-01-17T10:00:03.000Z",
message: {
role: "assistant",
content: [{ type: "tool_use", name: "bash", input: { command: "pwd" } }],
},
},
// Tool result missing tool_use_id (should be skipped)
{
type: "user",
uuid: "msg-004",
sessionId,
timestamp: "2025-01-17T10:00:04.000Z",
message: {
role: "user",
content: [{ type: "tool_result", content: "no id" }],
},
},
// Tool use on message without sessionId (message not stored)
{
type: "assistant",
uuid: "msg-005",
timestamp: "2025-01-17T10:00:05.000Z",
message: {
role: "assistant",
content: [
{ type: "tool_use", id: "tool-002", name: "bash", input: { command: "whoami" } },
],
},
},
// Tool result referencing tool-002 (should be skipped because tool-002 skipped)
{
type: "user",
uuid: "msg-006",
sessionId,
timestamp: "2025-01-17T10:00:06.000Z",
message: {
role: "user",
content: [{ type: "tool_result", tool_use_id: "tool-002", content: "user" }],
},
},
];
writeFileSync(
sessionFile,
messages.map((m) => JSON.stringify(m)).join("\n")
);
const result = parser.parseFromFolder(testClaudePath);
expect(result.tool_uses).toHaveLength(1);
expect(result.tool_uses[0].id).toBe("tool-001");
expect(result.tool_results).toHaveLength(1);
expect(result.tool_results[0].tool_use_id).toBe("tool-001");
});
it("should extract thinking blocks", () => {
const sessionId = "test-session-thinking";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const messages = [
{
type: "user",
uuid: "msg-001",
sessionId,
timestamp: "2025-01-17T10:00:00.000Z",
message: { role: "user", content: "Analyze this" },
},
{
type: "assistant",
uuid: "msg-002",
sessionId,
timestamp: "2025-01-17T10:00:01.000Z",
message: {
role: "assistant",
content: [
{
type: "thinking",
thinking: "Let me analyze this step by step...",
signature: "sha256:abc123",
},
{ type: "text", text: "Here is my analysis." },
],
},
},
];
writeFileSync(
sessionFile,
messages.map((m) => JSON.stringify(m)).join("\n")
);
const result = parser.parseFromFolder(testClaudePath);
expect(result.thinking_blocks).toHaveLength(1);
expect(result.thinking_blocks[0].thinking_content).toBe(
"Let me analyze this step by step..."
);
expect(result.thinking_blocks[0].signature).toBe("sha256:abc123");
});
it("should handle malformed JSONL lines gracefully", () => {
const sessionId = "test-session-errors";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const content = [
JSON.stringify({
type: "user",
uuid: "msg-001",
sessionId,
timestamp: "2025-01-17T10:00:00.000Z",
message: { role: "user", content: "Hello" },
}),
"{ invalid json line",
JSON.stringify({
type: "assistant",
uuid: "msg-002",
sessionId,
timestamp: "2025-01-17T10:00:01.000Z",
message: { role: "assistant", content: [{ type: "text", text: "Hi!" }] },
}),
].join("\n");
writeFileSync(sessionFile, content);
const result = parser.parseFromFolder(testClaudePath);
// Should parse valid messages and track error
expect(result.conversations).toHaveLength(1);
expect(result.messages).toHaveLength(2);
expect(result.parse_errors).toBeDefined();
expect(result.parse_errors).toHaveLength(1);
expect(result.parse_errors![0].line).toBe(2);
// Error message format varies by Node version
expect(result.parse_errors![0].error).toMatch(/JSON|token|parse/i);
});
it("should skip unchanged files in incremental mode", async () => {
const sessionId = "test-session-incremental";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const messages = [
{
type: "user",
uuid: "msg-001",
sessionId,
timestamp: "2025-01-17T10:00:00.000Z",
message: { role: "user", content: "Hello" },
},
];
writeFileSync(
sessionFile,
messages.map((m) => JSON.stringify(m)).join("\n")
);
// First parse (no lastIndexedMs)
const result1 = parser.parseFromFolder(testClaudePath);
expect(result1.conversations).toHaveLength(1);
// Second parse with lastIndexedMs in the future
const futureTime = Date.now() + 100000;
const result2 = parser.parseFromFolder(testClaudePath, undefined, futureTime);
expect(result2.conversations).toHaveLength(0);
});
it("should handle empty JSONL files", () => {
const sessionFile = join(testClaudePath, "empty-session.jsonl");
writeFileSync(sessionFile, "");
const result = parser.parseFromFolder(testClaudePath);
expect(result.conversations).toHaveLength(0);
expect(result.messages).toHaveLength(0);
});
it("should handle files with only whitespace lines", () => {
const sessionFile = join(testClaudePath, "whitespace-session.jsonl");
writeFileSync(sessionFile, " \n\n \n");
const result = parser.parseFromFolder(testClaudePath);
expect(result.conversations).toHaveLength(0);
expect(result.messages).toHaveLength(0);
});
it("should parse multiple conversation files", () => {
for (let i = 1; i <= 3; i++) {
const sessionId = `multi-session-${i}`;
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const msg = {
type: "user",
uuid: `msg-${i}`,
sessionId,
timestamp: `2025-01-17T10:0${i}:00.000Z`,
message: { role: "user", content: `Message ${i}` },
};
writeFileSync(sessionFile, JSON.stringify(msg));
}
const result = parser.parseFromFolder(testClaudePath);
expect(result.conversations).toHaveLength(3);
expect(result.messages).toHaveLength(3);
});
it("should detect MCP tool usage", () => {
const sessionId = "test-session-mcp";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const messages = [
{
type: "user",
uuid: "msg-001",
sessionId,
timestamp: "2025-01-17T10:00:00.000Z",
message: { role: "user", content: "Search conversations" },
},
{
type: "assistant",
uuid: "msg-002",
sessionId,
timestamp: "2025-01-17T10:00:01.000Z",
message: {
role: "assistant",
content: [
{
type: "tool_use",
id: "tool-001",
name: "mcp__cccmemory__search_conversations",
input: { query: "test" },
},
],
},
},
];
writeFileSync(
sessionFile,
messages.map((m) => JSON.stringify(m)).join("\n")
);
const result = parser.parseFromFolder(testClaudePath);
expect(result.conversations).toHaveLength(1);
const metadata = result.conversations[0].metadata as {
mcp_usage?: { detected: boolean; servers: string[] };
};
expect(metadata.mcp_usage?.detected).toBe(true);
expect(metadata.mcp_usage?.servers).toContain("cccmemory");
});
it("should filter NaN timestamps", () => {
const sessionId = "test-session-nan-timestamps";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const messages = [
{
type: "user",
uuid: "msg-001",
sessionId,
timestamp: "invalid-date",
message: { role: "user", content: "Message with bad timestamp" },
},
{
type: "assistant",
uuid: "msg-002",
sessionId,
timestamp: "2025-01-17T10:00:01.000Z",
message: {
role: "assistant",
content: [{ type: "text", text: "Valid timestamp" }],
},
},
];
writeFileSync(
sessionFile,
messages.map((m) => JSON.stringify(m)).join("\n")
);
const result = parser.parseFromFolder(testClaudePath);
// Conversation should be created with valid timestamp
expect(result.conversations).toHaveLength(1);
expect(result.conversations[0].first_message_at).not.toBeNaN();
expect(result.conversations[0].last_message_at).not.toBeNaN();
});
});
describe("parseFromFolderAsync (streaming)", () => {
it("should parse a conversation file using streaming", async () => {
const sessionId = "test-async-session-001";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const messages = [
{
type: "user",
uuid: "async-msg-001",
sessionId,
timestamp: "2025-01-17T10:00:00.000Z",
message: { role: "user", content: "Hello from streaming!" },
},
{
type: "assistant",
uuid: "async-msg-002",
parentUuid: "async-msg-001",
sessionId,
timestamp: "2025-01-17T10:00:01.000Z",
message: {
role: "assistant",
content: [{ type: "text", text: "Streaming response!" }],
},
},
];
writeFileSync(
sessionFile,
messages.map((m) => JSON.stringify(m)).join("\n")
);
const result = await parser.parseFromFolderAsync(testClaudePath, testProjectPath);
expect(result.conversations).toHaveLength(1);
expect(result.conversations[0].id).toBe(sessionId);
expect(result.messages).toHaveLength(2);
expect(result.messages[0].content).toBe("Hello from streaming!");
});
it("should handle large files efficiently with streaming", async () => {
const sessionId = "test-large-file";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
// Create a file with many lines
const lineCount = 1000;
const lines: string[] = [];
for (let i = 0; i < lineCount; i++) {
lines.push(
JSON.stringify({
type: i % 2 === 0 ? "user" : "assistant",
uuid: `msg-${i.toString().padStart(4, "0")}`,
sessionId,
timestamp: new Date(Date.now() + i * 1000).toISOString(),
message: {
role: i % 2 === 0 ? "user" : "assistant",
content:
i % 2 === 0
? `User message ${i}`
: [{ type: "text", text: `Assistant response ${i}` }],
},
})
);
}
writeFileSync(sessionFile, lines.join("\n"));
const result = await parser.parseFromFolderAsync(testClaudePath);
expect(result.conversations).toHaveLength(1);
expect(result.messages).toHaveLength(lineCount);
expect(result.conversations[0].message_count).toBe(lineCount);
});
it("should handle malformed lines gracefully with streaming", async () => {
const sessionId = "test-async-errors";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const content = [
JSON.stringify({
type: "user",
uuid: "async-msg-001",
sessionId,
timestamp: "2025-01-17T10:00:00.000Z",
message: { role: "user", content: "Valid message" },
}),
"{ broken json",
'{"also": broken',
JSON.stringify({
type: "assistant",
uuid: "async-msg-002",
sessionId,
timestamp: "2025-01-17T10:00:01.000Z",
message: {
role: "assistant",
content: [{ type: "text", text: "Another valid message" }],
},
}),
].join("\n");
writeFileSync(sessionFile, content);
const result = await parser.parseFromFolderAsync(testClaudePath);
expect(result.conversations).toHaveLength(1);
expect(result.messages).toHaveLength(2);
expect(result.parse_errors).toBeDefined();
expect(result.parse_errors!.length).toBe(2);
});
it("should skip unchanged files in incremental mode with streaming", async () => {
const sessionId = "test-async-incremental";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
writeFileSync(
sessionFile,
JSON.stringify({
type: "user",
uuid: "msg-inc-001",
sessionId,
timestamp: "2025-01-17T10:00:00.000Z",
message: { role: "user", content: "Hello" },
})
);
// Parse with future lastIndexedMs
const futureTime = Date.now() + 100000;
const result = await parser.parseFromFolderAsync(
testClaudePath,
undefined,
futureTime
);
expect(result.conversations).toHaveLength(0);
expect(result.messages).toHaveLength(0);
});
it("should produce same results as sync parseFromFolder", async () => {
const sessionId = "test-sync-async-compare";
const sessionFile = join(testClaudePath, `${sessionId}.jsonl`);
const messages = [
{
type: "user",
uuid: "cmp-msg-001",
sessionId,
timestamp: "2025-01-17T10:00:00.000Z",
message: { role: "user", content: "Compare sync/async" },
},
{
type: "assistant",
uuid: "cmp-msg-002",
sessionId,
timestamp: "2025-01-17T10:00:01.000Z",
message: {
role: "assistant",
content: [
{
type: "tool_use",
id: "tool-cmp",
name: "test_tool",
input: { test: true },
},
],
},
},
{
type: "user",
uuid: "cmp-msg-003",
sessionId,
timestamp: "2025-01-17T10:00:02.000Z",
message: {
role: "user",
content: [
{
type: "tool_result",
tool_use_id: "tool-cmp",
content: "Result",
},
],
},
},
];
writeFileSync(
sessionFile,
messages.map((m) => JSON.stringify(m)).join("\n")
);
const syncResult = parser.parseFromFolder(testClaudePath, testProjectPath);
const asyncResult = await parser.parseFromFolderAsync(
testClaudePath,
testProjectPath
);
// Compare key fields
expect(asyncResult.conversations.length).toBe(syncResult.conversations.length);
expect(asyncResult.messages.length).toBe(syncResult.messages.length);
expect(asyncResult.tool_uses.length).toBe(syncResult.tool_uses.length);
expect(asyncResult.tool_results.length).toBe(syncResult.tool_results.length);
// Compare conversation details
expect(asyncResult.conversations[0].id).toBe(syncResult.conversations[0].id);
expect(asyncResult.conversations[0].message_count).toBe(
syncResult.conversations[0].message_count
);
// Compare message content
for (let i = 0; i < syncResult.messages.length; i++) {
expect(asyncResult.messages[i].id).toBe(syncResult.messages[i].id);
expect(asyncResult.messages[i].content).toBe(syncResult.messages[i].content);
}
});
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/ConversationStorage.test.ts | TypeScript | /**
* Unit tests for ConversationStorage
*/
import { ConversationStorage } from '../../storage/ConversationStorage.js';
import { getSQLiteManager, resetSQLiteManager } from '../../storage/SQLiteManager.js';
import type { Conversation, Message } from '../../parsers/ConversationParser.js';
import type { Decision } from '../../parsers/DecisionExtractor.js';
describe('ConversationStorage', () => {
  let storage: ConversationStorage;
  let db: ReturnType<typeof getSQLiteManager>;

  beforeEach(() => {
    // Use in-memory database for tests
    db = getSQLiteManager({ dbPath: ':memory:' });
    storage = new ConversationStorage(db);
  });

  afterEach(() => {
    resetSQLiteManager();
  });

  describe('storeConversations', () => {
    it('should store conversations successfully', async () => {
      const conversations: Conversation[] = [
        {
          id: 'test-conv-1',
          project_path: '/test/project',
          first_message_at: Date.now(),
          last_message_at: Date.now(),
          message_count: 5,
          git_branch: 'main',
          claude_version: 'sonnet-4.5',
          metadata: { test: true },
          created_at: Date.now(),
          updated_at: Date.now(),
        },
      ];

      await storage.storeConversations(conversations);

      const retrieved = storage.getConversation('test-conv-1');
      expect(retrieved).toBeDefined();
      expect(retrieved?.id).toBe('test-conv-1');
      expect(retrieved?.project_path).toBe('/test/project');
    });

    it('should handle empty conversation arrays', async () => {
      await expect(storage.storeConversations([])).resolves.not.toThrow();
    });

    it('should update existing conversations on conflict', async () => {
      const conversation: Conversation = {
        id: 'test-conv-1',
        project_path: '/test/project',
        first_message_at: Date.now(),
        last_message_at: Date.now(),
        message_count: 5,
        git_branch: 'main',
        metadata: {},
        created_at: Date.now(),
        updated_at: Date.now(),
      };

      await storage.storeConversations([conversation]);

      // Update
      conversation.message_count = 10;
      await storage.storeConversations([conversation]);

      const retrieved = storage.getConversation('test-conv-1');
      expect(retrieved?.message_count).toBe(10);
    });
  });

  it('should resolve project aliases when ensuring project ids', () => {
    const database = db.getDatabase();
    const now = Date.now();
    // Seed one project and an alias pointing at it.
    const result = database
      .prepare(
        'INSERT INTO projects (canonical_path, display_path, created_at, updated_at) VALUES (?, ?, ?, ?)'
      )
      .run('/old/path', '/old/path', now, now);
    const projectId = Number(result.lastInsertRowid);
    database
      .prepare('INSERT INTO project_aliases (alias_path, project_id, created_at) VALUES (?, ?, ?)')
      .run('/new/path', projectId, now);

    // Resolving the alias must return the existing project rather than
    // creating a second projects row.
    const resolvedId = storage.getProjectId('/new/path');
    expect(resolvedId).toBe(projectId);

    const count = database
      .prepare('SELECT COUNT(*) as count FROM projects')
      .get() as { count: number };
    expect(count.count).toBe(1);
  });

  describe('storeMessages', () => {
    let conversationIdMap: Map<string, number>;

    beforeEach(async () => {
      // Create parent conversation first
      const conversation: Conversation = {
        id: 'test-conv-1',
        project_path: '/test/project',
        first_message_at: Date.now(),
        last_message_at: Date.now(),
        message_count: 0,
        git_branch: 'main',
        metadata: {},
        created_at: Date.now(),
        updated_at: Date.now(),
      };
      conversationIdMap = await storage.storeConversations([conversation]);
    });

    it('should store messages successfully', async () => {
      const messages: Message[] = [
        {
          id: 'msg-1',
          conversation_id: 'test-conv-1',
          message_type: 'user',
          role: 'user',
          content: 'Test message',
          timestamp: Date.now(),
          is_sidechain: false,
          metadata: {},
        },
      ];

      await storage.storeMessages(messages, { conversationIdMap });

      // Verify the row actually landed in the messages table (previously this
      // test only asserted expect(true).toBe(true), which verified nothing).
      const count = db.getDatabase().prepare('SELECT COUNT(*) as count FROM messages').get() as {
        count: number;
      };
      expect(count.count).toBe(1);
    });

    it('should handle empty message arrays', async () => {
      await expect(storage.storeMessages([], { conversationIdMap })).resolves.not.toThrow();
    });

    it('should skip messages with missing conversations', async () => {
      const messages: Message[] = [
        {
          id: 'msg-valid',
          conversation_id: 'test-conv-1',
          message_type: 'user',
          role: 'user',
          content: 'Valid',
          timestamp: Date.now(),
          is_sidechain: false,
          metadata: {},
        },
        {
          id: 'msg-invalid',
          conversation_id: 'missing-conv',
          message_type: 'assistant',
          role: 'assistant',
          content: 'Invalid',
          timestamp: Date.now(),
          is_sidechain: false,
          metadata: {},
        },
      ];

      await storage.storeMessages(messages, { conversationIdMap });

      // Only the message whose conversation exists should be stored.
      const count = db.getDatabase().prepare('SELECT COUNT(*) as count FROM messages').get() as {
        count: number;
      };
      expect(count.count).toBe(1);
    });
  });

  describe('getDecisionsForFile', () => {
    beforeEach(async () => {
      // Create parent conversation first
      const conversation: Conversation = {
        id: 'test-conv-1',
        project_path: '/test/project',
        first_message_at: Date.now(),
        last_message_at: Date.now(),
        message_count: 0,
        git_branch: 'main',
        metadata: {},
        created_at: Date.now(),
        updated_at: Date.now(),
      };
      const conversationIdMap = await storage.storeConversations([conversation]);

      // Create parent messages
      const messages: Message[] = [
        {
          id: 'msg-1',
          conversation_id: 'test-conv-1',
          message_type: 'assistant',
          role: 'assistant',
          content: 'Message 1',
          timestamp: Date.now(),
          is_sidechain: false,
          metadata: {},
        },
        {
          id: 'msg-2',
          conversation_id: 'test-conv-1',
          message_type: 'assistant',
          role: 'assistant',
          content: 'Message 2',
          timestamp: Date.now(),
          is_sidechain: false,
          metadata: {},
        },
      ];
      const messageIdMap = await storage.storeMessages(messages, { conversationIdMap });

      // Setup test data
      const decisions: Decision[] = [
        {
          id: 'decision-1',
          conversation_id: 'test-conv-1',
          message_id: 'msg-1',
          decision_text: 'Use JWT authentication',
          rationale: 'Stateless and scalable',
          alternatives_considered: ['Sessions'],
          rejected_reasons: { Sessions: 'Requires state' },
          context: 'Auth implementation',
          related_files: ['src/auth/token.ts', 'src/auth/middleware.ts'],
          related_commits: [],
          timestamp: Date.now(),
        },
        {
          id: 'decision-2',
          conversation_id: 'test-conv-1',
          message_id: 'msg-2',
          decision_text: 'Use PostgreSQL',
          rationale: 'Better for relational data',
          alternatives_considered: ['MongoDB'],
          rejected_reasons: {},
          related_files: ['src/database/connection.ts'],
          related_commits: [],
          timestamp: Date.now(),
        },
      ];

      await storage.storeDecisions(decisions, {
        conversationIdMap,
        messageIdMap,
      });
    });

    it('should find decisions for a specific file', () => {
      const decisions = storage.getDecisionsForFile('src/auth/token.ts');
      expect(decisions).toHaveLength(1);
      expect(decisions[0].decision_text).toBe('Use JWT authentication');
    });

    it('should handle files with special characters safely', () => {
      // Test SQL injection prevention
      const decisions = storage.getDecisionsForFile('src/auth%');
      // Should not match anything due to sanitization
      expect(decisions).toHaveLength(0);
    });

    it('should return empty array for non-existent files', () => {
      const decisions = storage.getDecisionsForFile('nonexistent.ts');
      expect(decisions).toHaveLength(0);
    });

    it('should return multiple decisions if file appears in multiple', () => {
      const decisions = storage.getDecisionsForFile('src/auth/middleware.ts');
      expect(decisions).toHaveLength(1);
    });
  });

  describe('getStats', () => {
    it('should return correct statistics', async () => {
      const conversation: Conversation = {
        id: 'test-conv-1',
        project_path: '/test',
        first_message_at: Date.now(),
        last_message_at: Date.now(),
        message_count: 5,
        metadata: {},
        created_at: Date.now(),
        updated_at: Date.now(),
      };

      const messages: Message[] = [
        {
          id: 'msg-1',
          conversation_id: 'test-conv-1',
          message_type: 'user',
          content: 'Test',
          timestamp: Date.now(),
          is_sidechain: false,
          metadata: {},
        },
        {
          id: 'msg-2',
          conversation_id: 'test-conv-1',
          message_type: 'assistant',
          content: 'Response',
          timestamp: Date.now(),
          is_sidechain: false,
          metadata: {},
        },
      ];

      const conversationIdMap = await storage.storeConversations([conversation]);
      await storage.storeMessages(messages, { conversationIdMap });

      const stats = storage.getStats();

      expect(stats.conversations.count).toBe(1);
      expect(stats.messages.count).toBe(2);
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/DecisionExtractor.test.ts | TypeScript | /**
* Unit tests for DecisionExtractor
*/
import { DecisionExtractor } from '../../parsers/DecisionExtractor';
import type { Message, ThinkingBlock } from '../../parsers/ConversationParser';
describe('DecisionExtractor', () => {
  let extractor: DecisionExtractor;

  // Factory for test messages: every test only varies a few fields, so the
  // common shape lives here and each test supplies overrides.
  const makeMessage = (overrides: Partial<Message>): Message => ({
    id: 'msg-1',
    conversation_id: 'conv-1',
    message_type: 'text',
    role: 'assistant',
    content: '',
    timestamp: Date.now(),
    is_sidechain: false,
    metadata: {},
    ...overrides,
  });

  beforeEach(() => {
    extractor = new DecisionExtractor();
  });

  describe('extractDecisions', () => {
    it('should extract decisions from assistant messages', () => {
      const input = [
        makeMessage({
          content: 'We decided to use PostgreSQL instead of MongoDB because it provides better ACID guarantees.',
        }),
      ];

      const decisions = extractor.extractDecisions(input, []);

      expect(decisions.length).toBeGreaterThan(0);
      expect(decisions[0].decision_text).toContain('PostgreSQL');
      expect(decisions[0].conversation_id).toBe('conv-1');
      expect(decisions[0].message_id).toBe('msg-1');
    });

    it('should extract decisions from user corrections', () => {
      const base = Date.now();
      const input = [
        makeMessage({ content: 'I will use approach A', timestamp: base }),
        makeMessage({
          id: 'msg-2',
          role: 'user',
          content: 'No, use approach B instead because it is more efficient for the database queries',
          timestamp: base + 1000,
        }),
      ];

      const decisions = extractor.extractDecisions(input, []);

      expect(decisions.length).toBeGreaterThan(0);
      const correction = decisions.find(d => d.decision_text.includes('approach B'));
      expect(correction).toBeDefined();
    });

    it('should handle empty messages array', () => {
      expect(extractor.extractDecisions([], [])).toEqual([]);
    });

    it('should handle messages without decisions', () => {
      const input = [makeMessage({ content: 'Hello, how can I help you?' })];
      expect(extractor.extractDecisions(input, [])).toEqual([]);
    });

    it('should extract decisions with thinking blocks', () => {
      const input = [
        makeMessage({
          content: 'We decided to use Redis instead of Memcached because it has more features.',
        }),
      ];
      const thinkingBlocks: ThinkingBlock[] = [
        {
          id: 'think-1',
          message_id: 'msg-1',
          thinking_content: 'Considering Redis vs Memcached. Redis has more features.',
          timestamp: Date.now(),
        },
      ];

      const decisions = extractor.extractDecisions(input, thinkingBlocks);

      // Should extract decision from message content
      expect(Array.isArray(decisions)).toBe(true);
    });

    it('should extract multiple decisions from a conversation', () => {
      const base = Date.now();
      const input = [
        makeMessage({
          content: 'We decided to use TypeScript instead of JavaScript because of type safety.',
          timestamp: base,
        }),
        makeMessage({
          id: 'msg-2',
          content: 'We chose Jest over Mocha because it has better snapshot testing.',
          timestamp: base + 1000,
        }),
      ];

      const decisions = extractor.extractDecisions(input, []);

      expect(decisions.length).toBeGreaterThanOrEqual(2);
      expect(decisions.some(d => d.decision_text.includes('TypeScript'))).toBe(true);
      expect(decisions.some(d => d.decision_text.includes('Jest'))).toBe(true);
    });

    it('should populate decision properties correctly', () => {
      const input = [
        makeMessage({
          content: 'For the database layer, we decided to use SQLite instead of PostgreSQL because it is simple and requires no server.',
          timestamp: 12345,
        }),
      ];

      const decisions = extractor.extractDecisions(input, []);
      expect(decisions.length).toBeGreaterThan(0);

      const [decision] = decisions;
      expect(decision.id).toBeTruthy();
      expect(decision.conversation_id).toBe('conv-1');
      expect(decision.message_id).toBe('msg-1');
      expect(decision.decision_text).toBeTruthy();
      expect(decision.timestamp).toBe(12345);
      expect(Array.isArray(decision.alternatives_considered)).toBe(true);
      expect(typeof decision.rejected_reasons).toBe('object');
      expect(Array.isArray(decision.related_files)).toBe(true);
      expect(Array.isArray(decision.related_commits)).toBe(true);
    });

    it('should extract context from decision text', () => {
      const input = [
        makeMessage({
          content: 'For authentication, we decided to use JWT tokens instead of sessions because they are stateless.',
        }),
      ];

      const decisions = extractor.extractDecisions(input, []);

      // Should extract decision and potentially identify auth context
      expect(Array.isArray(decisions)).toBe(true);
      if (decisions.length > 0) {
        const [decision] = decisions;
        expect(decision.id).toBeTruthy();
        expect(decision.conversation_id).toBe('conv-1');
      }
    });

    it('should handle non-text message types', () => {
      const input = [
        makeMessage({ message_type: 'summary', content: 'We decided to use React' }),
      ];

      // Should still work as long as content exists
      expect(Array.isArray(extractor.extractDecisions(input, []))).toBe(true);
    });

    it('should handle messages with null or undefined content', () => {
      const input = [makeMessage({ content: undefined })];
      expect(extractor.extractDecisions(input, [])).toEqual([]);
    });
  });

  describe('Decision Pattern Matching', () => {
    it('should match "decided to" pattern', () => {
      const input = [
        makeMessage({ content: 'We decided to implement feature X because users requested it.' }),
      ];
      expect(extractor.extractDecisions(input, []).length).toBeGreaterThan(0);
    });

    it('should match "chose over" pattern', () => {
      const input = [
        makeMessage({ content: 'I chose React over Vue because of better TypeScript support.' }),
      ];
      expect(extractor.extractDecisions(input, []).length).toBeGreaterThan(0);
    });

    it('should match "using instead of" pattern', () => {
      const input = [
        makeMessage({ content: 'Using Docker instead of VMs because it is more lightweight.' }),
      ];
      expect(extractor.extractDecisions(input, []).length).toBeGreaterThan(0);
    });
  });

  describe('Correction Pattern Matching', () => {
    // Note: Stricter patterns now require technical context (keywords like database,
    // api, function, class, etc.) to filter out non-technical corrections

    it('should match "no," correction pattern with technical context', () => {
      const input = [
        makeMessage({ role: 'user', content: 'No, use the database connection pool instead' }),
      ];
      expect(extractor.extractDecisions(input, []).length).toBeGreaterThan(0);
    });

    it("should match \"that's wrong\" correction pattern with technical context", () => {
      const input = [
        makeMessage({
          role: 'user',
          content: "That's wrong, the correct approach is to use the API endpoint",
        }),
      ];
      expect(extractor.extractDecisions(input, []).length).toBeGreaterThan(0);
    });

    it('should match "actually," correction pattern with technical context', () => {
      const input = [
        makeMessage({
          role: 'user',
          content: 'Actually, we should use the class method instead because it handles caching',
        }),
      ];
      expect(extractor.extractDecisions(input, []).length).toBeGreaterThan(0);
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/ExtractionValidator.test.ts | TypeScript | /**
* Unit tests for ExtractionValidator
*/
import {
ExtractionValidator,
getDecisionValidator,
getMistakeValidator,
DEFAULT_DECISION_VALIDATION_CONFIG,
DEFAULT_MISTAKE_VALIDATION_CONFIG,
} from "../../parsers/ExtractionValidator.js";
// Exercises the validation rules applied to extracted "decision" and
// "mistake" snippets: length bounds, noise/summary filtering, actionable
// keyword detection, source verification, and confidence scoring.
describe("ExtractionValidator", () => {
  describe("Decision Validation", () => {
    let validator: ExtractionValidator;
    // Fresh default-config validator per test so no state leaks between cases.
    beforeEach(() => {
      validator = getDecisionValidator();
    });
    describe("Valid Decisions", () => {
      it("should accept well-formed decisions", () => {
        const decision = "We decided to use PostgreSQL for the database because it has better JSON support.";
        const result = validator.validateDecision(decision);
        expect(result.isValid).toBe(true);
        expect(result.confidence).toBeGreaterThan(0.5);
      });
      it("should accept decisions with actionable keywords", () => {
        // Each sample leads with a different actionable verb
        // (implement/choose/create/configure).
        const decisions = [
          "Implement caching using Redis for better performance.",
          "Choose TypeScript over JavaScript for type safety.",
          "Create a new service layer for business logic.",
          "Configure the API to use rate limiting.",
        ];
        for (const decision of decisions) {
          const result = validator.validateDecision(decision);
          expect(result.isValid).toBe(true);
        }
      });
    });
    describe("Invalid Decisions", () => {
      it("should reject too short text", () => {
        const decision = "Use TypeScript";
        const result = validator.validateDecision(decision);
        expect(result.isValid).toBe(false);
        expect(result.reasons.some((r) => r.includes("Too short"))).toBe(true);
        expect(result.confidence).toBeLessThan(0.5);
      });
      it("should reject too long text", () => {
        // 600 chars exceeds the default maximum length.
        const decision = "a".repeat(600);
        const result = validator.validateDecision(decision);
        expect(result.reasons.some((r) => r.includes("Too long"))).toBe(true);
      });
      it("should reject session summary artifacts", () => {
        const summaries = [
          "Session summary: We worked on several features today.",
          "In this session, we implemented the login flow.",
          "Recap: Multiple bugs were fixed.",
          "Here's what we accomplished today.",
        ];
        for (const summary of summaries) {
          const result = validator.validateDecision(summary);
          expect(result.isValid).toBe(false);
          expect(result.reasons.some((r) => r.includes("summary artifact"))).toBe(true);
        }
      });
      it("should reject noise patterns", () => {
        // Greetings, acknowledgements, and bare list markers.
        const noiseItems = [
          "Yes",
          "Ok",
          "Thanks!",
          "Hi there",
          "Goodbye",
          "1.",
          "a)",
        ];
        for (const noise of noiseItems) {
          const result = validator.validateDecision(noise);
          expect(result.isValid).toBe(false);
        }
      });
      it("should reject text without actionable keywords", () => {
        const decision = "The weather is nice today and the sun is shining brightly.";
        const result = validator.validateDecision(decision);
        expect(result.reasons.some((r) => r.includes("Missing actionable keywords"))).toBe(true);
      });
      it("should reject text without proper structure", () => {
        // A bare keyword list with no sentence structure.
        const decision = "PostgreSQL Redis TypeScript React Node.js";
        const result = validator.validateDecision(decision);
        expect(result.reasons.some((r) => r.includes("Lacks proper sentence structure"))).toBe(true);
      });
    });
    describe("Source Verification", () => {
      it("should verify content exists in source", () => {
        const decision = "We decided to use PostgreSQL for better JSON support.";
        const source = "After discussion, we decided to use PostgreSQL for better JSON support and scalability.";
        const result = validator.validateDecision(decision, source);
        expect(result.isValid).toBe(true);
      });
      it("should penalize content not in source", () => {
        // Decision mentions MongoDB, but the source chose PostgreSQL.
        const decision = "We decided to use MongoDB for the database layer.";
        const source = "After discussion, we chose PostgreSQL for better JSON support.";
        const result = validator.validateDecision(decision, source);
        expect(result.reasons.some((r) => r.includes("Content not found in source"))).toBe(true);
      });
    });
    describe("Confidence Scoring", () => {
      it("should have high confidence for perfect decisions", () => {
        const decision = "We decided to implement caching using Redis because it provides fast in-memory storage.";
        const result = validator.validateDecision(decision);
        expect(result.confidence).toBeGreaterThan(0.7);
      });
      it("should have lower confidence for borderline decisions", () => {
        const decision = "Maybe we should consider using something else later.";
        const result = validator.validateDecision(decision);
        expect(result.confidence).toBeLessThan(0.7);
      });
    });
    describe("Suggestions", () => {
      it("should provide suggestions for invalid decisions", () => {
        const decision = "short";
        const result = validator.validateDecision(decision);
        expect(result.suggestions).toBeDefined();
        expect(result.suggestions?.length).toBeGreaterThan(0);
      });
      it("should not provide suggestions for valid decisions", () => {
        const decision = "We decided to use React for the frontend because of its component-based architecture.";
        const result = validator.validateDecision(decision);
        expect(result.suggestions).toBeUndefined();
      });
    });
  });
  describe("Mistake Validation", () => {
    let validator: ExtractionValidator;
    beforeEach(() => {
      validator = getMistakeValidator();
    });
    describe("Valid Mistakes", () => {
      it("should accept well-formed mistakes", () => {
        const mistakes = [
          "The error occurred because the database connection was not closed properly.",
          "The bug was caused by incorrect null handling in the API response.",
          "The issue stemmed from a missing await keyword in async function.",
          "We fixed the crash by adding proper error handling.",
        ];
        for (const mistake of mistakes) {
          const result = validator.validateMistake(mistake);
          expect(result.isValid).toBe(true);
        }
      });
      it("should accept mistakes with error keywords", () => {
        const mistake = "The exception was thrown due to invalid input validation.";
        const result = validator.validateMistake(mistake);
        expect(result.isValid).toBe(true);
        expect(result.confidence).toBeGreaterThan(0.5);
      });
    });
    describe("Invalid Mistakes", () => {
      it("should reject too short text", () => {
        const mistake = "Bug found";
        const result = validator.validateMistake(mistake);
        expect(result.isValid).toBe(false);
        expect(result.reasons.some((r) => r.includes("Too short"))).toBe(true);
      });
      it("should reject session summary artifacts", () => {
        const summary = "Session summary: Several errors were encountered.";
        const result = validator.validateMistake(summary);
        expect(result.isValid).toBe(false);
      });
      it("should reject noise patterns", () => {
        const noise = "Ok, thanks!";
        const result = validator.validateMistake(noise);
        expect(result.isValid).toBe(false);
      });
    });
    describe("Different Thresholds", () => {
      // Mistake validation is intentionally more lenient than decision
      // validation in both its confidence and length thresholds.
      it("should have lower min confidence than decisions", () => {
        expect(DEFAULT_MISTAKE_VALIDATION_CONFIG.minConfidence).toBeLessThan(
          DEFAULT_DECISION_VALIDATION_CONFIG.minConfidence
        );
      });
      it("should allow slightly shorter mistakes", () => {
        expect(DEFAULT_MISTAKE_VALIDATION_CONFIG.minLength).toBeLessThan(
          DEFAULT_DECISION_VALIDATION_CONFIG.minLength
        );
      });
    });
  });
  describe("Custom Configuration", () => {
    it("should accept custom actionable keywords", () => {
      const validator = getDecisionValidator({
        actionableKeywords: ["deploy", "launch", "release"],
      });
      const decision = "We will deploy the application to production on Friday.";
      const result = validator.validateDecision(decision);
      expect(result.isValid).toBe(true);
    });
    it("should accept custom min length", () => {
      const validator = getDecisionValidator({
        minLength: 10,
      });
      const decision = "Use TypeScript for safety.";
      const result = validator.validateDecision(decision);
      expect(result.isValid).toBe(true);
    });
    it("should accept custom confidence threshold", () => {
      const validator = getDecisionValidator({
        minConfidence: 0.9,
      });
      // Even good decisions may not pass very high threshold
      const decision = "Use React for the frontend.";
      const result = validator.validateDecision(decision);
      // Result validity depends on whether confidence exceeds 0.9
      expect(result.confidence).toBeDefined();
    });
  });
  describe("Edge Cases", () => {
    it("should handle empty text", () => {
      const validator = getDecisionValidator();
      const result = validator.validateDecision("");
      expect(result.isValid).toBe(false);
    });
    it("should handle whitespace-only text", () => {
      const validator = getDecisionValidator();
      const result = validator.validateDecision("   \n\t  ");
      expect(result.isValid).toBe(false);
    });
    it("should handle unicode characters", () => {
      const validator = getDecisionValidator();
      const decision = "我们决定使用 PostgreSQL 数据库因为它支持 JSON。Use the database.";
      const result = validator.validateDecision(decision);
      // Should still check for actionable keywords
      expect(result.reasons).toBeDefined();
    });
    it("should handle special characters", () => {
      const validator = getDecisionValidator();
      const decision = "We chose to implement the /api/v2/* endpoints using the new architecture.";
      const result = validator.validateDecision(decision);
      expect(result).toBeDefined();
    });
  });
  describe("Factory Functions", () => {
    it("should create decision validator", () => {
      const validator = getDecisionValidator();
      expect(validator).toBeInstanceOf(ExtractionValidator);
    });
    it("should create mistake validator", () => {
      const validator = getMistakeValidator();
      expect(validator).toBeInstanceOf(ExtractionValidator);
    });
    it("should allow config overrides", () => {
      const validator = getDecisionValidator({ minLength: 5 });
      const result = validator.validateDecision("Use Redis cache.");
      // With lower min length, this should pass
      expect(result.reasons.some((r) => r.includes("Too short"))).toBe(false);
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/GlobalIndex.test.ts | TypeScript | /**
* Unit tests for GlobalIndex
*/
import { GlobalIndex } from "../../storage/GlobalIndex.js";
import { SQLiteManager } from "../../storage/SQLiteManager.js";
import { rmSync, existsSync } from "fs";
import { join } from "path";
import { tmpdir } from "os";
// Covers project registration/update, filtering by source type, aggregate
// stats, removal, and persistence of GlobalIndex backed by a temp SQLite db.
describe("GlobalIndex", () => {
  let testDbPath: string;
  let db: SQLiteManager;
  let globalIndex: GlobalIndex;
  beforeEach(() => {
    // Use a unique temporary database path per test. Date.now() alone can
    // collide when consecutive tests start within the same millisecond (or
    // when suites run in parallel workers), so include the pid and a random
    // suffix to guarantee uniqueness.
    testDbPath = join(
      tmpdir(),
      `global-index-test-${process.pid}-${Date.now()}-${Math.random().toString(36).slice(2)}.db`
    );
    db = new SQLiteManager({ dbPath: testDbPath });
    globalIndex = new GlobalIndex(db);
  });
  afterEach(() => {
    // Cleanup: close handles first, then remove the db file along with the
    // SQLite WAL/shared-memory sidecar files it may have created.
    globalIndex.close();
    db.close();
    if (existsSync(testDbPath)) {
      rmSync(testDbPath, { force: true });
    }
    // Also remove WAL files
    if (existsSync(`${testDbPath}-wal`)) {
      rmSync(`${testDbPath}-wal`, { force: true });
    }
    if (existsSync(`${testDbPath}-shm`)) {
      rmSync(`${testDbPath}-shm`, { force: true });
    }
  });
  describe("registerProject", () => {
    it("should register a new Claude Code project", () => {
      const project = globalIndex.registerProject({
        project_path: "/test/project",
        source_type: "claude-code",
        message_count: 100,
        conversation_count: 10,
        decision_count: 5,
        mistake_count: 2,
      });
      expect(project.project_path).toBe("/test/project");
      expect(project.source_type).toBe("claude-code");
      expect(project.message_count).toBe(100);
      expect(project.conversation_count).toBe(10);
    });
    it("should register a Codex project", () => {
      const project = globalIndex.registerProject({
        project_path: "/home/user/.codex",
        source_type: "codex",
        message_count: 200,
        conversation_count: 20,
        decision_count: 15,
        mistake_count: 3,
      });
      expect(project.source_type).toBe("codex");
      expect(project.message_count).toBe(200);
    });
    it("should update existing project on re-registration", () => {
      // Register first time
      globalIndex.registerProject({
        project_path: "/test/project",
        source_type: "claude-code",
        message_count: 100,
        conversation_count: 10,
        decision_count: 5,
        mistake_count: 2,
      });
      // Update with new counts
      const updated = globalIndex.registerProject({
        project_path: "/test/project",
        source_type: "claude-code",
        message_count: 150,
        conversation_count: 15,
        decision_count: 8,
        mistake_count: 3,
      });
      expect(updated.message_count).toBe(150);
      expect(updated.conversation_count).toBe(15);
      // Verify only one project exists (upsert semantics, not duplicate rows)
      const projects = globalIndex.getAllProjects();
      expect(projects).toHaveLength(1);
    });
    it("should store and retrieve metadata", () => {
      // Arbitrary JSON metadata should round-trip unchanged.
      const metadata = {
        indexed_folders: ["folder1", "folder2"],
        custom_field: "value",
      };
      const project = globalIndex.registerProject({
        project_path: "/test/project",
        source_type: "claude-code",
        message_count: 100,
        conversation_count: 10,
        decision_count: 5,
        mistake_count: 2,
        metadata,
      });
      expect(project.metadata).toEqual(metadata);
    });
  });
  describe("getAllProjects", () => {
    beforeEach(() => {
      // Register multiple projects: two claude-code, one codex.
      globalIndex.registerProject({
        project_path: "/test/project1",
        source_type: "claude-code",
        message_count: 100,
        conversation_count: 10,
        decision_count: 5,
        mistake_count: 2,
      });
      globalIndex.registerProject({
        project_path: "/test/project2",
        source_type: "claude-code",
        message_count: 200,
        conversation_count: 20,
        decision_count: 10,
        mistake_count: 4,
      });
      globalIndex.registerProject({
        project_path: "/home/user/.codex",
        source_type: "codex",
        message_count: 300,
        conversation_count: 30,
        decision_count: 15,
        mistake_count: 6,
      });
    });
    it("should return all projects when no filter", () => {
      const projects = globalIndex.getAllProjects();
      expect(projects).toHaveLength(3);
    });
    it("should filter Claude Code projects", () => {
      const projects = globalIndex.getAllProjects("claude-code");
      expect(projects).toHaveLength(2);
      expect(projects.every((p) => p.source_type === "claude-code")).toBe(true);
    });
    it("should filter Codex projects", () => {
      const projects = globalIndex.getAllProjects("codex");
      expect(projects).toHaveLength(1);
      expect(projects[0].source_type).toBe("codex");
      expect(projects[0].project_path).toBe("/home/user/.codex");
    });
  });
  describe("getProject", () => {
    beforeEach(() => {
      globalIndex.registerProject({
        project_path: "/test/project",
        source_type: "claude-code",
        message_count: 100,
        conversation_count: 10,
        decision_count: 5,
        mistake_count: 2,
      });
    });
    it("should retrieve project by path", () => {
      const project = globalIndex.getProject("/test/project");
      expect(project).toBeDefined();
      expect(project?.project_path).toBe("/test/project");
    });
    it("should return null for non-existent project", () => {
      const project = globalIndex.getProject("/nonexistent");
      expect(project).toBeNull();
    });
  });
  describe("getGlobalStats", () => {
    it("should return zero stats for empty index", () => {
      const stats = globalIndex.getGlobalStats();
      expect(stats.total_projects).toBe(0);
      expect(stats.claude_code_projects).toBe(0);
      expect(stats.codex_projects).toBe(0);
      expect(stats.total_messages).toBe(0);
      expect(stats.total_conversations).toBe(0);
    });
    it("should aggregate stats correctly", () => {
      globalIndex.registerProject({
        project_path: "/test/project1",
        source_type: "claude-code",
        message_count: 100,
        conversation_count: 10,
        decision_count: 5,
        mistake_count: 2,
      });
      globalIndex.registerProject({
        project_path: "/test/project2",
        source_type: "claude-code",
        message_count: 200,
        conversation_count: 20,
        decision_count: 10,
        mistake_count: 4,
      });
      globalIndex.registerProject({
        project_path: "/home/user/.codex",
        source_type: "codex",
        message_count: 300,
        conversation_count: 30,
        decision_count: 15,
        mistake_count: 6,
      });
      // Totals are straight sums across all registered projects.
      const stats = globalIndex.getGlobalStats();
      expect(stats.total_projects).toBe(3);
      expect(stats.claude_code_projects).toBe(2);
      expect(stats.codex_projects).toBe(1);
      expect(stats.total_messages).toBe(600);
      expect(stats.total_conversations).toBe(60);
      expect(stats.total_decisions).toBe(30);
      expect(stats.total_mistakes).toBe(12);
    });
  });
  describe("removeProject", () => {
    beforeEach(() => {
      globalIndex.registerProject({
        project_path: "/test/project",
        source_type: "claude-code",
        message_count: 100,
        conversation_count: 10,
        decision_count: 5,
        mistake_count: 2,
      });
    });
    it("should remove project successfully", () => {
      const removed = globalIndex.removeProject("/test/project");
      expect(removed).toBe(true);
      const project = globalIndex.getProject("/test/project");
      expect(project).toBeNull();
    });
    it("should return false for non-existent project", () => {
      const removed = globalIndex.removeProject("/nonexistent");
      expect(removed).toBe(false);
    });
  });
  describe("getDbPath", () => {
    it("should return the database path", () => {
      const path = globalIndex.getDbPath();
      expect(path).toBe(testDbPath);
    });
  });
  describe("close", () => {
    it("should close database connection", () => {
      // Should not throw
      expect(() => globalIndex.close()).not.toThrow();
      // After close, operations should fail or create new instance
      // This is a basic test - in real usage, accessing after close would error
    });
  });
  describe("database persistence", () => {
    it("should persist data across instances", () => {
      // Register project with first instance
      globalIndex.registerProject({
        project_path: "/test/project",
        source_type: "claude-code",
        message_count: 100,
        conversation_count: 10,
        decision_count: 5,
        mistake_count: 2,
      });
      // NOTE(review): afterEach will call close() on these again — this
      // assumes close() is idempotent for both wrappers; confirm.
      globalIndex.close();
      db.close();
      // Create new instance with same path
      const newDb = new SQLiteManager({ dbPath: testDbPath });
      const newIndex = new GlobalIndex(newDb);
      const project = newIndex.getProject("/test/project");
      expect(project).toBeDefined();
      expect(project?.message_count).toBe(100);
      newIndex.close();
      newDb.close();
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/HybridReranker.test.ts | TypeScript | /**
* Unit tests for HybridReranker
*/
import {
HybridReranker,
getHybridReranker,
getRerankConfig,
DEFAULT_RERANK_CONFIG,
} from "../../search/HybridReranker.js";
import type { RankableResult } from "../../search/HybridReranker.js";
// Covers configuration, reciprocal-rank-fusion re-ranking of vector + FTS
// result lists, overlap boosting, the disabled passthrough mode, edge cases,
// and the factory helpers.
describe("HybridReranker", () => {
  describe("Constructor and Configuration", () => {
    it("should create with default config", () => {
      const reranker = new HybridReranker();
      const config = reranker.getConfig();
      expect(config.rrfK).toBe(DEFAULT_RERANK_CONFIG.rrfK);
      expect(config.vectorWeight).toBe(DEFAULT_RERANK_CONFIG.vectorWeight);
      expect(config.ftsWeight).toBe(DEFAULT_RERANK_CONFIG.ftsWeight);
      expect(config.enabled).toBe(true);
    });
    it("should accept custom config", () => {
      const reranker = new HybridReranker({
        rrfK: 30,
        vectorWeight: 0.5,
        ftsWeight: 0.5,
      });
      const config = reranker.getConfig();
      expect(config.rrfK).toBe(30);
      expect(config.vectorWeight).toBe(0.5);
      expect(config.ftsWeight).toBe(0.5);
    });
  });
  describe("Basic Re-ranking", () => {
    it("should combine vector and FTS results", () => {
      const reranker = new HybridReranker();
      const vectorResults: RankableResult[] = [
        { id: 1, score: 0.9 },
        { id: 2, score: 0.8 },
        { id: 3, score: 0.7 },
      ];
      const ftsResults: RankableResult[] = [
        { id: 2, score: 0.95 }, // Different ranking in FTS
        { id: 1, score: 0.85 },
        { id: 4, score: 0.75 }, // Only in FTS
      ];
      const results = reranker.rerank(vectorResults, ftsResults, 10);
      expect(results.length).toBeGreaterThan(0);
      // All unique IDs should be present (union of both sources)
      const ids = results.map((r) => r.id);
      expect(ids).toContain(1);
      expect(ids).toContain(2);
      expect(ids).toContain(3);
      expect(ids).toContain(4);
    });
    it("should include rank information", () => {
      const reranker = new HybridReranker();
      const vectorResults: RankableResult[] = [{ id: 1, score: 0.9 }];
      const ftsResults: RankableResult[] = [{ id: 1, score: 0.8 }];
      const results = reranker.rerank(vectorResults, ftsResults, 10);
      // Ranks are 1-based; original per-source scores are preserved.
      expect(results[0].vectorRank).toBe(1);
      expect(results[0].ftsRank).toBe(1);
      expect(results[0].vectorScore).toBe(0.9);
      expect(results[0].ftsScore).toBe(0.8);
    });
    it("should mark null for missing rankings", () => {
      const reranker = new HybridReranker();
      const vectorResults: RankableResult[] = [{ id: 1, score: 0.9 }];
      const ftsResults: RankableResult[] = [{ id: 2, score: 0.8 }];
      const results = reranker.rerank(vectorResults, ftsResults, 10);
      const result1 = results.find((r) => r.id === 1);
      const result2 = results.find((r) => r.id === 2);
      expect(result1?.vectorRank).toBe(1);
      expect(result1?.ftsRank).toBeNull();
      expect(result2?.vectorRank).toBeNull();
      expect(result2?.ftsRank).toBe(1);
    });
    it("should respect limit", () => {
      const reranker = new HybridReranker();
      const vectorResults: RankableResult[] = [
        { id: 1, score: 0.9 },
        { id: 2, score: 0.8 },
        { id: 3, score: 0.7 },
      ];
      const ftsResults: RankableResult[] = [
        { id: 4, score: 0.9 },
        { id: 5, score: 0.8 },
        { id: 6, score: 0.7 },
      ];
      // 6 unique candidates, limit 3 -> exactly 3 returned.
      const results = reranker.rerank(vectorResults, ftsResults, 3);
      expect(results).toHaveLength(3);
    });
  });
  describe("RRF Scoring", () => {
    it("should boost items that appear in both sources", () => {
      const reranker = new HybridReranker({
        vectorWeight: 0.5,
        ftsWeight: 0.5,
      });
      const vectorResults: RankableResult[] = [
        { id: 1, score: 0.9 }, // In both
        { id: 2, score: 0.85 }, // Only vector
      ];
      const ftsResults: RankableResult[] = [
        { id: 1, score: 0.8 }, // In both
        { id: 3, score: 0.85 }, // Only FTS
      ];
      const results = reranker.rerank(vectorResults, ftsResults, 10);
      // Item 1 should have highest combined score (in both sources)
      expect(results[0].id).toBe(1);
    });
    it("should handle different weights", () => {
      // Vector-heavy weighting
      const vectorHeavy = new HybridReranker({
        vectorWeight: 0.9,
        ftsWeight: 0.1,
      });
      // FTS-heavy weighting
      const ftsHeavy = new HybridReranker({
        vectorWeight: 0.1,
        ftsWeight: 0.9,
      });
      const vectorResults: RankableResult[] = [{ id: 1, score: 0.9 }];
      const ftsResults: RankableResult[] = [{ id: 2, score: 0.9 }];
      const vectorHeavyResults = vectorHeavy.rerank(vectorResults, ftsResults, 10);
      const ftsHeavyResults = ftsHeavy.rerank(vectorResults, ftsResults, 10);
      // Vector-heavy should rank vector result higher
      expect(vectorHeavyResults[0].id).toBe(1);
      // FTS-heavy should rank FTS result higher
      expect(ftsHeavyResults[0].id).toBe(2);
    });
  });
  describe("Overlap Boost", () => {
    it("should apply boost to overlapping results", () => {
      const reranker = new HybridReranker();
      const vectorResults: RankableResult[] = [
        { id: 1, score: 0.8 }, // In both
        { id: 2, score: 0.9 }, // Only vector - higher initial
      ];
      const ftsResults: RankableResult[] = [
        { id: 1, score: 0.8 }, // In both
      ];
      const results = reranker.rerankWithOverlapBoost(
        vectorResults,
        ftsResults,
        10,
        1.5 // High boost
      );
      // With boost, overlapping result should be ranked higher
      expect(results[0].id).toBe(1);
    });
    it("should use default boost of 1.2", () => {
      const reranker = new HybridReranker();
      const vectorResults: RankableResult[] = [{ id: 1, score: 0.9 }];
      const ftsResults: RankableResult[] = [{ id: 1, score: 0.8 }];
      // Compare boosted vs unboosted combined score for the same inputs.
      const withoutBoost = reranker.rerank(vectorResults, ftsResults, 10);
      const withBoost = reranker.rerankWithOverlapBoost(
        vectorResults,
        ftsResults,
        10
      );
      expect(withBoost[0].combinedScore).toBeGreaterThan(
        withoutBoost[0].combinedScore
      );
    });
  });
  describe("Disabled Re-ranking", () => {
    it("should return vector results unchanged when disabled", () => {
      // With enabled: false the reranker is a passthrough: FTS input is
      // ignored and vector scores are used as-is.
      const reranker = new HybridReranker({ enabled: false });
      const vectorResults: RankableResult[] = [
        { id: 1, score: 0.9 },
        { id: 2, score: 0.8 },
      ];
      const ftsResults: RankableResult[] = [
        { id: 3, score: 0.95 },
        { id: 4, score: 0.85 },
      ];
      const results = reranker.rerank(vectorResults, ftsResults, 10);
      expect(results).toHaveLength(2);
      expect(results[0].id).toBe(1);
      expect(results[0].combinedScore).toBe(0.9)
      expect(results[0].ftsRank).toBeNull();
      expect(results[0].ftsScore).toBeNull();
    });
  });
  describe("Edge Cases", () => {
    it("should handle empty vector results", () => {
      const reranker = new HybridReranker();
      const vectorResults: RankableResult[] = [];
      const ftsResults: RankableResult[] = [{ id: 1, score: 0.9 }];
      const results = reranker.rerank(vectorResults, ftsResults, 10);
      expect(results).toHaveLength(1);
      expect(results[0].vectorRank).toBeNull();
    });
    it("should handle empty FTS results", () => {
      const reranker = new HybridReranker();
      const vectorResults: RankableResult[] = [{ id: 1, score: 0.9 }];
      const ftsResults: RankableResult[] = [];
      const results = reranker.rerank(vectorResults, ftsResults, 10);
      expect(results).toHaveLength(1);
      expect(results[0].ftsRank).toBeNull();
    });
    it("should handle both empty", () => {
      const reranker = new HybridReranker();
      const results = reranker.rerank([], [], 10);
      expect(results).toEqual([]);
    });
    it("should handle string IDs", () => {
      // IDs may be numbers or strings; identity must be preserved.
      const reranker = new HybridReranker();
      const vectorResults: RankableResult[] = [{ id: "msg-123", score: 0.9 }];
      const ftsResults: RankableResult[] = [{ id: "msg-123", score: 0.8 }];
      const results = reranker.rerank(vectorResults, ftsResults, 10);
      expect(results[0].id).toBe("msg-123");
    });
  });
  describe("Factory Functions", () => {
    it("should create reranker with config", () => {
      const reranker = getHybridReranker({ rrfK: 30 });
      expect(reranker.getConfig().rrfK).toBe(30);
    });
    it("should get config from environment", () => {
      // Mutates process.env for the duration of the test; the original
      // value is restored (or deleted) afterwards.
      const originalEnv = process.env.CCCMEMORY_RERANK_ENABLED;
      process.env.CCCMEMORY_RERANK_ENABLED = "false";
      const config = getRerankConfig();
      expect(config.enabled).toBe(false);
      // Restore
      if (originalEnv !== undefined) {
        process.env.CCCMEMORY_RERANK_ENABLED = originalEnv;
      } else {
        delete process.env.CCCMEMORY_RERANK_ENABLED;
      }
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/IncrementalParser.test.ts | TypeScript | /**
* Unit tests for IncrementalParser
*/
import { writeFileSync, mkdirSync, existsSync, rmSync } from "fs";
import { join } from "path";
import { tmpdir } from "os";
import { IncrementalParser } from "../../realtime/IncrementalParser.js";
// Covers incremental JSONL parsing (only-new-lines semantics), tool-use
// extraction, malformed-input tolerance, per-file tracking, and role mapping.
describe("IncrementalParser", () => {
  let parser: IncrementalParser;
  let testDir: string;
  let testFile: string;
  beforeEach(() => {
    parser = new IncrementalParser();
    // Unique per test: Date.now() alone can yield the same directory for
    // consecutive tests starting within the same millisecond (or parallel
    // workers), so include the pid and a random suffix. mkdirSync with
    // `recursive: true` is a no-op if the directory exists, so no
    // existsSync guard is needed.
    testDir = join(
      tmpdir(),
      `parser-test-${process.pid}-${Date.now()}-${Math.random().toString(36).slice(2)}`
    );
    mkdirSync(testDir, { recursive: true });
    testFile = join(testDir, "test.jsonl");
  });
  afterEach(() => {
    // Remove the whole per-test directory and everything in it.
    if (existsSync(testDir)) {
      rmSync(testDir, { recursive: true, force: true });
    }
  });
  describe("parseNewContent", () => {
    it("should return empty array for non-existent file", () => {
      const messages = parser.parseNewContent("/nonexistent/file.jsonl");
      expect(messages).toEqual([]);
    });
    it("should parse a single user message", () => {
      const jsonl = JSON.stringify({
        type: "message",
        role: "user",
        content: "Hello, Claude!",
      });
      writeFileSync(testFile, jsonl + "\n");
      const messages = parser.parseNewContent(testFile);
      expect(messages.length).toBe(1);
      expect(messages[0].type).toBe("user");
      expect(messages[0].content).toBe("Hello, Claude!");
    });
    it("should parse assistant message", () => {
      const jsonl = JSON.stringify({
        type: "message",
        role: "assistant",
        content: "Hello! How can I help?",
      });
      writeFileSync(testFile, jsonl + "\n");
      const messages = parser.parseNewContent(testFile);
      expect(messages.length).toBe(1);
      expect(messages[0].type).toBe("assistant");
    });
    it("should parse content array format", () => {
      // Content blocks are joined with newlines into one string.
      const jsonl = JSON.stringify({
        role: "assistant",
        content: [
          { type: "text", text: "First part." },
          { type: "text", text: "Second part." },
        ],
      });
      writeFileSync(testFile, jsonl + "\n");
      const messages = parser.parseNewContent(testFile);
      expect(messages.length).toBe(1);
      expect(messages[0].content).toBe("First part.\nSecond part.");
    });
    it("should only return new lines on subsequent reads", () => {
      // First write
      writeFileSync(testFile, JSON.stringify({ role: "user", content: "First" }) + "\n");
      const firstRead = parser.parseNewContent(testFile);
      expect(firstRead.length).toBe(1);
      // Add more content (rewrite keeps the original line plus a new one)
      writeFileSync(
        testFile,
        JSON.stringify({ role: "user", content: "First" }) +
          "\n" +
          JSON.stringify({ role: "assistant", content: "Second" }) +
          "\n"
      );
      const secondRead = parser.parseNewContent(testFile);
      expect(secondRead.length).toBe(1);
      expect(secondRead[0].content).toBe("Second");
    });
    it("should extract tool use information", () => {
      const jsonl = JSON.stringify({
        role: "assistant",
        content: [
          { type: "text", text: "Let me read that file." },
          {
            type: "tool_use",
            name: "Read",
            input: { file_path: "/test/file.ts" },
          },
        ],
      });
      writeFileSync(testFile, jsonl + "\n");
      const messages = parser.parseNewContent(testFile);
      expect(messages.length).toBe(1);
      expect(messages[0].toolUse).toBeDefined();
      expect(messages[0].toolUse?.name).toBe("Read");
      expect(messages[0].toolUse?.input).toEqual({ file_path: "/test/file.ts" });
    });
    it("should skip invalid JSON lines", () => {
      // A malformed line must not abort parsing of subsequent valid lines.
      writeFileSync(
        testFile,
        "invalid json\n" +
          JSON.stringify({ role: "user", content: "Valid" }) +
          "\n"
      );
      const messages = parser.parseNewContent(testFile);
      expect(messages.length).toBe(1);
      expect(messages[0].content).toBe("Valid");
    });
    it("should handle empty files", () => {
      writeFileSync(testFile, "");
      const messages = parser.parseNewContent(testFile);
      expect(messages).toEqual([]);
    });
    it("should handle files with only whitespace lines", () => {
      writeFileSync(testFile, "   \n\n  \n");
      const messages = parser.parseNewContent(testFile);
      expect(messages).toEqual([]);
    });
  });
  describe("file tracking", () => {
    it("should track file info after parsing", () => {
      writeFileSync(testFile, JSON.stringify({ role: "user", content: "Test" }) + "\n");
      parser.parseNewContent(testFile);
      const fileInfo = parser.getFileInfo(testFile);
      expect(fileInfo).toBeDefined();
      expect(fileInfo?.path).toBe(testFile);
      expect(fileInfo?.lineCount).toBe(1);
    });
    it("should list all tracked files", () => {
      const file1 = join(testDir, "file1.jsonl");
      const file2 = join(testDir, "file2.jsonl");
      writeFileSync(file1, JSON.stringify({ role: "user", content: "Test1" }) + "\n");
      writeFileSync(file2, JSON.stringify({ role: "user", content: "Test2" }) + "\n");
      parser.parseNewContent(file1);
      parser.parseNewContent(file2);
      const tracked = parser.getTrackedFiles();
      expect(tracked).toContain(file1);
      expect(tracked).toContain(file2);
    });
    it("should reset file tracking", () => {
      writeFileSync(testFile, JSON.stringify({ role: "user", content: "Test" }) + "\n");
      parser.parseNewContent(testFile);
      parser.resetFile(testFile);
      expect(parser.getFileInfo(testFile)).toBeUndefined();
    });
    it("should reset all file tracking", () => {
      const file1 = join(testDir, "file1.jsonl");
      const file2 = join(testDir, "file2.jsonl");
      writeFileSync(file1, JSON.stringify({ role: "user", content: "Test1" }) + "\n");
      writeFileSync(file2, JSON.stringify({ role: "user", content: "Test2" }) + "\n");
      parser.parseNewContent(file1);
      parser.parseNewContent(file2);
      parser.resetAll();
      expect(parser.getTrackedFiles()).toEqual([]);
    });
  });
  describe("message type detection", () => {
    it("should detect system messages", () => {
      const jsonl = JSON.stringify({
        role: "system",
        content: "System prompt",
      });
      writeFileSync(testFile, jsonl + "\n");
      const messages = parser.parseNewContent(testFile);
      expect(messages[0].type).toBe("system");
    });
    it("should handle model role as assistant", () => {
      // Gemini-style "model" role is normalized to "assistant".
      const jsonl = JSON.stringify({
        role: "model",
        content: "Response",
      });
      writeFileSync(testFile, jsonl + "\n");
      const messages = parser.parseNewContent(testFile);
      expect(messages[0].type).toBe("assistant");
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/LiveExtractor.test.ts | TypeScript | /**
* Unit tests for LiveExtractor
*/
import Database from "better-sqlite3";
import { LiveExtractor } from "../../realtime/LiveExtractor.js";
import type { ParsedMessage } from "../../realtime/IncrementalParser.js";
describe("LiveExtractor", () => {
  let db: Database.Database;
  let extractor: LiveExtractor;
  // Path shaped like a real Claude Code transcript: the parent folder name
  // ("-test-project") encodes the project path the extractor derives.
  const testFilePath = "/Users/test/.claude/projects/-test-project/conversation.jsonl";
  beforeEach(() => {
    // Fresh in-memory SQLite per test: isolated state, no disk I/O.
    db = new Database(":memory:");
    // Create required tables
    db.exec(`
      CREATE TABLE IF NOT EXISTS working_memory (
        id TEXT PRIMARY KEY,
        key TEXT NOT NULL,
        value TEXT NOT NULL,
        context TEXT,
        tags TEXT,
        session_id TEXT,
        project_path TEXT NOT NULL,
        created_at INTEGER NOT NULL,
        updated_at INTEGER NOT NULL,
        expires_at INTEGER,
        embedding BLOB,
        UNIQUE(project_path, key)
      );
      CREATE VIRTUAL TABLE IF NOT EXISTS working_memory_fts USING fts5(
        id UNINDEXED,
        key,
        value,
        context
      );
    `);
    extractor = new LiveExtractor(db);
  });
  afterEach(() => {
    db.close();
  });
  describe("processMessages", () => {
    it("should return extraction result with counts", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "I'll use TypeScript for the implementation.",
          timestamp: Date.now(),
        },
      ];
      const result = await extractor.processMessages(testFilePath, messages);
      expect(result.messagesProcessed).toBe(1);
      expect(typeof result.decisionsExtracted).toBe("number");
      expect(typeof result.filesTracked).toBe("number");
      expect(typeof result.errorsDetected).toBe("number");
    });
    it("should extract decisions from assistant messages", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content:
            "I've decided to use SQLite for storage. This is the best approach because it's simple and works well.",
          timestamp: Date.now(),
        },
      ];
      const result = await extractor.processMessages(testFilePath, messages);
      // Should have extracted at least one decision
      // NOTE(review): >= 0 is vacuously true, so this assertion can never
      // fail — presumably weakened because extraction is heuristic; consider
      // toBeGreaterThan(0) if the patterns are deterministic.
      expect(result.decisionsExtracted).toBeGreaterThanOrEqual(0);
    });
    it("should track file operations from tool uses", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "Reading the file...",
          toolUse: {
            name: "Read",
            input: { file_path: "/test/file.ts" },
          },
          timestamp: Date.now(),
        },
      ];
      const result = await extractor.processMessages(testFilePath, messages);
      expect(result.filesTracked).toBe(1);
    });
    it("should detect errors from tool results", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "user",
          content: "",
          // Tool results arrive on "user" messages in the transcript format.
          toolResult: {
            name: "Bash",
            output: "Error: Command failed with exit code 1",
            isError: true,
          },
          timestamp: Date.now(),
        },
      ];
      const result = await extractor.processMessages(testFilePath, messages);
      expect(result.errorsDetected).toBe(1);
    });
    it("should not process user messages for decisions", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "user",
          content: "I've decided to use PostgreSQL.",
          timestamp: Date.now(),
        },
      ];
      const result = await extractor.processMessages(testFilePath, messages);
      // User messages shouldn't be processed for decisions
      expect(result.decisionsExtracted).toBe(0);
    });
  });
  describe("decision extraction", () => {
    it("should extract 'I will' pattern decisions", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "I will implement the caching layer using Redis for better performance.",
          timestamp: Date.now(),
        },
      ];
      await extractor.processMessages(testFilePath, messages);
      // Check that decision was stored in working memory
      const stored = db
        .prepare("SELECT * FROM working_memory WHERE tags LIKE '%decision%'")
        .all() as Array<{ value: string }>;
      // NOTE(review): >= 0 cannot fail; this only checks the query runs.
      expect(stored.length).toBeGreaterThanOrEqual(0);
    });
    it("should extract 'Let us' pattern decisions", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "Let's use Jest for testing since it's already configured.",
          timestamp: Date.now(),
        },
      ];
      const extractResult = await extractor.processMessages(testFilePath, messages);
      // NOTE(review): vacuous assertion (always true) — see note above.
      expect(extractResult.decisionsExtracted).toBeGreaterThanOrEqual(0);
    });
  });
  describe("file operation tracking", () => {
    it("should track Read operations", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "",
          toolUse: {
            name: "Read",
            input: { file_path: "/path/to/file.ts" },
          },
          timestamp: Date.now(),
        },
      ];
      const result = await extractor.processMessages(testFilePath, messages);
      expect(result.filesTracked).toBe(1);
      // Verify stored in memory
      const stored = db
        .prepare("SELECT * FROM working_memory WHERE tags LIKE '%read%'")
        .get() as { value: string } | undefined;
      expect(stored).toBeDefined();
      expect(stored?.value).toContain("/path/to/file.ts");
    });
    it("should track Edit operations", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "",
          toolUse: {
            name: "Edit",
            input: { file_path: "/path/to/file.ts", old_string: "old", new_string: "new" },
          },
          timestamp: Date.now(),
        },
      ];
      const result = await extractor.processMessages(testFilePath, messages);
      expect(result.filesTracked).toBe(1);
      const stored = db
        .prepare("SELECT * FROM working_memory WHERE tags LIKE '%edit%'")
        .get() as { value: string } | undefined;
      // Stored value encodes the action as an "edit:" prefix.
      expect(stored?.value).toContain("edit:");
    });
    it("should track Write operations", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "",
          toolUse: {
            name: "Write",
            input: { file_path: "/path/to/new-file.ts", content: "..." },
          },
          timestamp: Date.now(),
        },
      ];
      const result = await extractor.processMessages(testFilePath, messages);
      expect(result.filesTracked).toBe(1);
    });
    it("should update rather than duplicate file entries", async () => {
      // Same file touched twice (Read then Edit) must upsert, not duplicate.
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "",
          toolUse: {
            name: "Read",
            input: { file_path: "/path/to/file.ts" },
          },
          timestamp: Date.now(),
        },
        {
          type: "assistant",
          content: "",
          toolUse: {
            name: "Edit",
            input: { file_path: "/path/to/file.ts" },
          },
          timestamp: Date.now(),
        },
      ];
      await extractor.processMessages(testFilePath, messages);
      // Should only have one entry for the file (with updated action)
      const stored = db
        .prepare("SELECT * FROM working_memory WHERE key LIKE 'file_%'")
        .all() as Array<{ key: string }>;
      // Should have exactly one entry for this file
      const fileEntries = stored.filter((s) => s.key.includes("file_"));
      expect(fileEntries.length).toBe(1);
    });
  });
  describe("error detection", () => {
    it("should detect errors in content", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "I encountered an error: Cannot read property 'foo' of undefined",
          timestamp: Date.now(),
        },
      ];
      const result = await extractor.processMessages(testFilePath, messages);
      expect(result.errorsDetected).toBeGreaterThan(0);
    });
    it("should store errors in working memory", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "user",
          content: "",
          toolResult: {
            name: "Bash",
            output: "failed: network connection refused",
            isError: true,
          },
          timestamp: Date.now(),
        },
      ];
      await extractor.processMessages(testFilePath, messages);
      const stored = db
        .prepare("SELECT * FROM working_memory WHERE tags LIKE '%error%'")
        .all() as Array<{ value: string }>;
      expect(stored.length).toBeGreaterThan(0);
    });
  });
  describe("project path extraction", () => {
    it("should extract project path from Claude conversation file path", async () => {
      const messages: ParsedMessage[] = [
        {
          type: "assistant",
          content: "Test message",
          timestamp: Date.now(),
        },
      ];
      // Just verify processing completes without error
      // NOTE(review): smoke test only — the derived project path itself is
      // never asserted here.
      await extractor.processMessages(testFilePath, messages);
      expect(true).toBe(true);
    });
  });
  describe("getMemoryStore", () => {
    it("should provide access to the working memory store", () => {
      const store = extractor.getMemoryStore();
      expect(store).toBeDefined();
      expect(typeof store.remember).toBe("function");
      expect(typeof store.recall).toBe("function");
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/Logger.test.ts | TypeScript | /**
* Unit tests for Logger
*/
import { jest } from '@jest/globals';
import { Logger, LogLevel, createLogger } from '../../utils/Logger';
describe('Logger', () => {
  // Keep originals so afterEach can restore the real console, even if a
  // test throws mid-way.
  const originalLog = console.log;
  const originalWarn = console.warn;
  const originalError = console.error;
  const originalDebug = console.debug;
  let consoleLogMock: ReturnType<typeof jest.fn>;
  let consoleWarnMock: ReturnType<typeof jest.fn>;
  let consoleErrorMock: ReturnType<typeof jest.fn>;
  let consoleDebugMock: ReturnType<typeof jest.fn>;
  beforeEach(() => {
    // All four console channels are mocked; only console.error is asserted
    // below, the rest presumably guard against accidental stdout writes.
    consoleLogMock = jest.fn();
    consoleWarnMock = jest.fn();
    consoleErrorMock = jest.fn();
    consoleDebugMock = jest.fn();
    console.log = consoleLogMock;
    console.warn = consoleWarnMock;
    console.error = consoleErrorMock;
    console.debug = consoleDebugMock;
  });
  afterEach(() => {
    console.log = originalLog;
    console.warn = originalWarn;
    console.error = originalError;
    console.debug = originalDebug;
  });
  // Note: All logging goes to stderr (console.error) to avoid interfering with MCP JSON-RPC on stdout
  describe('Log Levels', () => {
    it('should log debug messages when level is DEBUG', () => {
      const logger = new Logger({ level: LogLevel.DEBUG });
      logger.debug('test debug');
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.stringContaining('[DEBUG] test debug')
      );
    });
    it('should not log debug messages when level is INFO', () => {
      const logger = new Logger({ level: LogLevel.INFO });
      logger.debug('test debug');
      expect(consoleErrorMock).not.toHaveBeenCalledWith(
        expect.stringContaining('[DEBUG]')
      );
    });
    it('should log info messages when level is INFO', () => {
      const logger = new Logger({ level: LogLevel.INFO });
      logger.info('test info');
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.stringContaining('[INFO] test info')
      );
    });
    it('should log warnings when level is WARN', () => {
      const logger = new Logger({ level: LogLevel.WARN });
      logger.warn('test warning');
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.stringContaining('[WARN] test warning')
      );
    });
    it('should not log info when level is WARN', () => {
      const logger = new Logger({ level: LogLevel.WARN });
      logger.info('test info');
      expect(consoleErrorMock).not.toHaveBeenCalledWith(
        expect.stringContaining('[INFO]')
      );
    });
    it('should log errors when level is ERROR', () => {
      const logger = new Logger({ level: LogLevel.ERROR });
      logger.error('test error');
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.stringContaining('[ERROR] test error')
      );
    });
    it('should not log anything when level is SILENT', () => {
      const logger = new Logger({ level: LogLevel.SILENT });
      logger.debug('debug');
      logger.info('info');
      logger.warn('warn');
      logger.error('error');
      expect(consoleErrorMock).not.toHaveBeenCalled();
    });
  });
  describe('Formatting', () => {
    it('should include prefix when configured', () => {
      const logger = new Logger({ prefix: 'TestModule', level: LogLevel.INFO });
      logger.info('test message');
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.stringContaining('[TestModule]')
      );
    });
    it('should include timestamp when configured', () => {
      const logger = new Logger({ timestamp: true, level: LogLevel.INFO });
      logger.info('test message');
      // ISO-8601 date prefix, e.g. "2024-01-01T..."
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.stringMatching(/^\d{4}-\d{2}-\d{2}T/)
      );
    });
    it('should format with all components', () => {
      const logger = new Logger({
        prefix: 'Test',
        timestamp: true,
        level: LogLevel.INFO,
      });
      logger.info('message');
      // Verifies component ORDER implicitly: timestamp first, then tags.
      const call = consoleErrorMock.mock.calls[0][0] as string;
      expect(call).toMatch(/^\d{4}/); // Timestamp
      expect(call).toContain('[Test]'); // Prefix
      expect(call).toContain('[INFO]'); // Level
      expect(call).toContain('message'); // Message
    });
  });
  describe('Child Loggers', () => {
    it('should create child logger with combined prefix', () => {
      const parent = new Logger({ prefix: 'Parent', level: LogLevel.INFO });
      const child = parent.child('Child');
      child.info('test');
      // Child prefixes are joined with ':'.
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.stringContaining('[Parent:Child]')
      );
    });
    it('should inherit log level from parent', () => {
      const parent = new Logger({ level: LogLevel.ERROR });
      const child = parent.child('Child');
      child.info('should not log');
      child.error('should log');
      // Both info and error go to stderr, but info shouldn't log when level is ERROR
      const calls = consoleErrorMock.mock.calls.map(c => c[0] as string);
      expect(calls.some(c => c.includes('[INFO]'))).toBe(false);
      expect(calls.some(c => c.includes('[ERROR]'))).toBe(true);
    });
  });
  describe('Dynamic Level Changes', () => {
    it('should allow changing log level', () => {
      const logger = new Logger({ level: LogLevel.ERROR });
      logger.info('not logged');
      // Initially should not have [INFO] calls
      let calls = consoleErrorMock.mock.calls.map(c => c[0] as string);
      expect(calls.some(c => c.includes('[INFO]'))).toBe(false);
      logger.setLevel(LogLevel.INFO);
      logger.info('now logged');
      calls = consoleErrorMock.mock.calls.map(c => c[0] as string);
      expect(calls.some(c => c.includes('[INFO]'))).toBe(true);
    });
    it('should return current log level', () => {
      const logger = new Logger({ level: LogLevel.WARN });
      expect(logger.getLevel()).toBe(LogLevel.WARN);
      logger.setLevel(LogLevel.DEBUG);
      expect(logger.getLevel()).toBe(LogLevel.DEBUG);
    });
  });
  describe('Success Messages', () => {
    it('should log success messages at INFO level', () => {
      const logger = new Logger({ level: LogLevel.INFO });
      logger.success('operation complete');
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.stringContaining('[✓] operation complete')
      );
    });
    it('should not log success when level is WARN', () => {
      const logger = new Logger({ level: LogLevel.WARN });
      logger.success('operation complete');
      // Success uses INFO level, so it shouldn't log when level is WARN
      const calls = consoleErrorMock.mock.calls.map(c => c[0] as string);
      expect(calls.some(c => c.includes('[✓]'))).toBe(false);
    });
  });
  describe('Additional Arguments', () => {
    it('should pass additional arguments to console', () => {
      const logger = new Logger({ level: LogLevel.INFO });
      const obj = { foo: 'bar' };
      logger.info('message', obj, 123);
      // Extra args are forwarded verbatim after the formatted message.
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.any(String),
        obj,
        123
      );
    });
  });
  describe('Factory Function', () => {
    it('should create logger with module prefix', () => {
      const logger = createLogger('MyModule');
      logger.info('test');
      expect(consoleErrorMock).toHaveBeenCalledWith(
        expect.stringContaining('[MyModule]')
      );
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/MigrationToolHandlers.test.ts | TypeScript | /**
* Unit tests for Migration Tool Handlers
* Following TDD approach - tests written FIRST
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { tmpdir } from "os";
import { join, basename, dirname } from "path";
import { mkdirSync, writeFileSync, rmSync, existsSync, readdirSync } from "fs";
import { ToolHandlers } from "../../tools/ToolHandlers.js";
import { ConversationMemory } from "../../ConversationMemory.js";
import { getSQLiteManager, resetSQLiteManager } from "../../storage/SQLiteManager.js";
// Seeds one `projects` row and returns its rowid for use as a foreign key.
const insertProject = (
  db: ReturnType<ReturnType<typeof getSQLiteManager>["getDatabase"]>,
  projectPath: string
) => {
  const timestamp = Date.now();
  const stmt = db.prepare(
    "INSERT INTO projects (canonical_path, display_path, created_at, updated_at) VALUES (?, ?, ?, ?)"
  );
  const { lastInsertRowid } = stmt.run(projectPath, projectPath, timestamp, timestamp);
  return Number(lastInsertRowid);
};
// Seeds a `conversations` row for the given project and returns its rowid.
// first_message_at and last_message_at are both set to lastMessageAt;
// source_type is fixed to "claude-code".
const insertConversation = (
  db: ReturnType<ReturnType<typeof getSQLiteManager>["getDatabase"]>,
  projectId: number,
  projectPath: string,
  externalId: string,
  lastMessageAt: number,
  messageCount = 1
) => {
  const now = Date.now();
  const result = db
    .prepare(
      `
      INSERT INTO conversations
      (project_id, project_path, source_type, external_id, first_message_at, last_message_at, message_count, created_at, updated_at)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
    `
    )
    .run(projectId, projectPath, "claude-code", externalId, lastMessageAt, lastMessageAt, messageCount, now, now);
  return Number(result.lastInsertRowid);
};
// Seeds a minimal user `messages` row (fixed content/role) tied to a
// conversation; used only so message-count statistics have data to count.
const insertMessage = (
  db: ReturnType<ReturnType<typeof getSQLiteManager>["getDatabase"]>,
  conversationId: number,
  externalId: string,
  timestamp: number
) => {
  db.prepare(
    `
    INSERT INTO messages
    (conversation_id, external_id, message_type, role, content, timestamp, metadata)
    VALUES (?, ?, 'user', 'user', 'content', ?, '{}')
  `
  ).run(conversationId, externalId, timestamp);
};
describe("Migration Tool Handlers", () => {
  let testDir: string;
  let projectsDir: string;
  let handlers: ToolHandlers;
  let memory: ConversationMemory;
  // FIX: the original beforeEach overwrote HOME/USERPROFILE and never
  // restored them, leaking the fake home dir into every later test suite.
  // Capture the originals here and restore them in afterEach.
  let savedHome: string | undefined;
  let savedUserProfile: string | undefined;
  beforeEach(() => {
    // Create temp directory structure
    testDir = join(tmpdir(), `migration-tool-test-${Date.now()}`);
    projectsDir = join(testDir, ".claude", "projects");
    mkdirSync(projectsDir, { recursive: true });
    // Mock HOME to use test directory
    savedHome = process.env.HOME;
    savedUserProfile = process.env.USERPROFILE;
    process.env.HOME = testDir;
    process.env.USERPROFILE = testDir;
    const db = getSQLiteManager();
    memory = new ConversationMemory();
    handlers = new ToolHandlers(memory, db, projectsDir);
  });
  afterEach(() => {
    // Restore the real environment before tearing down the temp dir.
    if (savedHome === undefined) {
      delete process.env.HOME;
    } else {
      process.env.HOME = savedHome;
    }
    if (savedUserProfile === undefined) {
      delete process.env.USERPROFILE;
    } else {
      process.env.USERPROFILE = savedUserProfile;
    }
    resetSQLiteManager();
    if (existsSync(testDir)) {
      rmSync(testDir, { recursive: true, force: true });
    }
  });
  describe("discover_old_conversations", () => {
    it("should discover old conversation folders", async () => {
      // Setup: Create old folder with database
      const oldFolder = join(projectsDir, "-Users-test-old-project");
      mkdirSync(oldFolder, { recursive: true });
      const db = getSQLiteManager().getDatabase();
      const projectId = insertProject(db, "/Users/test/old-project");
      const conv1 = insertConversation(db, projectId, "/Users/test/old-project", "c1", 2000);
      const conv2 = insertConversation(db, projectId, "/Users/test/old-project", "c2", 4000);
      insertMessage(db, conv1, "m1", 1000);
      insertMessage(db, conv1, "m2", 1001);
      insertMessage(db, conv2, "m3", 1002);
      writeFileSync(join(oldFolder, "session1.jsonl"), '{}');
      writeFileSync(join(oldFolder, "session2.jsonl"), '{}');
      // Test: Discover
      const result = await handlers.discoverOldConversations({
        current_project_path: "/Users/test/new-project",
      });
      // Verify: Found the old folder
      expect(result.success).toBe(true);
      expect(result.candidates).toHaveLength(1);
      expect(result.candidates[0].folder_name).toBe("-Users-test-old-project");
      expect(result.candidates[0].stored_project_path).toBe("/Users/test/old-project");
      expect(result.candidates[0].stats.conversations).toBe(2);
      expect(result.candidates[0].stats.files).toBe(2);
      expect(result.message).toContain("Found 1 potential old conversation folder");
    });
    it("should return empty list when no candidates found", async () => {
      // Test: No old folders exist
      const result = await handlers.discoverOldConversations({
        current_project_path: "/Users/test/project",
      });
      // Verify: Empty results
      expect(result.success).toBe(true);
      expect(result.candidates).toHaveLength(0);
      expect(result.message).toContain("No old conversation folders found");
    });
    it("should rank candidates by score", async () => {
      // Setup: Create multiple folders with different similarity
      const folder1 = join(projectsDir, "-Users-test-exact-project");
      const folder2 = join(projectsDir, "-Users-test-similar-project");
      const folder3 = join(projectsDir, "-Users-different-other");
      [folder1, folder2, folder3].forEach((folder) => {
        mkdirSync(folder, { recursive: true });
        writeFileSync(join(folder, "session.jsonl"), '{}');
      });
      const db = getSQLiteManager().getDatabase();
      const projectId1 = insertProject(db, "/Users/test/exact-project");
      insertConversation(db, projectId1, "/Users/test/exact-project", "c1", 1000);
      const projectId2 = insertProject(db, "/Users/test/similar-project");
      insertConversation(db, projectId2, "/Users/test/similar-project", "c2", 1000);
      // Test: Discover with path similar to folder1
      const result = await handlers.discoverOldConversations({
        current_project_path: "/Users/test/exact-project-renamed",
      });
      // Verify: Ranked by score (exact match should be first)
      expect(result.success).toBe(true);
      expect(result.candidates.length).toBeGreaterThan(0);
      // `?? 0` guards the case where only one candidate is returned.
      expect(result.candidates[0].score).toBeGreaterThan(result.candidates[1]?.score ?? 0);
    });
    it("should include statistics for each candidate", async () => {
      // Setup: Create folder with multiple conversations
      const oldFolder = join(projectsDir, "-Users-test-project");
      mkdirSync(oldFolder, { recursive: true });
      const db = getSQLiteManager().getDatabase();
      const projectId = insertProject(db, "/Users/test/project");
      const conv1 = insertConversation(db, projectId, "/Users/test/project", "c1", 5000);
      const conv2 = insertConversation(db, projectId, "/Users/test/project", "c2", 6000);
      insertMessage(db, conv1, "m1", 1000);
      insertMessage(db, conv1, "m2", 1001);
      insertMessage(db, conv2, "m3", 1002);
      writeFileSync(join(oldFolder, "s1.jsonl"), '{}');
      writeFileSync(join(oldFolder, "s2.jsonl"), '{}');
      writeFileSync(join(oldFolder, "s3.jsonl"), '{}');
      // Test: Discover
      const result = await handlers.discoverOldConversations({
        current_project_path: "/Users/test/project-new",
      });
      // Verify: Statistics included
      expect(result.candidates[0].stats.conversations).toBe(2);
      expect(result.candidates[0].stats.messages).toBe(3);
      expect(result.candidates[0].stats.files).toBe(3);
      // last_activity is the max last_message_at across conversations.
      expect(result.candidates[0].stats.last_activity).toBe(6000);
    });
  });
  describe("migrate_project", () => {
    it("should migrate conversation history successfully", async () => {
      // Setup: Create source folder
      const sourceFolder = join(projectsDir, "-Users-test-old");
      mkdirSync(sourceFolder, { recursive: true });
      writeFileSync(join(sourceFolder, "session1.jsonl"), 'content1');
      writeFileSync(join(sourceFolder, "session2.jsonl"), 'content2');
      const db = getSQLiteManager().getDatabase();
      const projectId = insertProject(db, "/Users/test/old");
      insertConversation(db, projectId, "/Users/test/old", "c1", 1000);
      insertConversation(db, projectId, "/Users/test/old", "c2", 2000);
      // Test: Execute migration
      const result = await handlers.migrateProject({
        source_folder: sourceFolder,
        old_project_path: "/Users/test/old",
        new_project_path: "/Users/test/new",
        dry_run: false,
      });
      // Verify: Migration successful
      expect(result.success).toBe(true);
      expect(result.files_copied).toBe(2);
      expect(result.database_updated).toBe(true);
      expect(result.message).toContain("Successfully migrated");
      expect(result.backup_created).toBe(true);
      // Verify: Target folder created
      const targetFolder = join(projectsDir, "-Users-test-new");
      expect(existsSync(targetFolder)).toBe(true);
      expect(existsSync(join(targetFolder, "session1.jsonl"))).toBe(true);
      expect(existsSync(join(targetFolder, "session2.jsonl"))).toBe(true);
      // Verify: Database updated
      const rows = db
        .prepare("SELECT project_path FROM conversations WHERE project_id = ? ORDER BY external_id")
        .all(projectId) as Array<{ project_path: string }>;
      expect(rows).toHaveLength(2);
      rows.forEach(row => {
        expect(row.project_path).toBe("/Users/test/new");
      });
    });
    it("should perform dry run without making changes", async () => {
      // Setup: Create source folder
      const sourceFolder = join(projectsDir, "-Users-test-source");
      mkdirSync(sourceFolder, { recursive: true });
      writeFileSync(join(sourceFolder, "session.jsonl"), 'content');
      // Test: Dry run
      const result = await handlers.migrateProject({
        source_folder: sourceFolder,
        old_project_path: "/old",
        new_project_path: "/new",
        dry_run: true,
      });
      // Verify: Reports what would be done but doesn't do it
      expect(result.success).toBe(true);
      expect(result.files_copied).toBe(1);
      expect(result.database_updated).toBe(false);
      expect(result.message).toContain("Dry run");
      // Verify: Target folder NOT created
      const targetFolder = join(projectsDir, "-Users-test-target");
      expect(existsSync(targetFolder)).toBe(false);
    });
    it("should detect and report conflicts", async () => {
      // Setup: Create source with data
      const sourceFolder = join(projectsDir, "-Users-test-source");
      mkdirSync(sourceFolder, { recursive: true });
      writeFileSync(join(sourceFolder, "source.jsonl"), 'source');
      // Create target folder with existing data (using same naming as handler)
      const targetFolder = join(projectsDir, "-new");
      mkdirSync(targetFolder, { recursive: true });
      writeFileSync(join(targetFolder, "existing.jsonl"), 'existing');
      // Test: Should detect conflict
      await expect(
        handlers.migrateProject({
          source_folder: sourceFolder,
          old_project_path: "/old",
          new_project_path: "/new",
          dry_run: false,
        })
      ).rejects.toThrow(/already has/i);
    });
    it("should validate source folder exists", async () => {
      // Test: Non-existent source (but path must be under projectsDir to pass containment check)
      const nonExistentFolder = join(projectsDir, "-Users-nonexistent-folder");
      await expect(
        handlers.migrateProject({
          source_folder: nonExistentFolder,
          old_project_path: "/old",
          new_project_path: "/new",
          dry_run: false,
        })
      ).rejects.toThrow(/does not exist/i);
    });
    it("should create backup before migration", async () => {
      // Setup: Create source folder
      const sourceFolder = join(projectsDir, "-Users-test-source");
      mkdirSync(sourceFolder, { recursive: true });
      writeFileSync(join(sourceFolder, "session.jsonl"), 'content');
      const db = getSQLiteManager().getDatabase();
      const projectId = insertProject(db, "/old");
      insertConversation(db, projectId, "/old", "c1", 1000);
      // Test: Execute migration
      await handlers.migrateProject({
        source_folder: sourceFolder,
        old_project_path: "/old",
        new_project_path: "/new",
        dry_run: false,
      });
      // Verify: Backup created (as "<dbfile>.bak.<suffix>" next to the DB)
      const dbPath = getSQLiteManager().getDbPath();
      const backupDir = dirname(dbPath);
      const backups = readdirSync(backupDir).filter((name) =>
        name.startsWith(`${basename(dbPath)}.bak.`)
      );
      expect(backups.length).toBeGreaterThan(0);
    });
    it("should preserve original source data", async () => {
      // Setup: Create source folder
      const sourceFolder = join(projectsDir, "-Users-test-source");
      mkdirSync(sourceFolder, { recursive: true });
      const originalContent = 'original content';
      writeFileSync(join(sourceFolder, "session.jsonl"), originalContent);
      // Test: Execute migration
      await handlers.migrateProject({
        source_folder: sourceFolder,
        old_project_path: "/old",
        new_project_path: "/new",
        dry_run: false,
      });
      // Verify: Original still exists and unchanged
      expect(existsSync(join(sourceFolder, "session.jsonl"))).toBe(true);
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/MistakeExtractor.test.ts | TypeScript | /**
* Unit tests for MistakeExtractor
*/
import { MistakeExtractor } from '../../parsers/MistakeExtractor';
import type { Message, ToolResult } from '../../parsers/ConversationParser';
describe('MistakeExtractor', () => {
let extractor: MistakeExtractor;
beforeEach(() => {
  // Fresh extractor per test — presumably stateless, but this guards
  // against accidental cross-test state; confirm against MistakeExtractor.
  extractor = new MistakeExtractor();
});
describe('extractMistakes', () => {
  // Covers the three extraction sources: tool errors, user corrections,
  // and error discussions in assistant text.
  it('should extract mistakes from tool errors', () => {
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Running the test',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    const toolResults: ToolResult[] = [
      {
        id: 'tool-1',
        tool_use_id: 'use-1',
        message_id: 'msg-1',
        content: 'TypeError: undefined is not a function',
        stdout: '',
        stderr: 'TypeError: undefined is not a function at src/app.ts:42',
        is_error: true,
        is_image: false,
        timestamp: Date.now(),
      },
    ];
    const mistakes = extractor.extractMistakes(messages, toolResults);
    // Tool errors are always extracted (no min severity for real errors)
    expect(mistakes.length).toBeGreaterThan(0);
    expect(mistakes[0].mistake_type).toBe('tool_error');
    expect(mistakes[0].what_went_wrong).toContain('TypeError');
  });
  it('should extract mistakes from user corrections', () => {
    // An assistant claim followed by an explicit user correction.
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'I will use the legacy database approach',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
      {
        id: 'msg-2',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'user',
        content: "That's wrong, you should use the new API endpoint instead because it has better caching.",
        timestamp: Date.now() + 1000,
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    // Stricter patterns require technical context and explicit correction
    expect(mistakes.length).toBeGreaterThan(0);
    const correction = mistakes.find(m => m.user_correction_message);
    expect(correction).toBeDefined();
    // "should use" pattern triggers wrong_approach classification
    expect(correction?.mistake_type).toBe('wrong_approach');
  });
  it('should extract mistakes from error discussions', () => {
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Error: The function broke because of incorrect logic. This is a logic error.',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    expect(mistakes.length).toBeGreaterThan(0);
    expect(mistakes[0].mistake_type).toBe('logic_error');
  });
  it('should handle empty inputs', () => {
    const mistakes = extractor.extractMistakes([], []);
    expect(mistakes).toEqual([]);
  });
  it('should deduplicate similar mistakes from same message', () => {
    // Test deduplication: same message_id, same content prefix, same timestamp
    // The new signature includes message_id to prevent collisions, so we test
    // that duplicates from the same message are properly deduped
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Error: This broke. Error: This broke again.',
        timestamp: 12345,
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    // Multiple errors from same message with same signature should dedupe
    expect(mistakes.length).toBeLessThanOrEqual(2);
  });
});
describe('Mistake Type Classification', () => {
  // Exercises the classifier's mapping from message text to mistake_type.
  it('should classify logic errors', () => {
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Error: TypeError caused by a logic error in the condition. The function returned undefined.',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    // Stricter ERROR_INDICATORS require explicit error patterns
    expect(mistakes.length).toBeGreaterThan(0);
    expect(mistakes[0].mistake_type).toBe('logic_error');
  });
  it('should classify wrong approach', () => {
    const messages: Message[] = [
      {
        id: 'msg-0',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Let me implement it',
        timestamp: Date.now() - 1000,
        is_sidechain: false,
        metadata: {},
      },
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'user',
        content: 'No, that is the wrong approach. We should use the better way.',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    // NOTE(review): extraction is conditional here — the type is only
    // asserted when something was extracted, so this can pass vacuously.
    expect(Array.isArray(mistakes)).toBe(true);
    if (mistakes.length > 0) {
      expect(mistakes[0].mistake_type).toBe('wrong_approach');
    }
  });
  it('should classify misunderstandings', () => {
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'I misunderstood the requirement. Error: This didn\'t work.',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    expect(Array.isArray(mistakes)).toBe(true);
    expect(mistakes.length).toBeGreaterThan(0);
    // Note: May be classified as logic_error or misunderstanding depending on pattern matching order
    expect(['misunderstanding', 'logic_error']).toContain(mistakes[0].mistake_type);
  });
  it('should classify syntax errors', () => {
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Error: Got a syntax error in the code. This is broken.',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    // NOTE(review): conditional assertion — passes even if nothing extracted.
    expect(Array.isArray(mistakes)).toBe(true);
    if (mistakes.length > 0) {
      expect(mistakes[0].mistake_type).toBe('syntax_error');
    }
  });
});
// Verifies that a user correction following an assistant turn is attached
// to the extracted mistake as its `correction` field.
describe('Correction Extraction', () => {
  it('should extract corrections with explicit error message', () => {
    // Stricter patterns require explicit correction indicators like "that's wrong"
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Using method A',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
      {
        id: 'msg-2',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'user',
        content: "That's wrong, you should use method B for better performance.",
        timestamp: Date.now() + 1000,
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    expect(mistakes.length).toBeGreaterThan(0);
    const withCorrection = mistakes.find(m => m.correction);
    expect(withCorrection).toBeDefined();
    // The "should" pattern captures everything after "should" until the period
    expect(withCorrection?.correction).toContain('use method B');
  });
  it('should extract corrections with "should"', () => {
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Doing X',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
      {
        id: 'msg-2',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'user',
        content: 'No, you should use Y for this case.',
        timestamp: Date.now() + 1000,
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    // Extraction is best-effort here; only assert on the correction text
    // when a mistake with one was actually produced.
    expect(Array.isArray(mistakes)).toBe(true);
    if (mistakes.length > 0) {
      const withCorrection = mistakes.find(m => m.correction);
      if (withCorrection) {
        expect(withCorrection.correction).toBeTruthy();
      }
    }
  });
});
// Verifies that affected file paths are pulled both from tool-result error
// output and from message metadata.
describe('File Extraction', () => {
  it('should extract files from error messages', () => {
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Running test',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    // Tool result with is_error=true and a path in stderr/content; the
    // extractor should surface that path in files_affected.
    const toolResults: ToolResult[] = [
      {
        id: 'tool-1',
        tool_use_id: 'use-1',
        message_id: 'msg-1',
        content: 'Error in src/components/Button.tsx',
        stdout: '',
        stderr: 'Error in src/components/Button.tsx',
        is_error: true,
        is_image: false,
        timestamp: Date.now(),
      },
    ];
    const mistakes = extractor.extractMistakes(messages, toolResults);
    expect(mistakes.length).toBeGreaterThan(0);
    expect(mistakes[0].files_affected).toContain('src/components/Button.tsx');
  });
  it('should extract files from message metadata', () => {
    // Note the out-of-order timestamps: the user correction (msg-1) is newer
    // than the assistant turn (msg-0) even though it appears first in the array.
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'user',
        content: 'No, fix the file',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {
          files: ['/src/utils/helper.ts'],
        },
      },
      {
        id: 'msg-0',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Editing',
        timestamp: Date.now() - 1000,
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    // NOTE(review): only asserts no crash; the metadata-derived file list is
    // not verified here — consider strengthening this assertion.
    expect(Array.isArray(mistakes)).toBe(true);
  });
});
// Relative-ordering tests for scoreMistakeSeverity: corrections, explicit
// user corrections, and mistake type all influence the score.
describe('Severity Scoring', () => {
  it('should score mistakes with corrections higher', () => {
    const mistakeWithCorrection = {
      id: '1',
      conversation_id: 'conv-1',
      message_id: 'msg-1',
      mistake_type: 'logic_error' as const,
      what_went_wrong: 'Error occurred',
      correction: 'Fixed by doing X',
      files_affected: [],
      timestamp: Date.now(),
    };
    const mistakeWithoutCorrection = {
      id: '2',
      conversation_id: 'conv-1',
      message_id: 'msg-2',
      mistake_type: 'logic_error' as const,
      what_went_wrong: 'Error occurred',
      files_affected: [],
      timestamp: Date.now(),
    };
    // Only relative ordering is asserted, not absolute score values.
    const score1 = extractor.scoreMistakeSeverity(mistakeWithCorrection);
    const score2 = extractor.scoreMistakeSeverity(mistakeWithoutCorrection);
    expect(score1).toBeGreaterThan(score2);
  });
  it('should score user corrections highest', () => {
    // Presence of user_correction_message should raise the severity score.
    const userCorrected = {
      id: '1',
      conversation_id: 'conv-1',
      message_id: 'msg-1',
      mistake_type: 'logic_error' as const,
      what_went_wrong: 'Error occurred',
      user_correction_message: 'No, fix this',
      files_affected: [],
      timestamp: Date.now(),
    };
    const notUserCorrected = {
      id: '2',
      conversation_id: 'conv-1',
      message_id: 'msg-2',
      mistake_type: 'logic_error' as const,
      what_went_wrong: 'Error occurred',
      files_affected: [],
      timestamp: Date.now(),
    };
    const score1 = extractor.scoreMistakeSeverity(userCorrected);
    const score2 = extractor.scoreMistakeSeverity(notUserCorrected);
    expect(score1).toBeGreaterThan(score2);
  });
  it('should score by mistake type severity', () => {
    // logic_error is expected to outrank syntax_error, all else equal.
    const logicError = {
      id: '1',
      conversation_id: 'conv-1',
      message_id: 'msg-1',
      mistake_type: 'logic_error' as const,
      what_went_wrong: 'Error',
      files_affected: [],
      timestamp: Date.now(),
    };
    const syntaxError = {
      id: '2',
      conversation_id: 'conv-1',
      message_id: 'msg-2',
      mistake_type: 'syntax_error' as const,
      what_went_wrong: 'Error',
      files_affected: [],
      timestamp: Date.now(),
    };
    const score1 = extractor.scoreMistakeSeverity(logicError);
    const score2 = extractor.scoreMistakeSeverity(syntaxError);
    expect(score1).toBeGreaterThan(score2);
  });
});
// Robustness tests: the extractor must not throw on degenerate input.
describe('Edge Cases', () => {
  it('should handle messages with null content', () => {
    // NOTE(review): content is set to undefined here despite the test name
    // saying "null"; confirm the Message type actually allows undefined.
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: undefined,
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    expect(mistakes).toEqual([]);
  });
  it('should handle tool results without stderr', () => {
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'assistant',
        content: 'Running',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    // is_error=true but empty stderr: extraction must still complete.
    const toolResults: ToolResult[] = [
      {
        id: 'tool-1',
        tool_use_id: 'use-1',
        message_id: 'msg-1',
        content: 'Failed',
        stdout: '',
        stderr: '',
        is_error: true,
        is_image: false,
        timestamp: Date.now(),
      },
    ];
    const mistakes = extractor.extractMistakes(messages, toolResults);
    expect(Array.isArray(mistakes)).toBe(true);
  });
  it('should handle corrections without previous assistant message', () => {
    // A user correction with no prior assistant turn to attribute it to.
    const messages: Message[] = [
      {
        id: 'msg-1',
        conversation_id: 'conv-1',
        message_type: 'text',
        role: 'user',
        content: 'No, that is wrong',
        timestamp: Date.now(),
        is_sidechain: false,
        metadata: {},
      },
    ];
    const mistakes = extractor.extractMistakes(messages, []);
    // Should not crash, may return empty array
    expect(Array.isArray(mistakes)).toBe(true);
  });
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/ModelRegistry.test.ts | TypeScript | /**
* Unit tests for ModelRegistry
*/
import {
MODEL_REGISTRY,
getModelsByProvider,
getModelInfo,
getModelDimensions,
getAllModels,
modelExists,
getModelsByQuality,
getRecommendedModel,
type ModelInfo,
} from "../../embeddings/ModelRegistry.js";
describe("ModelRegistry", () => {
// Structural invariants of the static MODEL_REGISTRY table: completeness,
// uniqueness, and per-provider metadata requirements.
describe("MODEL_REGISTRY", () => {
  it("should contain all expected models", () => {
    const modelNames = MODEL_REGISTRY.map(m => m.name);
    // Ollama models
    expect(modelNames).toContain("mxbai-embed-large");
    expect(modelNames).toContain("nomic-embed-text");
    expect(modelNames).toContain("all-minilm");
    expect(modelNames).toContain("snowflake-arctic-embed");
    // Transformers models
    expect(modelNames).toContain("Xenova/all-MiniLM-L6-v2");
    expect(modelNames).toContain("Xenova/all-mpnet-base-v2");
    expect(modelNames).toContain("Xenova/bge-small-en-v1.5");
    expect(modelNames).toContain("Xenova/bge-base-en-v1.5");
    // OpenAI models
    expect(modelNames).toContain("text-embedding-3-small");
    expect(modelNames).toContain("text-embedding-3-large");
    expect(modelNames).toContain("text-embedding-ada-002");
    // Should have exactly 11 models
    expect(MODEL_REGISTRY).toHaveLength(11);
  });
  it("should have valid dimensions for all models", () => {
    // Dimensions must be positive integers with a sane upper bound.
    for (const model of MODEL_REGISTRY) {
      expect(model.dimensions).toBeGreaterThan(0);
      expect(model.dimensions).toBeLessThanOrEqual(10000);
      expect(Number.isInteger(model.dimensions)).toBe(true);
    }
  });
  it("should have no duplicate model names", () => {
    const names = MODEL_REGISTRY.map(m => m.name);
    const uniqueNames = new Set(names);
    expect(uniqueNames.size).toBe(names.length);
  });
  it("should have valid provider values", () => {
    const validProviders = ["ollama", "transformers", "openai"];
    for (const model of MODEL_REGISTRY) {
      expect(validProviders).toContain(model.provider);
    }
  });
  it("should have valid quality values", () => {
    const validQualities = ["low", "medium", "high", "highest"];
    for (const model of MODEL_REGISTRY) {
      expect(validQualities).toContain(model.quality);
    }
  });
  it("should have description for all models", () => {
    for (const model of MODEL_REGISTRY) {
      expect(model.description).toBeTruthy();
      expect(model.description.length).toBeGreaterThan(0);
    }
  });
  it("should have installation instructions for Ollama models", () => {
    // Ollama models are pulled locally, so they carry an "ollama pull" hint.
    const ollamaModels = MODEL_REGISTRY.filter(m => m.provider === "ollama");
    for (const model of ollamaModels) {
      expect(model.installation).toBeTruthy();
      expect(model.installation).toContain("ollama pull");
    }
  });
  it("should have cost information for OpenAI models", () => {
    // OpenAI models are paid APIs, so each must document a dollar cost.
    const openAIModels = MODEL_REGISTRY.filter(m => m.provider === "openai");
    for (const model of openAIModels) {
      expect(model.cost).toBeTruthy();
      expect(model.cost).toMatch(/\$/);
    }
  });
});
// Provider filtering: each call returns only models of the requested provider.
describe("getModelsByProvider", () => {
  it("should return Ollama models", () => {
    const models = getModelsByProvider("ollama");
    expect(models.length).toBeGreaterThan(0);
    expect(models.every(m => m.provider === "ollama")).toBe(true);
    expect(models.map(m => m.name)).toContain("mxbai-embed-large");
  });
  it("should return Transformers models", () => {
    const models = getModelsByProvider("transformers");
    expect(models.length).toBeGreaterThan(0);
    expect(models.every(m => m.provider === "transformers")).toBe(true);
    expect(models.map(m => m.name)).toContain("Xenova/all-MiniLM-L6-v2");
  });
  it("should return OpenAI models", () => {
    const models = getModelsByProvider("openai");
    expect(models.length).toBeGreaterThan(0);
    expect(models.every(m => m.provider === "openai")).toBe(true);
    expect(models.map(m => m.name)).toContain("text-embedding-3-small");
  });
  it("should return empty array for unknown provider", () => {
    const models = getModelsByProvider("unknown");
    expect(models).toEqual([]);
  });
});
// Lookup semantics: exact name match wins, then partial/substring match,
// then null for no match.
describe("getModelInfo", () => {
  it("should find model by exact name match", () => {
    const model = getModelInfo("mxbai-embed-large");
    expect(model).toBeTruthy();
    expect(model?.name).toBe("mxbai-embed-large");
    expect(model?.dimensions).toBe(1024);
    expect(model?.provider).toBe("ollama");
  });
  it("should find model by partial name match", () => {
    const model = getModelInfo("mxbai");
    expect(model).toBeTruthy();
    expect(model?.name).toBe("mxbai-embed-large");
  });
  it("should find model when query contains model name", () => {
    // Tag suffixes like ":latest" should not defeat the lookup.
    const model = getModelInfo("mxbai-embed-large:latest");
    expect(model).toBeTruthy();
    expect(model?.name).toBe("mxbai-embed-large");
  });
  it("should return null for unknown model", () => {
    const model = getModelInfo("nonexistent-model");
    expect(model).toBeNull();
  });
  it("should prioritize exact match over partial match", () => {
    // If we search for "all-minilm" (exact), should not match "all-MiniLM-L6-v2" (partial)
    const model = getModelInfo("all-minilm");
    expect(model).toBeTruthy();
    expect(model?.name).toBe("all-minilm");
    expect(model?.provider).toBe("ollama");
  });
});
// Dimension lookup piggybacks on the same name-matching rules as getModelInfo.
describe("getModelDimensions", () => {
  it("should return dimensions for known model", () => {
    expect(getModelDimensions("mxbai-embed-large")).toBe(1024);
    expect(getModelDimensions("nomic-embed-text")).toBe(768);
    expect(getModelDimensions("all-minilm")).toBe(384);
    expect(getModelDimensions("text-embedding-3-small")).toBe(1536);
    expect(getModelDimensions("text-embedding-3-large")).toBe(3072);
  });
  it("should return dimensions for partial match", () => {
    expect(getModelDimensions("mxbai")).toBe(1024);
    expect(getModelDimensions("nomic")).toBe(768);
  });
  it("should return null for unknown model", () => {
    expect(getModelDimensions("unknown-model")).toBeNull();
  });
});
// getAllModels exposes the complete registry. Note: the contract asserted
// here is content equality only — no defensive-copy guarantee is tested,
// and the implementation may return the registry array itself.
describe("getAllModels", () => {
  it("should return all models", () => {
    const models = getAllModels();
    expect(models).toEqual(MODEL_REGISTRY);
    expect(models.length).toBe(11);
  });
  // Fixed misleading test name: the previous title claimed "should return a
  // copy, not the original array", but only content equality was (and is)
  // asserted — returning the original reference would still pass.
  it("should return content equal to MODEL_REGISTRY", () => {
    const models = getAllModels();
    expect(models).toEqual(MODEL_REGISTRY);
    // Same content required; same reference is permitted (no copy contract).
  });
});
// Boolean existence check; follows the same exact/partial matching as lookup.
describe("modelExists", () => {
  it("should return true for existing models", () => {
    expect(modelExists("mxbai-embed-large")).toBe(true);
    expect(modelExists("Xenova/all-MiniLM-L6-v2")).toBe(true);
    expect(modelExists("text-embedding-3-small")).toBe(true);
  });
  it("should return true for partial matches", () => {
    expect(modelExists("mxbai")).toBe(true);
    expect(modelExists("nomic")).toBe(true);
  });
  it("should return false for nonexistent models", () => {
    // Empty string must not accidentally substring-match everything.
    expect(modelExists("nonexistent-model")).toBe(false);
    expect(modelExists("")).toBe(false);
  });
});
// Quality filtering across the four tiers: low / medium / high / highest.
describe("getModelsByQuality", () => {
  it("should return low quality models", () => {
    const models = getModelsByQuality("low");
    expect(models.length).toBeGreaterThan(0);
    expect(models.every(m => m.quality === "low")).toBe(true);
    expect(models.map(m => m.name)).toContain("all-minilm");
  });
  it("should return medium quality models", () => {
    const models = getModelsByQuality("medium");
    expect(models.length).toBeGreaterThan(0);
    expect(models.every(m => m.quality === "medium")).toBe(true);
    expect(models.map(m => m.name)).toContain("nomic-embed-text");
  });
  it("should return high quality models", () => {
    const models = getModelsByQuality("high");
    expect(models.length).toBeGreaterThan(0);
    expect(models.every(m => m.quality === "high")).toBe(true);
    expect(models.map(m => m.name)).toContain("mxbai-embed-large");
  });
  it("should return highest quality models", () => {
    const models = getModelsByQuality("highest");
    expect(models.length).toBeGreaterThan(0);
    expect(models.every(m => m.quality === "highest")).toBe(true);
    // OpenAI models should be highest quality
    expect(models.map(m => m.name)).toContain("text-embedding-3-small");
  });
  // NOTE(review): despite its name, this test never produces an empty result —
  // it only checks that every quality tier returns an array. Consider renaming
  // or passing a quality value with no registry entries.
  it("should return empty array for no matches", () => {
    // All quality levels should have matches, but test the filtering logic
    const allQualities: Array<ModelInfo["quality"]> = ["low", "medium", "high", "highest"];
    for (const quality of allQualities) {
      const models = getModelsByQuality(quality);
      expect(Array.isArray(models)).toBe(true);
    }
  });
});
// Recommendation picks the best-quality model a provider actually offers.
describe("getRecommendedModel", () => {
  it("should return high-quality model for Ollama", () => {
    const model = getRecommendedModel("ollama");
    expect(model).toBeTruthy();
    expect(model?.provider).toBe("ollama");
    expect(["high", "highest"]).toContain(model?.quality);
  });
  it("should return medium-quality model for Transformers (best available)", () => {
    const model = getRecommendedModel("transformers");
    expect(model).toBeTruthy();
    expect(model?.provider).toBe("transformers");
    // Transformers models have max quality of "medium"
    // Function should prefer medium over low
    expect(model?.quality).toBe("medium");
  });
  it("should return high-quality model for OpenAI", () => {
    const model = getRecommendedModel("openai");
    expect(model).toBeTruthy();
    expect(model?.provider).toBe("openai");
    expect(["high", "highest"]).toContain(model?.quality);
  });
  it("should return null for unknown provider", () => {
    const model = getRecommendedModel("unknown");
    expect(model).toBeNull();
  });
});
// Sanity check that the common embedding widths (384/768/1024) each appear
// at least once in the registry.
describe("Model dimensions consistency", () => {
  it("should have consistent dimensions across providers", () => {
    // Check common dimension values
    const dims384 = MODEL_REGISTRY.filter(m => m.dimensions === 384);
    const dims768 = MODEL_REGISTRY.filter(m => m.dimensions === 768);
    const dims1024 = MODEL_REGISTRY.filter(m => m.dimensions === 1024);
    expect(dims384.length).toBeGreaterThan(0);
    expect(dims768.length).toBeGreaterThan(0);
    expect(dims1024.length).toBeGreaterThan(0);
  });
});
// Optional fields must only appear on the provider they belong to:
// installation -> ollama, cost -> openai, transformers -> neither.
describe("Provider-specific attributes", () => {
  it("should have installation only for Ollama models", () => {
    const withInstallation = MODEL_REGISTRY.filter(m => m.installation);
    expect(withInstallation.every(m => m.provider === "ollama")).toBe(true);
  });
  it("should have cost only for OpenAI models", () => {
    const withCost = MODEL_REGISTRY.filter(m => m.cost);
    expect(withCost.every(m => m.provider === "openai")).toBe(true);
  });
  it("Transformers models should have neither installation nor cost", () => {
    const transformersModels = MODEL_REGISTRY.filter(m => m.provider === "transformers");
    expect(transformersModels.every(m => !m.installation && !m.cost)).toBe(true);
  });
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/OllamaEmbeddings.test.ts | TypeScript | /**
* Unit tests for OllamaEmbeddings
*/
import { jest } from '@jest/globals';
import { OllamaEmbeddings } from '../../embeddings/providers/OllamaEmbeddings';
// Offline unit tests for OllamaEmbeddings: construction, pre-initialization
// guards, and initialization failure paths (fetch is mocked — no real Ollama).
describe('OllamaEmbeddings', () => {
  describe('Constructor', () => {
    it('should create instance with default parameters', () => {
      const embeddings = new OllamaEmbeddings();
      const info = embeddings.getModelInfo();
      expect(info.provider).toBe('ollama');
      expect(info.model).toBe('mxbai-embed-large');
      expect(info.dimensions).toBe(1024);
      expect(info.available).toBe(false); // Not initialized yet
    });
    it('should create instance with custom base URL', () => {
      const embeddings = new OllamaEmbeddings('http://custom:11434');
      const info = embeddings.getModelInfo();
      expect(info.provider).toBe('ollama');
    });
    it('should remove trailing slash from base URL', () => {
      const embeddings = new OllamaEmbeddings('http://localhost:11434/');
      // Verify it doesn't throw during construction
      expect(embeddings).toBeDefined();
    });
    it('should create instance with custom model', () => {
      const embeddings = new OllamaEmbeddings('http://localhost:11434', 'custom-model', 512);
      const info = embeddings.getModelInfo();
      expect(info.model).toBe('custom-model');
      expect(info.dimensions).toBe(512);
    });
  });
  describe('isAvailable', () => {
    it('should return false before initialization', () => {
      const embeddings = new OllamaEmbeddings();
      expect(embeddings.isAvailable()).toBe(false);
    });
  });
  // embed/embedBatch must reject (not hang or crash) when uninitialized.
  describe('embed', () => {
    it('should throw error when not initialized', async () => {
      const embeddings = new OllamaEmbeddings();
      await expect(embeddings.embed('test')).rejects.toThrow('not available');
    });
  });
  describe('embedBatch', () => {
    it('should throw error when not initialized', async () => {
      const embeddings = new OllamaEmbeddings();
      await expect(embeddings.embedBatch(['test1', 'test2'])).rejects.toThrow('not initialized');
    });
  });
  describe('getModelInfo', () => {
    it('should return correct model information', () => {
      const embeddings = new OllamaEmbeddings('http://localhost:11434', 'test-model', 768);
      const info = embeddings.getModelInfo();
      expect(info).toEqual({
        provider: 'ollama',
        model: 'test-model',
        dimensions: 768,
        available: false,
      });
    });
  });
  describe('Initialize - Failure Cases', () => {
    beforeEach(() => {
      // Mock fetch to simulate Ollama not being available
      global.fetch = jest.fn() as jest.MockedFunction<typeof fetch>;
    });
    afterEach(() => {
      jest.restoreAllMocks();
    });
    it('should handle Ollama API error response', async () => {
      (global.fetch as jest.MockedFunction<typeof fetch>).mockResolvedValueOnce({
        ok: false,
        status: 500,
      } as Response);
      // Silence the provider's warning output during the expected failure.
      const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
      const embeddings = new OllamaEmbeddings();
      await embeddings.initialize();
      expect(embeddings.isAvailable()).toBe(false);
      consoleWarnSpy.mockRestore();
    });
    it('should not throw during initialization failure', async () => {
      (global.fetch as jest.MockedFunction<typeof fetch>).mockRejectedValueOnce(
        new Error('Network error')
      );
      const embeddings = new OllamaEmbeddings();
      // Should not throw, just mark as unavailable
      await expect(embeddings.initialize()).resolves.not.toThrow();
      expect(embeddings.isAvailable()).toBe(false);
    });
  });
  describe('Edge Cases', () => {
    it('should handle base URL with various formats', () => {
      const embeddings1 = new OllamaEmbeddings('http://localhost:11434');
      const embeddings2 = new OllamaEmbeddings('http://localhost:11434/');
      const embeddings3 = new OllamaEmbeddings('https://remote-ollama.com');
      expect(embeddings1).toBeDefined();
      expect(embeddings2).toBeDefined();
      expect(embeddings3).toBeDefined();
    });
    it('should handle unknown model with default dimensions', () => {
      const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
      const embeddings = new OllamaEmbeddings('http://localhost:11434', 'unknown-model');
      const info = embeddings.getModelInfo();
      // Should use fallback dimensions (768)
      expect(info.dimensions).toBeGreaterThan(0);
      consoleWarnSpy.mockRestore();
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/OpenAIEmbeddings.test.ts | TypeScript | /**
* Unit tests for OpenAIEmbeddings
*/
import { jest } from '@jest/globals';
import { OpenAIEmbeddings } from '../../embeddings/providers/OpenAIEmbeddings';
// Offline unit tests for OpenAIEmbeddings: construction with various
// models/dimensions, pre-initialization guards, and failure handling.
// Network-dependent tests are explicitly skipped.
describe('OpenAIEmbeddings', () => {
  describe('Constructor', () => {
    it('should create instance with API key and default model', () => {
      const embeddings = new OpenAIEmbeddings('test-api-key');
      const info = embeddings.getModelInfo();
      expect(info.provider).toBe('openai');
      expect(info.model).toBe('text-embedding-3-small');
      expect(info.dimensions).toBe(1536);
      expect(info.available).toBe(false); // Not initialized yet
    });
    it('should create instance with custom model', () => {
      const embeddings = new OpenAIEmbeddings('test-key', 'text-embedding-ada-002', 1536);
      const info = embeddings.getModelInfo();
      expect(info.model).toBe('text-embedding-ada-002');
      expect(info.dimensions).toBe(1536);
    });
    it('should use default dimensions if not specified', () => {
      const embeddings = new OpenAIEmbeddings('test-key', 'text-embedding-3-small');
      const info = embeddings.getModelInfo();
      expect(info.dimensions).toBe(1536);
    });
    it('should create instance with custom dimensions', () => {
      const embeddings = new OpenAIEmbeddings('test-key', 'custom-model', 3072);
      const info = embeddings.getModelInfo();
      expect(info.dimensions).toBe(3072);
    });
  });
  describe('isAvailable', () => {
    it('should return false before initialization', () => {
      const embeddings = new OpenAIEmbeddings('test-key');
      expect(embeddings.isAvailable()).toBe(false);
    });
  });
  // embed/embedBatch must reject when the provider was never initialized.
  describe('embed', () => {
    it('should throw error when not initialized', async () => {
      const embeddings = new OpenAIEmbeddings('test-key');
      await expect(embeddings.embed('test')).rejects.toThrow('not available');
    });
  });
  describe('embedBatch', () => {
    it('should throw error when not initialized', async () => {
      const embeddings = new OpenAIEmbeddings('test-key');
      await expect(embeddings.embedBatch(['test1', 'test2'])).rejects.toThrow('not initialized');
    });
  });
  describe('getModelInfo', () => {
    it('should return correct model information', () => {
      const embeddings = new OpenAIEmbeddings('test-key', 'test-model', 768);
      const info = embeddings.getModelInfo();
      expect(info).toEqual({
        provider: 'openai',
        model: 'test-model',
        dimensions: 768,
        available: false,
      });
    });
  });
  describe('Initialize - Failure Cases', () => {
    it('should handle missing API key', async () => {
      const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
      const embeddings = new OpenAIEmbeddings('');
      await embeddings.initialize();
      expect(embeddings.isAvailable()).toBe(false);
      consoleWarnSpy.mockRestore();
    });
    // Skip tests that require network calls or actual OpenAI SDK
    // These tests timeout because they attempt to load and use the OpenAI SDK
    it.skip('should handle missing OpenAI package', async () => {
      const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(() => {});
      const embeddings = new OpenAIEmbeddings('test-key');
      await embeddings.initialize();
      // Should log warning about missing package
      expect(embeddings.isAvailable()).toBe(false);
      consoleWarnSpy.mockRestore();
    });
    it.skip('should not throw during initialization failure', async () => {
      const embeddings = new OpenAIEmbeddings('invalid-key');
      // Should not throw, just mark as unavailable
      await expect(embeddings.initialize()).resolves.not.toThrow();
      expect(embeddings.isAvailable()).toBe(false);
    });
  });
  describe('Edge Cases', () => {
    it('should handle empty API key', () => {
      const embeddings = new OpenAIEmbeddings('');
      const info = embeddings.getModelInfo();
      expect(info.available).toBe(false);
    });
    it('should handle various model names', () => {
      const models = [
        'text-embedding-3-small',
        'text-embedding-3-large',
        'text-embedding-ada-002',
        'custom-model',
      ];
      for (const model of models) {
        const embeddings = new OpenAIEmbeddings('test-key', model);
        const info = embeddings.getModelInfo();
        expect(info.model).toBe(model);
        expect(info.dimensions).toBeGreaterThan(0);
      }
    });
    it('should handle unknown model with default dimensions', () => {
      const embeddings = new OpenAIEmbeddings('test-key', 'unknown-model');
      const info = embeddings.getModelInfo();
      // Should use fallback dimensions (1536)
      expect(info.dimensions).toBe(1536);
    });
  });
  describe('API Key Handling', () => {
    it('should store API key from constructor', () => {
      const embeddings = new OpenAIEmbeddings('my-secret-key');
      // Verify it doesn't throw during construction
      expect(embeddings).toBeDefined();
    });
    it('should handle whitespace in API key', () => {
      const embeddings = new OpenAIEmbeddings('  test-key  ');
      expect(embeddings).toBeDefined();
    });
  });
  describe('Model Information', () => {
    it('should provide complete model info before initialization', () => {
      const embeddings = new OpenAIEmbeddings('test-key', 'test-model', 512);
      const info = embeddings.getModelInfo();
      expect(info).toHaveProperty('provider');
      expect(info).toHaveProperty('model');
      expect(info).toHaveProperty('dimensions');
      expect(info).toHaveProperty('available');
      expect(info.provider).toBe('openai');
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/ProjectMigration.test.ts | TypeScript | /**
* Unit tests for ProjectMigration
* Following TDD approach - tests written FIRST
*/
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { tmpdir } from "os";
import { join, basename, dirname } from "path";
import { mkdirSync, writeFileSync, rmSync, existsSync, readFileSync, readdirSync } from "fs";
import { ProjectMigration } from "../../utils/ProjectMigration.js";
import { getSQLiteManager, resetSQLiteManager } from "../../storage/SQLiteManager.js";
// Test fixture helper: inserts a project row (canonical and display path set
// to the same value, timestamps set to "now") and returns its new row id.
const insertProject = (db: ReturnType<ReturnType<typeof getSQLiteManager>["getDatabase"]>, projectPath: string) => {
  const timestamp = Date.now();
  const stmt = db.prepare(
    "INSERT INTO projects (canonical_path, display_path, created_at, updated_at) VALUES (?, ?, ?, ?)"
  );
  const { lastInsertRowid } = stmt.run(projectPath, projectPath, timestamp, timestamp);
  // lastInsertRowid may be a bigint; normalize to number for test convenience.
  return Number(lastInsertRowid);
};
// Test fixture helper: inserts a claude-code conversation for a project.
// first_message_at and last_message_at are both set to lastMessageAt;
// returns the new conversation row id.
const insertConversation = (
  db: ReturnType<ReturnType<typeof getSQLiteManager>["getDatabase"]>,
  projectId: number,
  projectPath: string,
  externalId: string,
  lastMessageAt: number,
  messageCount = 1
) => {
  const timestamp = Date.now();
  const stmt = db.prepare(
    `
    INSERT INTO conversations
    (project_id, project_path, source_type, external_id, first_message_at, last_message_at, message_count, created_at, updated_at)
    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
  `
  );
  const { lastInsertRowid } = stmt.run(
    projectId,
    projectPath,
    "claude-code",
    externalId,
    lastMessageAt,
    lastMessageAt,
    messageCount,
    timestamp,
    timestamp
  );
  // Normalize possible bigint row id to a plain number.
  return Number(lastInsertRowid);
};
// Test fixture helper: inserts a minimal user message ('content' body,
// empty metadata) into an existing conversation. Returns nothing.
const insertMessage = (
  db: ReturnType<ReturnType<typeof getSQLiteManager>["getDatabase"]>,
  conversationId: number,
  externalId: string,
  timestamp: number
) => {
  const stmt = db.prepare(
    `
    INSERT INTO messages
    (conversation_id, external_id, message_type, role, content, timestamp, metadata)
    VALUES (?, ?, 'user', 'user', 'content', ?, '{}')
  `
  );
  stmt.run(conversationId, externalId, timestamp);
};
describe("ProjectMigration", () => {
let testDir: string;
let projectsDir: string;
let migration: ProjectMigration;
beforeEach(() => {
// Create temp directory structure
testDir = join(tmpdir(), `migration-test-${Date.now()}`);
projectsDir = join(testDir, ".claude", "projects");
mkdirSync(projectsDir, { recursive: true });
// Mock HOME to use test directory
process.env.HOME = testDir;
process.env.USERPROFILE = testDir;
const db = getSQLiteManager();
migration = new ProjectMigration(db, projectsDir);
});
afterEach(() => {
resetSQLiteManager();
if (existsSync(testDir)) {
rmSync(testDir, { recursive: true, force: true });
}
});
describe("discoverOldFolders", () => {
it("should find folder by exact database project_path match", async () => {
// Setup: Create old folder with database
const oldFolder = join(projectsDir, "-Users-test-old-project");
mkdirSync(oldFolder, { recursive: true });
writeFileSync(join(oldFolder, "session.jsonl"), "{}");
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, "/Users/test/old-project");
insertConversation(db, projectId, "/Users/test/old-project", "conv1", 2000, 10);
// Test: Discover from new path
const results = await migration.discoverOldFolders("/Users/test/new-project");
// Verify: Should find the old folder
expect(results).toHaveLength(1);
expect(results[0].folderName).toBe("-Users-test-old-project");
expect(results[0].storedProjectPath).toBe("/Users/test/old-project");
expect(results[0].score).toBeGreaterThan(0);
});
it("should score folder by path similarity", async () => {
// Setup: Create folder with similar path
const oldFolder = join(projectsDir, "-Users-test-oldname-project");
mkdirSync(oldFolder, { recursive: true });
writeFileSync(join(oldFolder, "session.jsonl"), "{}");
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, "/Users/test/oldname/project");
insertConversation(db, projectId, "/Users/test/oldname/project", "c1", 1000);
// Test: Check with newname (only one component different)
const results = await migration.discoverOldFolders("/Users/test/newname/project");
// Verify: Should have high score (path similarity)
expect(results).toHaveLength(1);
expect(results[0].score).toBeGreaterThan(65); // High similarity
});
it("should find folder by name pattern matching", async () => {
// Setup: Create folder without database but matching pattern
const oldFolder = join(projectsDir, "-Users-test-myproject");
mkdirSync(oldFolder, { recursive: true });
// Add some JSONL files
writeFileSync(join(oldFolder, "session1.jsonl"), '{"type":"user"}');
writeFileSync(join(oldFolder, "session2.jsonl"), '{"type":"assistant"}');
// Test: Similar project path
const results = await migration.discoverOldFolders("/Users/test/myproject-renamed");
// Verify: Should still find it based on folder name
expect(results).toHaveLength(1);
expect(results[0].folderName).toBe("-Users-test-myproject");
});
it("should return empty array when no matches found", async () => {
// Test: Discover with no existing folders
const results = await migration.discoverOldFolders("/Users/test/nonexistent");
// Verify: Empty results
expect(results).toEqual([]);
});
it("should rank results by confidence score", async () => {
// Setup: Create multiple candidate folders
// Folder 1: Exact path match (should score highest)
const folder1 = join(projectsDir, "-Users-test-project");
mkdirSync(folder1, { recursive: true });
writeFileSync(join(folder1, "session.jsonl"), "{}");
// Folder 2: Similar path (medium score)
const folder2 = join(projectsDir, "-Users-test-old-project");
mkdirSync(folder2, { recursive: true });
writeFileSync(join(folder2, "session.jsonl"), "{}");
// Folder 3: Different path (low score)
const folder3 = join(projectsDir, "-Users-other-something");
mkdirSync(folder3, { recursive: true });
writeFileSync(join(folder3, "file.jsonl"), '{}');
const db = getSQLiteManager().getDatabase();
const projectId1 = insertProject(db, "/Users/test/project");
insertConversation(db, projectId1, "/Users/test/project", "c1", 1000);
const projectId2 = insertProject(db, "/Users/test/old-project");
insertConversation(db, projectId2, "/Users/test/old-project", "c2", 900);
// Test: Discover
const results = await migration.discoverOldFolders("/Users/test/project");
// Verify: Sorted by score, highest first
expect(results.length).toBeGreaterThanOrEqual(2);
expect(results[0].score).toBeGreaterThan(results[1].score);
expect(results[0].folderName).toBe("-Users-test-project");
});
it("should include statistics (conversations, messages, lastActivity)", async () => {
// Setup: Create folder with stats
const oldFolder = join(projectsDir, "-Users-test-project");
mkdirSync(oldFolder, { recursive: true });
writeFileSync(join(oldFolder, "session.jsonl"), "{}");
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, "/Users/test/project");
const conv1 = insertConversation(db, projectId, "/Users/test/project", "c1", 1000);
const conv2 = insertConversation(db, projectId, "/Users/test/project", "c2", 2000);
insertMessage(db, conv1, "m1", 1000);
insertMessage(db, conv1, "m2", 1001);
insertMessage(db, conv2, "m3", 1002);
// Test: Discover
const results = await migration.discoverOldFolders("/Users/test/project-new");
// Verify: Stats included
expect(results[0].stats.conversations).toBe(2);
expect(results[0].stats.messages).toBe(3);
expect(results[0].stats.lastActivity).toBe(2000);
});
it("should handle missing database gracefully", async () => {
// Setup: Folder with JSONL but no database
const oldFolder = join(projectsDir, "-Users-test-project");
mkdirSync(oldFolder, { recursive: true });
writeFileSync(join(oldFolder, "session.jsonl"), '{"type":"user"}');
// Test: Should not crash
const results = await migration.discoverOldFolders("/Users/test/project");
// Verify: Still finds folder based on name/files
expect(results).toHaveLength(1);
expect(results[0].storedProjectPath).toBeNull();
});
it("should handle corrupted database files", async () => {
// Setup: Create corrupted database
const oldFolder = join(projectsDir, "-Users-test-project");
mkdirSync(oldFolder, { recursive: true });
const dbPath = join(oldFolder, ".cccmemory.db");
writeFileSync(dbPath, "NOT A VALID DATABASE FILE");
// Test: Should handle gracefully
const results = await migration.discoverOldFolders("/Users/test/project");
// Verify: Should still include folder (just can't read DB)
expect(results.length).toBeGreaterThanOrEqual(0); // May or may not include based on other factors
});
});
describe("validateMigration", () => {
it("should detect conflicts when target already has data", () => {
// Setup: Create both source and target with data
const sourceFolder = join(projectsDir, "-Users-test-old");
const targetFolder = join(projectsDir, "-Users-test-new");
mkdirSync(sourceFolder, { recursive: true });
mkdirSync(targetFolder, { recursive: true });
// Both have JSONL files
writeFileSync(join(sourceFolder, "session1.jsonl"), '{}');
writeFileSync(join(targetFolder, "session2.jsonl"), '{}');
// Test: Validate
const result = migration.validateMigration(sourceFolder, targetFolder);
// Verify: Should detect conflict
expect(result.valid).toBe(false);
expect(result.errors).toContain("Target folder already has conversation data");
});
it("should allow migration when no database exists", () => {
// Setup: Source with JSONL data only
const sourceFolder = join(projectsDir, "-Users-test-source");
mkdirSync(sourceFolder, { recursive: true });
writeFileSync(join(sourceFolder, "session.jsonl"), "{}");
const targetFolder = join(projectsDir, "-Users-test-target");
// Test: Validate
const result = migration.validateMigration(sourceFolder, targetFolder);
// Verify: Should pass validation
expect(result.valid).toBe(true);
expect(result.errors).toHaveLength(0);
});
it("should verify source has JSONL files", () => {
// Setup: Source folder with no files
const sourceFolder = join(projectsDir, "-Users-test-empty");
const targetFolder = join(projectsDir, "-Users-test-target");
mkdirSync(sourceFolder, { recursive: true });
// Test: Validate
const result = migration.validateMigration(sourceFolder, targetFolder);
// Verify: Should warn about no files
expect(result.valid).toBe(false);
expect(result.errors).toContain("Source folder has no conversation files");
});
it("should reject migration from non-existent folder", () => {
const sourceFolder = join(projectsDir, "-Users-test-nonexistent");
const targetFolder = join(projectsDir, "-Users-test-target");
// Test: Validate
const result = migration.validateMigration(sourceFolder, targetFolder);
// Verify: Should fail
expect(result.valid).toBe(false);
expect(result.errors).toContain("Source folder does not exist");
});
it("should calculate accurate migration statistics", () => {
// Setup: Source with known data
const sourceFolder = join(projectsDir, "-Users-test-source");
mkdirSync(sourceFolder, { recursive: true });
// Add JSONL files
writeFileSync(join(sourceFolder, "session1.jsonl"), '{}');
writeFileSync(join(sourceFolder, "session2.jsonl"), '{}');
const targetFolder = join(projectsDir, "-Users-test-target");
// Test: Validate
const result = migration.validateMigration(sourceFolder, targetFolder);
// Verify: Should include stats
expect(result.valid).toBe(true);
expect(result.stats?.conversations).toBe(2);
expect(result.stats?.messages).toBe(0);
expect(result.stats?.files).toBe(2);
});
});
describe("executeMigration", () => {
it("should copy all JSONL files to new folder", async () => {
// Setup
const sourceFolder = join(projectsDir, "-Users-test-source");
const targetFolder = join(projectsDir, "-Users-test-target");
mkdirSync(sourceFolder, { recursive: true });
writeFileSync(join(sourceFolder, "session1.jsonl"), 'content1');
writeFileSync(join(sourceFolder, "session2.jsonl"), 'content2');
// Test: Execute migration
await migration.executeMigration(
sourceFolder,
targetFolder,
"/old/path",
"/new/path",
false
);
// Verify: Files copied
expect(existsSync(join(targetFolder, "session1.jsonl"))).toBe(true);
expect(existsSync(join(targetFolder, "session2.jsonl"))).toBe(true);
});
it("should update project_path in database", async () => {
// Setup
const sourceFolder = join(projectsDir, "-Users-test-source");
const targetFolder = join(projectsDir, "-Users-test-target");
mkdirSync(sourceFolder, { recursive: true });
writeFileSync(join(sourceFolder, "s.jsonl"), '{}');
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, "/old/path");
insertConversation(db, projectId, "/old/path", "c1", 1000);
insertConversation(db, projectId, "/old/path", "c2", 2000);
// Test: Execute
await migration.executeMigration(sourceFolder, targetFolder, "/old/path", "/new/path", false);
// Verify: Paths updated
const rows = db
.prepare("SELECT project_path FROM conversations WHERE project_id = ? ORDER BY external_id")
.all(projectId) as Array<{ project_path: string }>;
expect(rows).toHaveLength(2);
expect(rows[0].project_path).toBe("/new/path");
expect(rows[1].project_path).toBe("/new/path");
});
it("should create backup before migration", async () => {
// Setup
const sourceFolder = join(projectsDir, "-Users-test-source");
const targetFolder = join(projectsDir, "-Users-test-target");
mkdirSync(sourceFolder, { recursive: true });
writeFileSync(join(sourceFolder, "s.jsonl"), '{}');
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, "/old");
insertConversation(db, projectId, "/old", "c1", 1000);
// Test: Execute
await migration.executeMigration(sourceFolder, targetFolder, "/old", "/new", false);
// Verify: Backup created
const dbPath = getSQLiteManager().getDbPath();
const backupDir = dirname(dbPath);
const backups = readdirSync(backupDir).filter((name) =>
name.startsWith(`${basename(dbPath)}.bak.`)
);
expect(backups.length).toBeGreaterThan(0);
});
it("should rollback on error", async () => {
// Setup: Create scenario that will fail (target project already exists)
const sourceFolder = join(projectsDir, "-Users-test-source");
const targetFolder = join(projectsDir, "-Users-test-target");
mkdirSync(sourceFolder, { recursive: true });
writeFileSync(join(sourceFolder, "s.jsonl"), '{}');
const db = getSQLiteManager().getDatabase();
const projectId = insertProject(db, "/old");
insertConversation(db, projectId, "/old", "c1", 1000);
insertProject(db, "/new");
// Test: Should throw
await expect(
migration.executeMigration(sourceFolder, targetFolder, "/old", "/new", false)
).rejects.toThrow();
// Verify: Target folder should not be created or should be cleaned up
// (Specific behavior depends on implementation)
});
it("should verify file counts after copy", async () => {
// Setup
const sourceFolder = join(projectsDir, "-Users-test-source");
const targetFolder = join(projectsDir, "-Users-test-target");
mkdirSync(sourceFolder, { recursive: true });
writeFileSync(join(sourceFolder, "s1.jsonl"), '{}');
writeFileSync(join(sourceFolder, "s2.jsonl"), '{}');
// Test: Execute
const result = await migration.executeMigration(
sourceFolder,
targetFolder,
"/old",
"/new",
false
);
// Verify: Counts match
expect(result.filesCopied).toBe(2);
});
it("should preserve original data (copy not move)", async () => {
// Setup
const sourceFolder = join(projectsDir, "-Users-test-source");
const targetFolder = join(projectsDir, "-Users-test-target");
mkdirSync(sourceFolder, { recursive: true });
writeFileSync(join(sourceFolder, "session.jsonl"), 'original');
// Test: Execute
await migration.executeMigration(sourceFolder, targetFolder, "/old", "/new", false);
// Verify: Original still exists
expect(existsSync(join(sourceFolder, "session.jsonl"))).toBe(true);
});
});
describe("scoring algorithms", () => {
it("should score exact path match as 100", () => {
const score = migration.scorePath("/Users/test/project", "/Users/test/project");
expect(score).toBe(100);
});
it("should score one-component-different as 80", () => {
const score = migration.scorePath(
"/Users/test/newname/project",
"/Users/test/oldname/project"
);
expect(score).toBeGreaterThanOrEqual(70); // High score for rename
});
it("should score folder name similarity correctly", () => {
const score1 = migration.scoreFolderName(
"-Users-test-project",
"-Users-test-project"
);
expect(score1).toBe(100); // Exact match
const score2 = migration.scoreFolderName(
"-Users-test-newproject",
"-Users-test-oldproject"
);
expect(score2).toBeGreaterThan(50); // Similar
const score3 = migration.scoreFolderName(
"-Users-test-project",
"-Users-other-something"
);
expect(score3).toBeLessThan(50); // Different
});
it("should combine multiple score factors", () => {
// This tests the overall scoring logic
// Path similarity + folder name + JSONL files should combine
const score = migration.calculateOverallScore({
pathScore: 80,
folderScore: 60,
hasDatabase: true,
jsonlCount: 10
});
expect(score).toBeGreaterThan(80); // Should boost with files
});
});
describe("merge mode", () => {
it("should allow merge when target has existing data", async () => {
// Setup: Source and target both have data
const sourceFolder = join(projectsDir, "-source");
const targetFolder = join(projectsDir, "-target");
mkdirSync(sourceFolder, { recursive: true });
mkdirSync(targetFolder, { recursive: true });
writeFileSync(join(sourceFolder, "source-session.jsonl"), "source-data");
writeFileSync(join(targetFolder, "target-session.jsonl"), "target-data");
const result = await migration.executeMigration(
sourceFolder,
targetFolder,
"/old-project",
"/new-project",
false,
"merge"
);
expect(result.success).toBe(true);
});
it("should copy only new JSONL files in merge mode", async () => {
// Setup
const sourceFolder = join(projectsDir, "-source");
const targetFolder = join(projectsDir, "-target");
mkdirSync(sourceFolder, { recursive: true });
mkdirSync(targetFolder, { recursive: true });
// Source has 2 files
writeFileSync(join(sourceFolder, "session-1.jsonl"), "data1");
writeFileSync(join(sourceFolder, "session-2.jsonl"), "data2");
// Target already has session-1
writeFileSync(join(targetFolder, "session-1.jsonl"), "existing");
// Test: Execute merge
const result = await migration.executeMigration(
sourceFolder,
targetFolder,
"/old",
"/new",
false,
"merge"
);
// Verify: Only session-2 copied (session-1 skipped)
expect(result.filesCopied).toBe(1);
// Verify: session-1.jsonl not overwritten
const content1 = readFileSync(join(targetFolder, "session-1.jsonl"), "utf-8");
expect(content1).toBe("existing");
// Verify: session-2.jsonl copied
expect(existsSync(join(targetFolder, "session-2.jsonl"))).toBe(true);
});
it("should reject merge when mode='migrate' and target has data", async () => {
// Setup: Target has existing data
const sourceFolder = join(projectsDir, "-source");
const targetFolder = join(projectsDir, "-target");
mkdirSync(sourceFolder, { recursive: true });
mkdirSync(targetFolder, { recursive: true });
writeFileSync(join(sourceFolder, "session.jsonl"), "source");
writeFileSync(join(targetFolder, "existing.jsonl"), "target");
// Test: Execute with mode='migrate' (default)
await expect(
migration.executeMigration(sourceFolder, targetFolder, "/old", "/new", false, "migrate")
).rejects.toThrow("Target folder already has conversation data");
});
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/QueryCache.test.ts | TypeScript | /**
* Unit tests for QueryCache - LRU cache for database queries
*/
import { describe, it, expect, beforeEach } from "@jest/globals";
import { QueryCache } from "../../cache/QueryCache.js";
describe("QueryCache", () => {
let cache: QueryCache;
beforeEach(() => {
cache = new QueryCache({ maxSize: 3, ttlMs: 5000 });
});
describe("Basic Operations", () => {
it("should store and retrieve values", () => {
cache.set("key1", { data: "value1" });
const result = cache.get("key1");
expect(result).toEqual({ data: "value1" });
});
it("should return undefined for non-existent keys", () => {
const result = cache.get("nonexistent");
expect(result).toBeUndefined();
});
it("should overwrite existing keys", () => {
cache.set("key1", { data: "value1" });
cache.set("key1", { data: "value2" });
const result = cache.get("key1");
expect(result).toEqual({ data: "value2" });
});
it("should check if key exists", () => {
cache.set("key1", { data: "value1" });
expect(cache.has("key1")).toBe(true);
expect(cache.has("key2")).toBe(false);
});
it("should delete keys", () => {
cache.set("key1", { data: "value1" });
cache.delete("key1");
expect(cache.has("key1")).toBe(false);
expect(cache.get("key1")).toBeUndefined();
});
it("should clear all entries", () => {
cache.set("key1", { data: "value1" });
cache.set("key2", { data: "value2" });
cache.clear();
expect(cache.size()).toBe(0);
expect(cache.has("key1")).toBe(false);
expect(cache.has("key2")).toBe(false);
});
});
describe("LRU Eviction", () => {
it("should evict least recently used item when capacity is exceeded", () => {
cache.set("key1", { data: "value1" });
cache.set("key2", { data: "value2" });
cache.set("key3", { data: "value3" });
cache.set("key4", { data: "value4" }); // Should evict key1
expect(cache.has("key1")).toBe(false);
expect(cache.has("key2")).toBe(true);
expect(cache.has("key3")).toBe(true);
expect(cache.has("key4")).toBe(true);
expect(cache.size()).toBe(3);
});
it("should update access order on get", () => {
cache.set("key1", { data: "value1" });
cache.set("key2", { data: "value2" });
cache.set("key3", { data: "value3" });
// Access key1 to make it most recently used
cache.get("key1");
// Add key4, should evict key2 (least recently used)
cache.set("key4", { data: "value4" });
expect(cache.has("key1")).toBe(true);
expect(cache.has("key2")).toBe(false);
expect(cache.has("key3")).toBe(true);
expect(cache.has("key4")).toBe(true);
});
it("should update access order on has check", () => {
cache.set("key1", { data: "value1" });
cache.set("key2", { data: "value2" });
cache.set("key3", { data: "value3" });
// Check key1 to make it most recently used
cache.has("key1");
// Add key4, should evict key2
cache.set("key4", { data: "value4" });
expect(cache.has("key1")).toBe(true);
expect(cache.has("key2")).toBe(false);
});
it("should update access order on set of existing key", () => {
cache.set("key1", { data: "value1" });
cache.set("key2", { data: "value2" });
cache.set("key3", { data: "value3" });
// Update key1 to make it most recently used
cache.set("key1", { data: "updated" });
// Add key4, should evict key2
cache.set("key4", { data: "value4" });
expect(cache.get("key1")).toEqual({ data: "updated" });
expect(cache.has("key2")).toBe(false);
});
});
describe("TTL (Time To Live)", () => {
it("should expire entries after TTL", async () => {
const shortTtlCache = new QueryCache({ maxSize: 10, ttlMs: 100 });
shortTtlCache.set("key1", { data: "value1" });
expect(shortTtlCache.has("key1")).toBe(true);
// Wait for TTL to expire
await new Promise((resolve) => setTimeout(resolve, 150));
expect(shortTtlCache.has("key1")).toBe(false);
expect(shortTtlCache.get("key1")).toBeUndefined();
});
it("should not return expired entries", async () => {
const shortTtlCache = new QueryCache({ maxSize: 10, ttlMs: 100 });
shortTtlCache.set("key1", { data: "value1" });
// Wait for expiration
await new Promise((resolve) => setTimeout(resolve, 150));
const result = shortTtlCache.get("key1");
expect(result).toBeUndefined();
});
it("should clean up expired entries on access", async () => {
const shortTtlCache = new QueryCache({ maxSize: 10, ttlMs: 100 });
shortTtlCache.set("key1", { data: "value1" });
shortTtlCache.set("key2", { data: "value2" });
await new Promise((resolve) => setTimeout(resolve, 150));
// Accessing should trigger cleanup
shortTtlCache.get("key1");
expect(shortTtlCache.size()).toBe(0);
});
it("should reset TTL on update", async () => {
const shortTtlCache = new QueryCache({ maxSize: 10, ttlMs: 200 });
shortTtlCache.set("key1", { data: "value1" });
// Wait 100ms, then update (should reset TTL)
await new Promise((resolve) => setTimeout(resolve, 100));
shortTtlCache.set("key1", { data: "updated" });
// Wait another 150ms (total 250ms from original)
await new Promise((resolve) => setTimeout(resolve, 150));
// Should still be valid because TTL was reset
expect(shortTtlCache.get("key1")).toEqual({ data: "updated" });
});
});
describe("Cache Statistics", () => {
it("should track cache hits", () => {
cache.set("key1", { data: "value1" });
cache.get("key1"); // Hit
cache.get("key1"); // Hit
const stats = cache.getStats();
expect(stats.hits).toBe(2);
});
it("should track cache misses", () => {
cache.get("nonexistent1"); // Miss
cache.get("nonexistent2"); // Miss
const stats = cache.getStats();
expect(stats.misses).toBe(2);
});
it("should calculate hit rate", () => {
cache.set("key1", { data: "value1" });
cache.get("key1"); // Hit
cache.get("key2"); // Miss
cache.get("key1"); // Hit
cache.get("key3"); // Miss
const stats = cache.getStats();
expect(stats.hits).toBe(2);
expect(stats.misses).toBe(2);
expect(stats.hitRate).toBeCloseTo(0.5, 2);
});
it("should handle zero requests in hit rate calculation", () => {
const stats = cache.getStats();
expect(stats.hitRate).toBe(0);
});
it("should track evictions", () => {
cache.set("key1", { data: "value1" });
cache.set("key2", { data: "value2" });
cache.set("key3", { data: "value3" });
cache.set("key4", { data: "value4" }); // Evicts key1
cache.set("key5", { data: "value5" }); // Evicts key2
const stats = cache.getStats();
expect(stats.evictions).toBe(2);
});
it("should reset statistics", () => {
cache.set("key1", { data: "value1" });
cache.get("key1");
cache.get("key2");
cache.resetStats();
const stats = cache.getStats();
expect(stats.hits).toBe(0);
expect(stats.misses).toBe(0);
expect(stats.evictions).toBe(0);
expect(stats.hitRate).toBe(0);
});
it("should include size in statistics", () => {
cache.set("key1", { data: "value1" });
cache.set("key2", { data: "value2" });
const stats = cache.getStats();
expect(stats.size).toBe(2);
expect(stats.maxSize).toBe(3);
});
});
describe("Edge Cases", () => {
it("should handle cache size of 1", () => {
const smallCache = new QueryCache({ maxSize: 1, ttlMs: 5000 });
smallCache.set("key1", { data: "value1" });
smallCache.set("key2", { data: "value2" }); // Should evict key1
expect(smallCache.has("key1")).toBe(false);
expect(smallCache.has("key2")).toBe(true);
expect(smallCache.size()).toBe(1);
});
it("should handle undefined and null values", () => {
cache.set("key1", undefined);
cache.set("key2", null);
expect(cache.get("key1")).toBeUndefined();
expect(cache.get("key2")).toBeNull();
expect(cache.has("key1")).toBe(true);
expect(cache.has("key2")).toBe(true);
});
it("should handle complex objects", () => {
const complexObj = {
nested: { deep: { value: [1, 2, 3] } },
array: [{ id: 1 }, { id: 2 }],
};
cache.set("key1", complexObj);
const result = cache.get("key1");
expect(result).toEqual(complexObj);
});
it("should handle rapid consecutive operations", () => {
for (let i = 0; i < 100; i++) {
cache.set(`key${i}`, { data: `value${i}` });
}
// Should only keep last 3 entries
expect(cache.size()).toBe(3);
expect(cache.has("key97")).toBe(true);
expect(cache.has("key98")).toBe(true);
expect(cache.has("key99")).toBe(true);
});
it("should handle deleting non-existent keys", () => {
expect(() => cache.delete("nonexistent")).not.toThrow();
});
});
describe("Configuration", () => {
it("should use default configuration if not provided", () => {
const defaultCache = new QueryCache();
defaultCache.set("key1", { data: "value1" });
expect(defaultCache.get("key1")).toEqual({ data: "value1" });
});
it("should respect custom max size", () => {
const largeCache = new QueryCache({ maxSize: 100, ttlMs: 5000 });
for (let i = 0; i < 100; i++) {
largeCache.set(`key${i}`, { data: `value${i}` });
}
expect(largeCache.size()).toBe(100);
largeCache.set("key100", { data: "value100" });
expect(largeCache.size()).toBe(100); // Should evict oldest
});
it("should validate configuration", () => {
expect(() => new QueryCache({ maxSize: 0, ttlMs: 5000 })).toThrow();
expect(() => new QueryCache({ maxSize: -1, ttlMs: 5000 })).toThrow();
expect(() => new QueryCache({ maxSize: 10, ttlMs: 0 })).toThrow();
expect(() => new QueryCache({ maxSize: 10, ttlMs: -1 })).toThrow();
});
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/QueryExpander.test.ts | TypeScript | /**
* Unit tests for QueryExpander
*/
import {
QueryExpander,
getQueryExpander,
getExpansionConfig,
DEFAULT_EXPANSION_CONFIG,
} from "../../search/QueryExpander.js";
describe("QueryExpander", () => {
describe("Constructor and Configuration", () => {
it("should create with default config (disabled)", () => {
const expander = new QueryExpander();
const config = expander.getConfig();
expect(config.enabled).toBe(false);
expect(config.maxVariants).toBe(DEFAULT_EXPANSION_CONFIG.maxVariants);
});
it("should accept custom config", () => {
const expander = new QueryExpander({
enabled: true,
maxVariants: 5,
});
const config = expander.getConfig();
expect(config.enabled).toBe(true);
expect(config.maxVariants).toBe(5);
});
});
describe("Disabled Expansion", () => {
it("should return original query when disabled", () => {
const expander = new QueryExpander({ enabled: false });
const variants = expander.expand("error in database");
expect(variants).toEqual(["error in database"]);
});
});
describe("Basic Expansion", () => {
let expander: QueryExpander;
beforeEach(() => {
expander = new QueryExpander({ enabled: true, maxVariants: 5 });
});
it("should expand known terms", () => {
const variants = expander.expand("error");
expect(variants).toContain("error");
expect(variants.length).toBeGreaterThan(1);
// Should include synonyms
expect(
variants.some((v) =>
["bug", "issue", "problem", "exception", "failure"].includes(v)
)
).toBe(true);
});
it("should expand multi-word queries", () => {
const variants = expander.expand("database error");
expect(variants).toContain("database error");
expect(variants.length).toBeGreaterThan(1);
});
it("should preserve word order in expansions", () => {
const variants = expander.expand("api endpoint");
for (const variant of variants) {
const words = variant.split(" ");
expect(words).toHaveLength(2);
}
});
it("should respect maxVariants limit", () => {
const limitedExpander = new QueryExpander({
enabled: true,
maxVariants: 2,
});
const variants = limitedExpander.expand("error database api");
expect(variants.length).toBeLessThanOrEqual(2);
});
});
describe("Domain Synonyms", () => {
let expander: QueryExpander;
beforeEach(() => {
expander = new QueryExpander({ enabled: true, maxVariants: 10 });
});
it("should expand error-related terms", () => {
const synonyms = expander.getSynonyms("error");
expect(synonyms).toContain("bug");
expect(synonyms).toContain("issue");
expect(synonyms).toContain("problem");
});
it("should expand API-related terms", () => {
const synonyms = expander.getSynonyms("api");
expect(synonyms).toContain("endpoint");
expect(synonyms).toContain("interface");
});
it("should expand database-related terms", () => {
const synonyms = expander.getSynonyms("database");
expect(synonyms).toContain("db");
expect(synonyms).toContain("datastore");
});
it("should expand function-related terms", () => {
const synonyms = expander.getSynonyms("function");
expect(synonyms).toContain("method");
expect(synonyms).toContain("handler");
});
it("should expand authentication terms", () => {
const synonyms = expander.getSynonyms("auth");
expect(synonyms).toContain("authentication");
expect(synonyms).toContain("login");
});
});
describe("Synonym Lookup", () => {
let expander: QueryExpander;
beforeEach(() => {
expander = new QueryExpander({ enabled: true });
});
it("should get synonyms for known word", () => {
const synonyms = expander.getSynonyms("error");
expect(synonyms.length).toBeGreaterThan(0);
});
it("should return empty array for unknown word", () => {
const synonyms = expander.getSynonyms("xyznonexistent");
expect(synonyms).toEqual([]);
});
it("should handle case-insensitive lookup", () => {
const synonyms1 = expander.getSynonyms("Error");
const synonyms2 = expander.getSynonyms("ERROR");
const synonyms3 = expander.getSynonyms("error");
expect(synonyms1).toEqual(synonyms3);
expect(synonyms2).toEqual(synonyms3);
});
it("should check if word has synonyms", () => {
expect(expander.hasSynonyms("error")).toBe(true);
expect(expander.hasSynonyms("xyznonexistent")).toBe(false);
});
});
describe("Custom Synonyms", () => {
it("should merge custom synonyms with defaults", () => {
const customSynonyms = new Map([["myterm", ["synonym1", "synonym2"]]]);
const expander = new QueryExpander({
enabled: true,
customSynonyms,
});
const synonyms = expander.getSynonyms("myterm");
expect(synonyms).toContain("synonym1");
expect(synonyms).toContain("synonym2");
});
it("should extend existing synonyms", () => {
const customSynonyms = new Map([["error", ["glitch", "malfunction"]]]);
const expander = new QueryExpander({
enabled: true,
customSynonyms,
});
const synonyms = expander.getSynonyms("error");
// Should have original synonyms
expect(synonyms).toContain("bug");
// Should have new synonyms
expect(synonyms).toContain("glitch");
expect(synonyms).toContain("malfunction");
});
it("should add synonyms dynamically", () => {
const expander = new QueryExpander({ enabled: true });
expander.addSynonyms("custom", ["alt1", "alt2"]);
const synonyms = expander.getSynonyms("custom");
expect(synonyms).toContain("alt1");
expect(synonyms).toContain("alt2");
});
it("should not duplicate synonyms when adding", () => {
const expander = new QueryExpander({ enabled: true });
expander.addSynonyms("error", ["bug", "newterm"]);
const synonyms = expander.getSynonyms("error");
const bugCount = synonyms.filter((s) => s === "bug").length;
expect(bugCount).toBe(1);
});
});
describe("Query Processing", () => {
let expander: QueryExpander;
beforeEach(() => {
expander = new QueryExpander({ enabled: true, maxVariants: 5 });
});
it("should handle query with no expandable words", () => {
const variants = expander.expand("xyz abc 123");
// Should only return original
expect(variants).toEqual(["xyz abc 123"]);
});
it("should handle single word query", () => {
const variants = expander.expand("error");
expect(variants).toContain("error");
expect(variants.length).toBeGreaterThan(1);
});
it("should handle mixed expandable and non-expandable words", () => {
const variants = expander.expand("critical error found");
expect(variants).toContain("critical error found");
// Should have variants with error synonyms
expect(
variants.some((v) => v.includes("bug") || v.includes("issue"))
).toBe(true);
});
});
describe("Edge Cases", () => {
let expander: QueryExpander;
beforeEach(() => {
expander = new QueryExpander({ enabled: true, maxVariants: 5 });
});
it("should handle empty query", () => {
const variants = expander.expand("");
expect(variants).toEqual([""]);
});
it("should handle whitespace-only query", () => {
const variants = expander.expand(" ");
expect(variants.length).toBeGreaterThanOrEqual(1);
});
it("should handle query with special characters", () => {
const variants = expander.expand("error: api/v2");
// Should not crash
expect(variants.length).toBeGreaterThanOrEqual(1);
});
});
describe("Environment Configuration", () => {
it("should read enabled from environment", () => {
const original = process.env.CCCMEMORY_QUERY_EXPANSION;
process.env.CCCMEMORY_QUERY_EXPANSION = "true";
const config = getExpansionConfig();
expect(config.enabled).toBe(true);
// Restore
if (original !== undefined) {
process.env.CCCMEMORY_QUERY_EXPANSION = original;
} else {
delete process.env.CCCMEMORY_QUERY_EXPANSION;
}
});
it("should read maxVariants from environment", () => {
const original = process.env.CCCMEMORY_MAX_QUERY_VARIANTS;
process.env.CCCMEMORY_MAX_QUERY_VARIANTS = "10";
const config = getExpansionConfig();
expect(config.maxVariants).toBe(10);
// Restore
if (original !== undefined) {
process.env.CCCMEMORY_MAX_QUERY_VARIANTS = original;
} else {
delete process.env.CCCMEMORY_MAX_QUERY_VARIANTS;
}
});
});
describe("Factory Function", () => {
it("should create expander with config", () => {
const expander = getQueryExpander({ enabled: true });
expect(expander).toBeInstanceOf(QueryExpander);
expect(expander.getConfig().enabled).toBe(true);
});
it("should create expander without config", () => {
const expander = getQueryExpander();
expect(expander).toBeInstanceOf(QueryExpander);
});
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/RequirementsExtractor.test.ts | TypeScript | /**
* Unit tests for RequirementsExtractor
*/
import { RequirementsExtractor } from '../../parsers/RequirementsExtractor';
import type { Message, ToolUse, ToolResult } from '../../parsers/ConversationParser';
describe('RequirementsExtractor', () => {
let extractor: RequirementsExtractor;
beforeEach(() => {
extractor = new RequirementsExtractor();
});
describe('extractRequirements', () => {
// NOTE(review): interior of the extractRequirements describe(), whose
// header and the shared `extractor` beforeEach sit just above this span.
// Each test feeds a single user Message and checks the extracted
// requirement's `type` classification.
it('should extract dependency requirements', () => {
  const messages: Message[] = [
    {
      id: 'msg-1',
      conversation_id: 'conv-1',
      message_type: 'text',
      role: 'user',
      content: 'We need to use the Express library for the API server',
      timestamp: Date.now(),
      is_sidechain: false,
      metadata: {},
    },
  ];
  const requirements = extractor.extractRequirements(messages);
  expect(requirements.length).toBeGreaterThan(0);
  const depReq = requirements.find(r => r.type === 'dependency');
  expect(depReq).toBeDefined();
  expect(depReq?.description).toContain('Express');
});
it('should extract performance requirements', () => {
  const messages: Message[] = [
    {
      id: 'msg-1',
      conversation_id: 'conv-1',
      message_type: 'text',
      role: 'user',
      content: 'Response time must be under 200ms for the API',
      timestamp: Date.now(),
      is_sidechain: false,
      metadata: {},
    },
  ];
  const requirements = extractor.extractRequirements(messages);
  expect(requirements.length).toBeGreaterThan(0);
  const perfReq = requirements.find(r => r.type === 'performance');
  expect(perfReq).toBeDefined();
  // The concrete threshold should survive into the description.
  expect(perfReq?.description).toContain('200ms');
});
it('should extract compatibility requirements', () => {
  const messages: Message[] = [
    {
      id: 'msg-1',
      conversation_id: 'conv-1',
      message_type: 'text',
      role: 'user',
      content: 'The app must support Node.js version 18 or higher',
      timestamp: Date.now(),
      is_sidechain: false,
      metadata: {},
    },
  ];
  const requirements = extractor.extractRequirements(messages);
  expect(requirements.length).toBeGreaterThan(0);
  const compatReq = requirements.find(r => r.type === 'compatibility');
  expect(compatReq).toBeDefined();
});
it('should extract business requirements', () => {
  const messages: Message[] = [
    {
      id: 'msg-1',
      conversation_id: 'conv-1',
      message_type: 'text',
      role: 'user',
      content: 'Business requirement: costs must not exceed $1000 per month',
      timestamp: Date.now(),
      is_sidechain: false,
      metadata: {},
    },
  ];
  const requirements = extractor.extractRequirements(messages);
  expect(requirements.length).toBeGreaterThan(0);
  const bizReq = requirements.find(r => r.type === 'business');
  expect(bizReq).toBeDefined();
});
it('should handle empty messages array', () => {
  // Degenerate input must yield an empty result, not a throw.
  const requirements = extractor.extractRequirements([]);
  expect(requirements).toEqual([]);
});
it('should deduplicate similar requirements', () => {
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'user',
content: 'We need to use the React library',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
{
id: 'msg-2',
conversation_id: 'conv-1',
message_type: 'text',
role: 'user',
content: 'We need to use the React library',
timestamp: Date.now() + 1000,
is_sidechain: false,
metadata: {},
},
];
const requirements = extractor.extractRequirements(messages);
// Should deduplicate identical requirements
const reactReqs = requirements.filter(r => r.description.includes('React'));
expect(reactReqs.length).toBe(1);
});
});
describe('extractValidations', () => {
it('should extract npm test validations', () => {
const toolUses: ToolUse[] = [
{
id: 'use-1',
message_id: 'msg-1',
tool_name: 'Bash',
tool_input: { command: 'npm test' },
timestamp: Date.now(),
},
];
const toolResults: ToolResult[] = [
{
id: 'res-1',
tool_use_id: 'use-1',
message_id: 'msg-1',
content: 'All tests passed ✓',
stdout: 'All tests passed ✓',
stderr: '',
is_error: false,
is_image: false,
timestamp: Date.now(),
},
];
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'assistant',
content: 'Running tests',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const validations = extractor.extractValidations(toolUses, toolResults, messages);
expect(validations.length).toBeGreaterThan(0);
expect(validations[0].test_command).toBe('npm test');
expect(validations[0].result).toBe('passed');
});
it('should extract pytest validations', () => {
const toolUses: ToolUse[] = [
{
id: 'use-1',
message_id: 'msg-1',
tool_name: 'Bash',
tool_input: { command: 'pytest tests/' },
timestamp: Date.now(),
},
];
const toolResults: ToolResult[] = [
{
id: 'res-1',
tool_use_id: 'use-1',
message_id: 'msg-1',
content: '3 failed tests',
stdout: '3 failed tests',
stderr: '',
is_error: false,
is_image: false,
timestamp: Date.now(),
},
];
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'assistant',
content: 'Running Python tests',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const validations = extractor.extractValidations(toolUses, toolResults, messages);
expect(validations.length).toBeGreaterThan(0);
expect(validations[0].test_command).toContain('pytest');
expect(validations[0].result).toBe('failed');
});
it('should handle test errors', () => {
const toolUses: ToolUse[] = [
{
id: 'use-1',
message_id: 'msg-1',
tool_name: 'Bash',
tool_input: { command: 'npm test' },
timestamp: Date.now(),
},
];
const toolResults: ToolResult[] = [
{
id: 'res-1',
tool_use_id: 'use-1',
message_id: 'msg-1',
content: 'Error: command not found',
stdout: '',
stderr: 'Error: command not found',
is_error: true,
is_image: false,
timestamp: Date.now(),
},
];
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'assistant',
content: 'Running tests',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const validations = extractor.extractValidations(toolUses, toolResults, messages);
expect(validations.length).toBeGreaterThan(0);
expect(validations[0].result).toBe('error');
});
it('should extract performance data from test results', () => {
const toolUses: ToolUse[] = [
{
id: 'use-1',
message_id: 'msg-1',
tool_name: 'Bash',
tool_input: { command: 'npm test' },
timestamp: Date.now(),
},
];
const toolResults: ToolResult[] = [
{
id: 'res-1',
tool_use_id: 'use-1',
message_id: 'msg-1',
content: 'Tests passed in 1.5s. 10 passed',
stdout: 'Tests passed in 1.5s. 10 passed',
stderr: '',
is_error: false,
is_image: false,
timestamp: Date.now(),
},
];
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'assistant',
content: 'Running tests',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const validations = extractor.extractValidations(toolUses, toolResults, messages);
expect(validations.length).toBeGreaterThan(0);
expect(validations[0].performance_data).toBeDefined();
expect(validations[0].performance_data?.duration_ms).toBe(1500);
expect(validations[0].performance_data?.tests_passed).toBe(10);
});
it('should ignore non-test commands', () => {
const toolUses: ToolUse[] = [
{
id: 'use-1',
message_id: 'msg-1',
tool_name: 'Bash',
tool_input: { command: 'ls -la' },
timestamp: Date.now(),
},
];
const toolResults: ToolResult[] = [
{
id: 'res-1',
tool_use_id: 'use-1',
message_id: 'msg-1',
content: 'file1.txt file2.txt',
stdout: 'file1.txt file2.txt',
stderr: '',
is_error: false,
is_image: false,
timestamp: Date.now(),
},
];
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'assistant',
content: 'Listing files',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const validations = extractor.extractValidations(toolUses, toolResults, messages);
expect(validations).toEqual([]);
});
it('should handle empty tool uses', () => {
const validations = extractor.extractValidations([], [], []);
expect(validations).toEqual([]);
});
});
describe('Rationale Extraction', () => {
it('should extract rationale with "because"', () => {
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'user',
content: 'We need to use PostgreSQL library because it provides better ACID guarantees',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const requirements = extractor.extractRequirements(messages);
expect(requirements.length).toBeGreaterThan(0);
const withRationale = requirements.find(r => r.rationale);
expect(withRationale).toBeDefined();
if (withRationale) {
expect(withRationale.rationale).toContain('ACID');
}
});
it('should extract rationale with "since"', () => {
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'user',
content: 'Install React package since it handles UI components efficiently',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const requirements = extractor.extractRequirements(messages);
expect(Array.isArray(requirements)).toBe(true);
});
});
describe('Component Extraction', () => {
it('should extract affected components from message', () => {
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'user',
content: 'The frontend must support React, and the backend needs Express library',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const requirements = extractor.extractRequirements(messages);
expect(requirements.length).toBeGreaterThan(0);
const withComponents = requirements.find(r => r.affects_components.length > 0);
expect(withComponents).toBeDefined();
if (withComponents) {
expect(withComponents.affects_components).toContain('frontend');
expect(withComponents.affects_components).toContain('backend');
}
});
it('should handle messages without component keywords', () => {
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'user',
content: 'Need to use some library',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const requirements = extractor.extractRequirements(messages);
expect(Array.isArray(requirements)).toBe(true);
});
});
describe('Edge Cases', () => {
it('should handle messages with null content', () => {
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'user',
content: undefined,
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const requirements = extractor.extractRequirements(messages);
expect(requirements).toEqual([]);
});
it('should handle tool uses without command', () => {
const toolUses: ToolUse[] = [
{
id: 'use-1',
message_id: 'msg-1',
tool_name: 'Bash',
tool_input: {},
timestamp: Date.now(),
},
];
const validations = extractor.extractValidations(toolUses, [], []);
expect(validations).toEqual([]);
});
it('should handle non-string commands', () => {
const toolUses: ToolUse[] = [
{
id: 'use-1',
message_id: 'msg-1',
tool_name: 'Bash',
tool_input: { command: 123 },
timestamp: Date.now(),
},
];
const validations = extractor.extractValidations(toolUses, [], []);
expect(validations).toEqual([]);
});
it('should handle tool results without matching tool use', () => {
const toolUses: ToolUse[] = [
{
id: 'use-1',
message_id: 'msg-1',
tool_name: 'Bash',
tool_input: { command: 'npm test' },
timestamp: Date.now(),
},
];
const toolResults: ToolResult[] = [
{
id: 'res-1',
tool_use_id: 'use-999',
message_id: 'msg-1',
content: 'Tests passed',
stdout: 'Tests passed',
stderr: '',
is_error: false,
is_image: false,
timestamp: Date.now(),
},
];
const messages: Message[] = [
{
id: 'msg-1',
conversation_id: 'conv-1',
message_type: 'text',
role: 'assistant',
content: 'Testing',
timestamp: Date.now(),
is_sidechain: false,
metadata: {},
},
];
const validations = extractor.extractValidations(toolUses, toolResults, messages);
// Should not crash, may return empty array
expect(Array.isArray(validations)).toBe(true);
});
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/ResultAggregator.test.ts | TypeScript | /**
* Unit tests for ResultAggregator
*/
import {
ResultAggregator,
getResultAggregator,
} from "../../search/ResultAggregator.js";
import type { ChunkSearchResult } from "../../embeddings/VectorStore.js";
// Covers ResultAggregator: grouping chunk-level vector hits into per-message
// results, similarity threshold and limit handling, near-duplicate removal,
// and merging chunk results with whole-message search results.
describe("ResultAggregator", () => {
  describe("Constructor and Configuration", () => {
    it("should create with default config", () => {
      const aggregator = new ResultAggregator();
      expect(aggregator).toBeDefined();
    });
    it("should accept custom config", () => {
      const aggregator = new ResultAggregator({
        minSimilarity: 0.5,
        limit: 5,
        deduplicate: false,
      });
      expect(aggregator).toBeDefined();
    });
  });
  describe("Aggregation", () => {
    it("should aggregate chunks by message ID", () => {
      const aggregator = new ResultAggregator({ minSimilarity: 0.0 });
      // Two chunks belong to message 1, one to message 2.
      const chunks: ChunkSearchResult[] = [
        createChunkResult(1, "chunk1", 0, 0.9, "Content from chunk 1"),
        createChunkResult(1, "chunk2", 1, 0.7, "Content from chunk 2"),
        createChunkResult(2, "chunk3", 0, 0.8, "Different message content"),
      ];
      const results = aggregator.aggregate(chunks);
      expect(results).toHaveLength(2);
      // Message 1 should have higher similarity (0.9) and come first
      expect(results[0].messageId).toBe(1);
      expect(results[0].similarity).toBe(0.9);
      expect(results[0].matchedChunks).toHaveLength(2);
    });
    it("should use max similarity from chunks", () => {
      const aggregator = new ResultAggregator({ minSimilarity: 0.0 });
      const chunks: ChunkSearchResult[] = [
        createChunkResult(1, "chunk1", 0, 0.5, "Low score chunk"),
        createChunkResult(1, "chunk2", 1, 0.9, "High score chunk"),
        createChunkResult(1, "chunk3", 2, 0.3, "Lower score chunk"),
      ];
      const results = aggregator.aggregate(chunks);
      expect(results).toHaveLength(1);
      // The message score is the best chunk score, and the snippet comes
      // from that best-scoring chunk.
      expect(results[0].similarity).toBe(0.9);
      expect(results[0].bestSnippet).toBe("High score chunk");
    });
    it("should filter by minimum similarity", () => {
      const aggregator = new ResultAggregator({ minSimilarity: 0.5 });
      const chunks: ChunkSearchResult[] = [
        createChunkResult(1, "chunk1", 0, 0.6, "Above threshold"),
        createChunkResult(2, "chunk2", 0, 0.3, "Below threshold"),
        createChunkResult(3, "chunk3", 0, 0.8, "Well above threshold"),
      ];
      const results = aggregator.aggregate(chunks);
      expect(results).toHaveLength(2);
      // Survivors are ordered by similarity (0.8 first, then 0.6).
      expect(results.map((r) => r.messageId)).toEqual([3, 1]);
    });
    it("should respect limit", () => {
      const aggregator = new ResultAggregator({
        minSimilarity: 0.0,
        limit: 2,
        deduplicate: false, // Disable deduplication for this test
      });
      const chunks: ChunkSearchResult[] = [
        createChunkResult(1, "chunk1", 0, 0.9, "First unique content about apples"),
        createChunkResult(2, "chunk2", 0, 0.8, "Second unique content about bananas"),
        createChunkResult(3, "chunk3", 0, 0.7, "Third unique content about oranges"),
        createChunkResult(4, "chunk4", 0, 0.6, "Fourth unique content about grapes"),
      ];
      const results = aggregator.aggregate(chunks);
      expect(results).toHaveLength(2);
    });
    it("should sort by similarity descending", () => {
      const aggregator = new ResultAggregator({ minSimilarity: 0.0 });
      const chunks: ChunkSearchResult[] = [
        createChunkResult(1, "chunk1", 0, 0.5, "Medium"),
        createChunkResult(2, "chunk2", 0, 0.9, "Highest"),
        createChunkResult(3, "chunk3", 0, 0.3, "Lowest"),
        createChunkResult(4, "chunk4", 0, 0.7, "High"),
      ];
      const results = aggregator.aggregate(chunks);
      expect(results[0].messageId).toBe(2);
      expect(results[1].messageId).toBe(4);
      expect(results[2].messageId).toBe(1);
      expect(results[3].messageId).toBe(3);
    });
  });
  describe("Deduplication", () => {
    it("should deduplicate similar content", () => {
      const aggregator = new ResultAggregator({
        minSimilarity: 0.0,
        deduplicate: true,
        deduplicationThreshold: 0.7,
      });
      const chunks: ChunkSearchResult[] = [
        createChunkResult(1, "chunk1", 0, 0.9, "The quick brown fox jumps over the lazy dog"),
        createChunkResult(2, "chunk2", 0, 0.8, "The quick brown fox jumps over the lazy cat"), // Very similar
        createChunkResult(3, "chunk3", 0, 0.7, "Completely different content about programming"),
      ];
      const results = aggregator.aggregate(chunks);
      // Should have deduplicated similar messages
      // NOTE(review): the assertion is deliberately loose (<= 3) because the
      // similarity metric's exact cutoff behavior is not pinned down here.
      expect(results.length).toBeLessThanOrEqual(3);
      // First and third should definitely be in results
      expect(results.some((r) => r.messageId === 1)).toBe(true);
      expect(results.some((r) => r.messageId === 3)).toBe(true);
    });
    it("should not deduplicate when disabled", () => {
      const aggregator = new ResultAggregator({
        minSimilarity: 0.0,
        deduplicate: false,
      });
      // Even byte-identical content survives when deduplication is off.
      const chunks: ChunkSearchResult[] = [
        createChunkResult(1, "chunk1", 0, 0.9, "Identical content here"),
        createChunkResult(2, "chunk2", 0, 0.8, "Identical content here"),
      ];
      const results = aggregator.aggregate(chunks);
      expect(results).toHaveLength(2);
    });
  });
  describe("Merge Results", () => {
    it("should merge chunk and message results", () => {
      const aggregator = new ResultAggregator({ minSimilarity: 0.0 });
      const chunkResults = aggregator.aggregate([
        createChunkResult(1, "chunk1", 0, 0.8, "Chunk content"),
      ]);
      const messageResults = [
        { messageId: 2, content: "Message only content", similarity: 0.7 },
        { messageId: 1, content: "Better message content", similarity: 0.9 }, // Higher score
      ];
      const merged = aggregator.mergeResults(chunkResults, messageResults);
      expect(merged).toHaveLength(2);
      // Message 1 should have the higher similarity from message results
      const msg1 = merged.find((r) => r.messageId === 1);
      expect(msg1?.similarity).toBe(0.9);
      expect(msg1?.bestSnippet).toBe("Better message content");
    });
    it("should add new message results", () => {
      const aggregator = new ResultAggregator({ minSimilarity: 0.0 });
      const chunkResults = aggregator.aggregate([
        createChunkResult(1, "chunk1", 0, 0.8, "Chunk content"),
      ]);
      // Message 2 appears only in the message-level results.
      const messageResults = [
        { messageId: 2, content: "New message content", similarity: 0.7 },
      ];
      const merged = aggregator.mergeResults(chunkResults, messageResults);
      expect(merged).toHaveLength(2);
      expect(merged.some((r) => r.messageId === 2)).toBe(true);
    });
  });
  describe("Edge Cases", () => {
    it("should handle empty input", () => {
      const aggregator = new ResultAggregator();
      const results = aggregator.aggregate([]);
      expect(results).toEqual([]);
    });
    it("should handle all chunks below threshold", () => {
      const aggregator = new ResultAggregator({ minSimilarity: 0.9 });
      const chunks: ChunkSearchResult[] = [
        createChunkResult(1, "chunk1", 0, 0.5, "Content"),
        createChunkResult(2, "chunk2", 0, 0.3, "Content"),
      ];
      const results = aggregator.aggregate(chunks);
      expect(results).toEqual([]);
    });
  });
  describe("Factory Function", () => {
    it("should create aggregator with config", () => {
      const aggregator = getResultAggregator({ limit: 5 });
      expect(aggregator).toBeInstanceOf(ResultAggregator);
    });
  });
});
// Builds a ChunkSearchResult fixture for the tests above. Offsets are derived
// from the chunk index assuming fixed 100-character chunks, and totalChunks is
// pinned at 3 for every fixture.
function createChunkResult(
  messageId: number,
  chunkId: string,
  chunkIndex: number,
  similarity: number,
  content: string
): ChunkSearchResult {
  const chunkSpan = 100;
  const startOffset = chunkIndex * chunkSpan;
  return {
    chunkId,
    messageId,
    chunkIndex,
    totalChunks: 3,
    content,
    startOffset,
    endOffset: startOffset + chunkSpan,
    similarity,
    strategy: "sentence",
  };
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/SQLiteManagerPath.test.ts | TypeScript | import fs from 'fs';
import os from 'os';
import path from 'path';
import { getSQLiteManager, resetSQLiteManager } from '../../storage/SQLiteManager.js';
describe('SQLiteManager path resolution', () => {
  // Snapshot of the process environment, restored after every test so the
  // env mutations below cannot leak into other suites.
  const envSnapshot = { ...process.env };
  afterEach(() => {
    process.env = { ...envSnapshot };
    resetSQLiteManager();
  });
  it('uses a single DB path when CCCMEMORY_DB_MODE=single', () => {
    // Point HOME at a scratch directory so the resolved path is predictable.
    const fakeHome = fs.mkdtempSync(path.join(os.tmpdir(), 'cccmemory-home-'));
    process.env.CCCMEMORY_DB_MODE = 'single';
    process.env.HOME = fakeHome;
    delete process.env.CCCMEMORY_DB_PATH;
    const manager = getSQLiteManager();
    expect(manager.getDbPath()).toBe(path.join(fakeHome, '.cccmemory.db'));
    manager.close();
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/SemanticSearch.test.ts | TypeScript | /**
* Unit tests for SemanticSearch
*/
import { jest } from '@jest/globals';
import { SemanticSearch } from '../../search/SemanticSearch';
import { getSQLiteManager, resetSQLiteManager } from '../../storage/SQLiteManager';
// Transformers-backed suites are skipped in CI (environment compatibility
// issues) and on macOS ARM64, where the ONNX runtime has known problems.
const isCI = ['CI', 'GITHUB_ACTIONS'].some((flag) => Boolean(process.env[flag]));
const isMacOSArm64 = process.platform === 'darwin' && process.arch === 'arm64';
const skipTransformers = isCI || isMacOSArm64;
// Covers SemanticSearch construction, indexing, and search. Suites that hit
// the Transformers/ONNX embedding stack are conditionally skipped via the
// skipTransformers flag computed above.
describe('SemanticSearch', () => {
  let semanticSearch: SemanticSearch;
  beforeEach(() => {
    // Use in-memory database for tests
    const sqliteManager = getSQLiteManager({ dbPath: ':memory:' });
    // Disable foreign keys for testing
    sqliteManager.getDatabase().pragma('foreign_keys = OFF');
    // Silence console logs during tests
    jest.spyOn(console, 'log').mockImplementation(() => {});
    jest.spyOn(console, 'error').mockImplementation(() => {});
    jest.spyOn(console, 'warn').mockImplementation(() => {});
    semanticSearch = new SemanticSearch(sqliteManager);
    // Force vectorStore to use BLOB storage
    // NOTE(review): this pokes a private field via a double cast; it will
    // silently stop working if VectorStore renames hasVecExtension — confirm
    // whether a public toggle exists for tests.
    const vectorStore = (semanticSearch as unknown as { vectorStore: { hasVecExtension: boolean } }).vectorStore;
    (vectorStore as { hasVecExtension: boolean }).hasVecExtension = false;
  });
  afterEach(() => {
    // Drop the shared singleton and undo the console spies between tests.
    resetSQLiteManager();
    jest.restoreAllMocks();
  });
  describe('Constructor', () => {
    it('should create SemanticSearch instance', () => {
      expect(semanticSearch).toBeDefined();
    });
  });
  // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
  (skipTransformers ? describe.skip : describe)('indexMessages', () => {
    it('should index messages with content', async () => {
      const messages = [
        { id: 1, content: 'Hello world' },
        { id: 2, content: 'Hi there' },
      ];
      // Should handle gracefully even if embeddings not available
      await expect(semanticSearch.indexMessages(messages)).resolves.not.toThrow();
    });
    it('should handle empty messages array', async () => {
      await expect(semanticSearch.indexMessages([])).resolves.not.toThrow();
    });
    it('should handle messages without content', async () => {
      // Empty string and undefined content must both be tolerated.
      const messages = [
        { id: 1, content: '' },
        { id: 2, content: undefined },
      ];
      await expect(semanticSearch.indexMessages(messages)).resolves.not.toThrow();
    });
  });
  // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
  (skipTransformers ? describe.skip : describe)('indexDecisions', () => {
    it('should index decisions', async () => {
      const decisions = [
        {
          id: 1,
          decision_text: 'Use PostgreSQL',
          rationale: 'Better for structured data',
          context: 'Database selection',
        },
      ];
      await expect(semanticSearch.indexDecisions(decisions)).resolves.not.toThrow();
    });
    it('should handle empty decisions array', async () => {
      await expect(semanticSearch.indexDecisions([])).resolves.not.toThrow();
    });
    it('should handle decisions with minimal data', async () => {
      // Only decision_text is populated; optional fields are undefined.
      const decisions = [
        {
          id: 1,
          decision_text: 'Use PostgreSQL',
          rationale: undefined,
          context: undefined,
        },
      ];
      await expect(semanticSearch.indexDecisions(decisions)).resolves.not.toThrow();
    });
  });
  // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
  (skipTransformers ? describe.skip : describe)('searchConversations', () => {
    it('should handle search without indexed data', async () => {
      const results = await semanticSearch.searchConversations('hello', 10);
      // Should return empty array or handle gracefully
      expect(Array.isArray(results)).toBe(true);
    });
    it('should not throw on valid queries', async () => {
      await expect(
        semanticSearch.searchConversations('test query', 10)
      ).resolves.not.toThrow();
    });
    it('should handle empty query', async () => {
      await expect(
        semanticSearch.searchConversations('', 10)
      ).resolves.not.toThrow();
    });
  });
  // Skip on incompatible platforms - TransformersEmbeddings has ONNX runtime issues on macOS ARM64
  (skipTransformers ? describe.skip : describe)('Edge Cases', () => {
    it('should handle messages with very long content', async () => {
      const longContent = 'a'.repeat(10000);
      const messages = [
        { id: 1, content: longContent },
      ];
      await expect(semanticSearch.indexMessages(messages)).resolves.not.toThrow();
    });
    it('should handle messages with special characters', async () => {
      // Non-ASCII text, emoji, and both quote styles in one payload.
      const messages = [
        { id: 1, content: '你好 🎉 "quotes" \'single\'' },
      ];
      await expect(semanticSearch.indexMessages(messages)).resolves.not.toThrow();
    });
    it('should handle concurrent indexing', async () => {
      const messages = [
        { id: 1, content: 'Message 1' },
        { id: 2, content: 'Message 2' },
      ];
      // Two overlapping indexMessages calls against the same instance.
      const promises = [
        semanticSearch.indexMessages(messages.slice(0, 1)),
        semanticSearch.indexMessages(messages.slice(1, 2)),
      ];
      await expect(Promise.all(promises)).resolves.not.toThrow();
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/SessionHandoffStore.test.ts | TypeScript | /**
* Unit tests for SessionHandoffStore
*/
import Database from "better-sqlite3";
import { SessionHandoffStore } from "../../handoff/SessionHandoffStore.js";
describe("SessionHandoffStore", () => {
let db: Database.Database;
let store: SessionHandoffStore;
const projectPath = "/test/project";
beforeEach(() => {
// Create in-memory database with required schema
db = new Database(":memory:");
// Create required tables
db.exec(`
-- Session Handoffs table
CREATE TABLE IF NOT EXISTS session_handoffs (
id TEXT PRIMARY KEY,
from_session_id TEXT NOT NULL,
project_path TEXT NOT NULL,
created_at INTEGER NOT NULL,
handoff_data TEXT NOT NULL,
resumed_by_session_id TEXT,
resumed_at INTEGER
);
-- Working Memory table
CREATE TABLE IF NOT EXISTS working_memory (
id TEXT PRIMARY KEY,
key TEXT NOT NULL,
value TEXT NOT NULL,
context TEXT,
tags TEXT,
session_id TEXT,
project_path TEXT NOT NULL,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
expires_at INTEGER,
embedding BLOB,
UNIQUE(project_path, key)
);
CREATE VIRTUAL TABLE IF NOT EXISTS working_memory_fts USING fts5(
id UNINDEXED,
key,
value,
context
);
-- Decisions table (minimal for testing)
CREATE TABLE IF NOT EXISTS decisions (
id TEXT PRIMARY KEY,
message_id TEXT NOT NULL,
decision_text TEXT NOT NULL,
rationale TEXT,
context TEXT,
timestamp INTEGER NOT NULL
);
-- Tool uses table (minimal for testing)
CREATE TABLE IF NOT EXISTS tool_uses (
id TEXT PRIMARY KEY,
message_id TEXT NOT NULL,
tool_name TEXT NOT NULL,
parameters TEXT,
result TEXT,
timestamp INTEGER NOT NULL
);
-- Messages table (minimal for testing)
CREATE TABLE IF NOT EXISTS messages (
id TEXT PRIMARY KEY,
conversation_id TEXT NOT NULL,
role TEXT NOT NULL,
content TEXT,
timestamp INTEGER NOT NULL
);
-- Conversations table (minimal for testing)
CREATE TABLE IF NOT EXISTS conversations (
id TEXT PRIMARY KEY,
session_id TEXT NOT NULL,
project_path TEXT NOT NULL,
created_at INTEGER NOT NULL
);
`);
store = new SessionHandoffStore(db);
});
afterEach(() => {
db.close();
});
describe("prepareHandoff", () => {
it("should create a handoff document", () => {
const handoff = store.prepareHandoff({
sessionId: "session-123",
projectPath,
});
expect(handoff.id).toBeDefined();
expect(handoff.fromSessionId).toBe("session-123");
expect(handoff.projectPath).toBe(projectPath);
expect(handoff.createdAt).toBeDefined();
expect(handoff.contextSummary).toBeDefined();
});
it("should store handoff in database", () => {
const handoff = store.prepareHandoff({
sessionId: "session-123",
projectPath,
});
const row = db
.prepare("SELECT * FROM session_handoffs WHERE id = ?")
.get(handoff.id) as { id: string; from_session_id: string } | undefined;
expect(row).toBeDefined();
expect(row?.from_session_id).toBe("session-123");
});
it("should include working memory items when requested", () => {
// Add some working memory
db.prepare(
`INSERT INTO working_memory (id, key, value, project_path, created_at, updated_at)
VALUES (?, ?, ?, ?, ?, ?)`
).run("mem-1", "storage", "SQLite", projectPath, Date.now(), Date.now());
const handoff = store.prepareHandoff({
sessionId: "session-123",
projectPath,
include: ["memory"],
});
expect(handoff.workingMemory.length).toBe(1);
expect(handoff.workingMemory[0].key).toBe("storage");
});
it("should use default session ID if not provided", () => {
const handoff = store.prepareHandoff({
projectPath,
});
expect(handoff.fromSessionId).toBe("current");
});
it("should include selective data based on include array", () => {
const handoff = store.prepareHandoff({
projectPath,
include: ["decisions"],
});
// Should have attempted to get decisions (empty array due to no data)
expect(handoff.decisions).toEqual([]);
// Should not have working memory since it wasn't included
expect(handoff.workingMemory).toEqual([]);
});
});
describe("resumeFromHandoff", () => {
it("should resume from a specific handoff by ID", () => {
const original = store.prepareHandoff({
sessionId: "session-123",
projectPath,
});
const resumed = store.resumeFromHandoff({
handoffId: original.id,
projectPath,
newSessionId: "session-456",
});
expect(resumed).not.toBeNull();
expect(resumed?.id).toBe(original.id);
expect(resumed?.resumedBy).toBe("session-456");
});
it("should resume from most recent unresumed handoff when no ID specified", async () => {
// Create two handoffs with a delay to ensure different timestamps
store.prepareHandoff({
sessionId: "session-1",
projectPath,
});
// Wait a bit to ensure different timestamps
await new Promise((resolve) => setTimeout(resolve, 10));
const second = store.prepareHandoff({
sessionId: "session-2",
projectPath,
});
const resumed = store.resumeFromHandoff({
projectPath,
newSessionId: "session-3",
});
expect(resumed).not.toBeNull();
expect(resumed?.fromSessionId).toBe("session-2");
expect(resumed?.id).toBe(second.id);
});
it("should return null when no unresumed handoff found", () => {
// Create and resume a handoff
const handoff = store.prepareHandoff({
sessionId: "session-1",
projectPath,
});
store.resumeFromHandoff({
handoffId: handoff.id,
projectPath,
newSessionId: "session-2",
});
// Try to resume again without specifying ID
const result = store.resumeFromHandoff({
projectPath,
newSessionId: "session-3",
});
expect(result).toBeNull();
});
it("should mark handoff as resumed in database", () => {
const handoff = store.prepareHandoff({
sessionId: "session-1",
projectPath,
});
store.resumeFromHandoff({
handoffId: handoff.id,
projectPath,
newSessionId: "session-2",
});
const row = db
.prepare("SELECT * FROM session_handoffs WHERE id = ?")
.get(handoff.id) as {
resumed_by_session_id: string;
resumed_at: number;
} | undefined;
expect(row?.resumed_by_session_id).toBe("session-2");
expect(row?.resumed_at).toBeDefined();
});
it("should inject working memory when requested", () => {
// Create handoff with working memory
db.prepare(
`INSERT INTO working_memory (id, key, value, project_path, created_at, updated_at)
VALUES (?, ?, ?, ?, ?, ?)`
).run("mem-1", "test_key", "test_value", projectPath, Date.now(), Date.now());
const handoff = store.prepareHandoff({
sessionId: "session-1",
projectPath,
include: ["memory"],
});
// Clear working memory
db.prepare("DELETE FROM working_memory").run();
// Resume with inject_context
store.resumeFromHandoff({
handoffId: handoff.id,
projectPath,
newSessionId: "session-2",
injectContext: true,
});
// Check working memory was restored
const restored = db
.prepare("SELECT * FROM working_memory WHERE key = ?")
.get("test_key") as { value: string } | undefined;
expect(restored?.value).toBe("test_value");
});
});
// Covers listHandoffs: default filtering of resumed handoffs, includeResumed
// and limit options, created_at descending order, and summary presence.
describe("listHandoffs", () => {
  let secondHandoff: ReturnType<typeof store.prepareHandoff>;
  beforeEach(async () => {
    // Create several handoffs with delays to ensure different timestamps
    // (created_at ordering below would otherwise be nondeterministic).
    store.prepareHandoff({
      sessionId: "session-1",
      projectPath,
    });
    await new Promise((resolve) => setTimeout(resolve, 10));
    secondHandoff = store.prepareHandoff({
      sessionId: "session-2",
      projectPath,
    });
    await new Promise((resolve) => setTimeout(resolve, 10));
    store.prepareHandoff({
      sessionId: "session-3",
      projectPath,
    });
    // Resume the second one so exactly one handoff is marked resumed.
    store.resumeFromHandoff({
      handoffId: secondHandoff.id,
      projectPath,
      newSessionId: "session-4",
    });
  });
  it("should list unresumed handoffs by default", () => {
    const handoffs = store.listHandoffs(projectPath);
    expect(handoffs.length).toBe(2);
    expect(handoffs.every((h) => h.resumedBy === undefined)).toBe(true);
  });
  it("should include resumed handoffs when requested", () => {
    const handoffs = store.listHandoffs(projectPath, { includeResumed: true });
    expect(handoffs.length).toBe(3);
  });
  it("should respect limit parameter", () => {
    const handoffs = store.listHandoffs(projectPath, { limit: 1 });
    expect(handoffs.length).toBe(1);
  });
  it("should order by created_at descending", () => {
    const handoffs = store.listHandoffs(projectPath);
    // Most recent first
    expect(handoffs[0].fromSessionId).toBe("session-3");
  });
  it("should include summary for each handoff", () => {
    const handoffs = store.listHandoffs(projectPath);
    expect(handoffs.every((h) => typeof h.summary === "string")).toBe(true);
  });
});
// Lookup and deletion of individual handoffs by ID, including the
// null/false paths for IDs that do not exist.
describe("getHandoff", () => {
  it("should get a specific handoff by ID", () => {
    const prepared = store.prepareHandoff({ sessionId: "session-1", projectPath });
    const fetched = store.getHandoff(prepared.id);
    expect(fetched).not.toBeNull();
    expect(fetched?.id).toBe(prepared.id);
    expect(fetched?.fromSessionId).toBe("session-1");
  });
  it("should return null for non-existent ID", () => {
    expect(store.getHandoff("nonexistent")).toBeNull();
  });
});
describe("deleteHandoff", () => {
  it("should delete a handoff by ID", () => {
    const prepared = store.prepareHandoff({ sessionId: "session-1", projectPath });
    const wasDeleted = store.deleteHandoff(prepared.id);
    expect(wasDeleted).toBe(true);
    // Once deleted, the handoff must no longer be retrievable.
    expect(store.getHandoff(prepared.id)).toBeNull();
  });
  it("should return false for non-existent ID", () => {
    expect(store.deleteHandoff("nonexistent")).toBe(false);
  });
});
// Verifies the human-readable contextSummary string produced by
// prepareHandoff: decision counts when decisions are included, and the
// fixed "Empty handoff." text when nothing is included.
describe("context summary generation", () => {
  it("should generate summary with decision count", () => {
    // Set up test data for decisions. Insert order matters: conversation,
    // then message, then decision (foreign-key chain).
    const convId = "conv-1";
    const msgId = "msg-1";
    db.prepare(
      `INSERT INTO conversations (id, session_id, project_path, created_at)
       VALUES (?, ?, ?, ?)`
    ).run(convId, "session-1", projectPath, Date.now());
    db.prepare(
      `INSERT INTO messages (id, conversation_id, role, content, timestamp)
       VALUES (?, ?, ?, ?, ?)`
    ).run(msgId, convId, "assistant", "test", Date.now());
    db.prepare(
      `INSERT INTO decisions (id, message_id, decision_text, timestamp)
       VALUES (?, ?, ?, ?)`
    ).run("dec-1", msgId, "Use TypeScript", Date.now());
    const handoff = store.prepareHandoff({
      sessionId: "session-1",
      projectPath,
      include: ["decisions"],
    });
    expect(handoff.contextSummary).toContain("1 decision");
  });
  it("should indicate empty handoff", () => {
    const handoff = store.prepareHandoff({
      sessionId: "session-1",
      projectPath,
      include: [],
    });
    expect(handoff.contextSummary).toBe("Empty handoff.");
  });
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/SnippetGenerator.test.ts | TypeScript | /**
* Unit tests for SnippetGenerator
*/
import {
SnippetGenerator,
getSnippetGenerator,
generateSnippet,
} from "../../search/SnippetGenerator.js";
describe("SnippetGenerator", () => {
// Construction smoke tests: the generator must be instantiable with the
// default configuration and with a custom one.
describe("Constructor and Configuration", () => {
  it("should create with default config", () => {
    expect(new SnippetGenerator()).toBeDefined();
  });
  it("should accept custom config", () => {
    const custom = new SnippetGenerator({
      targetLength: 100,
      highlight: false,
      ellipsis: "…",
    });
    expect(custom).toBeDefined();
  });
});
// Core snippet behavior: pass-through for short content, optional **term**
// highlighting, and ellipsis insertion on truncation.
describe("Basic Snippet Generation", () => {
  it("should return content as-is when shorter than target", () => {
    const generator = new SnippetGenerator({ targetLength: 100 });
    const content = "Short content.";
    const snippet = generator.generate(content, "content");
    expect(snippet).toContain("content");
  });
  it("should highlight query terms", () => {
    const generator = new SnippetGenerator({
      highlight: true,
      highlightStart: "**",
      highlightEnd: "**",
    });
    const content = "This is a test sentence with important keyword here.";
    const snippet = generator.generate(content, "important keyword");
    expect(snippet).toContain("**important**");
    expect(snippet).toContain("**keyword**");
  });
  it("should not highlight when disabled", () => {
    const generator = new SnippetGenerator({ highlight: false });
    const content = "This is a test sentence with keyword here.";
    const snippet = generator.generate(content, "keyword");
    expect(snippet).not.toContain("**");
    expect(snippet).toContain("keyword");
  });
  it("should add ellipsis for truncated content", () => {
    const generator = new SnippetGenerator({
      targetLength: 50,
      ellipsis: "...",
    });
    const content = "This is a very long sentence that definitely needs to be truncated because it exceeds the target length significantly.";
    const snippet = generator.generate(content, "truncated");
    expect(snippet).toContain("...");
  });
});
// Query processing: stop-word filtering, case-insensitive matching, and the
// fallbacks for empty queries or queries with no matching terms.
describe("Query Term Handling", () => {
  it("should filter stop words from query", () => {
    const generator = new SnippetGenerator({ highlight: true });
    const content = "The quick brown fox jumps over the lazy dog.";
    // "the" and "over" are stop words
    const snippet = generator.generate(content, "the quick fox over");
    // "quick" and "fox" should be highlighted, but not stop words
    expect(snippet).toContain("**quick**");
    expect(snippet).toContain("**fox**");
  });
  it("should handle case-insensitive matching", () => {
    const generator = new SnippetGenerator({ highlight: true });
    const content = "JavaScript is a programming language.";
    const snippet = generator.generate(content, "javascript programming");
    // Highlighting preserves the original casing of the matched text.
    expect(snippet).toContain("**JavaScript**");
    expect(snippet).toContain("**programming**");
  });
  it("should handle empty query", () => {
    const generator = new SnippetGenerator();
    const content = "Some content here.";
    const snippet = generator.generate(content, "");
    expect(snippet).toBeTruthy();
  });
  it("should handle query with no matches", () => {
    const generator = new SnippetGenerator();
    const content = "The quick brown fox jumps over the lazy dog.";
    const snippet = generator.generate(content, "zebra elephant giraffe");
    // Should return beginning of content
    expect(snippet).toContain("quick");
  });
});
// Region selection: the snippet window should land on the densest match
// region and, when configured, snap to sentence boundaries.
describe("Region Selection", () => {
  it("should find best region with highest match density", () => {
    const generator = new SnippetGenerator({
      targetLength: 50,
      highlight: false,
    });
    const content = "Unrelated intro text here. The error occurred in the database layer causing issues. More unrelated text follows.";
    const snippet = generator.generate(content, "error database");
    expect(snippet).toContain("error");
    expect(snippet).toContain("database");
  });
  it("should prefer sentence boundaries", () => {
    const generator = new SnippetGenerator({
      targetLength: 100,
      preferSentenceBoundaries: true,
      highlight: false,
    });
    const content = "First sentence. The important keyword is here. Third sentence.";
    const snippet = generator.generate(content, "keyword");
    expect(snippet).toContain("keyword");
  });
});
// Word-boundary guarantee: a truncated snippet must never cut a word in
// half; every token it contains must be a complete token of the source.
describe("Word Boundary Handling", () => {
  it("should not cut words in the middle", () => {
    const generator = new SnippetGenerator({
      targetLength: 30,
      highlight: false, // no ** markers, so snippet tokens map 1:1 to source tokens
    });
    const content = "The internationalization process is complex and requires attention.";
    const snippet = generator.generate(content, "process");
    // Strip the ellipsis, then check every whitespace-delimited token of the
    // snippet against the source tokens. The previous assertion
    // (word.length > 0) was vacuous: split(/\s+/) on a trimmed, non-empty
    // string can never yield empty tokens, so a snippet with truncated
    // words would still have passed.
    const sourceWords = content.split(/\s+/);
    const words = snippet
      .replace(/\.\.\./g, "")
      .trim()
      .split(/\s+/)
      .filter((w) => w.length > 0);
    expect(words.length).toBeGreaterThan(0);
    for (const word of words) {
      // A cut-off fragment (e.g. "internationaliz") would not appear here.
      expect(sourceWords).toContain(word);
    }
  });
});
// Highlight markers are configurable; verify HTML-style markers wrap the
// matched term exactly.
describe("Custom Highlight Markers", () => {
  it("should use custom highlight markers", () => {
    const htmlGenerator = new SnippetGenerator({
      highlight: true,
      highlightStart: "<mark>",
      highlightEnd: "</mark>",
    });
    const result = htmlGenerator.generate("Find the keyword here.", "keyword");
    expect(result).toContain("<mark>keyword</mark>");
  });
});
// Edge cases: empty input, regex metacharacters, pathological word lengths,
// unicode, and repeated query terms.
describe("Edge Cases", () => {
  it("should handle empty content", () => {
    const generator = new SnippetGenerator();
    const snippet = generator.generate("", "test");
    expect(snippet).toBe("");
  });
  it("should handle content with special regex characters", () => {
    const generator = new SnippetGenerator({ highlight: true });
    // Content contains regex metacharacters (/, ., *) that must be escaped
    // internally before term matching.
    const content = "The regex pattern is /test.*pattern/g in JavaScript.";
    const snippet = generator.generate(content, "test pattern");
    // Should not throw and should contain the terms
    expect(snippet).toContain("test");
    expect(snippet).toContain("pattern");
  });
  it("should handle very long single word", () => {
    const generator = new SnippetGenerator({
      targetLength: 50,
    });
    // The 100-char word alone exceeds targetLength; must not crash.
    const content = "prefix " + "a".repeat(100) + " suffix";
    const snippet = generator.generate(content, "prefix");
    expect(snippet).toBeTruthy();
  });
  it("should handle unicode characters", () => {
    const generator = new SnippetGenerator({ highlight: true });
    const content = "日本語のテキストとEnglish混合コンテンツ。";
    const snippet = generator.generate(content, "English");
    expect(snippet).toContain("**English**");
  });
  it("should handle multiple occurrences of same term", () => {
    const generator = new SnippetGenerator({ highlight: true });
    const content = "Error in first error and second error found.";
    const snippet = generator.generate(content, "error");
    // Should highlight all occurrences
    // NOTE(review): only >= 1 is asserted; whether all three occurrences
    // are highlighted depends on the snippet window — confirm if stricter.
    const matches = snippet.match(/\*\*error\*\*/gi) || [];
    expect(matches.length).toBeGreaterThanOrEqual(1);
  });
});
// Module-level convenience entry points.
describe("Factory Functions", () => {
  it("should create generator with config", () => {
    const generator = getSnippetGenerator({ targetLength: 150 });
    expect(generator).toBeInstanceOf(SnippetGenerator);
  });
  it("should generate snippet with default config", () => {
    const content = "Test content with keyword here.";
    const snippet = generateSnippet(content, "keyword");
    expect(snippet).toContain("keyword");
  });
});
// Sanity check on large inputs: the match in the middle is found and the
// output stays near targetLength.
describe("Long Content Handling", () => {
  it("should handle very long content efficiently", () => {
    const generator = new SnippetGenerator({ targetLength: 100 });
    // Create long content with keyword in the middle
    const longContent =
      "Intro text. ".repeat(50) +
      "The important error message appears here. " +
      "More text. ".repeat(50);
    const snippet = generator.generate(longContent, "error message");
    expect(snippet).toContain("error");
    expect(snippet.length).toBeLessThanOrEqual(300); // Reasonable length
  });
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/TextChunker.test.ts | TypeScript | /**
* Unit tests for TextChunker and chunking strategies
*/
import { jest } from "@jest/globals";
import {
TextChunker,
resetTextChunker,
getTextChunker,
estimateTokens,
DEFAULT_CHUNKING_CONFIG,
} from "../../chunking/TextChunker.js";
describe("TextChunker", () => {
beforeEach(() => {
  // Fresh global chunker per test; silence chunker console output.
  resetTextChunker();
  jest.spyOn(console, "log").mockImplementation(() => {});
});
afterEach(() => {
  jest.restoreAllMocks();
});
// Configuration tests: defaults are taken from DEFAULT_CHUNKING_CONFIG and
// constructor overrides are reflected verbatim in getConfig().
describe("Constructor and Configuration", () => {
  it("should create with default config", () => {
    const cfg = new TextChunker().getConfig();
    expect(cfg.chunkSize).toBe(DEFAULT_CHUNKING_CONFIG.chunkSize);
    expect(cfg.overlap).toBe(DEFAULT_CHUNKING_CONFIG.overlap);
    expect(cfg.strategy).toBe(DEFAULT_CHUNKING_CONFIG.strategy);
  });
  it("should accept custom config", () => {
    const overrides = {
      chunkSize: 300,
      overlap: 0.2,
      strategy: "sliding_window",
    } as const;
    const cfg = new TextChunker(overrides).getConfig();
    expect(cfg.chunkSize).toBe(300);
    expect(cfg.overlap).toBe(0.2);
    expect(cfg.strategy).toBe("sliding_window");
  });
});
// Token estimation heuristics for prose and code. Only sanity bounds are
// asserted, since the estimate is approximate (~4 chars/token for prose).
describe("Token Estimation", () => {
  it("should estimate tokens for plain text", () => {
    const text = "This is a simple test sentence with eight words.";
    const tokens = estimateTokens(text, DEFAULT_CHUNKING_CONFIG);
    // ~4 chars per token for prose
    expect(tokens).toBeGreaterThan(0);
    expect(tokens).toBeLessThan(text.length); // Should be less than char count
  });
  it("should estimate tokens for code", () => {
    const code = `function hello() {
  console.log("Hello, world!");
  return true;
}`;
    const tokens = estimateTokens(code, DEFAULT_CHUNKING_CONFIG);
    expect(tokens).toBeGreaterThan(0);
  });
});
// needsChunking: driven by estimated token count vs. chunkSize, and short-
// circuited to false when chunking is disabled.
describe("needsChunking", () => {
  it("should return false for short text", () => {
    const chunker = new TextChunker({ enabled: true, chunkSize: 450 });
    const shortText = "This is a short message.";
    expect(chunker.needsChunking(shortText)).toBe(false);
  });
  it("should return true for long text", () => {
    const chunker = new TextChunker({ enabled: true, chunkSize: 50 });
    const longText = "This is a much longer message that contains many words and should exceed the token limit for chunking to be necessary. ".repeat(10);
    expect(chunker.needsChunking(longText)).toBe(true);
  });
  it("should return false when chunking is disabled", () => {
    const chunker = new TextChunker({ enabled: false });
    const longText = "Very long text ".repeat(1000);
    expect(chunker.needsChunking(longText)).toBe(false);
  });
});
// Sentence-strategy chunking: pass-through for short text, splitting of
// long text, code-block preservation, and chunk index/offset bookkeeping.
describe("Sentence Chunking", () => {
  it("should return single chunk for short text", () => {
    const chunker = new TextChunker({ enabled: true, strategy: "sentence" });
    const text = "This is a short sentence.";
    const result = chunker.chunk(text);
    expect(result.wasChunked).toBe(false);
    expect(result.chunks).toHaveLength(1);
    expect(result.chunks[0].content).toBe(text);
  });
  it("should split long text into multiple chunks", () => {
    const chunker = new TextChunker({
      enabled: true,
      strategy: "sentence",
      chunkSize: 20, // Very small for testing (20 tokens ~ 80 chars)
      minChunkSize: 5,
    });
    // Make sure the text is long enough to require chunking
    const text = "First sentence with some words here. Second sentence follows with more content. Third sentence appears with additional text. Fourth sentence concludes this paragraph.";
    const result = chunker.chunk(text);
    expect(result.wasChunked).toBe(true);
    expect(result.chunks.length).toBeGreaterThan(1);
  });
  it("should preserve code blocks", () => {
    const chunker = new TextChunker({
      enabled: true,
      strategy: "sentence",
      chunkSize: 100,
    });
    // Fenced code block embedded in prose; the fence must stay intact in a
    // single chunk rather than being split mid-block.
    const text = `Some text before.
\`\`\`javascript
function test() {
  return 42;
}
\`\`\`
Some text after.`;
    const result = chunker.chunk(text);
    // Code block should not be split
    const codeChunk = result.chunks.find((c) =>
      c.content.includes("```javascript")
    );
    expect(codeChunk).toBeDefined();
    if (codeChunk) {
      expect(codeChunk.content).toContain("return 42");
    }
  });
  it("should track chunk indices correctly", () => {
    const chunker = new TextChunker({
      enabled: true,
      strategy: "sentence",
      chunkSize: 30,
    });
    const text = "Sentence one. Sentence two. Sentence three.";
    const result = chunker.chunk(text);
    // index must be sequential and totalChunks consistent across chunks.
    for (let i = 0; i < result.chunks.length; i++) {
      expect(result.chunks[i].index).toBe(i);
      expect(result.chunks[i].totalChunks).toBe(result.chunks.length);
    }
  });
  it("should track character offsets", () => {
    const chunker = new TextChunker({
      enabled: true,
      strategy: "sentence",
      chunkSize: 100,
    });
    const text = "First part of text. Second part of the content.";
    const result = chunker.chunk(text);
    // Offsets must lie within the source text and form non-empty ranges.
    for (const chunk of result.chunks) {
      expect(chunk.startOffset).toBeGreaterThanOrEqual(0);
      expect(chunk.endOffset).toBeLessThanOrEqual(text.length);
      expect(chunk.endOffset).toBeGreaterThan(chunk.startOffset);
    }
  });
});
// Sliding-window strategy: only the strategy label and the fact that
// chunking occurred are asserted here.
describe("Sliding Window Chunking", () => {
  it("should create overlapping chunks", () => {
    const chunker = new TextChunker({
      enabled: true,
      strategy: "sliding_window",
      chunkSize: 50,
      overlap: 0.2,
    });
    const text = "Word ".repeat(100);
    const result = chunker.chunk(text);
    expect(result.wasChunked).toBe(true);
    expect(result.strategy).toBe("sliding_window");
  });
});
// Batch APIs: per-text results and the flattened variant that tags each
// chunk with its source ID.
describe("Batch Chunking", () => {
  it("should chunk multiple texts", () => {
    const chunker = new TextChunker({ enabled: true });
    const texts = [
      "First short text.",
      "Second short text.",
      "Third short text.",
    ];
    const results = chunker.chunkBatch(texts);
    expect(results).toHaveLength(3);
    results.forEach((result) => {
      expect(result.chunks.length).toBeGreaterThanOrEqual(1);
    });
  });
  it("should flatten batch with source tracking", () => {
    const chunker = new TextChunker({ enabled: true, chunkSize: 50 });
    const texts = [
      { id: "msg1", content: "Short message one." },
      { id: "msg2", content: "Short message two." },
    ];
    const flattened = chunker.chunkBatchFlat(texts);
    expect(flattened.length).toBeGreaterThanOrEqual(2);
    // Every flattened chunk must carry the ID of the text it came from.
    for (const chunk of flattened) {
      expect(["msg1", "msg2"]).toContain(chunk.sourceId);
    }
  });
});
// Disabled chunking must return the input untouched as one chunk.
describe("Disabled Chunking", () => {
  it("should return single chunk when disabled", () => {
    const chunker = new TextChunker({ enabled: false });
    const longText = "Very long text ".repeat(1000);
    const result = chunker.chunk(longText);
    expect(result.wasChunked).toBe(false);
    expect(result.chunks).toHaveLength(1);
    expect(result.chunks[0].content).toBe(longText);
  });
});
// Singleton semantics of getTextChunker: same instance for repeated
// no-config calls, a new instance when config is supplied or after reset.
describe("Global Instance", () => {
  it("should return same instance without config", () => {
    const first = getTextChunker();
    expect(getTextChunker()).toBe(first);
  });
  it("should create new instance with config", () => {
    const plain = getTextChunker();
    const configured = getTextChunker({ chunkSize: 200 });
    expect(configured).not.toBe(plain);
  });
  it("should reset global instance", () => {
    const before = getTextChunker();
    resetTextChunker();
    const after = getTextChunker();
    expect(after).not.toBe(before);
  });
});
// Degenerate inputs: empty/whitespace text, unicode, and a single word far
// larger than chunkSize — none may crash the chunker.
describe("Edge Cases", () => {
  it("should handle empty text", () => {
    const chunker = new TextChunker({ enabled: true });
    const result = chunker.chunk("");
    expect(result.chunks).toHaveLength(1);
    expect(result.chunks[0].content).toBe("");
  });
  it("should handle text with only whitespace", () => {
    const chunker = new TextChunker({ enabled: true });
    const result = chunker.chunk("   \n\t  ");
    expect(result.chunks).toHaveLength(1);
  });
  it("should handle unicode characters", () => {
    const chunker = new TextChunker({ enabled: true });
    // CJK text plus an emoji (a surrogate pair) must survive chunking.
    const text = "你好世界!这是一个测试。🎉 Emojis work too!";
    const result = chunker.chunk(text);
    expect(result.chunks[0].content).toContain("你好");
    expect(result.chunks[0].content).toContain("🎉");
  });
  it("should handle very long single word", () => {
    const chunker = new TextChunker({ enabled: true, chunkSize: 10 });
    const longWord = "a".repeat(1000);
    const result = chunker.chunk(longWord);
    // Should handle without crashing
    expect(result.chunks.length).toBeGreaterThanOrEqual(1);
  });
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/ToolHandlers.test.ts | TypeScript | /**
* Unit tests for ToolHandlers
*/
import fs from 'fs';
import os from 'os';
import path from 'path';
import { ToolHandlers } from '../../tools/ToolHandlers.js';
import { ConversationMemory } from '../../ConversationMemory.js';
import { getSQLiteManager, resetSQLiteManager } from '../../storage/SQLiteManager.js';
import { ConversationStorage } from '../../storage/ConversationStorage.js';
describe('ToolHandlers', () => {
let handlers: ToolHandlers;
let memory: ConversationMemory;
let db: ReturnType<typeof getSQLiteManager>;
beforeEach(() => {
  // Fresh memory, in-memory SQLite, and handler set per test for isolation.
  memory = new ConversationMemory();
  db = getSQLiteManager({ dbPath: ':memory:' });
  handlers = new ToolHandlers(memory, db);
});
afterEach(() => {
  // Tear down the singleton so the next test gets a clean database.
  resetSQLiteManager();
});
// Skip semantic search tests - embeddings are optional and may not work in test environment
// Skip semantic search tests - embeddings are optional and may not work in test environment
describe.skip('searchConversations', () => {
  it('should return properly typed search results', async () => {
    const result = await handlers.searchConversations({
      query: 'test query',
      limit: 10,
    });
    // Shape-only assertions: the test DB is empty, so only structure is checked.
    expect(result).toHaveProperty('query');
    expect(result).toHaveProperty('results');
    expect(result).toHaveProperty('total_found');
    expect(Array.isArray(result.results)).toBe(true);
  });
  it('should respect limit parameter', async () => {
    const result = await handlers.searchConversations({
      query: 'test',
      limit: 5,
    });
    expect(result.results.length).toBeLessThanOrEqual(5);
  });
  it('should handle date_range filter', async () => {
    const now = Date.now();
    const result = await handlers.searchConversations({
      query: 'test',
      limit: 10,
      date_range: [now - 86400000, now], // Last 24 hours
    });
    expect(result).toBeDefined();
  });
});
// Also skipped for the same embeddings dependency as above.
describe.skip('getDecisions', () => {
  it('should return properly typed decision results', async () => {
    const result = await handlers.getDecisions({
      query: 'authentication',
      limit: 10,
    });
    expect(result).toHaveProperty('query');
    expect(result).toHaveProperty('decisions');
    expect(result).toHaveProperty('total_found');
    expect(Array.isArray(result.decisions)).toBe(true);
  });
  it('should filter by file_path when provided', async () => {
    const result = await handlers.getDecisions({
      query: 'authentication',
      file_path: 'src/auth/token.ts',
      limit: 10,
    });
    expect(result.file_path).toBe('src/auth/token.ts');
  });
});
// checkBeforeModify response shape: top-level context fields plus the
// nested recent_changes structure (edits + commits arrays).
describe('checkBeforeModify', () => {
  it('should return file context information', async () => {
    const ctx = await handlers.checkBeforeModify({
      file_path: 'src/auth/token.ts',
    });
    const expectedKeys = [
      'file_path',
      'warning',
      'recent_changes',
      'related_decisions',
      'mistakes_to_avoid',
    ];
    for (const key of expectedKeys) {
      expect(ctx).toHaveProperty(key);
    }
  });
  it('should have properly structured recent_changes', async () => {
    const ctx = await handlers.checkBeforeModify({
      file_path: 'test.ts',
    });
    expect(ctx.recent_changes).toHaveProperty('edits');
    expect(ctx.recent_changes).toHaveProperty('commits');
    expect(Array.isArray(ctx.recent_changes.edits)).toBe(true);
    expect(Array.isArray(ctx.recent_changes.commits)).toBe(true);
  });
});
// getFileEvolution: timeline shape plus the include_decisions /
// include_commits event filters.
describe('getFileEvolution', () => {
  it('should return timeline with events', async () => {
    const result = await handlers.getFileEvolution({
      file_path: 'src/index.ts',
      include_decisions: true,
      include_commits: true,
    });
    expect(result).toHaveProperty('file_path');
    expect(result).toHaveProperty('total_edits');
    expect(result).toHaveProperty('timeline');
    expect(result).toHaveProperty('has_more');
    expect(Array.isArray(result.timeline)).toBe(true);
    expect(typeof result.has_more).toBe('boolean');
  });
  it('should exclude decisions when requested', async () => {
    const result = await handlers.getFileEvolution({
      file_path: 'src/index.ts',
      include_decisions: false,
    });
    // No event of type 'decision' may survive the filter.
    const decisionEvents = result.timeline.filter(e => e.type === 'decision');
    expect(decisionEvents.length).toBe(0);
  });
  it('should exclude commits when requested', async () => {
    const result = await handlers.getFileEvolution({
      file_path: 'src/index.ts',
      include_commits: false,
    });
    const commitEvents = result.timeline.filter(e => e.type === 'commit');
    expect(commitEvents.length).toBe(0);
  });
});
// listRecentSessions must surface the externally visible session ID (the
// one stored on the conversation), not an internal row ID.
describe('listRecentSessions', () => {
  it('returns external session ids for session_id', async () => {
    const storage = new ConversationStorage(db);
    // Unique temp dir per test run keeps project_path isolated.
    const projectPath = fs.mkdtempSync(path.join(os.tmpdir(), 'cccmemory-test-'));
    const now = Date.now();
    await storage.storeConversations([
      {
        id: 'session-external-123',
        project_path: projectPath,
        source_type: 'claude-code',
        first_message_at: now,
        last_message_at: now,
        message_count: 1,
        metadata: {},
        created_at: now,
        updated_at: now,
      },
    ]);
    const result = await handlers.listRecentSessions({ project_path: projectPath });
    expect(result.sessions.length).toBeGreaterThan(0);
    expect(result.sessions[0].session_id).toBe('session-external-123');
  });
});
// getLatestSessionSummary: seeds one conversation with three messages and
// checks the summary is built from the most recent user message.
describe('getLatestSessionSummary', () => {
  it('summarizes the latest session with recent user message', async () => {
    const storage = new ConversationStorage(db);
    const projectPath = fs.mkdtempSync(path.join(os.tmpdir(), 'cccmemory-test-'));
    const now = Date.now();
    // The returned map links external conversation IDs to stored rows and
    // is required by storeMessages below.
    const conversationIdMap = await storage.storeConversations([
      {
        id: 'session-latest',
        project_path: projectPath,
        source_type: 'claude-code',
        first_message_at: now - 1000,
        last_message_at: now,
        message_count: 3,
        metadata: {},
        created_at: now - 1000,
        updated_at: now,
      },
    ]);
    await storage.storeMessages(
      [
        {
          id: 'msg-1',
          conversation_id: 'session-latest',
          message_type: 'user',
          role: 'user',
          content: 'initial question',
          timestamp: now - 900,
          is_sidechain: false,
          metadata: {},
        },
        {
          id: 'msg-2',
          conversation_id: 'session-latest',
          message_type: 'assistant',
          role: 'assistant',
          content: 'assistant reply',
          timestamp: now - 800,
          is_sidechain: false,
          metadata: {},
        },
        {
          id: 'msg-3',
          conversation_id: 'session-latest',
          message_type: 'user',
          role: 'user',
          content: 'latest issue to solve',
          timestamp: now - 100,
          is_sidechain: false,
          metadata: {},
        },
      ],
      { skipFtsRebuild: true, conversationIdMap }
    );
    const result = await handlers.getLatestSessionSummary({
      project_path: projectPath,
      source_type: 'claude-code',
      limit_messages: 5,
      include_tools: false,
      include_errors: false,
    });
    expect(result.success).toBe(true);
    expect(result.found).toBe(true);
    expect(result.session?.session_id).toBe('session-latest');
    // msg-3 is the newest user message, so it should drive the summary.
    expect(result.summary?.problem_statement).toContain('latest issue to solve');
  });
});
// searchMistakes: response shape, mistake_type filter echo, and query
// sanitization against SQL injection.
describe('searchMistakes', () => {
  it('should return properly typed mistake results', async () => {
    const result = await handlers.searchMistakes({
      query: 'error',
      limit: 10,
    });
    expect(result).toHaveProperty('query');
    expect(result).toHaveProperty('mistakes');
    expect(result).toHaveProperty('total_found');
    expect(Array.isArray(result.mistakes)).toBe(true);
  });
  it('should filter by mistake_type when provided', async () => {
    const result = await handlers.searchMistakes({
      query: 'error',
      mistake_type: 'logic_error',
      limit: 10,
    });
    expect(result.mistake_type).toBe('logic_error');
  });
  it('should sanitize query to prevent SQL injection', async () => {
    // Should not throw or cause issues
    await expect(handlers.searchMistakes({
      query: "test'; DROP TABLE mistakes; --",
      limit: 10,
    })).resolves.toBeDefined();
  });
});
// getRequirements: response shape, type filter echo, and component-name
// sanitization (LIKE-pattern injection).
describe('getRequirements', () => {
  it('should return requirements for component', async () => {
    const result = await handlers.getRequirements({
      component: 'authentication',
    });
    expect(result).toHaveProperty('component');
    expect(result).toHaveProperty('requirements');
    expect(result).toHaveProperty('total_found');
    expect(Array.isArray(result.requirements)).toBe(true);
  });
  it('should filter by type when provided', async () => {
    const result = await handlers.getRequirements({
      component: 'auth',
      type: 'security',
    });
    expect(result.type).toBe('security');
  });
  it('should sanitize component name to prevent SQL injection', async () => {
    await expect(handlers.getRequirements({
      component: "test%' OR '1'='1",
    })).resolves.toBeDefined();
  });
});
// getToolHistory: pagination metadata, name/path/date/error filters,
// content truncation, and metadata-only mode. The test DB is empty, so most
// per-row assertions run over empty arrays (structure is still validated).
describe('getToolHistory', () => {
  it('should return tool usage history with pagination metadata', async () => {
    const result = await handlers.getToolHistory({
      limit: 20,
    });
    expect(result).toHaveProperty('tool_uses');
    expect(result).toHaveProperty('total_found');
    expect(result).toHaveProperty('total_in_database');
    expect(result).toHaveProperty('has_more');
    expect(result).toHaveProperty('offset');
    expect(Array.isArray(result.tool_uses)).toBe(true);
    expect(result.offset).toBe(0);
  });
  it('should filter by tool_name when provided', async () => {
    const result = await handlers.getToolHistory({
      tool_name: 'Bash',
      limit: 20,
    });
    expect(result.tool_name).toBe('Bash');
  });
  it('should filter by file_path when provided', async () => {
    const result = await handlers.getToolHistory({
      file_path: 'src/index.ts',
      limit: 20,
    });
    expect(result.file_path).toBe('src/index.ts');
  });
  it('should support pagination with offset', async () => {
    const page1 = await handlers.getToolHistory({
      limit: 5,
      offset: 0,
    });
    const page2 = await handlers.getToolHistory({
      limit: 5,
      offset: 5,
    });
    expect(page1.offset).toBe(0);
    expect(page2.offset).toBe(5);
    // Both pages should have same total_in_database
    expect(page1.total_in_database).toBe(page2.total_in_database);
  });
  it('should truncate content when max_content_length is set', async () => {
    const result = await handlers.getToolHistory({
      limit: 20,
      max_content_length: 50,
      include_content: true,
    });
    // Check if any results have content
    const withContent = result.tool_uses.filter(t => t.result.content);
    if (withContent.length > 0) {
      withContent.forEach(tool => {
        // Content should be <= max_content_length + truncation indicator length
        if (tool.result.content) {
          expect(tool.result.content.length).toBeLessThanOrEqual(100); // 50 + "... (truncated)"
        }
        // If content was truncated, should have flag
        if (tool.result.content_truncated) {
          expect(tool.result.content).toContain('... (truncated)');
        }
      });
    }
  });
  it('should return metadata only when include_content is false', async () => {
    const result = await handlers.getToolHistory({
      limit: 20,
      include_content: false,
    });
    // Should not have content, stdout, stderr fields
    result.tool_uses.forEach(tool => {
      expect(tool.result.content).toBeUndefined();
      expect(tool.result.stdout).toBeUndefined();
      expect(tool.result.stderr).toBeUndefined();
      // Should still have is_error
      expect(tool.result).toHaveProperty('is_error');
    });
  });
  it('should filter by date_range when provided', async () => {
    const now = Date.now();
    const oneDayAgo = now - 86400000;
    const result = await handlers.getToolHistory({
      date_range: [oneDayAgo, now],
      limit: 20,
    });
    expect(result).toHaveProperty('tool_uses');
    // All results should be within range
    result.tool_uses.forEach(tool => {
      const timestamp = new Date(tool.timestamp).getTime();
      expect(timestamp).toBeGreaterThanOrEqual(oneDayAgo);
      expect(timestamp).toBeLessThanOrEqual(now);
    });
  });
  it('should filter by errors_only when provided', async () => {
    const result = await handlers.getToolHistory({
      errors_only: true,
      limit: 20,
    });
    // All results should be errors
    result.tool_uses.forEach(tool => {
      expect(tool.result.is_error).toBe(true);
    });
  });
  it('should calculate has_more correctly', async () => {
    const result = await handlers.getToolHistory({
      limit: 5,
      offset: 0,
    });
    // has_more should be true if total_in_database > offset + total_found
    const expectedHasMore = result.total_in_database > (result.offset + result.total_found);
    expect(result.has_more).toBe(expectedHasMore);
  });
  it('should handle empty results gracefully', async () => {
    const result = await handlers.getToolHistory({
      tool_name: 'NonExistentTool',
      limit: 20,
    });
    expect(result.tool_uses).toEqual([]);
    expect(result.total_found).toBe(0);
    expect(result.total_in_database).toBe(0);
    expect(result.has_more).toBe(false);
  });
});
// Skipped: relies on semantic search/embeddings, unavailable in tests.
describe.skip('findSimilarSessions', () => {
  it('should return similar sessions grouped by conversation', async () => {
    const result = await handlers.findSimilarSessions({
      query: 'authentication',
      limit: 5,
    });
    expect(result).toHaveProperty('query');
    expect(result).toHaveProperty('sessions');
    expect(result).toHaveProperty('total_found');
    expect(Array.isArray(result.sessions)).toBe(true);
    expect(result.sessions.length).toBeLessThanOrEqual(5);
  });
  it('should include relevant messages in each session', async () => {
    const result = await handlers.findSimilarSessions({
      query: 'test',
      limit: 3,
    });
    // Only assert per-session structure when results exist.
    if (result.sessions.length > 0) {
      expect(result.sessions[0]).toHaveProperty('relevant_messages');
      expect(Array.isArray(result.sessions[0].relevant_messages)).toBe(true);
    }
  });
});
describe('Global Index Methods', () => {
// indexAllProjects: response shape plus graceful handling of non-existent
// source paths for both Codex and Claude Code projects.
describe('indexAllProjects', () => {
  it('should return properly typed response', async () => {
    const result = await handlers.indexAllProjects({
      include_codex: false,
      include_claude_code: false,
    });
    expect(result).toHaveProperty('success');
    expect(result).toHaveProperty('global_index_path');
    expect(result).toHaveProperty('projects_indexed');
    expect(result).toHaveProperty('claude_code_projects');
    expect(result).toHaveProperty('codex_projects');
    expect(result).toHaveProperty('total_messages');
    expect(result).toHaveProperty('total_conversations');
    expect(result).toHaveProperty('projects');
    expect(result).toHaveProperty('errors');
    expect(result).toHaveProperty('message');
    expect(Array.isArray(result.projects)).toBe(true);
    expect(Array.isArray(result.errors)).toBe(true);
  });
  it('should handle non-existent Codex path gracefully', async () => {
    const result = await handlers.indexAllProjects({
      include_codex: true,
      include_claude_code: false,
      codex_path: '/nonexistent/path',
    });
    expect(result.success).toBe(true);
    // Should be 0 since path doesn't exist, but may find projects if default path is used
    expect(typeof result.codex_projects).toBe('number');
    expect(result.codex_projects).toBeGreaterThanOrEqual(0);
  });
  it('should handle non-existent Claude projects path gracefully', async () => {
    const result = await handlers.indexAllProjects({
      include_codex: false,
      include_claude_code: true,
      claude_projects_path: '/nonexistent/path',
    });
    expect(result.success).toBe(true);
    // Should be 0 since path doesn't exist, but may find projects if default path is used
    expect(typeof result.claude_code_projects).toBe('number');
    expect(result.claude_code_projects).toBeGreaterThanOrEqual(0);
  });
});
describe('searchAllConversations', () => {
beforeEach(async () => {
// Initialize global index
await handlers.indexAllProjects({
include_codex: false,
include_claude_code: false,
});
});
it('should return properly typed response', async () => {
const result = await handlers.searchAllConversations({
query: 'test query',
limit: 10,
});
expect(result).toHaveProperty('query');
expect(result).toHaveProperty('results');
expect(result).toHaveProperty('total_found');
expect(result).toHaveProperty('projects_searched');
expect(result).toHaveProperty('search_stats');
expect(result).toHaveProperty('message');
expect(Array.isArray(result.results)).toBe(true);
expect(result.search_stats).toHaveProperty('claude_code_results');
expect(result.search_stats).toHaveProperty('codex_results');
});
it('should respect limit parameter', async () => {
const result = await handlers.searchAllConversations({
query: 'test',
limit: 5,
});
expect(result.results.length).toBeLessThanOrEqual(5);
});
it('should filter by source_type', async () => {
const result = await handlers.searchAllConversations({
query: 'test',
limit: 10,
source_type: 'claude-code',
});
expect(result.results.every(r => r.source_type === 'claude-code' || r.source_type === undefined)).toBe(true);
});
it('should include project_path in results', async () => {
const result = await handlers.searchAllConversations({
query: 'test',
limit: 10,
});
result.results.forEach(r => {
expect(r).toHaveProperty('project_path');
expect(r).toHaveProperty('source_type');
});
});
});
describe('getAllDecisions', () => {
beforeEach(async () => {
// Initialize global index
await handlers.indexAllProjects({
include_codex: false,
include_claude_code: false,
});
});
it('should return properly typed response', async () => {
const result = await handlers.getAllDecisions({
query: 'authentication',
limit: 10,
});
expect(result).toHaveProperty('query');
expect(result).toHaveProperty('decisions');
expect(result).toHaveProperty('total_found');
expect(result).toHaveProperty('projects_searched');
expect(result).toHaveProperty('message');
expect(Array.isArray(result.decisions)).toBe(true);
});
it('should respect limit parameter', async () => {
const result = await handlers.getAllDecisions({
query: 'test',
limit: 5,
});
expect(result.decisions.length).toBeLessThanOrEqual(5);
});
it('should filter by source_type', async () => {
const result = await handlers.getAllDecisions({
query: 'test',
limit: 10,
source_type: 'codex',
});
expect(result.decisions.every(d => d.source_type === 'codex' || d.source_type === undefined)).toBe(true);
});
it('should include project_path in decisions', async () => {
const result = await handlers.getAllDecisions({
query: 'test',
limit: 10,
});
result.decisions.forEach(d => {
expect(d).toHaveProperty('project_path');
expect(d).toHaveProperty('source_type');
});
});
});
describe('searchAllMistakes', () => {
beforeEach(async () => {
// Initialize global index
await handlers.indexAllProjects({
include_codex: false,
include_claude_code: false,
});
});
it('should return properly typed response', async () => {
const result = await handlers.searchAllMistakes({
query: 'bug',
limit: 10,
});
expect(result).toHaveProperty('query');
expect(result).toHaveProperty('mistakes');
expect(result).toHaveProperty('total_found');
expect(result).toHaveProperty('projects_searched');
expect(result).toHaveProperty('message');
expect(Array.isArray(result.mistakes)).toBe(true);
});
it('should respect limit parameter', async () => {
const result = await handlers.searchAllMistakes({
query: 'error',
limit: 5,
});
expect(result.mistakes.length).toBeLessThanOrEqual(5);
});
it('should filter by mistake_type when provided', async () => {
const result = await handlers.searchAllMistakes({
query: 'test',
mistake_type: 'logic_error',
limit: 10,
});
expect(result.mistakes.every(m =>
m.mistake_type === 'logic_error' || m.mistake_type === undefined
)).toBe(true);
});
it('should filter by source_type', async () => {
const result = await handlers.searchAllMistakes({
query: 'test',
limit: 10,
source_type: 'claude-code',
});
expect(result.mistakes.every(m =>
m.source_type === 'claude-code' || m.source_type === undefined
)).toBe(true);
});
it('should include project_path in mistakes', async () => {
const result = await handlers.searchAllMistakes({
query: 'test',
limit: 10,
});
result.mistakes.forEach(m => {
expect(m).toHaveProperty('project_path');
expect(m).toHaveProperty('source_type');
});
});
});
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/TransformersEmbeddings.test.ts | TypeScript | /**
* Unit tests for TransformersEmbeddings
*/
import { jest } from '@jest/globals';
import { TransformersEmbeddings } from '../../embeddings/providers/TransformersEmbeddings';
// TransformersEmbeddings unit coverage: constructor defaults, pre-initialization
// guard rails (embed/embedBatch must reject), and model metadata reporting.
// No model is downloaded except in the Initialize group, which tolerates a
// missing @xenova/transformers package.
describe('TransformersEmbeddings', () => {
  describe('Constructor', () => {
    it('should create instance with default model', () => {
      const provider = new TransformersEmbeddings();
      const meta = provider.getModelInfo();
      expect(meta.provider).toBe('transformers');
      expect(meta.model).toBe('Xenova/all-MiniLM-L6-v2');
      expect(meta.dimensions).toBe(384);
      expect(meta.available).toBe(false); // Not initialized yet
    });

    it('should create instance with custom model', () => {
      const meta = new TransformersEmbeddings('custom-model', 512).getModelInfo();
      expect(meta.model).toBe('custom-model');
      expect(meta.dimensions).toBe(512);
    });

    it('should use default dimensions if not specified', () => {
      const meta = new TransformersEmbeddings('Xenova/all-MiniLM-L6-v2').getModelInfo();
      expect(meta.dimensions).toBe(384);
    });
  });

  describe('isAvailable', () => {
    it('should return false before initialization', () => {
      expect(new TransformersEmbeddings().isAvailable()).toBe(false);
    });
  });

  describe('embed', () => {
    it('should throw error when not initialized', async () => {
      const provider = new TransformersEmbeddings();
      await expect(provider.embed('test')).rejects.toThrow('not available');
    });
  });

  describe('embedBatch', () => {
    it('should throw error when not initialized', async () => {
      const provider = new TransformersEmbeddings();
      await expect(provider.embedBatch(['test1', 'test2'])).rejects.toThrow('not initialized');
    });
  });

  describe('getModelInfo', () => {
    it('should return correct model information', () => {
      const meta = new TransformersEmbeddings('test-model', 768).getModelInfo();
      expect(meta).toEqual({
        provider: 'transformers',
        model: 'test-model',
        dimensions: 768,
        available: false,
      });
    });
  });

  describe('Edge Cases', () => {
    it('should handle empty model name with defaults', () => {
      const meta = new TransformersEmbeddings().getModelInfo();
      expect(meta.model).toBeTruthy();
      expect(meta.dimensions).toBeGreaterThan(0);
    });

    it('should handle zero dimensions by using default', () => {
      // NOTE(review): despite the title, no explicit 0 is passed here; the
      // constructor is expected to fall back to ModelRegistry defaults —
      // confirm whether an explicit `0` argument was intended.
      const meta = new TransformersEmbeddings('model').getModelInfo();
      expect(meta.dimensions).toBeGreaterThan(0);
    });
  });

  describe('Initialize', () => {
    it('should not throw during initialization', async () => {
      const logSpy = jest.spyOn(console, 'log').mockImplementation(() => {});
      const provider = new TransformersEmbeddings();
      await expect(provider.initialize()).resolves.not.toThrow();
      // Availability depends on whether @xenova/transformers is installed.
      expect(typeof provider.isAvailable()).toBe('boolean');
      logSpy.mockRestore();
    });

    it('should handle initialization gracefully', async () => {
      const provider = new TransformersEmbeddings();
      await provider.initialize();
      // State must be coherent after an initialization attempt, pass or fail.
      const meta = provider.getModelInfo();
      expect(meta).toBeDefined();
      expect(meta.provider).toBe('transformers');
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/VectorStore.test.ts | TypeScript | /**
* Unit tests for VectorStore
*/
import { jest } from '@jest/globals';
import { VectorStore } from '../../embeddings/VectorStore';
import { getSQLiteManager, resetSQLiteManager } from '../../storage/SQLiteManager';
// VectorStore unit coverage. Each test runs against a fresh in-memory SQLite
// database with foreign keys off and the sqlite-vec extension forced off, so
// every test exercises the BLOB-backed storage/search path.
describe('VectorStore', () => {
  let vectorStore: VectorStore;
  beforeEach(() => {
    // Use in-memory database for tests
    const sqliteManager = getSQLiteManager({ dbPath: ':memory:' });
    // Disable foreign keys for testing (embeddings don't need actual messages)
    sqliteManager.getDatabase().pragma('foreign_keys = OFF');
    // Silence console logs during tests
    jest.spyOn(console, 'log').mockImplementation(() => {});
    jest.spyOn(console, 'error').mockImplementation(() => {});
    jest.spyOn(console, 'warn').mockImplementation(() => {});
    vectorStore = new VectorStore(sqliteManager);
    // Force vectorStore to use BLOB storage by disabling vec extension
    // This makes tests simpler since getEmbeddingCount() queries BLOB tables
    (vectorStore as unknown as { hasVecExtension: boolean }).hasVecExtension = false;
    const db = sqliteManager.getDatabase();
    // Seed placeholder rows in `messages` for every id the tests embed against,
    // so embedding rows have plausible parents even though FKs are off.
    const insertMessage = (id: number) => {
      db.prepare(
        `INSERT OR IGNORE INTO messages
          (id, conversation_id, external_id, message_type, timestamp, is_sidechain, metadata)
          VALUES (?, ?, ?, ?, ?, ?, ?)`
      ).run(id, 1, `msg-${id}`, 'test', Date.now(), 0, '{}');
    };
    const seededIds = [
      1, 2, 3, 10, 11, 12, 100, 200, 201, 202, 203, 204,
      ...Array.from({ length: 10 }, (_, i) => 1000 + i),
    ];
    for (const id of seededIds) {
      insertMessage(id);
    }
  });
  afterEach(() => {
    // Clean up
    resetSQLiteManager();
    jest.restoreAllMocks();
  });
  describe('Constructor', () => {
    it('should create VectorStore instance', () => {
      expect(vectorStore).toBeDefined();
      expect(typeof vectorStore.isVecEnabled()).toBe('boolean');
    });
    it('should detect vec extension availability', () => {
      const hasVec = vectorStore.isVecEnabled();
      expect(typeof hasVec).toBe('boolean');
    });
  });
  describe('isVecEnabled', () => {
    it('should return boolean value', () => {
      const enabled = vectorStore.isVecEnabled();
      expect([true, false]).toContain(enabled);
    });
  });
  // Message embedding writes: insert, multi-insert, upsert-by-id semantics.
  describe('storeMessageEmbedding', () => {
    it('should store message embedding', async () => {
      const embedding = new Float32Array([0.1, 0.2, 0.3, 0.4]);
      await vectorStore.storeMessageEmbedding(1, 'test content', embedding);
      // Verify it was stored
      const count = vectorStore.getEmbeddingCount();
      expect(count).toBeGreaterThan(0);
    });
    it('should handle multiple embeddings', async () => {
      const embedding1 = new Float32Array([0.1, 0.2, 0.3]);
      const embedding2 = new Float32Array([0.4, 0.5, 0.6]);
      await vectorStore.storeMessageEmbedding(1, 'content 1', embedding1);
      await vectorStore.storeMessageEmbedding(2, 'content 2', embedding2);
      const count = vectorStore.getEmbeddingCount();
      expect(count).toBe(2);
    });
    it('should replace existing embedding with same ID', async () => {
      const embedding1 = new Float32Array([0.1, 0.2]);
      const embedding2 = new Float32Array([0.3, 0.4]);
      await vectorStore.storeMessageEmbedding(1, 'content 1', embedding1);
      await vectorStore.storeMessageEmbedding(1, 'content 2', embedding2);
      const count = vectorStore.getEmbeddingCount();
      expect(count).toBe(1);
    });
    it('should handle empty content', async () => {
      const embedding = new Float32Array([0.1, 0.2]);
      await vectorStore.storeMessageEmbedding(1, '', embedding);
      const count = vectorStore.getEmbeddingCount();
      expect(count).toBe(1);
    });
    it('should handle large embeddings', async () => {
      const embedding = new Float32Array(1536); // OpenAI embedding size
      for (let i = 0; i < 1536; i++) {
        embedding[i] = Math.random();
      }
      await vectorStore.storeMessageEmbedding(100, 'large embedding', embedding);
      const count = vectorStore.getEmbeddingCount();
      expect(count).toBe(1);
    });
  });
  // Decision embeddings live in a separate table, so getEmbeddingCount() does
  // not reflect them; these tests only assert the calls complete without error.
  describe('storeDecisionEmbedding', () => {
    it('should store decision embedding', async () => {
      const embedding = new Float32Array([0.1, 0.2, 0.3]);
      await vectorStore.storeDecisionEmbedding(10, embedding);
      // Decision embeddings are in a separate table, so this should not throw
      expect(true).toBe(true);
    });
    it('should handle multiple decision embeddings', async () => {
      const embedding1 = new Float32Array([0.1, 0.2]);
      const embedding2 = new Float32Array([0.3, 0.4]);
      await vectorStore.storeDecisionEmbedding(10, embedding1);
      await vectorStore.storeDecisionEmbedding(11, embedding2);
      // Should not throw
      expect(true).toBe(true);
    });
    it('should replace existing decision embedding', async () => {
      const embedding1 = new Float32Array([0.1, 0.2]);
      const embedding2 = new Float32Array([0.3, 0.4]);
      await vectorStore.storeDecisionEmbedding(10, embedding1);
      await vectorStore.storeDecisionEmbedding(10, embedding2);
      // Should not throw
      expect(true).toBe(true);
    });
  });
  // Similarity search over stored message embeddings.
  describe('searchMessages', () => {
    beforeEach(async () => {
      // Store some test embeddings
      await vectorStore.storeMessageEmbedding(1, 'hello world', new Float32Array([1.0, 0.0, 0.0]));
      await vectorStore.storeMessageEmbedding(2, 'goodbye world', new Float32Array([0.0, 1.0, 0.0]));
      await vectorStore.storeMessageEmbedding(3, 'test message', new Float32Array([0.0, 0.0, 1.0]));
    });
    it('should search for similar messages', async () => {
      const queryEmbedding = new Float32Array([1.0, 0.0, 0.0]);
      const results = await vectorStore.searchMessages(queryEmbedding, 3);
      expect(Array.isArray(results)).toBe(true);
      expect(results.length).toBeGreaterThan(0);
      expect(results.length).toBeLessThanOrEqual(3);
    });
    it('should return results with correct structure', async () => {
      const queryEmbedding = new Float32Array([1.0, 0.0, 0.0]);
      const results = await vectorStore.searchMessages(queryEmbedding, 1);
      expect(results.length).toBeGreaterThan(0);
      expect(results[0]).toHaveProperty('id');
      expect(results[0]).toHaveProperty('content');
      expect(results[0]).toHaveProperty('similarity');
      expect(typeof results[0].similarity).toBe('number');
    });
    it('should limit results', async () => {
      const queryEmbedding = new Float32Array([1.0, 0.0, 0.0]);
      const results = await vectorStore.searchMessages(queryEmbedding, 2);
      expect(results.length).toBeLessThanOrEqual(2);
    });
    it('should handle search with no stored embeddings', async () => {
      vectorStore.clearAllEmbeddings();
      const queryEmbedding = new Float32Array([1.0, 0.0, 0.0]);
      const results = await vectorStore.searchMessages(queryEmbedding, 10);
      expect(Array.isArray(results)).toBe(true);
      expect(results.length).toBe(0);
    });
    it('should return most similar results first', async () => {
      // Clear and add new test data
      vectorStore.clearAllEmbeddings();
      await vectorStore.storeMessageEmbedding(10, 'exact match', new Float32Array([1.0, 0.0]));
      await vectorStore.storeMessageEmbedding(11, 'close match', new Float32Array([0.9, 0.1]));
      await vectorStore.storeMessageEmbedding(12, 'far match', new Float32Array([0.0, 1.0]));
      const queryEmbedding = new Float32Array([1.0, 0.0]);
      const results = await vectorStore.searchMessages(queryEmbedding, 3);
      // First result should be most similar (highest similarity score)
      expect(results[0].similarity).toBeGreaterThanOrEqual(results[1].similarity);
      expect(results[1].similarity).toBeGreaterThanOrEqual(results[2].similarity);
    });
  });
  describe('getEmbeddingCount', () => {
    it('should return 0 for empty store', () => {
      vectorStore.clearAllEmbeddings();
      expect(vectorStore.getEmbeddingCount()).toBe(0);
    });
    it('should return correct count after storing embeddings', async () => {
      vectorStore.clearAllEmbeddings();
      await vectorStore.storeMessageEmbedding(1, 'test 1', new Float32Array([0.1]));
      expect(vectorStore.getEmbeddingCount()).toBe(1);
      await vectorStore.storeMessageEmbedding(2, 'test 2', new Float32Array([0.2]));
      expect(vectorStore.getEmbeddingCount()).toBe(2);
    });
    it('should not double count replaced embeddings', async () => {
      vectorStore.clearAllEmbeddings();
      await vectorStore.storeMessageEmbedding(1, 'test 1', new Float32Array([0.1]));
      await vectorStore.storeMessageEmbedding(1, 'test 1 updated', new Float32Array([0.2]));
      expect(vectorStore.getEmbeddingCount()).toBe(1);
    });
  });
  describe('clearAllEmbeddings', () => {
    it('should clear all message embeddings', async () => {
      await vectorStore.storeMessageEmbedding(1, 'test', new Float32Array([0.1]));
      await vectorStore.storeMessageEmbedding(2, 'test', new Float32Array([0.2]));
      expect(vectorStore.getEmbeddingCount()).toBe(2);
      vectorStore.clearAllEmbeddings();
      expect(vectorStore.getEmbeddingCount()).toBe(0);
    });
    it('should not throw on empty store', () => {
      vectorStore.clearAllEmbeddings();
      expect(() => vectorStore.clearAllEmbeddings()).not.toThrow();
    });
    it('should clear decision embeddings too', async () => {
      await vectorStore.storeDecisionEmbedding(10, new Float32Array([0.1]));
      vectorStore.clearAllEmbeddings();
      // Should not throw when trying to query
      expect(vectorStore.getEmbeddingCount()).toBe(0);
    });
  });
  // Boundary inputs: zero-length/zero/negative vectors, long and non-ASCII
  // content, and concurrent inserts.
  describe('Edge Cases', () => {
    it('should handle zero-length embeddings gracefully', async () => {
      const embedding = new Float32Array(0);
      await expect(
        vectorStore.storeMessageEmbedding(200, 'empty embedding', embedding)
      ).resolves.not.toThrow();
    });
    it('should handle very long content strings', async () => {
      const longContent = 'a'.repeat(100000);
      const embedding = new Float32Array([0.1, 0.2]);
      await vectorStore.storeMessageEmbedding(201, longContent, embedding);
      const count = vectorStore.getEmbeddingCount();
      expect(count).toBe(1);
    });
    it('should handle special characters in content', async () => {
      const specialContent = 'Test with 你好 émojis 🎉 and "quotes"';
      const embedding = new Float32Array([0.1, 0.2]);
      await vectorStore.storeMessageEmbedding(202, specialContent, embedding);
      const results = await vectorStore.searchMessages(embedding, 1);
      expect(results[0].content).toBe(specialContent);
    });
    it('should handle embeddings with all zeros', async () => {
      const zeroEmbedding = new Float32Array([0.0, 0.0, 0.0]);
      await vectorStore.storeMessageEmbedding(203, 'zero embedding', zeroEmbedding);
      const count = vectorStore.getEmbeddingCount();
      expect(count).toBe(1);
    });
    it('should handle embeddings with negative values', async () => {
      const negativeEmbedding = new Float32Array([-0.5, 0.3, -0.2]);
      await vectorStore.storeMessageEmbedding(204, 'negative values', negativeEmbedding);
      const count = vectorStore.getEmbeddingCount();
      expect(count).toBe(1);
    });
    it('should handle concurrent stores', async () => {
      const promises = [];
      for (let i = 0; i < 10; i++) {
        promises.push(
          vectorStore.storeMessageEmbedding(
            i + 1000,
            `content ${i}`,
            new Float32Array([i / 10, (10 - i) / 10])
          )
        );
      }
      await Promise.all(promises);
      const count = vectorStore.getEmbeddingCount();
      expect(count).toBe(10);
    });
  });
  // Pins the similarity metric's known values: 1.0 for identical vectors,
  // 0.0 for orthogonal ones.
  describe('Cosine Similarity', () => {
    it('should calculate similarity between identical vectors as 1.0', async () => {
      vectorStore.clearAllEmbeddings();
      const embedding = new Float32Array([1.0, 0.0, 0.0]);
      await vectorStore.storeMessageEmbedding(1, 'test', embedding);
      const results = await vectorStore.searchMessages(embedding, 1);
      expect(results[0].similarity).toBeCloseTo(1.0, 5);
    });
    it('should calculate similarity between orthogonal vectors as 0.0', async () => {
      vectorStore.clearAllEmbeddings();
      await vectorStore.storeMessageEmbedding(1, 'test', new Float32Array([1.0, 0.0]));
      const queryEmbedding = new Float32Array([0.0, 1.0]);
      const results = await vectorStore.searchMessages(queryEmbedding, 1);
      expect(results[0].similarity).toBeCloseTo(0.0, 5);
    });
    it('should handle normalized embeddings', async () => {
      vectorStore.clearAllEmbeddings();
      // Normalized vectors (length = 1)
      const norm1 = new Float32Array([0.6, 0.8]);
      const norm2 = new Float32Array([0.8, 0.6]);
      await vectorStore.storeMessageEmbedding(1, 'test1', norm1);
      await vectorStore.storeMessageEmbedding(2, 'test2', norm2);
      const results = await vectorStore.searchMessages(norm1, 2);
      expect(results[0].id).toBe(1);
      expect(results[0].similarity).toBeCloseTo(1.0, 5);
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/WorkingMemoryStore.test.ts | TypeScript | /**
* Unit tests for WorkingMemoryStore
*/
import Database from "better-sqlite3";
import { WorkingMemoryStore } from "../../memory/WorkingMemoryStore.js";
describe("WorkingMemoryStore", () => {
let db: Database.Database;
let store: WorkingMemoryStore;
const projectPath = "/test/project";
beforeEach(() => {
// Create in-memory database with required schema
db = new Database(":memory:");
// Create working_memory table
db.exec(`
CREATE TABLE IF NOT EXISTS working_memory (
id TEXT PRIMARY KEY,
key TEXT NOT NULL,
value TEXT NOT NULL,
context TEXT,
tags TEXT,
session_id TEXT,
project_path TEXT NOT NULL,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
expires_at INTEGER,
embedding BLOB,
UNIQUE(project_path, key)
);
CREATE INDEX IF NOT EXISTS idx_wm_project ON working_memory(project_path);
CREATE INDEX IF NOT EXISTS idx_wm_project_key ON working_memory(project_path, key);
CREATE VIRTUAL TABLE IF NOT EXISTS working_memory_fts USING fts5(
id UNINDEXED,
key,
value,
context
);
`);
store = new WorkingMemoryStore(db);
});
afterEach(() => {
db.close();
});
describe("remember", () => {
it("should store a new memory item", () => {
const result = store.remember({
key: "storage_decision",
value: "Using SQLite for simplicity",
projectPath,
});
expect(result.id).toBeDefined();
expect(result.key).toBe("storage_decision");
expect(result.value).toBe("Using SQLite for simplicity");
expect(result.projectPath).toBe(projectPath);
expect(result.tags).toEqual([]);
expect(result.createdAt).toBeDefined();
expect(result.updatedAt).toBeDefined();
});
it("should store memory with context and tags", () => {
const result = store.remember({
key: "api_key",
value: "Use environment variables",
context: "Security best practice for credential management",
tags: ["security", "config"],
projectPath,
});
expect(result.context).toBe("Security best practice for credential management");
expect(result.tags).toEqual(["security", "config"]);
});
it("should update existing key instead of duplicating", () => {
store.remember({
key: "storage",
value: "PostgreSQL",
projectPath,
});
const updated = store.remember({
key: "storage",
value: "SQLite",
projectPath,
});
expect(updated.value).toBe("SQLite");
// Should have only one item
const items = store.list(projectPath);
expect(items.length).toBe(1);
expect(items[0].value).toBe("SQLite");
});
it("should store memory with TTL", () => {
const result = store.remember({
key: "temp_setting",
value: "test",
ttl: 3600, // 1 hour
projectPath,
});
expect(result.expiresAt).toBeDefined();
// Should expire approximately 1 hour from now
const expectedExpiry = Date.now() + 3600 * 1000;
expect(result.expiresAt).toBeGreaterThan(expectedExpiry - 1000);
expect(result.expiresAt).toBeLessThan(expectedExpiry + 1000);
});
it("should store memory with session ID", () => {
const result = store.remember({
key: "session_data",
value: "user preferences",
sessionId: "session-123",
projectPath,
});
expect(result.sessionId).toBe("session-123");
});
});
describe("recall", () => {
it("should recall a stored item by key", () => {
store.remember({
key: "api_endpoint",
value: "https://api.example.com",
projectPath,
});
const result = store.recall("api_endpoint", projectPath);
expect(result).not.toBeNull();
expect(result?.key).toBe("api_endpoint");
expect(result?.value).toBe("https://api.example.com");
});
it("should return null for non-existent key", () => {
const result = store.recall("nonexistent", projectPath);
expect(result).toBeNull();
});
it("should not recall expired items", async () => {
store.remember({
key: "expired_item",
value: "will expire",
ttl: -1, // Already expired
projectPath,
});
const result = store.recall("expired_item", projectPath);
expect(result).toBeNull();
});
it("should scope by project path", () => {
store.remember({
key: "setting",
value: "project1_value",
projectPath: "/project1",
});
store.remember({
key: "setting",
value: "project2_value",
projectPath: "/project2",
});
const result1 = store.recall("setting", "/project1");
const result2 = store.recall("setting", "/project2");
expect(result1?.value).toBe("project1_value");
expect(result2?.value).toBe("project2_value");
});
});
describe("recallMany", () => {
beforeEach(() => {
store.remember({
key: "decision1",
value: "Use TypeScript",
tags: ["tech", "language"],
sessionId: "session-1",
projectPath,
});
store.remember({
key: "decision2",
value: "Use SQLite",
tags: ["tech", "database"],
sessionId: "session-1",
projectPath,
});
store.remember({
key: "decision3",
value: "Use Jest",
tags: ["tech", "testing"],
sessionId: "session-2",
projectPath,
});
});
it("should recall all items for a project", () => {
const results = store.recallMany({ projectPath });
expect(results.length).toBe(3);
});
it("should filter by session ID", () => {
const results = store.recallMany({
projectPath,
sessionId: "session-1",
});
expect(results.length).toBe(2);
});
it("should filter by tags", () => {
const results = store.recallMany({
projectPath,
tags: ["database"],
});
expect(results.length).toBe(1);
expect(results[0].value).toBe("Use SQLite");
});
it("should match any tag in the list", () => {
const results = store.recallMany({
projectPath,
tags: ["database", "testing"],
});
expect(results.length).toBe(2);
});
});
describe("recallRelevant", () => {
beforeEach(() => {
store.remember({
key: "auth_decision",
value: "Using JWT for authentication",
context: "Chosen for stateless API design",
projectPath,
});
store.remember({
key: "db_decision",
value: "PostgreSQL for production",
context: "Better scalability",
projectPath,
});
});
it("should find relevant items by semantic search", () => {
const results = store.recallRelevant({
query: "authentication",
projectPath,
});
expect(results.length).toBeGreaterThan(0);
expect(results[0].key).toBe("auth_decision");
});
it("should include similarity scores", () => {
const results = store.recallRelevant({
query: "JWT",
projectPath,
});
expect(results.length).toBeGreaterThan(0);
expect(results[0].similarity).toBeDefined();
expect(results[0].similarity).toBeGreaterThanOrEqual(0);
expect(results[0].similarity).toBeLessThanOrEqual(1);
});
it("should respect limit parameter", () => {
const results = store.recallRelevant({
query: "decision",
projectPath,
limit: 1,
});
expect(results.length).toBe(1);
});
});
describe("forget", () => {
it("should delete an item by key", () => {
store.remember({
key: "to_forget",
value: "temporary",
projectPath,
});
const deleted = store.forget("to_forget", projectPath);
expect(deleted).toBe(true);
const result = store.recall("to_forget", projectPath);
expect(result).toBeNull();
});
it("should return false for non-existent key", () => {
const deleted = store.forget("nonexistent", projectPath);
expect(deleted).toBe(false);
});
});
describe("forgetAll", () => {
it("should delete all items for a project", () => {
store.remember({ key: "item1", value: "v1", projectPath });
store.remember({ key: "item2", value: "v2", projectPath });
store.remember({ key: "other", value: "v3", projectPath: "/other/project" });
const deleted = store.forgetAll(projectPath);
expect(deleted).toBe(2);
expect(store.count(projectPath)).toBe(0);
expect(store.count("/other/project")).toBe(1);
});
});
describe("list", () => {
beforeEach(() => {
for (let i = 0; i < 5; i++) {
store.remember({
key: `item${i}`,
value: `value${i}`,
tags: i % 2 === 0 ? ["even"] : ["odd"],
projectPath,
});
}
});
it("should list all items for a project", () => {
const items = store.list(projectPath);
expect(items.length).toBe(5);
});
it("should respect limit parameter", () => {
const items = store.list(projectPath, { limit: 2 });
expect(items.length).toBe(2);
});
it("should respect offset parameter", () => {
const items = store.list(projectPath, { offset: 3 });
expect(items.length).toBe(2);
});
it("should filter by tags", () => {
const items = store.list(projectPath, { tags: ["even"] });
expect(items.length).toBe(3);
});
});
describe("count", () => {
it("should return correct count", () => {
expect(store.count(projectPath)).toBe(0);
store.remember({ key: "item1", value: "v1", projectPath });
expect(store.count(projectPath)).toBe(1);
store.remember({ key: "item2", value: "v2", projectPath });
expect(store.count(projectPath)).toBe(2);
});
it("should not count expired items", () => {
store.remember({ key: "active", value: "v1", projectPath });
store.remember({ key: "expired", value: "v2", ttl: -1, projectPath });
// Force cleanup by triggering recall
store.recall("trigger_cleanup", projectPath);
expect(store.count(projectPath)).toBe(1);
});
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/constants.test.ts | TypeScript | /**
* Unit tests for Constants
*
* Validates that all constants are properly defined and have expected values.
*/
import {
DB_CONFIG,
EMBEDDING_CONFIG,
SEARCH_CONFIG,
PATH_PATTERNS,
TIME,
LIMITS,
MIGRATION_CONFIG,
MCP_CONFIG,
ERROR_MESSAGES,
SUCCESS_MESSAGES,
} from '../../utils/constants';
describe('Constants', () => {
// Sanity checks over the SQLite tuning constants.
describe('DB_CONFIG', () => {
  it('should have valid database configuration values', () => {
    // Every numeric tuning knob must be strictly positive.
    const numericKeys = ['CACHE_SIZE_KB', 'MMAP_SIZE', 'PAGE_SIZE', 'WAL_AUTOCHECKPOINT'] as const;
    for (const key of numericKeys) {
      expect(DB_CONFIG[key]).toBeGreaterThan(0);
    }
  });

  it('should have valid file names', () => {
    expect(DB_CONFIG.DB_FILE_NAME).toBe('.cccmemory.db');
    expect(DB_CONFIG.BACKUP_SUFFIX).toBe('.bak');
  });

  it('should provide TypeScript compile-time immutability', () => {
    // `as const` gives compile-time immutability only; runtime mutation is
    // technically possible but prevented by TypeScript. This test documents
    // the expectation rather than enforcing it.
    expect(typeof DB_CONFIG.CACHE_SIZE_KB).toBe('number');
  });
});
describe('EMBEDDING_CONFIG', () => {
it('should have valid dimension values', () => {
expect(EMBEDDING_CONFIG.OLLAMA_DEFAULT_DIMENSIONS).toBe(1024);
expect(EMBEDDING_CONFIG.TRANSFORMERS_DEFAULT_DIMENSIONS).toBe(384);
expect(EMBEDDING_CONFIG.OPENAI_DEFAULT_DIMENSIONS).toBe(1536);
});
it('should have valid model names', () => {
expect(EMBEDDING_CONFIG.OLLAMA_DEFAULT_MODEL).toBe('mxbai-embed-large');
expect(EMBEDDING_CONFIG.TRANSFORMERS_DEFAULT_MODEL).toBe('Xenova/all-MiniLM-L6-v2');
expect(EMBEDDING_CONFIG.OPENAI_DEFAULT_MODEL).toBe('text-embedding-ada-002');
});
it('should have valid batch size', () => {
expect(EMBEDDING_CONFIG.BATCH_SIZE).toBeGreaterThan(0);
expect(EMBEDDING_CONFIG.BATCH_SIZE).toBeLessThanOrEqual(1000);
});
it('should have valid similarity threshold', () => {
expect(EMBEDDING_CONFIG.DEFAULT_SIMILARITY_THRESHOLD).toBeGreaterThanOrEqual(0);
expect(EMBEDDING_CONFIG.DEFAULT_SIMILARITY_THRESHOLD).toBeLessThanOrEqual(1);
});
});
describe('SEARCH_CONFIG', () => {
it('should have valid limit values', () => {
expect(SEARCH_CONFIG.DEFAULT_LIMIT).toBe(10);
expect(SEARCH_CONFIG.MAX_LIMIT).toBe(100);
expect(SEARCH_CONFIG.DEFAULT_LIMIT).toBeLessThanOrEqual(SEARCH_CONFIG.MAX_LIMIT);
});
it('should have valid snippet context size', () => {
expect(SEARCH_CONFIG.SNIPPET_CONTEXT_CHARS).toBeGreaterThan(0);
});
it('should have valid date range defaults', () => {
expect(SEARCH_CONFIG.DEFAULT_DAYS_BACK).toBeGreaterThan(0);
});
});
describe('PATH_PATTERNS', () => {
it('should have valid path components', () => {
expect(PATH_PATTERNS.CLAUDE_DIR).toBe('.claude');
expect(PATH_PATTERNS.PROJECTS_DIR).toBe('projects');
expect(PATH_PATTERNS.LEGACY_PREFIX).toBe('-Users-');
expect(PATH_PATTERNS.CONFIG_FILE).toBe('.claude-memory-config.jsonc');
});
});
describe('TIME', () => {
it('should have correct time conversions', () => {
expect(TIME.SECOND).toBe(1000);
expect(TIME.MINUTE).toBe(60 * 1000);
expect(TIME.HOUR).toBe(60 * 60 * 1000);
expect(TIME.DAY).toBe(24 * 60 * 60 * 1000);
expect(TIME.WEEK).toBe(7 * 24 * 60 * 60 * 1000);
});
it('should have correct time relationships', () => {
expect(TIME.MINUTE).toBe(TIME.SECOND * 60);
expect(TIME.HOUR).toBe(TIME.MINUTE * 60);
expect(TIME.DAY).toBe(TIME.HOUR * 24);
expect(TIME.WEEK).toBe(TIME.DAY * 7);
});
});
describe('LIMITS', () => {
it('should have valid length limits', () => {
expect(LIMITS.MAX_MESSAGE_LENGTH).toBeGreaterThan(0);
expect(LIMITS.MAX_FILE_PATH_LENGTH).toBeGreaterThan(0);
expect(LIMITS.MAX_DECISION_LENGTH).toBeGreaterThan(0);
});
it('should have valid batch size limits', () => {
expect(LIMITS.MAX_BATCH_SIZE).toBeGreaterThan(0);
expect(LIMITS.MAX_SEARCH_RESULTS).toBeGreaterThan(0);
});
it('should have valid similarity score range', () => {
expect(LIMITS.MIN_SIMILARITY_SCORE).toBe(0.0);
expect(LIMITS.MAX_SIMILARITY_SCORE).toBe(1.0);
expect(LIMITS.MIN_SIMILARITY_SCORE).toBeLessThan(LIMITS.MAX_SIMILARITY_SCORE);
});
});
describe('MIGRATION_CONFIG', () => {
it('should have valid migration thresholds', () => {
expect(MIGRATION_CONFIG.MIN_CONVERSATIONS_FOR_MIGRATION).toBeGreaterThanOrEqual(1);
expect(MIGRATION_CONFIG.MIN_SIMILARITY_SCORE_FOR_MATCH).toBeGreaterThanOrEqual(0);
expect(MIGRATION_CONFIG.MIN_SIMILARITY_SCORE_FOR_MATCH).toBeLessThanOrEqual(1);
});
it('should have valid backup configuration', () => {
expect(typeof MIGRATION_CONFIG.AUTO_BACKUP).toBe('boolean');
expect(typeof MIGRATION_CONFIG.KEEP_SOURCE_FILES).toBe('boolean');
});
});
describe('MCP_CONFIG', () => {
it('should have valid timeout', () => {
expect(MCP_CONFIG.TOOL_TIMEOUT_MS).toBeGreaterThan(0);
});
it('should have valid batch size', () => {
expect(MCP_CONFIG.BATCH_PROCESSING_SIZE).toBeGreaterThan(0);
});
});
describe('ERROR_MESSAGES', () => {
it('should have all error messages defined', () => {
expect(ERROR_MESSAGES.NO_CONVERSATIONS_FOUND).toBeTruthy();
expect(ERROR_MESSAGES.INDEX_REQUIRED).toBeTruthy();
expect(ERROR_MESSAGES.INVALID_PROJECT_PATH).toBeTruthy();
expect(ERROR_MESSAGES.DATABASE_ERROR).toBeTruthy();
expect(ERROR_MESSAGES.EMBEDDING_ERROR).toBeTruthy();
});
it('should have non-empty error messages', () => {
Object.values(ERROR_MESSAGES).forEach(message => {
expect(message.length).toBeGreaterThan(0);
});
});
});
describe('SUCCESS_MESSAGES', () => {
it('should have all success messages defined', () => {
expect(SUCCESS_MESSAGES.INDEX_COMPLETE).toBeTruthy();
expect(SUCCESS_MESSAGES.MIGRATION_COMPLETE).toBeTruthy();
expect(SUCCESS_MESSAGES.BACKUP_CREATED).toBeTruthy();
});
it('should have non-empty success messages', () => {
Object.values(SUCCESS_MESSAGES).forEach(message => {
expect(message.length).toBeGreaterThan(0);
});
});
});
describe('Type Safety', () => {
it('should have readonly properties', () => {
// TypeScript will catch attempts to modify at compile time
// This test documents the immutability expectation
expect(Object.isFrozen(DB_CONFIG)).toBe(false); // `as const` doesn't freeze at runtime
// But TypeScript will prevent: DB_CONFIG.CACHE_SIZE_KB = 999
});
});
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/safeJson.test.ts | TypeScript | /**
* Tests for safeJsonParse utility
*/
import { safeJsonParse } from "../../utils/safeJson.js";
// Contract under test: safeJsonParse(raw, fallback) behaves like JSON.parse
// for valid input and returns the caller's fallback (same reference) for any
// invalid or non-string input — it must never throw.
describe("safeJsonParse", () => {
  describe("valid JSON", () => {
    it("should parse valid JSON object", () => {
      const result = safeJsonParse('{"key": "value"}', {});
      expect(result).toEqual({ key: "value" });
    });
    it("should parse valid JSON array", () => {
      const result = safeJsonParse("[1, 2, 3]", []);
      expect(result).toEqual([1, 2, 3]);
    });
    it("should parse valid JSON string", () => {
      const result = safeJsonParse('"hello"', "");
      expect(result).toBe("hello");
    });
    it("should parse valid JSON number", () => {
      const result = safeJsonParse("42", 0);
      expect(result).toBe(42);
    });
    it("should parse valid JSON boolean", () => {
      const result = safeJsonParse("true", false);
      expect(result).toBe(true);
    });
    it("should parse valid JSON null", () => {
      // "null" is valid JSON, so the fallback must NOT be used here.
      const result = safeJsonParse("null", "default");
      expect(result).toBeNull();
    });
  });
  describe("invalid JSON", () => {
    // toBe (reference equality) is used deliberately: the exact fallback
    // object passed in must come back, not a copy.
    it("should return fallback for malformed JSON", () => {
      const fallback = { default: true };
      const result = safeJsonParse("{invalid}", fallback);
      expect(result).toBe(fallback);
    });
    it("should return fallback for undefined input", () => {
      const fallback = {};
      const result = safeJsonParse(undefined as unknown as string, fallback);
      expect(result).toBe(fallback);
    });
    it("should return fallback for null input", () => {
      const fallback: string[] = [];
      const result = safeJsonParse(null as unknown as string, fallback);
      expect(result).toBe(fallback);
    });
    it("should return fallback for empty string", () => {
      const fallback = { empty: true };
      const result = safeJsonParse("", fallback);
      expect(result).toBe(fallback);
    });
    it("should return fallback for truncated JSON", () => {
      const fallback: unknown[] = [];
      const result = safeJsonParse('{"key": "val', fallback);
      expect(result).toBe(fallback);
    });
    it("should return fallback for non-string input", () => {
      const fallback = {};
      const result = safeJsonParse(123 as unknown as string, fallback);
      expect(result).toBe(fallback);
    });
  });
  describe("edge cases", () => {
    it("should handle nested objects", () => {
      const json = '{"a": {"b": {"c": 1}}}';
      const result = safeJsonParse(json, {});
      expect(result).toEqual({ a: { b: { c: 1 } } });
    });
    it("should handle special characters in strings", () => {
      // Escapes in the JSON source must come back as real control characters.
      const json = '{"text": "line1\\nline2\\ttab"}';
      const result = safeJsonParse(json, {});
      expect(result).toEqual({ text: "line1\nline2\ttab" });
    });
    it("should handle unicode", () => {
      const json = '{"emoji": "😀", "chinese": "中文"}';
      const result = safeJsonParse(json, {});
      expect(result).toEqual({ emoji: "😀", chinese: "中文" });
    });
    it("should preserve type of fallback", () => {
      const arrayFallback: number[] = [1, 2, 3];
      const result = safeJsonParse("invalid", arrayFallback);
      expect(Array.isArray(result)).toBe(true);
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/__tests__/unit/sanitization.test.ts | TypeScript | /**
* Unit tests for input sanitization utilities
*/
import {
sanitizeForLike,
validateFilePath,
sanitizeProjectPath,
sanitizeSQLIdentifier,
pathToProjectFolderName,
} from '../../utils/sanitization.js';
// Covers the security-sensitive input sanitizers: SQL LIKE escaping, path
// traversal / system-directory rejection, SQL identifier validation, and the
// Windows-specific path handling (platform is mocked where needed).
describe('sanitization utilities', () => {
  describe('sanitizeForLike', () => {
    // Each SQLite LIKE metacharacter must be backslash-escaped.
    it('should escape % characters', () => {
      expect(sanitizeForLike('test%file')).toBe('test\\%file');
    });
    it('should escape _ characters', () => {
      expect(sanitizeForLike('test_file')).toBe('test\\_file');
    });
    it('should escape " characters', () => {
      expect(sanitizeForLike('test"file')).toBe('test\\"file');
    });
    it('should escape \\ characters', () => {
      expect(sanitizeForLike('test\\file')).toBe('test\\\\file');
    });
    it('should escape multiple special characters', () => {
      expect(sanitizeForLike('test%_"\\file')).toBe('test\\%\\_\\"\\\\file');
    });
    it('should handle strings without special characters', () => {
      expect(sanitizeForLike('normalfile')).toBe('normalfile');
    });
    it('should handle empty strings', () => {
      expect(sanitizeForLike('')).toBe('');
    });
  });
  describe('validateFilePath', () => {
    it('should accept valid file paths', () => {
      expect(validateFilePath('/Users/test/file.txt')).toBe('/Users/test/file.txt');
      expect(validateFilePath('src/auth/token.ts')).toBe('src/auth/token.ts');
    });
    it('should remove null bytes', () => {
      // Null bytes can truncate paths in C-backed APIs; they must be stripped.
      expect(validateFilePath('test\0file')).toBe('testfile');
    });
    it('should reject path traversal with ..', () => {
      expect(() => validateFilePath('../etc/passwd')).toThrow('Path traversal detected');
      expect(() => validateFilePath('test/../secret')).toThrow('Path traversal detected');
    });
    it('should reject access to system directories', () => {
      expect(() => validateFilePath('/etc/passwd')).toThrow('system directories');
      expect(() => validateFilePath('/sys/kernel')).toThrow('system directories');
      expect(() => validateFilePath('/proc/cpuinfo')).toThrow('system directories');
    });
    it('should allow relative paths', () => {
      expect(validateFilePath('src/index.ts')).toBe('src/index.ts');
    });
  });
  describe('sanitizeProjectPath', () => {
    it('should accept valid project paths', () => {
      expect(sanitizeProjectPath('/Users/test/project')).toBe('/Users/test/project');
    });
    it('should remove null bytes', () => {
      expect(sanitizeProjectPath('/path\0/project')).toBe('/path/project');
    });
    it('should reject path traversal', () => {
      expect(() => sanitizeProjectPath('../secret')).toThrow('Path traversal detected');
    });
    it('should normalize multiple slashes', () => {
      expect(sanitizeProjectPath('/path//to///project')).toBe('/path/to/project');
    });
    it('should remove trailing slashes', () => {
      expect(sanitizeProjectPath('/path/to/project/')).toBe('/path/to/project');
    });
    it('should handle absolute paths', () => {
      expect(sanitizeProjectPath('/absolute/path')).toBe('/absolute/path');
    });
  });
  describe('sanitizeSQLIdentifier', () => {
    // Identifiers must match /^[A-Za-z_][A-Za-z0-9_]*$/-style rules — the only
    // safe way to interpolate a table/column name into SQL text.
    it('should accept valid SQL identifiers', () => {
      expect(sanitizeSQLIdentifier('table_name')).toBe('table_name');
      expect(sanitizeSQLIdentifier('Column123')).toBe('Column123');
      expect(sanitizeSQLIdentifier('_private')).toBe('_private');
    });
    it('should reject identifiers starting with numbers', () => {
      expect(() => sanitizeSQLIdentifier('123table')).toThrow('Invalid SQL identifier');
    });
    it('should reject identifiers with special characters', () => {
      expect(() => sanitizeSQLIdentifier('table-name')).toThrow('Invalid SQL identifier');
      expect(() => sanitizeSQLIdentifier('table.name')).toThrow('Invalid SQL identifier');
      expect(() => sanitizeSQLIdentifier('table name')).toThrow('Invalid SQL identifier');
    });
    it('should reject empty identifiers', () => {
      expect(() => sanitizeSQLIdentifier('')).toThrow('Invalid SQL identifier');
    });
    it('should accept identifiers with underscores', () => {
      expect(sanitizeSQLIdentifier('my_table_name')).toBe('my_table_name');
    });
  });
  describe('Windows path handling', () => {
    describe('pathToProjectFolderName - Windows paths', () => {
      // Separators (both kinds) collapse to single '-' in folder names.
      it('should handle Windows absolute paths with drive letter', () => {
        const result = pathToProjectFolderName('C:\\Users\\user\\project');
        expect(result).toBe('C-Users-user-project');
      });
      it('should handle Windows paths with lowercase drive letter', () => {
        const result = pathToProjectFolderName('c:\\users\\user\\project');
        expect(result).toBe('c-users-user-project');
      });
      it('should handle Windows UNC paths', () => {
        const result = pathToProjectFolderName('\\\\server\\share\\project');
        expect(result).toBe('-server-share-project');
      });
      it('should handle mixed forward and backward slashes', () => {
        const result = pathToProjectFolderName('C:\\Users/user\\github/project');
        expect(result).toBe('C-Users-user-github-project');
      });
      it('should handle multiple consecutive backslashes', () => {
        const result = pathToProjectFolderName('C:\\\\Users\\\\\\project');
        expect(result).toBe('C-Users-project');
      });
    });
    describe('sanitizeProjectPath - Windows normalization', () => {
      it('should normalize Windows paths with multiple backslashes', () => {
        const result = sanitizeProjectPath('C:\\path\\\\to\\\\\\project');
        // Result will be platform-specific, just verify no error
        expect(result).toBeTruthy();
        expect(result).not.toContain('\\\\\\');
      });
      it('should remove trailing backslashes on Windows-style paths', () => {
        const result = sanitizeProjectPath('C:\\path\\to\\project\\');
        expect(result.endsWith('\\')).toBe(false);
        expect(result.endsWith('/')).toBe(false);
      });
    });
    describe('validateFilePath - Windows system directories', () => {
      // Mock Windows platform for testing
      // process.platform is normally read-only; defineProperty with
      // configurable:true lets us swap it per-test and restore it after.
      const originalPlatform = process.platform;
      beforeEach(() => {
        Object.defineProperty(process, 'platform', {
          value: 'win32',
          writable: true,
          configurable: true,
        });
      });
      afterEach(() => {
        Object.defineProperty(process, 'platform', {
          value: originalPlatform,
          writable: true,
          configurable: true,
        });
      });
      it('should reject Windows system directories', () => {
        expect(() => validateFilePath('C:\\Windows\\System32')).toThrow('system directories');
        expect(() => validateFilePath('C:\\Windows\\notepad.exe')).toThrow('system directories');
      });
      it('should reject Program Files directory', () => {
        expect(() => validateFilePath('C:\\Program Files\\app')).toThrow('system directories');
      });
      it('should reject ProgramData directory', () => {
        expect(() => validateFilePath('C:\\ProgramData\\config')).toThrow('system directories');
      });
      it('should allow user directories on Windows', () => {
        expect(validateFilePath('C:\\Users\\user\\project\\file.txt'))
          .toBe('C:\\Users\\user\\project\\file.txt');
      });
      it('should be case-insensitive for Windows paths', () => {
        expect(() => validateFilePath('c:\\windows\\system32')).toThrow('system directories');
        expect(() => validateFilePath('C:\\WINDOWS\\System32')).toThrow('system directories');
      });
    });
  });
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/cache/QueryCache.ts | TypeScript | /**
* QueryCache - LRU cache for database query results
*
* Provides an in-memory caching layer with:
* - LRU (Least Recently Used) eviction policy
* - TTL (Time To Live) for automatic expiration
* - Cache statistics (hits, misses, evictions, hit rate)
* - Smart invalidation support
*
* Use this to cache expensive database queries like conversation searches,
* file timelines, and decision lookups.
*
* @example
* ```typescript
* const cache = new QueryCache({ maxSize: 100, ttlMs: 60000 });
*
* // Store query result
* cache.set('conversations:all', conversations);
*
* // Retrieve from cache
* const cached = cache.get('conversations:all');
* if (cached) {
* return cached; // Cache hit
* }
*
* // Cache miss - query database
* const result = await queryDatabase();
* cache.set('conversations:all', result);
* return result;
* ```
*/
/**
 * Configuration options for QueryCache.
 */
export interface QueryCacheConfig {
  /** Maximum number of entries to store before LRU eviction kicks in */
  maxSize: number;
  /** Time to live in milliseconds before entries expire */
  ttlMs: number;
}
/**
 * Internal cache entry pairing a value with its creation time for TTL checks.
 */
interface CacheEntry<T> {
  /** Cached value */
  value: T;
  /** Epoch milliseconds (Date.now()) when the entry was created/updated */
  timestamp: number;
}
/**
 * Cache statistics snapshot for monitoring.
 */
export interface CacheStats {
  /** Number of cache hits */
  hits: number;
  /** Number of cache misses (includes lookups that found an expired entry) */
  misses: number;
  /** Number of entries evicted for capacity (TTL expiry is not counted) */
  evictions: number;
  /** Cache hit rate (0-1); 0 when no requests have been made */
  hitRate: number;
  /** Current cache size */
  size: number;
  /** Maximum cache size */
  maxSize: number;
}
/**
 * LRU cache with TTL support for database query results.
 *
 * Backed by a Map whose insertion order doubles as the LRU order: entries are
 * re-inserted on every access, so the first key in iteration order is always
 * the least recently used. Expired entries are removed lazily (on access) and
 * are purged before a capacity eviction, so a live entry is never evicted
 * while stale ones still occupy slots.
 */
export class QueryCache {
  /** key -> entry; Map iteration order (oldest first) is the LRU order. */
  private cache: Map<string, CacheEntry<unknown>>;
  private readonly config: QueryCacheConfig;
  /** Running counters; TTL expiry removals are not counted as evictions. */
  private stats = {
    hits: 0,
    misses: 0,
    evictions: 0,
  };

  /**
   * Create a new QueryCache.
   *
   * @param config - Partial configuration; defaults are maxSize=100 and
   *   ttlMs=300000 (5 minutes)
   * @throws {Error} If maxSize or ttlMs is not strictly positive
   */
  constructor(config: Partial<QueryCacheConfig> = {}) {
    this.config = {
      maxSize: config.maxSize ?? 100,
      ttlMs: config.ttlMs ?? 300000, // 5 minutes default
    };
    if (this.config.maxSize <= 0) {
      throw new Error("maxSize must be greater than 0");
    }
    if (this.config.ttlMs <= 0) {
      throw new Error("ttlMs must be greater than 0");
    }
    this.cache = new Map();
  }

  /**
   * Store a value in the cache.
   *
   * Existing keys are refreshed (value, TTL, and LRU position). When the
   * cache is full, expired entries are purged first; only if it is still at
   * capacity is the least recently used live entry evicted.
   *
   * FIX: previously the LRU entry was evicted unconditionally, which could
   * discard a live entry while expired entries still occupied capacity.
   *
   * @param key - Cache key
   * @param value - Value to cache
   */
  set<T>(key: string, value: T): void {
    // Delete first so re-insertion below moves the key to the MRU position.
    if (this.cache.has(key)) {
      this.cache.delete(key);
    }
    if (this.cache.size >= this.config.maxSize) {
      // Prefer reclaiming stale slots over evicting live entries.
      this.cleanupExpired();
    }
    if (this.cache.size >= this.config.maxSize) {
      const lruKey = this.cache.keys().next().value;
      if (lruKey !== undefined) {
        this.cache.delete(lruKey);
        this.stats.evictions++;
      }
    }
    // Insert at the end of the Map (most recently used position).
    this.cache.set(key, {
      value: value as unknown,
      timestamp: Date.now(),
    });
  }

  /**
   * Retrieve a value from the cache.
   *
   * Returns undefined (and counts a miss) if the key is absent or the entry
   * has expired; an expired entry is deleted on access. A hit moves the entry
   * to the most-recently-used position.
   *
   * @param key - Cache key
   * @returns Cached value or undefined
   */
  get<T>(key: string): T | undefined {
    const entry = this.cache.get(key);
    if (!entry) {
      this.stats.misses++;
      return undefined;
    }
    if (this.isExpired(entry)) {
      this.cache.delete(key);
      this.stats.misses++;
      return undefined;
    }
    // Re-insert to mark as most recently used.
    this.cache.delete(key);
    this.cache.set(key, entry);
    this.stats.hits++;
    return entry.value as T;
  }

  /**
   * Check whether a key exists and has not expired.
   *
   * Like get(), this deletes an expired entry and refreshes the LRU position
   * of a live one, but it does not touch hit/miss statistics.
   *
   * @param key - Cache key
   * @returns True if key exists and is not expired
   */
  has(key: string): boolean {
    const entry = this.cache.get(key);
    if (!entry) {
      return false;
    }
    if (this.isExpired(entry)) {
      this.cache.delete(key);
      return false;
    }
    this.cache.delete(key);
    this.cache.set(key, entry);
    return true;
  }

  /**
   * Delete a key from the cache. No-op if the key is absent.
   *
   * @param key - Cache key to delete
   */
  delete(key: string): void {
    this.cache.delete(key);
  }

  /**
   * Clear all entries from the cache.
   *
   * Statistics (hits/misses/evictions) are preserved; use resetStats() to
   * clear those.
   */
  clear(): void {
    this.cache.clear();
  }

  /**
   * Get the current number of live entries (expired entries are purged first).
   *
   * @returns Number of non-expired entries in the cache
   */
  size(): number {
    this.cleanupExpired();
    return this.cache.size;
  }

  /**
   * Get cache statistics: hit/miss/eviction counts, hit rate, and size.
   *
   * FIX: expired entries are now purged before reporting, so `size` counts
   * only live entries.
   *
   * @returns Cache statistics snapshot
   */
  getStats(): CacheStats {
    this.cleanupExpired();
    const totalRequests = this.stats.hits + this.stats.misses;
    return {
      hits: this.stats.hits,
      misses: this.stats.misses,
      evictions: this.stats.evictions,
      hitRate: totalRequests > 0 ? this.stats.hits / totalRequests : 0,
      size: this.cache.size,
      maxSize: this.config.maxSize,
    };
  }

  /**
   * Reset hit/miss/eviction counters. Cached data is preserved.
   */
  resetStats(): void {
    this.stats = {
      hits: 0,
      misses: 0,
      evictions: 0,
    };
  }

  /**
   * Whether an entry's age exceeds the configured TTL.
   * @internal
   */
  private isExpired(entry: CacheEntry<unknown>): boolean {
    return Date.now() - entry.timestamp > this.config.ttlMs;
  }

  /**
   * Remove every expired entry. Expiry removals are not counted as evictions,
   * consistent with the lazy expiry in get()/has().
   * @internal
   */
  private cleanupExpired(): void {
    const staleKeys: string[] = [];
    for (const [key, entry] of this.cache.entries()) {
      if (this.isExpired(entry)) {
        staleKeys.push(key);
      }
    }
    for (const key of staleKeys) {
      this.cache.delete(key);
    }
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/chunking/ChunkingConfig.ts | TypeScript | /**
* Chunking Configuration Types
* Defines configuration options for text chunking strategies
*/
/** Available text-splitting strategies. */
export type ChunkingStrategy = "sentence" | "sliding_window" | "paragraph";

/**
 * Tunable parameters controlling how long texts are split before embedding.
 */
export interface ChunkingConfig {
  /** Enable or disable chunking (default: true) */
  enabled: boolean;
  /** Chunking strategy to use (default: "sentence") */
  strategy: ChunkingStrategy;
  /** Target chunk size in tokens (default: 450 for 512 limit with margin) */
  chunkSize: number;
  /** Overlap between chunks as a fraction (default: 0.1 = 10%) */
  overlap: number;
  /** Minimum chunk size in tokens - don't split smaller texts (default: 50) */
  minChunkSize: number;
  /** Maximum chunk size as hard limit (default: 500) */
  maxChunkSize: number;
  /** Characters per token estimate for prose (default: 4) */
  charsPerTokenProse: number;
  /** Characters per token estimate for code (default: 3.5) */
  charsPerTokenCode: number;
}

/** Baseline configuration used when no overrides are supplied. */
export const DEFAULT_CHUNKING_CONFIG: ChunkingConfig = {
  enabled: true,
  strategy: "sentence",
  chunkSize: 450,
  overlap: 0.1,
  minChunkSize: 50,
  maxChunkSize: 500,
  charsPerTokenProse: 4,
  charsPerTokenCode: 3.5,
};

/**
 * One piece of a chunked text.
 */
export interface TextChunk {
  /** The chunk content */
  content: string;
  /** Index of this chunk within the source text */
  index: number;
  /** Total number of chunks from the source text */
  totalChunks: number;
  /** Character offset where this chunk starts in the original text */
  startOffset: number;
  /** Character offset where this chunk ends in the original text */
  endOffset: number;
  /** Estimated token count for this chunk */
  estimatedTokens: number;
  /** Strategy used to create this chunk */
  strategy: ChunkingStrategy;
}

/**
 * Summary of one chunking operation.
 */
export interface ChunkingResult {
  /** Length in characters of the text that was chunked */
  originalLength: number;
  /** Whether the text was actually split (false = returned as one chunk) */
  wasChunked: boolean;
  /** Chunks produced, in source order */
  chunks: TextChunk[];
  /** Strategy used */
  strategy: ChunkingStrategy;
  /** Estimated total tokens in the original text */
  estimatedTotalTokens: number;
}

/**
 * Build the effective chunking config: defaults overridden by environment.
 *
 * Recognized variables (invalid values are ignored, keeping the default):
 * - CCCMEMORY_CHUNKING_ENABLED: only the exact string "true" enables
 * - CCCMEMORY_CHUNK_SIZE: positive integer token target
 * - CCCMEMORY_CHUNKING_STRATEGY: one of the ChunkingStrategy values
 * - CCCMEMORY_CHUNK_OVERLAP: fraction in [0, 1)
 */
export function getChunkingConfig(): ChunkingConfig {
  const resolved: ChunkingConfig = { ...DEFAULT_CHUNKING_CONFIG };

  const enabledVar = process.env.CCCMEMORY_CHUNKING_ENABLED;
  if (enabledVar !== undefined) {
    // NOTE: any value other than "true" (including "1") disables chunking.
    resolved.enabled = enabledVar === "true";
  }

  const sizeVar = process.env.CCCMEMORY_CHUNK_SIZE;
  if (sizeVar) {
    const size = Number.parseInt(sizeVar, 10);
    if (!Number.isNaN(size) && size > 0) {
      resolved.chunkSize = size;
    }
  }

  const strategyVar = process.env.CCCMEMORY_CHUNKING_STRATEGY;
  if (
    strategyVar === "sentence" ||
    strategyVar === "sliding_window" ||
    strategyVar === "paragraph"
  ) {
    resolved.strategy = strategyVar;
  }

  const overlapVar = process.env.CCCMEMORY_CHUNK_OVERLAP;
  if (overlapVar) {
    const overlap = Number.parseFloat(overlapVar);
    if (!Number.isNaN(overlap) && overlap >= 0 && overlap < 1) {
      resolved.overlap = overlap;
    }
  }

  return resolved;
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/chunking/TextChunker.ts | TypeScript | /**
* Text Chunker Factory
* Provides unified interface for text chunking strategies
*/
import type {
ChunkingConfig,
ChunkingResult,
TextChunk,
} from "./ChunkingConfig.js";
import {
DEFAULT_CHUNKING_CONFIG,
getChunkingConfig,
} from "./ChunkingConfig.js";
import { chunkWithSentences, estimateTokens } from "./strategies/SentenceChunker.js";
import { chunkWithSlidingWindow } from "./strategies/SlidingWindowChunker.js";
/**
 * Interface for pluggable chunking strategies.
 *
 * NOTE(review): this interface deliberately shares its name with the
 * `ChunkingStrategy` string-union type declared in ChunkingConfig.ts; the
 * module index re-exports the union as `ChunkingStrategyType` to
 * disambiguate. Consider renaming one of them.
 */
export interface ChunkingStrategy {
  /** Split `text` into chunks according to `config`. */
  chunk(text: string, config: ChunkingConfig): ChunkingResult;
}
/**
 * Text Chunker — facade over the available chunking strategy implementations.
 *
 * Its configuration is resolved by layering, lowest precedence first:
 * built-in defaults, environment overrides (via getChunkingConfig), then any
 * explicit overrides passed to the constructor.
 */
export class TextChunker {
  private config: ChunkingConfig;

  constructor(config?: Partial<ChunkingConfig>) {
    this.config = {
      ...DEFAULT_CHUNKING_CONFIG,
      ...getChunkingConfig(),
      ...config,
    };
  }

  /** Return a defensive copy of the active configuration. */
  getConfig(): ChunkingConfig {
    return { ...this.config };
  }

  /**
   * Whether `text` would actually be split: chunking must be enabled and the
   * estimated token count must exceed the configured chunk size.
   */
  needsChunking(text: string): boolean {
    if (!this.config.enabled) {
      return false;
    }
    return estimateTokens(text, this.config) > this.config.chunkSize;
  }

  /** Estimate the token count of `text` under the current configuration. */
  estimateTokens(text: string): number {
    return estimateTokens(text, this.config);
  }

  /**
   * Chunk `text` with the configured strategy.
   *
   * When chunking is disabled the whole text is returned as a single chunk.
   * "paragraph" currently falls back to sentence chunking; only
   * "sliding_window" selects a distinct implementation.
   */
  chunk(text: string): ChunkingResult {
    if (!this.config.enabled) {
      return this.wrapAsSingleChunk(text);
    }
    if (this.config.strategy === "sliding_window") {
      return chunkWithSlidingWindow(text, this.config);
    }
    // "sentence", "paragraph" (fallback), and any unknown value.
    return chunkWithSentences(text, this.config);
  }

  /** Chunk each input text independently. */
  chunkBatch(texts: string[]): ChunkingResult[] {
    return texts.map((item) => this.chunk(item));
  }

  /**
   * Chunk several texts and flatten the results into one array, tagging each
   * chunk with the id of the text it came from.
   */
  chunkBatchFlat(
    texts: Array<{ id: string | number; content: string }>
  ): Array<TextChunk & { sourceId: string | number }> {
    const flattened: Array<TextChunk & { sourceId: string | number }> = [];
    for (const { id, content } of texts) {
      for (const piece of this.chunk(content).chunks) {
        flattened.push({ ...piece, sourceId: id });
      }
    }
    return flattened;
  }

  /** Wrap the full text as chunk 0 of 1 (used when chunking is disabled). */
  private wrapAsSingleChunk(text: string): ChunkingResult {
    const tokens = estimateTokens(text, this.config);
    return {
      originalLength: text.length,
      wasChunked: false,
      chunks: [
        {
          content: text,
          index: 0,
          totalChunks: 1,
          startOffset: 0,
          endOffset: text.length,
          estimatedTokens: tokens,
          strategy: this.config.strategy,
        },
      ],
      strategy: this.config.strategy,
      estimatedTotalTokens: tokens,
    };
  }
}
/**
 * Lazily-created module-wide chunker shared by getTextChunker().
 */
let defaultChunker: TextChunker | null = null;

/**
 * Obtain a chunker. With an explicit config a fresh, unshared instance is
 * returned on every call; without one, a lazily-created module singleton is
 * reused.
 */
export function getTextChunker(config?: Partial<ChunkingConfig>): TextChunker {
  if (config) {
    return new TextChunker(config);
  }
  if (defaultChunker === null) {
    defaultChunker = new TextChunker();
  }
  return defaultChunker;
}

/**
 * Drop the shared singleton so the next getTextChunker() call rebuilds it.
 * Primarily for tests that mutate environment-driven configuration.
 */
export function resetTextChunker(): void {
  defaultChunker = null;
}
// Re-export types and utilities
export type { ChunkingConfig, ChunkingResult, TextChunk };
export { DEFAULT_CHUNKING_CONFIG, getChunkingConfig } from "./ChunkingConfig.js";
export { estimateTokens } from "./strategies/SentenceChunker.js";
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/chunking/index.ts | TypeScript | /**
* Text Chunking Module
* Provides text chunking strategies for handling long messages that exceed embedding model limits
*/
export {
TextChunker,
getTextChunker,
resetTextChunker,
estimateTokens,
DEFAULT_CHUNKING_CONFIG,
getChunkingConfig,
} from "./TextChunker.js";
export type {
ChunkingConfig,
ChunkingResult,
TextChunk,
ChunkingStrategy as ChunkingStrategyType,
} from "./ChunkingConfig.js";
export { chunkWithSentences } from "./strategies/SentenceChunker.js";
export { chunkWithSlidingWindow } from "./strategies/SlidingWindowChunker.js";
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/chunking/strategies/SentenceChunker.ts | TypeScript | /**
* Sentence-Aware Text Chunker
* Splits text at sentence boundaries while respecting code blocks and paragraphs
*/
import type { ChunkingConfig, TextChunk, ChunkingResult } from "../ChunkingConfig.js";
/**
 * Estimate how many embedding-model tokens `text` will consume.
 *
 * Fenced (```...```) and inline (`...`) code spans are costed at
 * `charsPerTokenCode` characters per token and the remaining prose at
 * `charsPerTokenProse`; the combined total is rounded up. This is a
 * character-ratio heuristic, not a real tokenizer.
 */
function estimateTokens(text: string, config: ChunkingConfig): number {
  const codeSpanPattern = /```[\s\S]*?```|`[^`\n]+`/g;
  let codeChars = 0;
  for (const span of text.match(codeSpanPattern) || []) {
    codeChars += span.length;
  }
  const proseChars = text.length - codeChars;
  return Math.ceil(
    codeChars / config.charsPerTokenCode + proseChars / config.charsPerTokenProse
  );
}
/**
 * Heuristic check for whether `text` looks like source code.
 *
 * Triggers on fenced code markers (```), markdown-style indented code (a run
 * of four spaces), common declaration keywords at the start of a line, lines
 * ending in braces/semicolons, access modifiers, arrow functions, or a
 * `() {` body shape. Plain prose without these markers returns false.
 */
function isCodeLike(text: string): boolean {
  if (text.includes("```") || text.includes("    ")) {
    return true;
  }
  const markers: RegExp[] = [
    /^(const|let|var|function|class|import|export|if|for|while|return)\s/m,
    /[{};]$/m,
    /^\s*(public|private|protected)\s/m,
    /=>/,
    /\(\)\s*{/,
  ];
  for (const marker of markers) {
    if (marker.test(text)) {
      return true;
    }
  }
  return false;
}
/**
 * Split text into sentence-sized pieces while keeping fenced code blocks intact.
 *
 * Fenced ``` blocks are extracted first and emitted as single, unsplit items;
 * the prose between them is split at sentence terminators (. ! ?). Lines that
 * end without terminal punctuation are emitted as-is rather than dropped.
 *
 * @param text - Raw message text, possibly containing fenced code blocks
 * @returns Ordered list of sentences and verbatim code blocks
 */
function splitIntoSentences(text: string): string[] {
  const sentences: string[] = [];

  // Locate fenced code blocks so they survive splitting untouched.
  const codeBlockPattern = /```[\s\S]*?```/g;
  const codeBlocks: Array<{ start: number; end: number; content: string }> = [];
  let match;
  while ((match = codeBlockPattern.exec(text)) !== null) {
    codeBlocks.push({
      start: match.index,
      end: match.index + match[0].length,
      content: match[0],
    });
  }

  // Partition the text into alternating prose/code segments.
  const segments: Array<{ text: string; isCode: boolean }> = [];
  let lastEnd = 0;
  for (const block of codeBlocks) {
    if (block.start > lastEnd) {
      segments.push({ text: text.slice(lastEnd, block.start), isCode: false });
    }
    segments.push({ text: block.content, isCode: true });
    lastEnd = block.end;
  }
  if (lastEnd < text.length) {
    segments.push({ text: text.slice(lastEnd), isCode: false });
  }

  // Defensive: only reachable when `text` is empty (otherwise at least one
  // segment was pushed above).
  if (segments.length === 0) {
    segments.push({ text, isCode: false });
  }

  for (const segment of segments) {
    if (segment.isCode) {
      // A code block is emitted as one indivisible "sentence".
      sentences.push(segment.text);
    } else {
      // A sentence is a run of non-terminator chars ending in .!? (plus
      // trailing whitespace), OR a punctuation-less remainder at line end.
      // BUG FIX: the `m` flag is required so `$` matches at each line end;
      // without it, lines lacking terminal punctuation that were followed by
      // a newline were silently dropped from the output.
      const sentencePattern = /[^.!?\n]+[.!?]+(?:\s+|$)|[^.!?\n]+$/gm;
      let sentenceMatch;
      while ((sentenceMatch = sentencePattern.exec(segment.text)) !== null) {
        const sentence = sentenceMatch[0].trim();
        if (sentence.length > 0) {
          sentences.push(sentence);
        }
      }
    }
  }

  return sentences;
}
/**
 * Merge sentences into chunks respecting token limits.
 *
 * Packs consecutive sentences until the overlap-adjusted target size is
 * reached, then carries trailing sentences forward so adjacent chunks
 * share context. Sentences larger than maxChunkSize are split on
 * whitespace as a fallback.
 *
 * @param sentences - Output of splitIntoSentences (code blocks intact)
 * @param config - Size / overlap / chars-per-token settings
 * @param originalText - Full source text, used to recover char offsets
 * @returns Chunks with totalChunks filled in on every element
 */
function mergeSentencesIntoChunks(
  sentences: string[],
  config: ChunkingConfig,
  originalText: string
): TextChunk[] {
  const chunks: TextChunk[] = [];
  let currentChunk: string[] = [];
  let currentTokens = 0;
  let currentStartOffset = 0;
  // Reserve part of each chunk's budget for overlap with its predecessor
  const overlapTokens = Math.floor(config.chunkSize * config.overlap);
  const targetSize = config.chunkSize - overlapTokens;
  for (let i = 0; i < sentences.length; i++) {
    const sentence = sentences[i];
    const sentenceTokens = estimateTokens(sentence, config);
    // If single sentence exceeds max chunk size, it needs special handling
    if (sentenceTokens > config.maxChunkSize) {
      // Flush current chunk first
      if (currentChunk.length > 0) {
        const content = currentChunk.join(" ");
        // NOTE(review): indexOf can return -1 here if joining with " "
        // altered whitespace; endOffset would then be content.length - 1
        const endOffset = originalText.indexOf(content, currentStartOffset) + content.length;
        chunks.push({
          content,
          index: chunks.length,
          totalChunks: 0, // Updated later
          startOffset: currentStartOffset,
          endOffset: Math.min(endOffset, originalText.length),
          estimatedTokens: currentTokens,
          strategy: "sentence",
        });
        currentStartOffset = endOffset;
        currentChunk = [];
        currentTokens = 0;
      }
      // Split long sentence using sliding window fallback
      const words = sentence.split(/\s+/);
      let wordChunk: string[] = [];
      let wordTokens = 0;
      for (const word of words) {
        const wordTokenCount = estimateTokens(word + " ", config);
        if (wordTokens + wordTokenCount > config.maxChunkSize && wordChunk.length > 0) {
          const content = wordChunk.join(" ");
          // Fall back to the running offset when the joined text cannot
          // be located in the original (offset === -1)
          const offset = originalText.indexOf(content, currentStartOffset);
          chunks.push({
            content,
            index: chunks.length,
            totalChunks: 0,
            startOffset: offset >= 0 ? offset : currentStartOffset,
            endOffset: Math.min((offset >= 0 ? offset : currentStartOffset) + content.length, originalText.length),
            estimatedTokens: wordTokens,
            strategy: "sentence",
          });
          currentStartOffset = offset >= 0 ? offset + content.length : currentStartOffset + content.length;
          wordChunk = [];
          wordTokens = 0;
        }
        wordChunk.push(word);
        wordTokens += wordTokenCount;
      }
      // Add remaining words (they seed the next regular chunk)
      if (wordChunk.length > 0) {
        currentChunk = wordChunk;
        currentTokens = wordTokens;
      }
      continue;
    }
    // Check if adding this sentence would exceed target size
    if (currentTokens + sentenceTokens > targetSize && currentChunk.length > 0) {
      // Create chunk from current sentences
      const content = currentChunk.join(" ");
      const contentIndex = originalText.indexOf(content, currentStartOffset);
      const effectiveStart = contentIndex >= 0 ? contentIndex : currentStartOffset;
      chunks.push({
        content,
        index: chunks.length,
        totalChunks: 0,
        startOffset: effectiveStart,
        endOffset: Math.min(effectiveStart + content.length, originalText.length),
        estimatedTokens: currentTokens,
        strategy: "sentence",
      });
      // Start new chunk with overlap from previous sentences
      const overlapSentences: string[] = [];
      let overlapTokenCount = 0;
      // Add sentences from end of current chunk for overlap, newest last
      for (let j = currentChunk.length - 1; j >= 0 && overlapTokenCount < overlapTokens; j--) {
        const overlapSentence = currentChunk[j];
        const tokens = estimateTokens(overlapSentence, config);
        if (overlapTokenCount + tokens <= overlapTokens) {
          overlapSentences.unshift(overlapSentence);
          overlapTokenCount += tokens;
        } else {
          break;
        }
      }
      // Next chunk starts where the overlapping tail of this one began
      currentStartOffset = effectiveStart + content.length - overlapSentences.join(" ").length;
      currentChunk = overlapSentences;
      currentTokens = overlapTokenCount;
    }
    currentChunk.push(sentence);
    currentTokens += sentenceTokens;
  }
  // Add final chunk
  if (currentChunk.length > 0) {
    const content = currentChunk.join(" ");
    const contentIndex = originalText.indexOf(content, currentStartOffset);
    const effectiveStart = contentIndex >= 0 ? contentIndex : currentStartOffset;
    chunks.push({
      content,
      index: chunks.length,
      totalChunks: 0,
      startOffset: effectiveStart,
      endOffset: Math.min(effectiveStart + content.length, originalText.length),
      estimatedTokens: currentTokens,
      strategy: "sentence",
    });
  }
  // Update totalChunks for all chunks
  const totalChunks = chunks.length;
  for (const chunk of chunks) {
    chunk.totalChunks = totalChunks;
  }
  return chunks;
}
/**
* Chunk text using sentence-aware strategy
*/
export function chunkWithSentences(
text: string,
config: ChunkingConfig
): ChunkingResult {
const estimatedTokens = estimateTokens(text, config);
// Don't chunk if text is small enough
if (estimatedTokens <= config.chunkSize) {
return {
originalLength: text.length,
wasChunked: false,
chunks: [
{
content: text,
index: 0,
totalChunks: 1,
startOffset: 0,
endOffset: text.length,
estimatedTokens,
strategy: "sentence",
},
],
strategy: "sentence",
estimatedTotalTokens: estimatedTokens,
};
}
// Split into sentences
const sentences = splitIntoSentences(text);
// Merge into chunks
const chunks = mergeSentencesIntoChunks(sentences, config, text);
return {
originalLength: text.length,
wasChunked: chunks.length > 1,
chunks,
strategy: "sentence",
estimatedTotalTokens: estimatedTokens,
};
}
export { estimateTokens, isCodeLike };
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/chunking/strategies/SlidingWindowChunker.ts | TypeScript | /**
* Sliding Window Text Chunker
* Simple overlap-based chunking for fallback scenarios
*/
import type { ChunkingConfig, TextChunk, ChunkingResult } from "../ChunkingConfig.js";
import { estimateTokens } from "./SentenceChunker.js";
/**
 * Find the nearest word boundary at or before (default) / after the
 * given position. A boundary is marked by a space or newline.
 */
function findWordBoundary(text: string, position: number, searchBackward: boolean = true): number {
  // Clamp out-of-range positions to the text edges
  if (position >= text.length) {
    return text.length;
  }
  if (position <= 0) {
    return 0;
  }
  const isBreak = (ch: string): boolean => ch === " " || ch === "\n";
  // Already sitting on whitespace: this position is a boundary
  if (isBreak(text[position])) {
    return position;
  }
  if (searchBackward) {
    // Walk left; the boundary is just after the whitespace found
    for (let i = position; i >= 0; i--) {
      if (isBreak(text[i])) {
        return i + 1;
      }
    }
    return 0;
  }
  // Walk right; the boundary is the whitespace itself
  for (let i = position; i < text.length; i++) {
    if (isBreak(text[i])) {
      return i;
    }
  }
  return text.length;
}
/**
 * Convert a token budget into an approximate character count, using a
 * chars-per-token ratio weighted by how much of the text is code.
 */
function estimateCharsForTokens(tokens: number, config: ChunkingConfig, text: string): number {
  // Fraction of the text occupied by fenced blocks / inline code spans
  let codeLength = 0;
  for (const span of text.match(/```[\s\S]*?```|`[^`\n]+`/g) ?? []) {
    codeLength += span.length;
  }
  const codeRatio = text.length > 0 ? codeLength / text.length : 0;
  // Blend code and prose ratios by that fraction
  const avgCharsPerToken =
    config.charsPerTokenCode * codeRatio +
    config.charsPerTokenProse * (1 - codeRatio);
  return Math.floor(tokens * avgCharsPerToken);
}
/**
 * Chunk text using sliding window strategy.
 *
 * Fallback chunker: advances a fixed-size character window (sized from
 * the token budget) through the text with configurable overlap,
 * snapping window edges to word boundaries.
 *
 * @param text - Text to chunk
 * @param config - Size / overlap / chars-per-token settings
 * @returns Result with a single pass-through chunk when text fits
 */
export function chunkWithSlidingWindow(
  text: string,
  config: ChunkingConfig
): ChunkingResult {
  const estimatedTotalTokens = estimateTokens(text, config);
  // Don't chunk if text is small enough
  if (estimatedTotalTokens <= config.chunkSize) {
    return {
      originalLength: text.length,
      wasChunked: false,
      chunks: [
        {
          content: text,
          index: 0,
          totalChunks: 1,
          startOffset: 0,
          endOffset: text.length,
          estimatedTokens: estimatedTotalTokens,
          strategy: "sliding_window",
        },
      ],
      strategy: "sliding_window",
      estimatedTotalTokens,
    };
  }
  const chunks: TextChunk[] = [];
  // Calculate window sizes in characters
  const windowChars = estimateCharsForTokens(config.chunkSize, config, text);
  const overlapChars = Math.floor(windowChars * config.overlap);
  const stepChars = windowChars - overlapChars;
  let position = 0;
  while (position < text.length) {
    // Calculate end position (with word boundary adjustment)
    let endPosition = Math.min(position + windowChars, text.length);
    // Adjust to word boundary if not at end
    if (endPosition < text.length) {
      endPosition = findWordBoundary(text, endPosition, true);
      // Ensure we make progress: a backward snap can land at/before the
      // window start when one word spans the whole window, so search
      // forward instead in that case
      if (endPosition <= position) {
        endPosition = findWordBoundary(text, position + windowChars, false);
      }
    }
    const content = text.slice(position, endPosition).trim();
    if (content.length > 0) {
      chunks.push({
        content,
        index: chunks.length,
        totalChunks: 0, // Updated later
        startOffset: position,
        endOffset: endPosition,
        estimatedTokens: estimateTokens(content, config),
        strategy: "sliding_window",
      });
    }
    // Move window forward
    if (endPosition >= text.length) {
      break;
    }
    // Calculate next start position
    let nextPosition = position + stepChars;
    // Adjust to word boundary
    nextPosition = findWordBoundary(text, nextPosition, true);
    // Ensure we make progress (guarantees loop termination)
    if (nextPosition <= position) {
      nextPosition = position + 1;
    }
    position = nextPosition;
  }
  // Update totalChunks for all chunks
  const totalChunks = chunks.length;
  for (const chunk of chunks) {
    chunk.totalChunks = totalChunks;
  }
  return {
    originalLength: text.length,
    wasChunked: chunks.length > 1,
    chunks,
    strategy: "sliding_window",
    estimatedTotalTokens,
  };
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/cli/commands.ts | TypeScript | /**
* Command execution and parsing for CLI
*/
import chalk from "chalk";
import Table from "cli-table3";
import { ToolHandlers } from "../tools/ToolHandlers.js";
import { getSQLiteManager } from "../storage/SQLiteManager.js";
import { showHelp, showCommandHelp } from "./help.js";
import { ConfigManager } from "../embeddings/ConfigManager.js";
import { getMcpStatus } from "../utils/McpConfig.js";
import {
getModelsByProvider,
getAllModels,
getModelsByQuality,
getRecommendedModel,
modelExists,
type ModelInfo
} from "../embeddings/ModelRegistry.js";
import { readFileSync } from "fs";
import { join, dirname } from "path";
import { fileURLToPath } from "url";
import prompts from "prompts";
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
 * Read the package version from package.json, resolved relative to
 * this module. Returns "unknown" if the file is missing or unreadable.
 */
function getVersion(): string {
  try {
    const manifestPath = join(__dirname, "..", "..", "package.json");
    const manifest = JSON.parse(readFileSync(manifestPath, "utf-8"));
    return manifest.version;
  } catch (_error) {
    // Never let a version lookup crash the CLI
    return "unknown";
  }
}
/**
 * Parse a raw command line into a command word, positional args, and
 * --key[ value] options. Double-quoted tokens keep their spaces, and
 * surrounding quotes are stripped from args and option values.
 */
function parseArgs(input: string): { command: string; args: string[]; options: Record<string, string | boolean> } {
  const tokens = input.match(/(?:[^\s"]+|"[^"]*")+/g) ?? [];
  const [command = "", ...rest] = tokens;
  const args: string[] = [];
  const options: Record<string, string | boolean> = {};
  const unquote = (s: string): string => s.replace(/^"(.*)"$/, "$1");
  let i = 0;
  while (i < rest.length) {
    const token = rest[i];
    if (!token.startsWith("--")) {
      args.push(unquote(token));
      i += 1;
      continue;
    }
    const key = token.slice(2);
    const next = rest[i + 1];
    if (next !== undefined && !next.startsWith("--")) {
      // Option followed by a value: consume the next token too
      options[key] = unquote(next);
      i += 2;
    } else {
      // Bare flag with no value
      options[key] = true;
      i += 1;
    }
  }
  return { command, args, options };
}
/**
 * Execute a single CLI command line.
 *
 * Parses the input into a command word, positional args, and --options,
 * then dispatches to the matching handler. Several commands have
 * aliases (e.g. "find" for "search", "why" for "decisions").
 *
 * @param input - Raw line typed by the user
 * @param handlers - Tool handler facade that performs the real work
 * @returns Rendered output, the REPL sentinels "exit"/"clear", or a
 *   usage / unknown-command message
 */
export async function executeCommand(
  input: string,
  handlers: ToolHandlers
): Promise<string | null> {
  const { command, args, options } = parseArgs(input);
  // Handle exit commands
  if (command === "exit" || command === "quit" || command === "q") {
    return "exit";
  }
  // Handle clear
  if (command === "clear") {
    return "clear";
  }
  // Handle help (with optional per-command detail)
  if (command === "help" || command === "?") {
    if (args.length > 0) {
      return showCommandHelp(args[0]);
    }
    return showHelp();
  }
  // Handle version
  if (command === "version") {
    return chalk.cyan(`CCCMemory v${getVersion()}`);
  }
  // Handle status/stats
  if (command === "status" || command === "stats") {
    return await handleStatus();
  }
  // Handle index
  if (command === "index") {
    return await handleIndex(handlers, options);
  }
  // Handle reindex
  if (command === "reindex") {
    return await handleReindex(handlers, options);
  }
  // Handle search
  if (command === "search" || command === "find") {
    if (args.length === 0) {
      return chalk.yellow("Usage: search <query> [options]");
    }
    return await handleSearch(handlers, args.join(" "), options);
  }
  // Handle decisions
  if (command === "decisions" || command === "why") {
    if (args.length === 0) {
      return chalk.yellow("Usage: decisions <topic> [options]");
    }
    return await handleDecisions(handlers, args.join(" "), options);
  }
  // Handle mistakes
  if (command === "mistakes" || command === "errors") {
    if (args.length === 0) {
      return chalk.yellow("Usage: mistakes <query> [options]");
    }
    return await handleMistakes(handlers, args.join(" "), options);
  }
  // Handle check
  if (command === "check") {
    if (args.length === 0) {
      return chalk.yellow("Usage: check <file>");
    }
    return await handleCheck(handlers, args[0]);
  }
  // Handle history/evolution
  if (command === "history" || command === "evolution") {
    if (args.length === 0) {
      return chalk.yellow("Usage: history <file> [options]");
    }
    return await handleHistory(handlers, args[0], options);
  }
  // Handle commits/git (query is optional here)
  if (command === "commits" || command === "git") {
    return await handleCommits(handlers, args.join(" "), options);
  }
  // Handle similar/related
  if (command === "similar" || command === "related") {
    if (args.length === 0) {
      return chalk.yellow("Usage: similar <query> [options]");
    }
    return await handleSimilar(handlers, args.join(" "), options);
  }
  // Handle requirements/deps
  if (command === "requirements" || command === "deps") {
    if (args.length === 0) {
      return chalk.yellow("Usage: requirements <component> [options]");
    }
    return await handleRequirements(handlers, args.join(" "), options);
  }
  // Handle tools
  if (command === "tools" || command === "history-tools") {
    return await handleTools(handlers, options);
  }
  // Handle docs/generate
  if (command === "docs" || command === "generate") {
    return await handleDocs(handlers, options);
  }
  // Handle reset
  if (command === "reset") {
    return await handleReset();
  }
  // Handle vacuum
  if (command === "vacuum") {
    return await handleVacuum();
  }
  // Handle config (no args = show, two args = set)
  if (command === "config") {
    if (args.length === 0) {
      return handleConfigShow();
    } else if (args.length === 2) {
      return handleConfigSet(args[0], args[1]);
    } else {
      return chalk.yellow("Usage: config (show current config)\n config <key> <value> (set config value)");
    }
  }
  // Handle models
  if (command === "models") {
    return handleModels(args);
  }
  // Handle select-model (interactive)
  if (command === "select-model" || command === "select") {
    return await handleSelectModel();
  }
  // Handle get
  if (command === "get") {
    if (args.length === 0) {
      return chalk.yellow("Usage: get <key>");
    }
    return handleConfigGet(args[0]);
  }
  // Handle set
  if (command === "set") {
    if (args.length < 2) {
      return chalk.yellow("Usage: set <key> <value>");
    }
    return handleConfigSet(args[0], args[1]);
  }
  // Handle commands
  if (command === "commands") {
    return showHelp();
  }
  // Handle init-mcp
  if (command === "init-mcp") {
    return await handleInitMcp();
  }
  // Handle remove-mcp
  if (command === "remove-mcp") {
    return await handleRemoveMcp();
  }
  // Handle mcp-status
  if (command === "mcp-status") {
    return handleMcpStatus();
  }
  // Unknown command
  return chalk.yellow(`Unknown command: ${command}\nType 'help' for available commands.`);
}
/**
 * Render database statistics (row counts per table) as a table,
 * plus a hint when nothing has been indexed yet.
 */
async function handleStatus(): Promise<string> {
  const dbManager = getSQLiteManager();
  const db = dbManager.getDatabase();
  const stats = dbManager.getStats();
  // Abbreviate the home directory in the displayed path
  const dbPath = stats.dbPath.replace(process.env.HOME || "", "~");
  const countOf = (sql: string): number =>
    (db.prepare(sql).get() as { count: number }).count;
  const conversations = countOf("SELECT COUNT(*) as count FROM conversations");
  const messages = countOf("SELECT COUNT(*) as count FROM messages");
  const decisions = countOf("SELECT COUNT(*) as count FROM decisions");
  const mistakes = countOf("SELECT COUNT(*) as count FROM mistakes");
  const commits = countOf("SELECT COUNT(*) as count FROM git_commits");
  const embeddings = countOf("SELECT COUNT(*) as count FROM message_embeddings");
  const table = new Table({
    head: [chalk.cyan("Metric"), chalk.cyan("Value")],
    colWidths: [30, 30],
  });
  table.push(
    ["Database", dbPath],
    ["Conversations", String(conversations)],
    ["Messages", String(messages)],
    ["Decisions", String(decisions)],
    ["Mistakes", String(mistakes)],
    ["Git Commits", String(commits)],
    ["Embeddings", String(embeddings)],
    ["Semantic Search", embeddings > 0 ? chalk.green("enabled") : chalk.yellow("disabled")]
  );
  let output = "\n" + table.toString() + "\n";
  if (conversations === 0) {
    output += "\n" + chalk.yellow("⚠️ No conversations indexed yet. Run 'index' to get started.\n");
  }
  return output;
}
/**
 * Build indexing arguments from CLI options and run conversation
 * indexing via the tool handlers.
 */
async function handleIndex(handlers: ToolHandlers, options: Record<string, string | boolean>): Promise<string> {
  const args: Record<string, unknown> = {
    project_path: typeof options.project === "string" ? options.project : process.cwd(),
  };
  if (typeof options.session === "string") {
    args.session_id = options.session;
  }
  // Flag → argument mapping; --include-mcp intentionally overrides
  // --exclude-mcp because it is applied last
  const flagMappings: Array<[string, string, unknown]> = [
    ["exclude-mcp", "exclude_mcp_conversations", "all-mcp"],
    ["include-mcp", "exclude_mcp_conversations", false],
    ["no-git", "enable_git", false],
    ["thinking", "include_thinking", true],
  ];
  for (const [flag, argKey, argValue] of flagMappings) {
    if (options[flag]) {
      args[argKey] = argValue;
    }
  }
  console.log(chalk.blue("Indexing conversations..."));
  const result = await handlers.indexConversations(args);
  return chalk.green(`\n✓ Indexing complete!\n\n${JSON.stringify(result, null, 2)}`);
}
/**
 * Clear all indexed data and re-index from scratch.
 * Gives the user a 3-second window to abort with Ctrl+C.
 */
async function handleReindex(handlers: ToolHandlers, options: Record<string, string | boolean>): Promise<string> {
  console.log(chalk.yellow("⚠️ This will clear all indexed data and re-index."));
  console.log(chalk.yellow("Press Ctrl+C to cancel, or wait 3 seconds to continue..."));
  await new Promise((resolve) => setTimeout(resolve, 3000));
  // Clear database. Embeddings are cleared first (and explicitly) so a
  // re-index cannot serve stale vectors for deleted messages; the
  // 'status' command counts this table to decide whether semantic
  // search is enabled.
  const db = getSQLiteManager().getDatabase();
  db.exec("DELETE FROM message_embeddings");
  db.exec("DELETE FROM conversations");
  db.exec("DELETE FROM messages");
  db.exec("DELETE FROM decisions");
  db.exec("DELETE FROM mistakes");
  db.exec("DELETE FROM git_commits");
  console.log(chalk.blue("Database cleared. Indexing conversations..."));
  return await handleIndex(handlers, options);
}
/**
 * Run a conversation search and render hits with snippets and
 * similarity scores.
 */
async function handleSearch(
  handlers: ToolHandlers,
  query: string,
  options: Record<string, string | boolean>
): Promise<string> {
  const args: Record<string, unknown> = {
    query,
    ...(typeof options.limit === "string" ? { limit: parseInt(options.limit, 10) } : {}),
  };
  const result = await handlers.searchConversations(args);
  const hits = result.results ?? [];
  if (hits.length === 0) {
    return chalk.yellow(`No results found for: ${query}`);
  }
  const parts: string[] = [chalk.green(`\nFound ${hits.length} results:\n\n`)];
  hits.forEach((r, i) => {
    // Truncate long snippets to keep the listing scannable
    const snippet = r.snippet.slice(0, 200) + (r.snippet.length > 200 ? "..." : "");
    parts.push(chalk.cyan(`${i + 1}. `) + `[${r.timestamp}] Session ${r.conversation_id.slice(0, 8)}\n`);
    parts.push(` ${snippet}\n`);
    parts.push(chalk.gray(` Similarity: ${(r.similarity * 100).toFixed(1)}%\n\n`));
  });
  return parts.join("");
}
/**
 * Search recorded decisions and render each with its rationale and
 * considered alternatives.
 */
async function handleDecisions(
  handlers: ToolHandlers,
  query: string,
  options: Record<string, string | boolean>
): Promise<string> {
  const args: Record<string, unknown> = {
    query,
    ...(typeof options.file === "string" ? { file_path: options.file } : {}),
    ...(typeof options.limit === "string" ? { limit: parseInt(options.limit, 10) } : {}),
  };
  const result = await handlers.getDecisions(args);
  const decisions = result.decisions ?? [];
  if (decisions.length === 0) {
    return chalk.yellow(`No decisions found for: ${query}`);
  }
  const parts: string[] = [chalk.green(`\nFound ${decisions.length} decisions:\n\n`)];
  decisions.forEach((d, i) => {
    parts.push(chalk.cyan(`${i + 1}. ${d.decision_text}\n`));
    parts.push(` Date: ${d.timestamp}\n`);
    parts.push(` Rationale: ${d.rationale || "N/A"}\n`);
    if (d.alternatives_considered && d.alternatives_considered.length > 0) {
      parts.push(` Alternatives: ${d.alternatives_considered.join(", ")}\n`);
    }
    parts.push("\n");
  });
  return parts.join("");
}
/**
 * Search recorded mistakes and render each with its correction.
 */
async function handleMistakes(
  handlers: ToolHandlers,
  query: string,
  options: Record<string, string | boolean>
): Promise<string> {
  const args: Record<string, unknown> = {
    query,
    ...(typeof options.type === "string" ? { mistake_type: options.type } : {}),
    ...(typeof options.limit === "string" ? { limit: parseInt(options.limit, 10) } : {}),
  };
  const result = await handlers.searchMistakes(args);
  const mistakes = result.mistakes ?? [];
  if (mistakes.length === 0) {
    return chalk.yellow(`No mistakes found for: ${query}`);
  }
  const parts: string[] = [chalk.green(`\nFound ${mistakes.length} mistakes:\n\n`)];
  mistakes.forEach((m, i) => {
    parts.push(chalk.red(`${i + 1}. [${m.mistake_type}] ${m.what_went_wrong}\n`));
    parts.push(` Date: ${m.timestamp}\n`);
    parts.push(chalk.green(` Fix: ${m.correction || m.user_correction_message || "N/A"}\n\n`));
  });
  return parts.join("");
}
/**
 * Show stored context (decisions, mistakes, history) for a file
 * before it is modified.
 */
async function handleCheck(handlers: ToolHandlers, filePath: string): Promise<string> {
  const context = await handlers.checkBeforeModify({ file_path: filePath });
  const header = chalk.green(`\nContext for: ${filePath}\n\n`);
  return header + JSON.stringify(context, null, 2);
}
/**
 * Show a file's evolution; commits and decisions can be suppressed
 * with --no-commits / --no-decisions.
 */
async function handleHistory(
  handlers: ToolHandlers,
  filePath: string,
  options: Record<string, string | boolean>
): Promise<string> {
  const args: Record<string, unknown> = {
    file_path: filePath,
    ...(options["no-commits"] ? { include_commits: false } : {}),
    ...(options["no-decisions"] ? { include_decisions: false } : {}),
  };
  const evolution = await handlers.getFileEvolution(args);
  return chalk.green(`\nFile evolution for: ${filePath}\n\n`) + JSON.stringify(evolution, null, 2);
}
/**
 * Show git commits linked to conversations; query and filters are
 * all optional.
 */
async function handleCommits(
  handlers: ToolHandlers,
  query: string,
  options: Record<string, string | boolean>
): Promise<string> {
  const args: Record<string, unknown> = {
    ...(query ? { query } : {}),
    ...(typeof options.conversation === "string" ? { conversation_id: options.conversation } : {}),
    ...(typeof options.limit === "string" ? { limit: parseInt(options.limit, 10) } : {}),
  };
  const linked = await handlers.linkCommitsToConversations(args);
  return chalk.green("\nCommits linked to conversations:\n\n") + JSON.stringify(linked, null, 2);
}
/**
 * Find sessions similar to the given query.
 */
async function handleSimilar(
  handlers: ToolHandlers,
  query: string,
  options: Record<string, string | boolean>
): Promise<string> {
  const args: Record<string, unknown> = {
    query,
    ...(typeof options.limit === "string" ? { limit: parseInt(options.limit, 10) } : {}),
  };
  const sessions = await handlers.findSimilarSessions(args);
  return chalk.green("\nSimilar sessions:\n\n") + JSON.stringify(sessions, null, 2);
}
/**
 * Show recorded requirements for a component, optionally filtered
 * by --type.
 */
async function handleRequirements(
  handlers: ToolHandlers,
  component: string,
  options: Record<string, string | boolean>
): Promise<string> {
  const args: Record<string, unknown> = {
    component,
    ...(typeof options.type === "string" ? { type: options.type } : {}),
  };
  const requirements = await handlers.getRequirements(args);
  return chalk.green(`\nRequirements for: ${component}\n\n`) + JSON.stringify(requirements, null, 2);
}
/**
 * Show tool usage history, optionally filtered by --file / --tool
 * and capped by --limit.
 */
async function handleTools(handlers: ToolHandlers, options: Record<string, string | boolean>): Promise<string> {
  const args: Record<string, unknown> = {
    ...(typeof options.file === "string" ? { file_path: options.file } : {}),
    ...(typeof options.tool === "string" ? { tool_name: options.tool } : {}),
    ...(typeof options.limit === "string" ? { limit: parseInt(options.limit, 10) } : {}),
  };
  const history = await handlers.getToolHistory(args);
  return chalk.green("\nTool usage history:\n\n") + JSON.stringify(history, null, 2);
}
/**
 * Generate documentation for the current project, optionally scoped
 * by --scope / --module.
 */
async function handleDocs(handlers: ToolHandlers, options: Record<string, string | boolean>): Promise<string> {
  const args: Record<string, unknown> = {
    project_path: process.cwd(),
    ...(typeof options.scope === "string" ? { scope: options.scope } : {}),
    ...(typeof options.module === "string" ? { module_filter: options.module } : {}),
  };
  console.log(chalk.blue("Generating documentation..."));
  const generated = await handlers.generateDocumentation(args);
  return chalk.green("\n✓ Documentation generated!\n\n") + JSON.stringify(generated, null, 2);
}
/**
 * Delete ALL indexed data after a 5-second abort window.
 */
async function handleReset(): Promise<string> {
  console.log(chalk.red("⚠️ WARNING: This will delete ALL indexed data!"));
  console.log(chalk.yellow("Press Ctrl+C to cancel, or wait 5 seconds to continue..."));
  await new Promise((resolve) => setTimeout(resolve, 5000));
  const db = getSQLiteManager().getDatabase();
  // Also clear stored embeddings: previously they survived a reset,
  // leaving 'status' reporting semantic search as enabled and vectors
  // pointing at deleted messages.
  db.exec("DELETE FROM message_embeddings");
  db.exec("DELETE FROM conversations");
  db.exec("DELETE FROM messages");
  db.exec("DELETE FROM decisions");
  db.exec("DELETE FROM mistakes");
  db.exec("DELETE FROM git_commits");
  return chalk.green("\n✓ Database reset complete.\n");
}
/**
 * Run SQLite VACUUM and report the database size before and after.
 */
async function handleVacuum(): Promise<string> {
  const db = getSQLiteManager().getDatabase();
  // Use the database's actual page size rather than assuming 4096
  // bytes; databases created with a different page_size would have
  // reported wrong sizes.
  const { page_size: pageSize } = db.prepare("PRAGMA page_size").get() as { page_size: number };
  const beforeSize = db.prepare("PRAGMA page_count").get() as { page_count: number };
  db.exec("VACUUM");
  const afterSize = db.prepare("PRAGMA page_count").get() as { page_count: number };
  const beforeKB = (beforeSize.page_count * pageSize) / 1024;
  const afterKB = (afterSize.page_count * pageSize) / 1024;
  return chalk.green(`\n✓ Database vacuumed: ${beforeKB.toFixed(1)}KB → ${afterKB.toFixed(1)}KB\n`);
}
/**
 * Render the effective embedding configuration, the sources it was
 * merged from (home config, project config, environment variables),
 * usage hints, and the known model catalog per provider.
 *
 * @returns Multi-section, chalk-colored report string
 */
function handleConfigShow(): string {
  const sources = ConfigManager.getConfigSources();
  const configPath = ConfigManager.getConfigPath();
  const configExists = ConfigManager.configExists();
  let output = chalk.cyan("\n=== Embedding Configuration ===\n\n");
  // Show effective config (after merging all sources)
  output += chalk.bold("Current (Effective) Configuration:\n");
  const table = new Table({
    head: [chalk.cyan("Key"), chalk.cyan("Value")],
    colWidths: [20, 50],
  });
  table.push(
    ["Provider", sources.effective.provider],
    ["Model", sources.effective.model],
    ["Dimensions", String(sources.effective.dimensions || "auto")],
    ["Base URL", sources.effective.baseUrl || "N/A"],
    // API keys are never printed in full — only the last 4 chars
    ["API Key", sources.effective.apiKey ? "***" + sources.effective.apiKey.slice(-4) : "N/A"]
  );
  output += table.toString() + "\n\n";
  // Show sources breakdown
  output += chalk.bold("Configuration Sources:\n\n");
  if (sources.home) {
    output += chalk.green(`✓ Home Config: ${configPath}\n`);
    output += ` Provider: ${sources.home.provider || "not set"}\n`;
    output += ` Model: ${sources.home.model || "not set"}\n`;
    output += ` Dimensions: ${sources.home.dimensions || "not set"}\n\n`;
  } else {
    output += chalk.gray(` Home Config: ${configPath} (not found)\n\n`);
  }
  if (sources.project) {
    output += chalk.green("✓ Project Config: .claude-memory-config.json\n");
    output += ` Provider: ${sources.project.provider || "not set"}\n`;
    output += ` Model: ${sources.project.model || "not set"}\n\n`;
  }
  if (Object.keys(sources.env).length > 0) {
    output += chalk.green("✓ Environment Variables:\n");
    if (sources.env.provider) {
      output += ` EMBEDDING_PROVIDER=${sources.env.provider}\n`;
    }
    if (sources.env.model) {
      output += ` EMBEDDING_MODEL=${sources.env.model}\n`;
    }
    if (sources.env.dimensions) {
      output += ` EMBEDDING_DIMENSIONS=${sources.env.dimensions}\n`;
    }
    if (sources.env.baseUrl) {
      output += ` EMBEDDING_BASE_URL=${sources.env.baseUrl}\n`;
    }
    if (sources.env.apiKey) {
      output += ` OPENAI_API_KEY=***\n`;
    }
    output += "\n";
  }
  // Show usage instructions
  output += chalk.bold("Usage:\n");
  output += ` ${chalk.cyan("config")} Show this config\n`;
  output += ` ${chalk.cyan("config <key> <value>")} Set config value\n`;
  output += ` ${chalk.cyan("get <key>")} Get specific value\n`;
  output += ` ${chalk.cyan("set <key> <value>")} Set specific value\n\n`;
  output += chalk.bold("Valid Keys:\n");
  output += ` ${chalk.cyan("provider")} ollama, transformers, openai\n`;
  output += ` ${chalk.cyan("model")} Model name (e.g. mxbai-embed-large)\n`;
  output += ` ${chalk.cyan("dimensions")} Embedding dimensions (e.g. 1024)\n`;
  output += ` ${chalk.cyan("baseUrl")} Ollama base URL (default: http://localhost:11434)\n`;
  output += ` ${chalk.cyan("apiKey")} OpenAI API key\n\n`;
  // Show available models by provider using ModelRegistry
  output += chalk.bold("Known Models by Provider:\n\n");
  // Ollama models
  output += chalk.yellow("Ollama (local):\n");
  const ollamaModels = getModelsByProvider("ollama");
  for (const model of ollamaModels) {
    const suffix = model.installation ? ` ${chalk.dim(`(${model.description})`)}` : "";
    output += ` ${model.name.padEnd(30)} ${model.dimensions.toString().padStart(4)} dims${suffix}\n`;
  }
  output += "\n";
  // Transformers models
  output += chalk.yellow("Transformers (offline):\n");
  const transformersModels = getModelsByProvider("transformers");
  for (const model of transformersModels) {
    output += ` ${model.name.padEnd(30)} ${model.dimensions.toString().padStart(4)} dims ${chalk.dim(`(${model.description})`)}\n`;
  }
  output += "\n";
  // OpenAI models
  output += chalk.yellow("OpenAI (cloud):\n");
  const openaiModels = getModelsByProvider("openai");
  for (const model of openaiModels) {
    const costSuffix = model.cost ? ` - ${model.cost}` : "";
    output += ` ${model.name.padEnd(30)} ${model.dimensions.toString().padStart(4)} dims ${chalk.dim(`(${model.description}${costSuffix})`)}\n`;
  }
  output += "\n";
  output += chalk.gray(`Config file location: ${configPath}\n`);
  if (!configExists) {
    output += chalk.yellow("Config file will be created on first 'set' command.\n");
  }
  return output;
}
/**
 * Print a single configuration value; API keys are masked.
 */
function handleConfigGet(key: string): string {
  try {
    const value = ConfigManager.getConfigValue(key);
    if (value == null) {
      return chalk.yellow(`Config key '${key}' is not set`);
    }
    // Never echo a full API key back to the terminal
    if (key === "apiKey" || key === "api_key") {
      const masked = "***" + (value as string).slice(-4);
      return chalk.green(`${key}: ${masked}`);
    }
    return chalk.green(`${key}: ${value}`);
  } catch (error) {
    return chalk.red(`Error: ${(error as Error).message}`);
  }
}
/**
 * Set a configuration value, warning about unknown model names and
 * printing helpful follow-up hints (matching models for a dimension
 * count, suggested dimensions for a model).
 */
function handleConfigSet(key: string, value: string): string {
  try {
    // Warn (but do not block) when the model is unknown to the registry
    if (key === "model" && !modelExists(value)) {
      let warning = chalk.yellow(`⚠️ Model '${value}' is not in the registry.\n\n`);
      warning += chalk.gray("This might be a custom model. If so, make sure to also set the correct dimensions.\n\n");
      warning += chalk.cyan("Known models:\n");
      warning += chalk.gray(" Run 'models' to see all available models\n");
      warning += chalk.gray(" Or 'models <provider>' to see provider-specific models\n\n");
      warning += chalk.yellow("Proceeding with custom model...\n\n");
      console.warn(warning);
    }
    ConfigManager.setConfigValue(key, value);
    const parts: string[] = [chalk.green(`✓ Config updated: ${key} = ${value}\n\n`)];
    if (key === "dimensions") {
      // Suggest models whose output size matches the new dimensions
      const dims = parseInt(value, 10);
      const matchingModels = getAllModels().filter((m) => m.dimensions === dims);
      if (matchingModels.length > 0) {
        parts.push(chalk.cyan("Models with matching dimensions:\n"));
        for (const model of matchingModels) {
          parts.push(` - ${model.name} (${model.provider})\n`);
        }
        parts.push("\n");
      }
    }
    if (key === "model") {
      // Remind the user to keep dimensions in sync with the model
      const knownDims = ConfigManager.getKnownModelDimensions(value);
      if (knownDims) {
        parts.push(chalk.cyan(`💡 Tip: This model uses ${knownDims} dimensions\n`));
        parts.push(` Run: ${chalk.green(`set dimensions ${knownDims}`)}\n\n`);
      }
    }
    parts.push(chalk.gray(`Config saved to: ${ConfigManager.getConfigPath()}\n`));
    return parts.join("");
  } catch (error) {
    return chalk.red(`Error: ${(error as Error).message}`);
  }
}
/**
* Handle models command - List, filter, search models
* Usage:
* models - List all models
* models <provider> - Filter by provider (ollama, transformers, openai)
* models quality <tier> - Filter by quality (low, medium, high, highest)
* models recommend - Show recommended models for each provider
*/
function handleModels(args: string[]): string {
let output = "";
// No args: list all models
if (args.length === 0) {
output += chalk.bold("📚 All Available Embedding Models\n\n");
const allModels = getAllModels();
output += formatModelsTable(allModels);
output += "\n";
output += chalk.gray("💡 Tip: Use 'models <provider>' to filter by provider\n");
output += chalk.gray(" Or: 'models quality <tier>' to filter by quality\n");
output += chalk.gray(" Or: 'models recommend' to see recommendations\n");
return output;
}
const subcommand = args[0].toLowerCase();
// Filter by provider
if (["ollama", "transformers", "openai"].includes(subcommand)) {
const models = getModelsByProvider(subcommand);
output += chalk.bold(`📚 ${capitalize(subcommand)} Models\n\n`);
output += formatModelsTable(models);
// Show recommended model for this provider
const recommended = getRecommendedModel(subcommand);
if (recommended) {
output += "\n";
output += chalk.cyan(`⭐ Recommended: ${recommended.name} (${recommended.dimensions} dims, ${recommended.quality} quality)\n`);
}
return output;
}
// Filter by quality
if (subcommand === "quality") {
if (args.length < 2) {
return chalk.yellow("Usage: models quality <tier>\nTiers: low, medium, high, highest");
}
const quality = args[1].toLowerCase() as ModelInfo["quality"];
if (!["low", "medium", "high", "highest"].includes(quality)) {
return chalk.red(`Invalid quality tier: ${args[1]}\nValid tiers: low, medium, high, highest`);
}
const models = getModelsByQuality(quality);
output += chalk.bold(`📚 ${capitalize(quality)} Quality Models\n\n`);
output += formatModelsTable(models);
return output;
}
// Show recommended models
if (subcommand === "recommend" || subcommand === "recommended") {
output += chalk.bold("⭐ Recommended Models by Provider\n\n");
const providers = ["ollama", "transformers", "openai"];
for (const provider of providers) {
const recommended = getRecommendedModel(provider);
if (recommended) {
output += chalk.yellow(`${capitalize(provider)}:\n`);
output += ` ${chalk.green(recommended.name)} ${chalk.dim(`(${recommended.dimensions} dims, ${recommended.quality} quality)`)}\n`;
output += ` ${chalk.dim(recommended.description)}\n`;
if (recommended.installation) {
output += ` ${chalk.dim(`Install: ${recommended.installation}`)}\n`;
}
if (recommended.cost) {
output += ` ${chalk.dim(`Cost: ${recommended.cost}`)}\n`;
}
output += "\n";
}
}
return output;
}
return chalk.yellow(`Unknown models subcommand: ${subcommand}\n\nUsage:\n models - List all models\n models <provider> - Filter by provider (ollama, transformers, openai)\n models quality <tier> - Filter by quality\n models recommend - Show recommendations`);
}
/**
 * Format models into a table
 *
 * Renders one row per model with fixed column widths; long descriptions
 * wrap inside their cell.
 */
function formatModelsTable(models: ModelInfo[]): string {
  const headerLabels = ["Model", "Provider", "Dimensions", "Quality", "Description"];
  const table = new Table({
    head: headerLabels.map(label => chalk.cyan(label)),
    colWidths: [35, 13, 12, 10, 45],
    wordWrap: true,
  });
  models.forEach(model => {
    table.push([
      model.name,
      model.provider,
      String(model.dimensions),
      model.quality,
      model.description,
    ]);
  });
  return table.toString();
}
/**
 * Capitalize first letter
 *
 * Upper-cases the first character only; the remainder of the string is
 * returned unchanged. An empty string is returned as-is.
 */
function capitalize(str: string): string {
  if (str === "") {
    return str;
  }
  return str[0].toUpperCase() + str.slice(1);
}
/**
 * Handle interactive model selection
 *
 * Three-step wizard: pick a provider, pick one of its models, confirm.
 * On confirmation it persists provider, model and dimensions via
 * ConfigManager and returns a summary with any setup/cost notes.
 * Returns a yellow "Selection cancelled" message at any abort point.
 */
async function handleSelectModel(): Promise<string> {
  try {
    // Step 1: Choose provider
    const providerResponse = await prompts({
      type: "select",
      name: "provider",
      message: "Choose an embedding provider:",
      choices: [
        {
          title: "Ollama (Local, High Quality)",
          value: "ollama",
          description: "Run models locally with Ollama. Requires: ollama serve"
        },
        {
          title: "Transformers (Offline, No Setup)",
          value: "transformers",
          description: "Auto-download models, runs offline. No external setup needed."
        },
        {
          title: "OpenAI (Cloud, Highest Quality)",
          value: "openai",
          description: "Cloud API with best quality. Requires API key and costs money."
        }
      ],
      initial: 0,
    });
    // A missing answer is treated as a cancelled prompt.
    if (!providerResponse.provider) {
      return chalk.yellow("Selection cancelled");
    }
    const provider = providerResponse.provider as string;
    // Step 2: Choose model from that provider
    const models = getModelsByProvider(provider);
    const modelChoices = models.map(m => ({
      title: `${m.name} (${m.dimensions} dims, ${m.quality} quality)`,
      value: m.name,
      description: m.description + (m.installation ? ` - ${m.installation}` : "") + (m.cost ? ` - ${m.cost}` : "")
    }));
    // Highlight recommended model
    const recommended = getRecommendedModel(provider);
    if (recommended) {
      const recIndex = modelChoices.findIndex(c => c.value === recommended.name);
      if (recIndex >= 0) {
        modelChoices[recIndex].title = `⭐ ${modelChoices[recIndex].title} (recommended)`;
      }
    }
    const modelResponse = await prompts({
      type: "select",
      name: "model",
      message: `Choose a model from ${capitalize(provider)}:`,
      choices: modelChoices,
      initial: 0,
    });
    if (!modelResponse.model) {
      return chalk.yellow("Selection cancelled");
    }
    const modelName = modelResponse.model as string;
    // Look the model back up to get dimensions/quality for the summary.
    const selectedModel = models.find(m => m.name === modelName);
    if (!selectedModel) {
      return chalk.red("Error: Model not found");
    }
    // Step 3: Confirm and save
    const confirmResponse = await prompts({
      type: "confirm",
      name: "confirm",
      message: `Set ${selectedModel.name} as your embedding model?\n  Provider: ${selectedModel.provider}\n  Dimensions: ${selectedModel.dimensions}\n  Quality: ${selectedModel.quality}`,
      initial: true,
    });
    if (!confirmResponse.confirm) {
      return chalk.yellow("Selection cancelled");
    }
    // Save configuration
    // Dimensions are persisted alongside the model so indexing and search
    // agree on the embedding size.
    ConfigManager.setConfigValue("provider", provider);
    ConfigManager.setConfigValue("model", modelName);
    ConfigManager.setConfigValue("dimensions", selectedModel.dimensions.toString());
    let output = chalk.green(`✓ Configuration updated!\n\n`);
    output += `  Provider:   ${chalk.cyan(provider)}\n`;
    output += `  Model:      ${chalk.cyan(modelName)}\n`;
    output += `  Dimensions: ${chalk.cyan(selectedModel.dimensions)}\n\n`;
    // Add setup instructions
    if (selectedModel.installation) {
      output += chalk.yellow(`⚠️  Setup Required:\n`);
      output += `  ${selectedModel.installation}\n\n`;
    }
    if (selectedModel.cost) {
      output += chalk.yellow(`💰 Cost: ${selectedModel.cost}\n\n`);
    }
    output += chalk.dim("💡 Tip: You may need to reindex conversations for the new model:\n");
    output += chalk.dim("   reset && index\n\n");
    output += chalk.gray(`Config saved to: ${ConfigManager.getConfigPath()}\n`);
    return output;
  } catch (error) {
    // The prompts library throws this specific message on forced close
    // (e.g. Ctrl+C); treat it as a cancellation rather than an error.
    if ((error as { message?: string }).message === "User force closed the prompt") {
      return chalk.yellow("\nSelection cancelled");
    }
    return chalk.red(`Error: ${(error as Error).message}`);
  }
}
/**
 * Handle init-mcp command - Configure MCP server in ~/.claude.json
 *
 * No-op (with a hint) when the server entry already exists; otherwise
 * writes the entry and returns a summary. On failure, returns manual
 * configuration instructions.
 */
async function handleInitMcp(): Promise<string> {
  const { isMcpConfigured, addMcpServer } = await import("../utils/McpConfig.js");
  try {
    const { configured, configPath } = isMcpConfigured();
    if (configured) {
      return chalk.yellow(`✓ MCP server is already configured in ${configPath}\n`) +
             chalk.dim("  Use 'mcp-status' to see configuration details\n");
    }
    // Write the server entry into the Claude Code config file.
    addMcpServer();
    const lines = [
      chalk.green("✅ Successfully configured cccmemory MCP server!\n\n"),
      chalk.cyan("Configuration added to: ") + chalk.white(`${configPath}\n\n`),
      chalk.bold("🎉 Available MCP Tools:\n"),
      chalk.dim("  • index_conversations - Index conversation history\n"),
      chalk.dim("  • search_conversations - Search past conversations\n"),
      chalk.dim("  • get_decisions - Find design decisions\n"),
      chalk.dim("  • check_before_modify - Check file context before editing\n"),
      chalk.dim("  • get_file_evolution - Track file changes over time\n"),
      chalk.dim("  • and 10 more tools...\n\n"),
      chalk.yellow("💡 Restart Claude Code to load the new MCP server\n"),
      chalk.dim("   Run '/mcp' in Claude Code to list all available tools\n"),
    ];
    return lines.join("");
  } catch (error) {
    // Fall back to printing the JSON snippet the user can paste by hand.
    const snippet = [
      '  Add this to ~/.claude.json under "mcpServers":',
      "  {",
      '    "cccmemory": {',
      '      "type": "stdio",',
      '      "command": "cccmemory",',
      '      "args": []',
      "    }",
      "  }",
    ];
    return chalk.red(`❌ Failed to configure MCP server: ${(error as Error).message}\n\n`) +
           chalk.yellow("Manual configuration:\n") +
           snippet.map((line) => chalk.dim(`${line}\n`)).join("");
  }
}
/**
 * Handle remove-mcp command - Remove MCP server configuration
 *
 * Asks for confirmation before deleting the server entry from
 * ~/.claude.json; a forced prompt close is treated as cancellation.
 */
async function handleRemoveMcp(): Promise<string> {
  const { isMcpConfigured, removeMcpServer } = await import("../utils/McpConfig.js");
  const prompts = (await import("prompts")).default;
  try {
    const { configured, configPath } = isMcpConfigured();
    if (!configured) {
      return chalk.yellow("⚠️  MCP server is not configured\n") +
             chalk.dim("   Nothing to remove\n");
    }
    // Destructive action — require explicit confirmation (default: no).
    const { confirm } = await prompts({
      type: "confirm",
      name: "confirm",
      message: `Remove cccmemory MCP server from ${configPath}?`,
      initial: false,
    });
    if (!confirm) {
      return chalk.yellow("Removal cancelled\n");
    }
    removeMcpServer();
    const lines = [
      chalk.green("✅ Successfully removed cccmemory MCP server\n\n"),
      chalk.cyan("Configuration removed from: ") + chalk.white(`${configPath}\n\n`),
      chalk.yellow("💡 Restart Claude Code to apply changes\n"),
      chalk.dim("   Run 'init-mcp' to reconfigure if needed\n"),
    ];
    return lines.join("");
  } catch (error) {
    if ((error as { message?: string }).message === "User force closed the prompt") {
      return chalk.yellow("\nRemoval cancelled");
    }
    return chalk.red(`❌ Failed to remove MCP server: ${(error as Error).message}\n`);
  }
}
/**
 * Handle mcp-status command - Show MCP server configuration status
 *
 * Renders a status table, then appends a recommendation for the first
 * problem found (missing config file, unconfigured server, missing
 * command) or an all-clear message.
 */
function handleMcpStatus(): string {
  const status = getMcpStatus();
  const yes = chalk.green("✓ Yes");
  const table = new Table({
    head: [chalk.cyan("Status"), chalk.cyan("Value")],
    colWidths: [30, 50],
  });
  table.push(
    ["Claude Config Exists", status.claudeConfigExists ? yes : chalk.red("✗ No")],
    ["MCP Server Configured", status.mcpConfigured ? yes : chalk.yellow("✗ No")],
    ["Command Installed", status.commandExists ? yes : chalk.yellow("✗ No")]
  );
  if (status.commandPath) {
    table.push(["Command Path", chalk.dim(status.commandPath)]);
  }
  if (status.serverConfig) {
    table.push(
      ["Server Type", chalk.dim(status.serverConfig.type)],
      ["Server Command", chalk.dim(status.serverConfig.command)]
    );
  }
  let report = "\n" + table.toString() + "\n";
  // Only the highest-priority issue is reported, mirroring setup order.
  if (!status.claudeConfigExists) {
    report += "\n" + chalk.yellow("⚠️  Claude Code configuration not found at ~/.claude.json\n");
    report += chalk.dim("   Please install Claude Code first: https://claude.ai/download\n");
  } else if (!status.mcpConfigured) {
    report += "\n" + chalk.yellow("⚠️  MCP server is not configured\n");
    report += chalk.dim("   Run 'init-mcp' to configure automatically\n");
  } else if (!status.commandExists) {
    report += "\n" + chalk.yellow("⚠️  Command not found in global npm bin\n");
    report += chalk.dim("   Reinstall: npm install -g cccmemory\n");
  } else {
    report += "\n" + chalk.green("✅ Everything looks good! MCP server is ready to use.\n");
    report += chalk.dim("   Restart Claude Code if you haven't already\n");
  }
  return report;
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/cli/help.ts | TypeScript | /**
* Help text and documentation for CLI
*/
import chalk from "chalk";
import { getSQLiteManager } from "../storage/SQLiteManager.js";
import { readFileSync } from "fs";
import { join, dirname } from "path";
import { fileURLToPath } from "url";
// Recreate CommonJS-style __filename/__dirname in this ES module; used by
// getVersion() below to locate package.json relative to the compiled file.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
 * Get version from package.json
 *
 * Reads the package manifest two directories above the compiled file.
 * Returns "unknown" when the file is missing or cannot be parsed.
 */
function getVersion(): string {
  try {
    const manifestPath = join(__dirname, "..", "..", "package.json");
    const manifest = JSON.parse(readFileSync(manifestPath, "utf-8"));
    return manifest.version;
  } catch (_error) {
    return "unknown";
  }
}
/**
 * Show welcome message
 *
 * Prints a fixed-width banner box with the version, the database path
 * (with $HOME abbreviated to ~), and a usage hint.
 * NOTE(review): the pad widths (55 for the version line, 39 for the
 * path) are tuned to the hard-coded border; a long database path will
 * overflow the right border — confirm this is acceptable.
 */
export function showWelcome() {
  const dbPath = getSQLiteManager().getStats().dbPath;
  // Abbreviate the home directory prefix for display.
  const shortPath = dbPath.replace(process.env.HOME || "", "~");
  const version = getVersion();
  const versionText = `CCCMemory v${version}`.padEnd(55);
  console.log(chalk.cyan("┌─────────────────────────────────────────────────────────┐"));
  console.log(chalk.cyan("│") + `  ${versionText} ` + chalk.cyan("│"));
  console.log(chalk.cyan("│") + `  Database: ${shortPath.padEnd(39)}      ` + chalk.cyan("│"));
  console.log(chalk.cyan("│") + "  Type 'help' for commands or 'exit' to quit             " + chalk.cyan("│"));
  console.log(chalk.cyan("└─────────────────────────────────────────────────────────┘"));
  console.log();
}
/**
 * Show main help screen
 *
 * Builds the category-grouped command listing as one pre-formatted
 * string; the caller prints it. Whitespace inside the template literal
 * below is significant and renders verbatim in the terminal.
 */
export function showHelp(): string {
  const version = getVersion();
  return `
${chalk.bold(`CCCMemory v${version} - Interactive CLI`)}
${chalk.bold("CATEGORIES:")}
  ${chalk.yellow("📥 Indexing:")}
    ${chalk.green("index")}          Index conversation history
    ${chalk.green("reindex")}        Clear and re-index conversations
  ${chalk.yellow("🔍 Search:")}
    ${chalk.green("search")}         Search conversations
    ${chalk.green("decisions")}      Find decisions about a topic
    ${chalk.green("mistakes")}       Search past mistakes
    ${chalk.green("similar")}        Find similar sessions
  ${chalk.yellow("📋 Files:")}
    ${chalk.green("check")}          Check context before modifying file
    ${chalk.green("history")}        Show complete file evolution
  ${chalk.yellow("🔗 Git:")}
    ${chalk.green("commits")}        Link commits to conversations
  ${chalk.yellow("📝 Other:")}
    ${chalk.green("requirements")}   Get requirements for component
    ${chalk.green("tools")}          Query tool usage history
    ${chalk.green("docs")}           Generate documentation
  ${chalk.yellow("ℹ️  Info:")}
    ${chalk.green("status")}         Show database statistics
    ${chalk.green("info")}           Show information
    ${chalk.green("version")}        Show version
  ${chalk.yellow("⚙️  Config:")}
    ${chalk.green("config")}         Get/set configuration
    ${chalk.green("set")}            Set config value
    ${chalk.green("get")}            Get config value
  ${chalk.yellow("🔧 MCP Setup:")}
    ${chalk.green("init-mcp")}       Configure MCP in ~/.claude.json
    ${chalk.green("remove-mcp")}     Remove MCP configuration
    ${chalk.green("mcp-status")}     Check MCP configuration status
  ${chalk.yellow("🧹 Maintenance:")}
    ${chalk.green("clear")}          Clear screen
    ${chalk.green("reset")}          Reset database
    ${chalk.green("vacuum")}         Vacuum database
  ${chalk.yellow("📖 Help:")}
    ${chalk.green("help")}           Show this help or command help
    ${chalk.green("commands")}       List all commands
  ${chalk.yellow("🚪 Exit:")}
    ${chalk.green("exit")}           Exit REPL
${chalk.bold("COMMAND LINE OPTIONS:")}
  ${chalk.green("--version, -v")}    Show version and exit
  ${chalk.green("--server")}         Run as MCP server
Type ${chalk.cyan("'help <command>'")} for detailed command help.
Examples: ${chalk.cyan("help search")}, ${chalk.cyan("help index")}
`;
}
/**
 * Show command-specific help
 *
 * Looks up the detailed help page for one command by its canonical name
 * (aliases are not keys here) and returns it; falls back to a yellow
 * "no help available" message for unknown commands.
 */
export function showCommandHelp(command: string): string {
  // Per-command help pages. Whitespace inside the template literals is
  // significant and renders verbatim in the terminal.
  const helps: Record<string, string> = {
    index: `
${chalk.bold("COMMAND:")} index
${chalk.bold("USAGE:")}
  index [options]
${chalk.bold("DESCRIPTION:")}
  Index conversation history for current project.
  Parses .jsonl conversation files, extracts decisions and mistakes,
  links git commits, and generates embeddings for semantic search.
${chalk.bold("OPTIONS:")}
  --project <path>    Project path (default: current directory)
  --session <id>      Index specific session only (use list_recent_sessions.session_id)
  --exclude-mcp       Exclude MCP conversations
  --include-mcp       Include all MCP conversations
  --no-git            Disable git integration
  --thinking          Include thinking blocks
${chalk.bold("EXAMPLES:")}
  index
  index --exclude-mcp
  index --project /path/to/project
  index --session a1172af3-ca62-41be-9b90-701cef39daae
${chalk.bold("ALIASES:")}
  None
${chalk.bold("SEE ALSO:")}
  reindex, status
`,
    search: `
${chalk.bold("COMMAND:")} search
${chalk.bold("USAGE:")}
  search <query> [options]
${chalk.bold("DESCRIPTION:")}
  Search conversation history using natural language queries.
  Returns relevant messages with context including timestamps,
  git branches, and similarity scores.
${chalk.bold("OPTIONS:")}
  --limit <n>      Maximum number of results (default: 10)
  --after <date>   Filter messages after this date
  --before <date>  Filter messages before this date
${chalk.bold("EXAMPLES:")}
  search authentication system
  search "database migration" --limit 5
  search error --after 2025-01-01
${chalk.bold("ALIASES:")}
  find
${chalk.bold("SEE ALSO:")}
  decisions, mistakes, similar
`,
    status: `
${chalk.bold("COMMAND:")} status
${chalk.bold("USAGE:")}
  status
${chalk.bold("DESCRIPTION:")}
  Show database statistics including:
  - Number of indexed conversations
  - Total messages
  - Decisions and mistakes tracked
  - Git commits linked
  - Embeddings count and semantic search status
${chalk.bold("EXAMPLES:")}
  status
${chalk.bold("ALIASES:")}
  stats
${chalk.bold("SEE ALSO:")}
  info, version
`,
    decisions: `
${chalk.bold("COMMAND:")} decisions
${chalk.bold("USAGE:")}
  decisions <topic> [options]
${chalk.bold("DESCRIPTION:")}
  Find decisions made about a specific topic, file, or component.
  Shows rationale, alternatives considered, and rejected approaches.
${chalk.bold("OPTIONS:")}
  --file <path>  Filter by file path
  --limit <n>    Maximum results (default: 10)
${chalk.bold("EXAMPLES:")}
  decisions authentication
  decisions "API design"
  decisions database --file src/db.ts
${chalk.bold("ALIASES:")}
  why
${chalk.bold("SEE ALSO:")}
  search, mistakes
`,
    mistakes: `
${chalk.bold("COMMAND:")} mistakes
${chalk.bold("USAGE:")}
  mistakes <query> [options]
${chalk.bold("DESCRIPTION:")}
  Search past mistakes to avoid repeating them.
  Shows what went wrong and how it was corrected.
${chalk.bold("OPTIONS:")}
  --type <type>  Filter by type:
                 - logic_error
                 - wrong_approach
                 - misunderstanding
                 - tool_error
                 - syntax_error
  --limit <n>    Maximum results (default: 10)
${chalk.bold("EXAMPLES:")}
  mistakes async/await
  mistakes "type error" --type logic_error
${chalk.bold("ALIASES:")}
  errors
${chalk.bold("SEE ALSO:")}
  decisions, search
`,
    check: `
${chalk.bold("COMMAND:")} check
${chalk.bold("USAGE:")}
  check <file>
${chalk.bold("DESCRIPTION:")}
  Show important context before modifying a file.
  Shows recent changes, related decisions, commits, and past mistakes.
${chalk.bold("EXAMPLES:")}
  check src/auth.ts
  check database.ts
${chalk.bold("ALIASES:")}
  None
${chalk.bold("SEE ALSO:")}
  history, decisions
`,
    exit: `
${chalk.bold("COMMAND:")} exit
${chalk.bold("USAGE:")}
  exit
${chalk.bold("DESCRIPTION:")}
  Exit the REPL and return to shell.
  You can also use Ctrl+D to exit.
${chalk.bold("ALIASES:")}
  quit, q
${chalk.bold("SEE ALSO:")}
  None
`,
    "init-mcp": `
${chalk.bold("COMMAND:")} init-mcp
${chalk.bold("USAGE:")}
  init-mcp
${chalk.bold("DESCRIPTION:")}
  Automatically configure the cccmemory MCP server in
  Claude Code's global configuration (~/.claude.json).
  This command:
  - Checks if Claude Code is installed
  - Creates a backup of your configuration
  - Adds the MCP server to the mcpServers section
  - Provides helpful status messages
${chalk.bold("EXAMPLES:")}
  init-mcp
${chalk.bold("ALIASES:")}
  None
${chalk.bold("SEE ALSO:")}
  remove-mcp, mcp-status
`,
    "remove-mcp": `
${chalk.bold("COMMAND:")} remove-mcp
${chalk.bold("USAGE:")}
  remove-mcp
${chalk.bold("DESCRIPTION:")}
  Remove the cccmemory MCP server from Claude Code's
  global configuration (~/.claude.json).
  This command:
  - Confirms before removal
  - Creates a backup of your configuration
  - Removes the MCP server entry
  - Provides instructions for next steps
${chalk.bold("EXAMPLES:")}
  remove-mcp
${chalk.bold("ALIASES:")}
  None
${chalk.bold("SEE ALSO:")}
  init-mcp, mcp-status
`,
    "mcp-status": `
${chalk.bold("COMMAND:")} mcp-status
${chalk.bold("USAGE:")}
  mcp-status
${chalk.bold("DESCRIPTION:")}
  Display comprehensive status information about the MCP server
  configuration and installation.
  Shows:
  - Whether ~/.claude.json exists
  - Whether MCP server is configured
  - Whether command is installed globally
  - Current configuration details
  - Helpful recommendations if issues found
${chalk.bold("EXAMPLES:")}
  mcp-status
${chalk.bold("ALIASES:")}
  None
${chalk.bold("SEE ALSO:")}
  init-mcp, remove-mcp
`,
  };
  return helps[command] || chalk.yellow(`No help available for command: ${command}\n\nUse 'help' to see all available commands.`);
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/cli/index.ts | TypeScript | /**
* CCCMemory - Interactive CLI/REPL
* Main entry point for interactive mode
*/
import * as readline from "node:readline";
import chalk from "chalk";
import { ConversationMemory } from "../ConversationMemory.js";
import { ToolHandlers } from "../tools/ToolHandlers.js";
import { getSQLiteManager } from "../storage/SQLiteManager.js";
import { executeCommand } from "./commands.js";
import { showWelcome } from "./help.js";
/**
 * Interactive REPL for CCCMemory
 *
 * Wraps a readline interface around the shared command executor. Can
 * also run a single command non-interactively via runSingleCommand().
 */
export class ConversationMemoryCLI {
  private repl: readline.Interface | null = null;
  private memory: ConversationMemory;
  private handlers: ToolHandlers;
  constructor() {
    this.memory = new ConversationMemory();
    this.handlers = new ToolHandlers(this.memory, getSQLiteManager());
  }
  /**
   * Wire up the readline interface and its event handlers.
   * Only used in interactive (REPL) mode.
   */
  private initReadline() {
    const repl = readline.createInterface({
      input: process.stdin,
      output: process.stdout,
      prompt: chalk.cyan("ccm> "),
    });
    this.repl = repl;
    repl.on("line", async (line: string) => {
      const command = line.trim();
      if (command) {
        await this.dispatch(command);
      }
      this.repl?.prompt();
    });
    repl.on("close", () => {
      console.log(chalk.green("\nGoodbye!"));
      process.exit(0);
    });
    // Handle Ctrl+C gracefully instead of killing the process.
    repl.on("SIGINT", () => {
      console.log(chalk.yellow("\nUse 'exit' or Ctrl+D to quit"));
      this.repl?.prompt();
    });
  }
  /**
   * Execute one command line and render its result.
   * Recognizes the "exit" and "clear" sentinel results.
   */
  private async dispatch(input: string) {
    try {
      const result = await executeCommand(input, this.handlers);
      if (result === "exit") {
        this.repl?.close();
      } else if (result === "clear") {
        console.clear();
        showWelcome();
      } else if (result !== null) {
        console.log(result);
      }
    } catch (error: unknown) {
      const err = error as Error;
      console.error(chalk.red(`Error: ${err.message}`));
    }
  }
  /**
   * Start the REPL
   */
  async start() {
    this.initReadline();
    showWelcome();
    this.repl?.prompt();
  }
  /**
   * Execute a single command and exit
   */
  async runSingleCommand(command: string) {
    try {
      const result = await executeCommand(command, this.handlers);
      const isSentinel = result === "exit" || result === "clear";
      if (result !== null && !isSentinel) {
        console.log(result);
      }
      process.exit(0);
    } catch (error: unknown) {
      const err = error as Error;
      console.error(chalk.red(`Error: ${err.message}`));
      process.exit(1);
    }
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/context/ContextInjector.ts | TypeScript | /**
* Context Injector
*
* Intelligently selects and formats context for injection into new conversations.
* Combines working memory, handoffs, decisions, and file history.
*/
import type { Database } from "better-sqlite3";
import type {
InjectedContext,
ContextInjectionOptions,
SessionHandoff,
HandoffDecision,
ActiveFile,
WorkingMemoryItem,
} from "../memory/types.js";
import { WorkingMemoryStore } from "../memory/WorkingMemoryStore.js";
import { SessionHandoffStore } from "../handoff/SessionHandoffStore.js";
/**
 * Priority levels for context items
 */
type Priority = "critical" | "high" | "medium" | "low";
/**
 * Weighted context item for sorting
 *
 * Internal candidate record used while ranking and trimming to the token
 * budget; not part of the returned InjectedContext.
 */
interface WeightedItem {
  // Source category; also drives the summary counts.
  type: "handoff" | "decision" | "memory" | "file";
  content: string;
  priority: Priority;
  // Recency tie-breaker when priorities are equal.
  timestamp: number;
  // Approximate injection cost in tokens (see estimateTokens).
  tokenEstimate: number;
}
export class ContextInjector {
  private memoryStore: WorkingMemoryStore;
  private handoffStore: SessionHandoffStore;
  constructor(db: Database) {
    this.memoryStore = new WorkingMemoryStore(db);
    this.handoffStore = new SessionHandoffStore(db);
  }
  /**
   * Get relevant context for a new conversation
   *
   * Collects candidates from handoffs, working memory, handoff decisions,
   * and recent file activity, ranks them by priority/recency, and trims
   * the ranked set to maxTokens. Declared async for interface stability;
   * the current implementation performs no awaits.
   *
   * NOTE(review): the returned `decisions`, `memory` and `recentFiles`
   * arrays are the pre-trim candidate lists — only `summary` and
   * `tokenEstimate` reflect the budget selection. Confirm callers expect
   * this.
   */
  async getRelevantContext(options: ContextInjectionOptions): Promise<InjectedContext> {
    const {
      query,
      projectPath,
      maxTokens = 2000,
      sources = ["history", "decisions", "memory", "handoffs"],
    } = options;
    const items: WeightedItem[] = [];
    // 1. Get most recent handoff
    let handoff: SessionHandoff | undefined;
    if (sources.includes("handoffs")) {
      const handoffs = this.handoffStore.listHandoffs(projectPath, {
        limit: 1,
        includeResumed: false,
      });
      if (handoffs.length > 0) {
        handoff = this.handoffStore.getHandoff(handoffs[0].id) || undefined;
        if (handoff) {
          items.push({
            type: "handoff",
            content: `Previous session context: ${handoff.contextSummary}`,
            priority: "critical",
            timestamp: handoff.createdAt,
            // +20 accounts for the "Previous session context:" wrapper text.
            tokenEstimate: this.estimateTokens(handoff.contextSummary) + 20,
          });
        }
      }
    }
    // 2. Get relevant working memory items
    const memory: WorkingMemoryItem[] = [];
    if (sources.includes("memory")) {
      if (query) {
        // Semantic search for relevant items
        const relevant = this.memoryStore.recallRelevant({
          query,
          projectPath,
          limit: 10,
        });
        memory.push(...relevant);
      } else {
        // Get recent items
        const recent = this.memoryStore.list(projectPath, { limit: 10 });
        memory.push(...recent);
      }
      for (const item of memory) {
        items.push({
          type: "memory",
          content: `${item.key}: ${item.value}`,
          priority: this.getMemoryPriority(item),
          timestamp: item.updatedAt,
          tokenEstimate: this.estimateTokens(`${item.key}: ${item.value}`),
        });
      }
    }
    // 3. Get decisions from handoff
    const decisions: HandoffDecision[] = [];
    if (sources.includes("decisions") && handoff) {
      decisions.push(...handoff.decisions.slice(0, 10));
      for (const decision of decisions) {
        items.push({
          type: "decision",
          content: decision.text,
          priority: "high",
          timestamp: decision.timestamp,
          tokenEstimate: this.estimateTokens(decision.text),
        });
      }
    }
    // 4. Get recent files from handoff
    const recentFiles: ActiveFile[] = [];
    if (sources.includes("history") && handoff) {
      recentFiles.push(...handoff.activeFiles.slice(0, 10));
      for (const file of recentFiles) {
        items.push({
          type: "file",
          content: `${file.lastAction}: ${file.path}`,
          priority: "medium",
          timestamp: file.timestamp,
          tokenEstimate: this.estimateTokens(`${file.lastAction}: ${file.path}`),
        });
      }
    }
    // 5. Select items within token budget
    const selectedItems = this.selectWithinBudget(items, maxTokens);
    // 6. Generate summary
    const summary = this.generateSummary(selectedItems, projectPath);
    // 7. Calculate total token estimate
    const tokenEstimate = selectedItems.reduce((sum, item) => sum + item.tokenEstimate, 0);
    return {
      handoff,
      decisions,
      memory,
      recentFiles,
      summary,
      tokenEstimate,
    };
  }
  /**
   * Format context for direct injection into conversation
   *
   * Produces a markdown snippet with up to five entries per section;
   * empty sections are omitted entirely.
   */
  formatForInjection(context: InjectedContext): string {
    const parts: string[] = [];
    // Add handoff summary if available
    if (context.handoff) {
      parts.push("## Previous Session Context");
      parts.push(context.handoff.contextSummary);
      parts.push("");
    }
    // Add key decisions
    if (context.decisions.length > 0) {
      parts.push("## Recent Decisions");
      for (const decision of context.decisions.slice(0, 5)) {
        parts.push(`- ${decision.text}`);
      }
      parts.push("");
    }
    // Add working memory items
    if (context.memory.length > 0) {
      parts.push("## Remembered Context");
      for (const item of context.memory.slice(0, 5)) {
        parts.push(`- **${item.key}**: ${item.value}`);
      }
      parts.push("");
    }
    // Add recent file activity
    if (context.recentFiles.length > 0) {
      parts.push("## Recent Files");
      for (const file of context.recentFiles.slice(0, 5)) {
        parts.push(`- [${file.lastAction}] ${file.path}`);
      }
      parts.push("");
    }
    return parts.join("\n");
  }
  /**
   * Get priority for a memory item based on tags and recency
   *
   * First matching tag group wins; untagged items default to "low".
   */
  private getMemoryPriority(item: WorkingMemoryItem): Priority {
    const tags = item.tags;
    if (tags.includes("critical") || tags.includes("important")) {
      return "critical";
    }
    if (tags.includes("decision") || tags.includes("error")) {
      return "high";
    }
    if (tags.includes("task") || tags.includes("file")) {
      return "medium";
    }
    return "low";
  }
  /**
   * Estimate token count for a string (rough approximation)
   */
  private estimateTokens(text: string): number {
    // Rough estimate: ~4 chars per token for English
    return Math.ceil(text.length / 4);
  }
  /**
   * Select items within token budget, prioritizing by importance
   *
   * Greedy selection over the priority/recency ordering; stops at the
   * first item that does not fit, so later (lower-ranked) smaller items
   * are not considered either.
   */
  private selectWithinBudget(
    items: WeightedItem[],
    maxTokens: number
  ): WeightedItem[] {
    // Sort by priority (critical first) then by recency
    const priorityOrder: Record<Priority, number> = {
      critical: 0,
      high: 1,
      medium: 2,
      low: 3,
    };
    const sorted = [...items].sort((a, b) => {
      const priorityDiff = priorityOrder[a.priority] - priorityOrder[b.priority];
      if (priorityDiff !== 0) {
        return priorityDiff;
      }
      // More recent first
      return b.timestamp - a.timestamp;
    });
    // Select within budget
    const selected: WeightedItem[] = [];
    let usedTokens = 0;
    for (const item of sorted) {
      if (usedTokens + item.tokenEstimate <= maxTokens) {
        selected.push(item);
        usedTokens += item.tokenEstimate;
      } else {
        // Stop if we can't fit any more
        break;
      }
    }
    return selected;
  }
  /**
   * Generate a summary of the injected context
   *
   * One comma-separated line counting the selected items per source type.
   */
  private generateSummary(items: WeightedItem[], projectPath: string): string {
    const counts = {
      handoff: 0,
      decision: 0,
      memory: 0,
      file: 0,
    };
    for (const item of items) {
      counts[item.type]++;
    }
    const parts: string[] = [`Context for ${projectPath}:`];
    if (counts.handoff > 0) {
      parts.push("Previous session available");
    }
    if (counts.decision > 0) {
      parts.push(`${counts.decision} decision(s)`);
    }
    if (counts.memory > 0) {
      parts.push(`${counts.memory} memory item(s)`);
    }
    if (counts.file > 0) {
      parts.push(`${counts.file} file(s)`);
    }
    return parts.join(", ");
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/documentation/CodeAnalyzer.ts | TypeScript | /**
* CodeAnalyzer - Lightweight codebase analyzer for documentation.
*
* Scans the local filesystem to build a basic code index without
* relying on external MCP services.
*/
import { readdirSync, statSync } from 'fs';
import { join, relative } from 'path';
import type {
CodeData,
CodeEntity,
FileInfo,
Hotspot,
CodeClone,
} from './types.js';
export class CodeAnalyzer {
  /**
   * Scan a project directory and return lightweight code metadata.
   *
   * Only file paths and sizes are populated; entity/relationship/hotspot/
   * clone analysis is intentionally left empty in this lightweight pass.
   */
  async analyze(projectPath: string, moduleFilter?: string): Promise<CodeData> {
    console.error('🔍 Scanning codebase structure...');
    const discovered = this.collectFiles(projectPath, moduleFilter);
    const fileInfos: FileInfo[] = discovered.map(({ relativePath, size }) => ({
      path: relativePath,
      size,
      entities: [],
    }));
    console.error(`  Found ${fileInfos.length} files`);
    return {
      entities: [],
      relationships: [],
      files: fileInfos,
      hotspots: [],
      clones: [],
    };
  }
  /**
   * Recursively walk the project tree, skipping well-known build/vendor
   * directories and keeping only source-like file extensions. Unreadable
   * directories and files are silently skipped.
   */
  private collectFiles(projectPath: string, moduleFilter?: string): Array<{ relativePath: string; size: number }> {
    const ignoreDirs = new Set([
      'node_modules',
      '.git',
      '.turbo',
      '.next',
      'dist',
      'build',
      'coverage',
      '.cache',
      '.cccmemory',
    ]);
    const includeExts = new Set([
      '.ts', '.tsx', '.js', '.jsx', '.mjs', '.cjs',
      '.py', '.go', '.rs', '.java', '.kt', '.swift',
      '.c', '.cpp', '.h', '.hpp', '.cs', '.json', '.md'
    ]);
    const found: Array<{ relativePath: string; size: number }> = [];
    const visit = (dir: string): void => {
      let entries: string[];
      try {
        entries = readdirSync(dir);
      } catch (_error) {
        return;
      }
      for (const entry of entries) {
        const fullPath = join(dir, entry);
        let stats;
        try {
          stats = statSync(fullPath);
        } catch (_error) {
          continue;
        }
        if (stats.isDirectory()) {
          if (!ignoreDirs.has(entry)) {
            visit(fullPath);
          }
          continue;
        }
        // Normalize Windows separators so paths are stable across OSes.
        const relativePath = relative(projectPath, fullPath).replace(/\\/g, '/');
        if (moduleFilter && !relativePath.includes(moduleFilter)) {
          continue;
        }
        const dotIndex = entry.lastIndexOf('.');
        const ext = dotIndex >= 0 ? entry.slice(dotIndex) : '';
        if (includeExts.has(ext)) {
          found.push({ relativePath, size: stats.size });
        }
      }
    };
    visit(projectPath);
    return found;
  }
  /**
   * Extract file paths from code data for cross-referencing.
   * Deduplicates while preserving first-seen order.
   */
  extractFilePaths(codeData: CodeData): string[] {
    const paths = new Set<string>();
    codeData.entities.forEach((entity) => {
      if (entity.filePath) {
        paths.add(entity.filePath);
      }
    });
    codeData.files.forEach((file) => paths.add(file.path));
    codeData.hotspots.forEach((hotspot) => paths.add(hotspot.filePath));
    codeData.clones.forEach((clone) => {
      for (const f of clone.files) {
        paths.add(f);
      }
    });
    return [...paths];
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/documentation/ConversationAnalyzer.ts | TypeScript | /**
* ConversationAnalyzer - Queries conversation memory database
*/
import type { SQLiteManager } from '../storage/SQLiteManager.js';
import type { ConversationData, Decision, Mistake, Requirement, FileEdit, GitCommit } from './types.js';
import type { DecisionRow, MistakeRow, RequirementRow, GitCommitRow } from '../types/ToolTypes.js';
import { safeJsonParse } from '../utils/safeJson.js';
export class ConversationAnalyzer {
  constructor(private db: SQLiteManager) {}

  /**
   * Collect decisions, mistakes, requirements, file edits and git commits
   * recorded for a project's conversations.
   *
   * @param projectPath - Project whose history is analyzed.
   * @param sessionId - Optional filter restricting results to one conversation.
   *   NOTE(review): this value is compared against the internal `c.id`
   *   column, while the rest of the code exposes `c.external_id` to
   *   callers — confirm which id callers actually pass.
   */
  async analyze(projectPath: string, sessionId?: string): Promise<ConversationData> {
    console.error('📊 Analyzing conversation history...');

    const decisions = this.getDecisions(projectPath, sessionId);
    const mistakes = this.getMistakes(projectPath, sessionId);
    const requirements = this.getRequirements(projectPath, sessionId);
    const fileEdits = this.getFileEdits(projectPath, sessionId);
    const commits = this.getGitCommits(projectPath, sessionId);

    console.error(`   Found ${decisions.length} decisions, ${mistakes.length} mistakes`);

    return {
      decisions,
      mistakes,
      requirements,
      fileEdits,
      commits
    };
  }

  /** Load decisions (newest first), mapping internal ids to external ids. */
  private getDecisions(projectPath: string, sessionId?: string): Decision[] {
    let sql = `
      SELECT
        d.external_id as decision_external_id,
        d.decision_text,
        d.rationale,
        d.alternatives_considered,
        d.rejected_reasons,
        d.context,
        d.related_files,
        d.related_commits,
        d.timestamp,
        c.external_id as conversation_external_id,
        m.external_id as message_external_id
      FROM decisions d
      JOIN conversations c ON d.conversation_id = c.id
      LEFT JOIN messages m ON d.message_id = m.id
      WHERE c.project_path = ?
    `;
    if (sessionId) {
      sql += ' AND c.id = ?';
    }
    sql += ' ORDER BY d.timestamp DESC';

    const stmt = this.db.getDatabase().prepare(sql);
    const rows = sessionId
      ? stmt.all(projectPath, sessionId) as Array<DecisionRow & { conversation_external_id: string; message_external_id: string | null; decision_external_id: string }>
      : stmt.all(projectPath) as Array<DecisionRow & { conversation_external_id: string; message_external_id: string | null; decision_external_id: string }>;

    const results: Decision[] = [];
    for (const row of rows) {
      // A decision whose message row is missing cannot be linked back to a
      // message, so it is skipped entirely.
      if (!row.message_external_id) {
        continue;
      }
      results.push({
        id: row.decision_external_id,
        conversation_id: row.conversation_external_id,
        message_id: row.message_external_id,
        decision_text: row.decision_text,
        rationale: row.rationale || '',
        alternatives_considered: safeJsonParse<string[]>(row.alternatives_considered, []),
        rejected_reasons: safeJsonParse<Record<string, string>>(row.rejected_reasons, {}),
        context: row.context,
        related_files: safeJsonParse<string[]>(row.related_files, []),
        related_commits: safeJsonParse<string[]>(row.related_commits, []),
        timestamp: row.timestamp
      });
    }
    return results;
  }

  /** Load mistakes (newest first). Fields absent in the schema default to ''. */
  private getMistakes(projectPath: string, sessionId?: string): Mistake[] {
    let sql = `
      SELECT
        m.external_id as mistake_external_id,
        m.mistake_type,
        m.what_went_wrong,
        m.correction,
        m.user_correction_message,
        m.files_affected,
        m.timestamp,
        c.external_id as conversation_external_id
      FROM mistakes m
      JOIN conversations c ON m.conversation_id = c.id
      WHERE c.project_path = ?
    `;
    if (sessionId) {
      sql += ' AND c.id = ?';
    }
    sql += ' ORDER BY m.timestamp DESC';

    const stmt = this.db.getDatabase().prepare(sql);
    const rows = sessionId
      ? stmt.all(projectPath, sessionId) as Array<MistakeRow & { conversation_external_id: string; mistake_external_id: string }>
      : stmt.all(projectPath) as Array<MistakeRow & { conversation_external_id: string; mistake_external_id: string }>;

    return rows.map((row) => ({
      id: row.mistake_external_id,
      conversation_id: row.conversation_external_id,
      what_went_wrong: row.what_went_wrong,
      why_it_happened: '',
      how_it_was_fixed: row.correction || '',
      lesson_learned: row.user_correction_message || '',
      related_files: safeJsonParse<string[]>(row.files_affected, []),
      // mistake_type is reused as severity; 'general' when untyped.
      severity: row.mistake_type || 'general',
      timestamp: row.timestamp
    }));
  }

  /** Load requirements (newest first). */
  private getRequirements(projectPath: string, sessionId?: string): Requirement[] {
    let sql = `
      SELECT
        r.external_id as requirement_external_id,
        r.type,
        r.description,
        r.rationale,
        r.affects_components,
        r.timestamp
      FROM requirements r
      JOIN conversations c ON r.conversation_id = c.id
      WHERE c.project_path = ?
    `;
    if (sessionId) {
      sql += ' AND c.id = ?';
    }
    sql += ' ORDER BY r.timestamp DESC';

    const stmt = this.db.getDatabase().prepare(sql);
    const rows = sessionId
      ? stmt.all(projectPath, sessionId) as Array<RequirementRow & { requirement_external_id: string }>
      : stmt.all(projectPath) as Array<RequirementRow & { requirement_external_id: string }>;

    return rows.map((row) => ({
      id: row.requirement_external_id,
      requirement_type: row.type,
      description: row.description,
      rationale: row.rationale || '',
      related_files: safeJsonParse<string[]>(row.affects_components, []),
      timestamp: row.timestamp
    }));
  }

  /** Load the 1000 most recent file-edit snapshots. */
  private getFileEdits(projectPath: string, sessionId?: string): FileEdit[] {
    let sql = `
      SELECT
        fe.external_id as edit_external_id,
        fe.file_path,
        fe.snapshot_timestamp,
        c.external_id as conversation_external_id
      FROM file_edits fe
      JOIN conversations c ON fe.conversation_id = c.id
      WHERE c.project_path = ?
    `;
    if (sessionId) {
      sql += ' AND c.id = ?';
    }
    sql += ' ORDER BY fe.snapshot_timestamp DESC LIMIT 1000';

    const stmt = this.db.getDatabase().prepare(sql);
    const rows = sessionId
      ? stmt.all(projectPath, sessionId) as Array<Record<string, unknown>>
      : stmt.all(projectPath) as Array<Record<string, unknown>>;

    return rows.map((row) => ({
      id: row.edit_external_id as string,
      conversation_id: row.conversation_external_id as string,
      file_path: row.file_path as string,
      edit_type: 'backup', // All file_edits are backups based on schema
      timestamp: row.snapshot_timestamp as number
    }));
  }

  /** Load the 500 most recent git commits linked to the project's conversations. */
  private getGitCommits(projectPath: string, sessionId?: string): GitCommit[] {
    let sql = `
      SELECT
        gc.hash,
        gc.message,
        gc.author,
        gc.timestamp,
        gc.files_changed,
        c.external_id as conversation_external_id
      FROM git_commits gc
      JOIN conversations c ON gc.conversation_id = c.id
      WHERE c.project_path = ?
    `;
    // Fix: this was a LEFT JOIN, but `WHERE c.project_path = ?` discards the
    // NULL rows a LEFT JOIN would keep, so it was effectively an INNER JOIN.
    // Written as JOIN to make the actual semantics explicit: commits without
    // a linked conversation are (and were) excluded.
    if (sessionId) {
      sql += ' AND c.id = ?';
    }
    sql += ' ORDER BY gc.timestamp DESC LIMIT 500';

    const stmt = this.db.getDatabase().prepare(sql);
    const rows = sessionId
      ? stmt.all(projectPath, sessionId) as Array<GitCommitRow & { conversation_external_id: string | null }>
      : stmt.all(projectPath) as Array<GitCommitRow & { conversation_external_id: string | null }>;

    return rows.map((row) => ({
      hash: row.hash,
      conversation_id: row.conversation_external_id || '',
      message: row.message,
      author: row.author || 'Unknown',
      timestamp: row.timestamp,
      files_changed: safeJsonParse<string[]>(row.files_changed, [])
    }));
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/documentation/CrossReferencer.ts | TypeScript | /**
* CrossReferencer - Links code entities with conversation data
*/
import { basename, dirname } from 'path';
import type {
CodeData,
ConversationData,
LinkedData,
LinkedModule,
TimelineEvent,
QualityReport,
HotspotWithContext,
MistakeSummary,
DocumentationStatistics,
CodeEntity,
Hotspot,
Decision,
Mistake,
Requirement,
FileEdit
} from './types.js';
export class CrossReferencer {
  /**
   * Link code and conversation data into module summaries, a timeline,
   * a quality report and aggregate statistics.
   */
  async link(codeData: CodeData, conversationData: ConversationData): Promise<LinkedData> {
    console.error('🔗 Cross-referencing code and conversations...');

    const modules = this.groupIntoModules(codeData, conversationData);
    const timeline = this.buildTimeline(conversationData);
    const qualityReport = this.buildQualityReport(codeData, conversationData);
    const statistics = this.calculateStatistics(codeData, conversationData);

    console.error(`   Created ${modules.length} module summaries`);

    return {
      modules,
      timeline,
      qualityReport,
      statistics
    };
  }

  /**
   * Group files into logical modules (by directory) and attach the
   * decisions, mistakes and requirements whose related files match.
   * Modules are sorted by how much conversation history they carry.
   */
  private groupIntoModules(codeData: CodeData, conversationData: ConversationData): LinkedModule[] {
    // Group files by directory
    const moduleMap = new Map<string, LinkedModule>();
    for (const file of codeData.files) {
      const modulePath = this.extractModulePath(file.path);
      const moduleName = this.pathToModuleName(modulePath);
      if (!moduleMap.has(modulePath)) {
        moduleMap.set(modulePath, {
          path: modulePath,
          name: moduleName,
          entities: [],
          decisions: [],
          mistakes: [],
          requirements: [],
          complexity: 0,
          changeFrequency: 0,
          description: undefined
        });
      }
      const module = moduleMap.get(modulePath);
      if (module) {
        module.entities.push(...file.entities);
      }
    }

    // Link decisions, mistakes, and requirements to modules
    for (const module of moduleMap.values()) {
      module.decisions = this.findRelatedDecisions(module.path, conversationData.decisions);
      module.mistakes = this.findRelatedMistakes(module.path, conversationData.mistakes);
      module.requirements = this.findRelatedRequirements(module.path, conversationData.requirements);
      module.complexity = this.calculateModuleComplexity(module.entities, codeData.hotspots);
      module.changeFrequency = this.calculateChangeFrequency(module.path, conversationData.fileEdits);
      module.description = this.extractModuleDescription(module.decisions, module.requirements);
    }

    // Most-discussed modules (decisions + mistakes) first.
    return Array.from(moduleMap.values())
      .sort((a, b) => b.decisions.length + b.mistakes.length - (a.decisions.length + a.mistakes.length));
  }

  /**
   * Extract module path from file path (e.g., src/auth/token.ts → src/auth).
   * Top-level files are keyed by base name without extension.
   * Fix: previously only '.ts' was stripped, so e.g. a top-level README.md
   * became the bogus module path "README/md".
   */
  private extractModulePath(filePath: string): string {
    const dir = dirname(filePath);
    // If it's a top-level file, use the filename without extension
    if (dir === '.' || dir === 'src') {
      const base = basename(filePath);
      const dot = base.lastIndexOf('.');
      const stem = dot > 0 ? base.slice(0, dot) : base;
      // Multi-dot names (file.test.ts → file/test) keep the dot→slash mapping.
      return stem.replace(/\./g, '/');
    }
    return dir;
  }

  /**
   * Convert a path to a human-readable module name
   * (snake_case / kebab-case → Title Case of the last path segment).
   */
  private pathToModuleName(path: string): string {
    const parts = path.split('/');
    const lastPart = parts[parts.length - 1];
    return lastPart
      .split(/[-_]/)
      .map(word => word.charAt(0).toUpperCase() + word.slice(1))
      .join(' ');
  }

  /** Decisions whose related files fall under (or loosely match) the module. */
  private findRelatedDecisions(modulePath: string, decisions: Decision[]): Decision[] {
    return decisions.filter(decision =>
      decision.related_files.some((file: string) =>
        file.startsWith(modulePath) || this.isRelatedPath(file, modulePath)
      )
    );
  }

  /** Mistakes whose related files fall under (or loosely match) the module. */
  private findRelatedMistakes(modulePath: string, mistakes: Mistake[]): Mistake[] {
    return mistakes.filter(mistake =>
      mistake.related_files.some((file: string) =>
        file.startsWith(modulePath) || this.isRelatedPath(file, modulePath)
      )
    );
  }

  /** Requirements whose related files fall under (or loosely match) the module. */
  private findRelatedRequirements(modulePath: string, requirements: Requirement[]): Requirement[] {
    return requirements.filter(req =>
      req.related_files.some((file: string) =>
        file.startsWith(modulePath) || this.isRelatedPath(file, modulePath)
      )
    );
  }

  /**
   * Deliberately loose, symmetric substring match so partial paths recorded
   * in conversations still link up with module directories.
   */
  private isRelatedPath(filePath: string, modulePath: string): boolean {
    return filePath.includes(modulePath) || modulePath.includes(filePath);
  }

  /**
   * Average the entity complexities plus the complexity of hotspots that
   * live in the module's files, clamped to a 0–10 scale.
   */
  private calculateModuleComplexity(entities: CodeEntity[], hotspots: Hotspot[]): number {
    if (entities.length === 0) {
      return 0;
    }
    const entityComplexity = entities
      .filter(e => e.complexity)
      .reduce((sum, e) => sum + (e.complexity || 0), 0);
    const hotspotComplexity = hotspots
      .filter(h => entities.some(e => e.filePath === h.filePath))
      .reduce((sum, h) => sum + h.complexity, 0);
    // entities.length is known non-zero here (guarded above).
    const avgComplexity = (entityComplexity + hotspotComplexity) / entities.length;
    return Math.min(10, Math.round(avgComplexity));
  }

  /** Number of recorded edits touching files under the module path. */
  private calculateChangeFrequency(modulePath: string, fileEdits: FileEdit[]): number {
    return fileEdits.filter(edit => edit.file_path.startsWith(modulePath)).length;
  }

  /**
   * Pick a description for a module: first decision's context, else its
   * rationale, else the first requirement's description.
   */
  private extractModuleDescription(decisions: Decision[], requirements: Requirement[]): string | undefined {
    if (decisions.length > 0 && decisions[0].context) {
      return decisions[0].context;
    }
    if (decisions.length > 0 && decisions[0].rationale) {
      return decisions[0].rationale;
    }
    if (requirements.length > 0) {
      return requirements[0].description;
    }
    return undefined;
  }

  /** Build a chronological (newest-first) timeline of decisions, mistakes and commits. */
  private buildTimeline(conversationData: ConversationData): TimelineEvent[] {
    const events: TimelineEvent[] = [];

    for (const decision of conversationData.decisions) {
      events.push({
        timestamp: decision.timestamp,
        type: 'decision',
        description: decision.decision_text,
        files: decision.related_files,
        details: decision
      });
    }
    for (const mistake of conversationData.mistakes) {
      events.push({
        timestamp: mistake.timestamp,
        type: 'mistake',
        description: mistake.what_went_wrong,
        files: mistake.related_files,
        details: mistake
      });
    }
    for (const commit of conversationData.commits) {
      events.push({
        timestamp: commit.timestamp,
        type: 'commit',
        description: commit.message,
        files: commit.files_changed,
        details: commit
      });
    }

    // Sort by timestamp (newest first)
    return events.sort((a, b) => b.timestamp - a.timestamp);
  }

  /** Attach the mistakes/decisions that reference each hotspot's file. */
  private buildQualityReport(codeData: CodeData, conversationData: ConversationData): QualityReport {
    const hotspotsWithContext: HotspotWithContext[] = codeData.hotspots.map(hotspot => ({
      ...hotspot,
      relatedMistakes: conversationData.mistakes.filter(m =>
        m.related_files.some(f => f === hotspot.filePath)
      ),
      relatedDecisions: conversationData.decisions.filter(d =>
        d.related_files.some(f => f === hotspot.filePath)
      )
    }));

    const mistakeSummary = this.summarizeMistakes(conversationData.mistakes);

    return {
      hotspots: hotspotsWithContext,
      clones: codeData.clones,
      mistakeSummary
    };
  }

  /**
   * Summarize mistakes: counts per severity, up to 10 distinct lessons,
   * and the list of critical/high-severity issues.
   */
  private summarizeMistakes(mistakes: Mistake[]): MistakeSummary {
    const byCategory: Record<string, number> = {};
    const topLessons: string[] = [];
    const criticalIssues: Mistake[] = [];

    for (const mistake of mistakes) {
      const category = mistake.severity || 'medium';
      byCategory[category] = (byCategory[category] || 0) + 1;

      if (mistake.lesson_learned && !topLessons.includes(mistake.lesson_learned)) {
        topLessons.push(mistake.lesson_learned);
      }
      if (mistake.severity === 'critical' || mistake.severity === 'high') {
        criticalIssues.push(mistake);
      }
    }

    return {
      total: mistakes.length,
      byCategory,
      topLessons: topLessons.slice(0, 10), // Top 10 lessons
      criticalIssues
    };
  }

  /** Compute project-wide totals and average entity complexity (1 decimal). */
  private calculateStatistics(codeData: CodeData, conversationData: ConversationData): DocumentationStatistics {
    const totalComplexity = codeData.entities
      .filter(e => e.complexity)
      .reduce((sum, e) => sum + (e.complexity || 0), 0);

    return {
      totalFiles: codeData.files.length,
      totalEntities: codeData.entities.length,
      totalDecisions: conversationData.decisions.length,
      totalMistakes: conversationData.mistakes.length,
      totalCommits: conversationData.commits.length,
      averageComplexity: codeData.entities.length > 0
        ? Math.round((totalComplexity / codeData.entities.length) * 10) / 10
        : 0
    };
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/documentation/DocumentationGenerator.ts | TypeScript | /**
* DocumentationGenerator - Main orchestrator for comprehensive documentation
*/
import { CodeAnalyzer } from './CodeAnalyzer.js';
import { ConversationAnalyzer } from './ConversationAnalyzer.js';
import { CrossReferencer } from './CrossReferencer.js';
import { MarkdownFormatter } from './MarkdownFormatter.js';
import type { SQLiteManager } from '../storage/SQLiteManager.js';
import type { DocumentationOptions } from './types.js';
export class DocumentationGenerator {
  // Stateless collaborators are created at declaration; only the
  // conversation analyzer needs the injected database handle.
  private codeAnalyzer = new CodeAnalyzer();
  private crossReferencer = new CrossReferencer();
  private formatter = new MarkdownFormatter();
  private conversationAnalyzer: ConversationAnalyzer;

  constructor(db: SQLiteManager) {
    this.conversationAnalyzer = new ConversationAnalyzer(db);
  }

  /**
   * Generate comprehensive documentation for a project.
   *
   * Pipeline: codebase scan → conversation analysis → cross-referencing →
   * markdown rendering. Progress is logged to stderr.
   *
   * @param options - Project path, scope and optional session/module filters.
   * @returns Markdown documentation.
   */
  async generate(options: DocumentationOptions): Promise<string> {
    const { projectPath, sessionId, scope, moduleFilter } = options;

    console.error('\n📚 Generating Comprehensive Documentation');
    console.error(`Project: ${projectPath}`);
    console.error(`Scope: ${scope}`);
    if (sessionId) {
      console.error(`Session: ${sessionId}`);
    }
    if (moduleFilter) {
      console.error(`Filter: ${moduleFilter}`);
    }

    try {
      const code = await this.codeAnalyzer.analyze(projectPath, moduleFilter);
      const conversations = await this.conversationAnalyzer.analyze(projectPath, sessionId);
      const linked = await this.crossReferencer.link(code, conversations);
      const markdown = this.formatter.format(linked, options);

      console.error('✅ Documentation generated successfully');
      console.error(`   Modules: ${linked.modules.length}`);
      console.error(`   Decisions: ${linked.statistics.totalDecisions}`);
      console.error(`   Mistakes: ${linked.statistics.totalMistakes}`);

      return markdown;
    } catch (error) {
      console.error('❌ Error generating documentation:', error);
      throw error;
    }
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/documentation/MarkdownFormatter.ts | TypeScript | /**
* MarkdownFormatter - Generates comprehensive markdown documentation
*/
import type {
LinkedData,
LinkedModule,
TimelineEvent,
DocumentationOptions,
Decision,
Mistake,
Requirement,
CodeEntity,
QualityReport,
HotspotWithContext
} from './types.js';
export class MarkdownFormatter {
  /**
   * Format linked data as markdown.
   * Header, statistics and footer are always emitted; the middle sections
   * depend on the requested scope ('full' includes everything).
   */
  format(data: LinkedData, options: DocumentationOptions): string {
    let doc = '';
    doc += this.renderHeader(data, options);
    doc += this.renderStatistics(data);
    const { scope, moduleFilter } = options;
    if (scope === 'full' || scope === 'architecture') {
      doc += this.renderArchitecture(data.modules, moduleFilter);
    }
    if (scope === 'full' || scope === 'decisions') {
      doc += this.renderDecisions(data);
    }
    if (scope === 'full' || scope === 'quality') {
      doc += this.renderQuality(data.qualityReport);
    }
    if (scope === 'full') {
      doc += this.renderTimeline(data.timeline);
    }
    doc += this.renderFooter();
    return doc;
  }
  /** Title block: project name (last path segment), timestamp, scope, module count. */
  private renderHeader(data: LinkedData, options: DocumentationOptions): string {
    const projectName = options.projectPath.split('/').pop() || 'Project';
    const sessionInfo = options.sessionId ? ` (Session: ${options.sessionId.substring(0, 8)}...)` : ' (All Sessions)';
    return `# ${projectName} - Comprehensive Documentation
**Generated**: ${new Date().toLocaleString()}
**Scope**: ${options.scope}${sessionInfo}
**Modules**: ${data.modules.length}
---
`;
  }
  /** Markdown table of project-wide totals. */
  private renderStatistics(data: LinkedData): string {
    const stats = data.statistics;
    return `## 📊 Project Statistics
| Metric | Count |
|--------|-------|
| Files | ${stats.totalFiles} |
| Code Entities | ${stats.totalEntities} |
| Decisions | ${stats.totalDecisions} |
| Mistakes | ${stats.totalMistakes} |
| Git Commits | ${stats.totalCommits} |
| Avg Complexity | ${stats.averageComplexity}/10 |
---
`;
  }
  /** One section per module, optionally filtered by a path substring. */
  private renderArchitecture(modules: LinkedModule[], moduleFilter?: string): string {
    let filtered = modules;
    if (moduleFilter) {
      filtered = modules.filter(m => m.path.includes(moduleFilter));
    }
    if (filtered.length === 0) {
      return `## 🏗️ Architecture Overview
No modules found${moduleFilter ? ` matching filter: ${moduleFilter}` : ''}.
---
`;
    }
    return `## 🏗️ Architecture Overview
${filtered.map(m => this.renderModule(m)).join('\n')}
---
`;
  }
  /** Per-module section: metadata plus optional decision/mistake/requirement subsections. */
  private renderModule(module: LinkedModule): string {
    const hasDecisions = module.decisions.length > 0;
    const hasMistakes = module.mistakes.length > 0;
    const hasRequirements = module.requirements.length > 0;
    return `### ${module.name}
**Location**: \`${module.path}\`
**Complexity**: ${module.complexity}/10
**Changes**: ${module.changeFrequency} edits
**Entities**: ${module.entities.length}
${module.description ? `**Purpose**: ${module.description}\n` : ''}
${hasDecisions ? this.renderModuleDecisions(module.decisions) : ''}
${hasMistakes ? this.renderModuleMistakes(module.mistakes) : ''}
${hasRequirements ? this.renderModuleRequirements(module.requirements) : ''}
${this.renderModuleEntities(module.entities)}
`;
  }
  /** Bullet list of the 5 most recent decisions with rationale/alternatives. */
  private renderModuleDecisions(decisions: Decision[]): string {
    if (decisions.length === 0) {
      return '';
    }
    const recent = decisions.slice(0, 5); // Show top 5 decisions
    return `**Key Decisions** (${decisions.length} total):
${recent.map(d => `- ${d.decision_text} (${this.formatDate(d.timestamp)})
  - *Rationale*: ${d.rationale || 'Not specified'}${d.alternatives_considered.length > 0 ? `
  - *Alternatives*: ${d.alternatives_considered.join(', ')}` : ''}`).join('\n')}
`;
  }
  /** Bullet list of every mistake with its fix and lesson. */
  private renderModuleMistakes(mistakes: Mistake[]): string {
    if (mistakes.length === 0) {
      return '';
    }
    return `**Past Issues** (${mistakes.length} total):
${mistakes.map(m => `- ⚠️ ${m.what_went_wrong} (${this.formatDate(m.timestamp)})
  - *Fix*: ${m.how_it_was_fixed || 'Not documented'}
  - *Lesson*: ${m.lesson_learned || 'Not documented'}`).join('\n')}
`;
  }
  /** Bullet list of requirements for the module. */
  private renderModuleRequirements(requirements: Requirement[]): string {
    if (requirements.length === 0) {
      return '';
    }
    return `**Requirements**:
${requirements.map(r => `- ${r.requirement_type}: ${r.description}`).join('\n')}
`;
  }
  /** One-line entity-count summary grouped by entity type (e.g. "2 classes, 3 functions"). */
  private renderModuleEntities(entities: CodeEntity[]): string {
    if (entities.length === 0) {
      return '';
    }
    const byType: Record<string, number> = {};
    for (const entity of entities) {
      byType[entity.type] = (byType[entity.type] || 0) + 1;
    }
    const summary = Object.entries(byType)
      .map(([type, count]) => `${count} ${type}${count > 1 ? 's' : ''}`)
      .join(', ');
    return `**Code Structure**: ${summary}
`;
  }
  /** Full decision log: all modules' decisions flattened, newest first. */
  private renderDecisions(data: LinkedData): string {
    const allDecisions = data.modules
      .flatMap(m => m.decisions)
      .sort((a, b) => b.timestamp - a.timestamp);
    if (allDecisions.length === 0) {
      return `## 💡 Decision Log
No decisions documented.
---
`;
    }
    return `## 💡 Decision Log
${allDecisions.map((d, i) => `### ${i + 1}. ${d.decision_text}
**Date**: ${this.formatDate(d.timestamp)}
**Context**: ${d.context || 'Not specified'}
**Rationale**: ${d.rationale}
${d.alternatives_considered.length > 0 ? `**Alternatives Considered**:
${d.alternatives_considered.map(alt => `- ${alt}${d.rejected_reasons[alt] ? `: ${d.rejected_reasons[alt]}` : ''}`).join('\n')}
` : ''}
**Affected Files**: ${d.related_files.length > 0 ? d.related_files.map(f => `\`${f}\``).join(', ') : 'None specified'}
${d.related_commits.length > 0 ? `**Related Commits**: ${d.related_commits.join(', ')}\n` : ''}
`).join('\n---\n\n')}
---
`;
  }
  /** Quality section: hotspots with linked context, clones, lessons, mistake summary. */
  private renderQuality(report: QualityReport): string {
    return `## 🔍 Quality Insights
### Code Hotspots
${report.hotspots.length > 0 ? report.hotspots.map((h: HotspotWithContext) => `
**\`${h.filePath}\`**
Complexity: ${h.complexity}/10 | Changes: ${h.changeCount}
${h.relatedMistakes.length > 0 ? `Past Issues:
${h.relatedMistakes.map((m: Mistake) => `- ${m.what_went_wrong}`).join('\n')}
` : ''}
${h.relatedDecisions.length > 0 ? `Related Decisions:
${h.relatedDecisions.map((d: Decision) => `- ${d.decision_text}`).join('\n')}
` : ''}
`).join('\n') : 'No hotspots identified.\n'}
### Code Duplication
${report.clones.length > 0 ? report.clones.map(c => `
- **Similarity**: ${Math.round(c.similarity * 100)}%
  - Files: ${c.files.map((f: string) => `\`${f}\``).join(', ')}
  - ${c.description}
`).join('\n') : 'No code duplication detected.\n'}
### Lessons Learned
${report.mistakeSummary.topLessons.length > 0 ? report.mistakeSummary.topLessons.map((lesson: string) => `- ${lesson}`).join('\n') : 'No lessons documented yet.\n'}
### Mistake Summary
**Total**: ${report.mistakeSummary.total} mistakes documented
${Object.keys(report.mistakeSummary.byCategory).length > 0 ? `**By Severity**:
${Object.entries(report.mistakeSummary.byCategory).map(([cat, count]) => `- ${cat}: ${count}`).join('\n')}
` : ''}
${report.mistakeSummary.criticalIssues.length > 0 ? `**Critical Issues**:
${report.mistakeSummary.criticalIssues.map((m: Mistake) => `- ${m.what_went_wrong}`).join('\n')}
` : ''}
---
`;
  }
  /**
   * Timeline grouped by month. The input is sorted newest-first, so months
   * appear in reverse chronological order (object keys keep insertion order).
   */
  private renderTimeline(timeline: TimelineEvent[]): string {
    if (timeline.length === 0) {
      return `## 📅 Development Timeline
No timeline events found.
---
`;
    }
    // Group by month
    const byMonth: Record<string, TimelineEvent[]> = {};
    for (const event of timeline) {
      const monthKey = this.formatMonth(event.timestamp);
      if (!byMonth[monthKey]) {byMonth[monthKey] = [];}
      byMonth[monthKey].push(event);
    }
    return `## 📅 Development Timeline
${Object.entries(byMonth).map(([month, events]) => `
### ${month}
${events.map(e => this.renderTimelineEvent(e)).join('\n')}
`).join('\n')}
---
`;
  }
  /** One timeline bullet: date, type icon, description, optional file count. */
  private renderTimelineEvent(event: TimelineEvent): string {
    const icon = {
      decision: '💡',
      mistake: '⚠️',
      commit: '📝',
      edit: '✏️'
    }[event.type] || '•';
    const date = new Date(event.timestamp).toLocaleDateString();
    const files = event.files.length > 0 ? ` (${event.files.length} files)` : '';
    return `- **${date}** ${icon} ${event.description}${files}`;
  }
  /** Static closing section describing how the document was produced. */
  private renderFooter(): string {
    return `## 📚 About This Documentation
This documentation was automatically generated by combining:
- **Codebase analysis** from local filesystem scanning
- **Development conversations** from Claude Code conversation history
- **Git history** and commit linkage
The documentation shows not just **what** exists in the code, but **why** it was built that way.
---
*Generated by CCCMemory MCP Server*
`;
  }
  // Locale-formatted short date, e.g. "Jan 5, 2025".
  private formatDate(timestamp: number): string {
    return new Date(timestamp).toLocaleDateString('en-US', {
      year: 'numeric',
      month: 'short',
      day: 'numeric'
    });
  }
  // Month grouping key, e.g. "January 2025".
  private formatMonth(timestamp: number): string {
    return new Date(timestamp).toLocaleDateString('en-US', {
      year: 'numeric',
      month: 'long'
    });
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/documentation/types.ts | TypeScript | /**
* Type definitions for documentation generation
*/
// ==================== Code Analysis Types ====================
/** A named code construct discovered in the codebase. */
export interface CodeEntity {
  id: string;
  name: string;
  type: 'class' | 'function' | 'interface' | 'module' | 'component';
  filePath: string;
  lineNumber?: number;
  complexity?: number;
  description?: string;
}
/** A directed relationship between two code entities. */
export interface Relationship {
  from: string;
  to: string;
  type: string;
  description?: string;
}
/** A scanned file with its size and any entities found in it. */
export interface FileInfo {
  path: string;
  size: number;
  entities: CodeEntity[];
}
/** A file flagged as complex and/or frequently changed. */
export interface Hotspot {
  filePath: string;
  complexity: number;
  changeCount: number;
  metric: string;
}
/** A detected code-duplication group across files. */
export interface CodeClone {
  files: string[];
  similarity: number; // 0..1, fraction of similarity
  description: string;
}
/** Aggregate output of the codebase scan. */
export interface CodeData {
  entities: CodeEntity[];
  relationships: Relationship[];
  files: FileInfo[];
  hotspots: Hotspot[];
  clones: CodeClone[];
}
// ==================== Conversation Analysis Types ====================
/** A recorded design/implementation decision and its justification. */
export interface Decision {
  id: string;
  conversation_id: string;
  message_id: string;
  decision_text: string;
  rationale: string;
  alternatives_considered: string[];
  rejected_reasons: Record<string, string>; // keyed by alternative text
  context?: string;
  related_files: string[];
  related_commits: string[];
  timestamp: number;
}
/** A recorded mistake, its fix, and the lesson drawn from it. */
export interface Mistake {
  id: string;
  conversation_id: string;
  what_went_wrong: string;
  why_it_happened: string;
  how_it_was_fixed: string;
  lesson_learned: string;
  related_files: string[];
  severity: string;
  timestamp: number;
}
/** A captured project requirement or constraint. */
export interface Requirement {
  id: string;
  requirement_type: string;
  description: string;
  rationale?: string;
  related_files: string[];
  timestamp: number;
}
/** A file-edit snapshot tied to a conversation. */
export interface FileEdit {
  id: string;
  conversation_id: string;
  file_path: string;
  edit_type: string;
  timestamp: number;
}
/** A git commit, optionally linked to the conversation it came from. */
export interface GitCommit {
  hash: string;
  conversation_id?: string;
  message: string;
  author: string;
  timestamp: number;
  files_changed: string[];
}
/** Aggregate output of the conversation-history analysis. */
export interface ConversationData {
  decisions: Decision[];
  mistakes: Mistake[];
  requirements: Requirement[];
  fileEdits: FileEdit[];
  commits: GitCommit[];
}
// ==================== Cross-Referenced Types ====================
/** A logical module (directory) with its linked conversation context. */
export interface LinkedModule {
  path: string;
  name: string;
  entities: CodeEntity[];
  decisions: Decision[];
  mistakes: Mistake[];
  requirements: Requirement[];
  complexity: number; // 0..10 scale
  changeFrequency: number; // number of recorded edits
  description?: string;
}
/** A single event on the development timeline. */
export interface TimelineEvent {
  timestamp: number;
  type: 'decision' | 'mistake' | 'commit' | 'edit';
  description: string;
  files: string[];
  details: unknown; // the underlying Decision/Mistake/GitCommit record
}
/** Quality findings: hotspots with context, clones, and mistake rollup. */
export interface QualityReport {
  hotspots: HotspotWithContext[];
  clones: CodeClone[];
  mistakeSummary: MistakeSummary;
}
/** A hotspot enriched with the mistakes/decisions that mention its file. */
export interface HotspotWithContext extends Hotspot {
  relatedMistakes: Mistake[];
  relatedDecisions: Decision[];
}
/** Rollup of all recorded mistakes. */
export interface MistakeSummary {
  total: number;
  byCategory: Record<string, number>; // counts keyed by severity
  topLessons: string[];
  criticalIssues: Mistake[];
}
/** Fully cross-referenced documentation input. */
export interface LinkedData {
  modules: LinkedModule[];
  timeline: TimelineEvent[];
  qualityReport: QualityReport;
  statistics: DocumentationStatistics;
}
/** Project-wide totals shown in the statistics table. */
export interface DocumentationStatistics {
  totalFiles: number;
  totalEntities: number;
  totalDecisions: number;
  totalMistakes: number;
  totalCommits: number;
  averageComplexity: number;
}
// ==================== Options ====================
/** Options controlling what documentation is generated. */
export interface DocumentationOptions {
  projectPath: string;
  sessionId?: string;
  scope: 'full' | 'architecture' | 'decisions' | 'quality';
  moduleFilter?: string;
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/embeddings/ConfigManager.ts | TypeScript | /**
* Configuration Manager for CLI
* Handles reading and writing embedding configuration
*/
import { readFileSync, writeFileSync, existsSync } from "fs";
import { join } from "path";
import { homedir } from "os";
import { ConfigLoader, EmbeddingConfig, EmbeddingProviderType } from "./EmbeddingConfig.js";
export class ConfigManager {
  private static readonly CONFIG_FILENAME = ".claude-memory-config.json";
  /**
   * Get the current effective configuration after all precedence rules
   * (env vars > project config > home config > defaults) are applied.
   */
  static getCurrentConfig(): EmbeddingConfig {
    return ConfigLoader.load();
  }
  /**
   * Get a breakdown of configuration sources: the raw `embedding` section
   * from the home and project config files (null when absent), the values
   * present in environment variables, and the effective merged config.
   */
  static getConfigSources(): {
    home: Partial<EmbeddingConfig> | null;
    project: Partial<EmbeddingConfig> | null;
    env: Partial<EmbeddingConfig>;
    effective: EmbeddingConfig;
  } {
    const homeConfigPath = join(homedir(), this.CONFIG_FILENAME);
    const projectConfigPath = join(process.cwd(), this.CONFIG_FILENAME);
    const homeConfig = existsSync(homeConfigPath)
      ? this.loadConfigFile(homeConfigPath)?.embedding || null
      : null;
    const projectConfig = existsSync(projectConfigPath)
      ? this.loadConfigFile(projectConfigPath)?.embedding || null
      : null;
    // Mirrors the environment variables ConfigLoader.load() reads.
    const envConfig: Partial<EmbeddingConfig> = {};
    if (process.env.EMBEDDING_PROVIDER) {
      envConfig.provider = process.env.EMBEDDING_PROVIDER as EmbeddingProviderType;
    }
    if (process.env.EMBEDDING_MODEL) {
      envConfig.model = process.env.EMBEDDING_MODEL;
    }
    if (process.env.EMBEDDING_DIMENSIONS) {
      envConfig.dimensions = parseInt(process.env.EMBEDDING_DIMENSIONS, 10);
    }
    if (process.env.EMBEDDING_BASE_URL) {
      envConfig.baseUrl = process.env.EMBEDDING_BASE_URL;
    }
    if (process.env.OPENAI_API_KEY) {
      envConfig.apiKey = process.env.OPENAI_API_KEY;
    }
    return {
      home: homeConfig,
      project: projectConfig,
      env: envConfig,
      effective: ConfigLoader.load(),
    };
  }
  /**
   * Get a specific effective config value by key.
   * Accepts both camelCase and snake_case for baseUrl/apiKey.
   * @throws Error for an unknown key.
   */
  static getConfigValue(key: string): unknown {
    const config = this.getCurrentConfig();
    switch (key) {
      case "provider":
        return config.provider;
      case "model":
        return config.model;
      case "dimensions":
        return config.dimensions;
      case "baseUrl":
      case "base_url":
        return config.baseUrl;
      case "apiKey":
      case "api_key":
        return config.apiKey;
      default:
        throw new Error(`Unknown config key: ${key}. Valid keys: provider, model, dimensions, baseUrl, apiKey`);
    }
  }
  /**
   * Set a config value (writes to the home config file).
   *
   * Fix: the full existing JSON document is preserved on write. Previously
   * only the `embedding` section was re-serialized, so any other top-level
   * keys a user had in the config file were silently discarded.
   *
   * @throws Error on invalid key/value, or when the file cannot be written.
   */
  static setConfigValue(key: string, value: string): void {
    const homeConfigPath = join(homedir(), this.CONFIG_FILENAME);
    // Load the ENTIRE existing document, not just the embedding section,
    // so unrelated top-level keys survive the rewrite below.
    let fileData: { embedding?: Partial<EmbeddingConfig>; [key: string]: unknown } = {};
    if (existsSync(homeConfigPath)) {
      const existing = this.loadConfigFile(homeConfigPath);
      if (existing) {
        fileData = existing;
      }
    }
    // Work on a copy of the embedding section so a validation error below
    // leaves the on-disk file untouched.
    const embedding: Partial<EmbeddingConfig> = { ...(fileData.embedding ?? {}) };
    // Set the value with validation and type conversion
    switch (key) {
      case "provider":
        if (!["ollama", "transformers", "openai"].includes(value)) {
          throw new Error(`Invalid provider: ${value}. Must be 'ollama', 'transformers', or 'openai'`);
        }
        embedding.provider = value as EmbeddingProviderType;
        break;
      case "model":
        if (!value || value.trim() === "") {
          throw new Error("Model name cannot be empty");
        }
        embedding.model = value;
        break;
      case "dimensions": {
        const dims = parseInt(value, 10);
        if (isNaN(dims) || dims < 1 || dims > 10000) {
          throw new Error("Dimensions must be a number between 1 and 10000");
        }
        embedding.dimensions = dims;
        break;
      }
      case "baseUrl":
      case "base_url":
        embedding.baseUrl = value;
        break;
      case "apiKey":
      case "api_key":
        embedding.apiKey = value;
        break;
      default:
        throw new Error(`Unknown config key: ${key}. Valid keys: provider, model, dimensions, baseUrl, apiKey`);
    }
    fileData.embedding = embedding;
    // Write config file
    try {
      writeFileSync(homeConfigPath, JSON.stringify(fileData, null, 2), "utf-8");
    } catch (error) {
      throw new Error(`Failed to write config file: ${(error as Error).message}`);
    }
  }
  /** Absolute path of the home-directory config file. */
  static getConfigPath(): string {
    return join(homedir(), this.CONFIG_FILENAME);
  }
  /** Whether the home-directory config file exists. */
  static configExists(): boolean {
    return existsSync(this.getConfigPath());
  }
  /** Parse a JSON config file; returns null when unreadable or malformed. */
  private static loadConfigFile(path: string): { embedding?: Partial<EmbeddingConfig> } | null {
    try {
      const content = readFileSync(path, "utf-8");
      return JSON.parse(content);
    } catch (_error) {
      return null;
    }
  }
  /**
   * Get known embedding dimensions for common models, or null when unknown.
   * NOTE(review): this table duplicates src/embeddings/ModelRegistry.ts —
   * consider delegating to getModelDimensions() there for one source of truth.
   */
  static getKnownModelDimensions(model: string): number | null {
    const knownDimensions: Record<string, number> = {
      // Ollama models
      "mxbai-embed-large": 1024,
      "nomic-embed-text": 768,
      "all-minilm": 384,
      "snowflake-arctic-embed": 1024,
      // OpenAI models
      "text-embedding-3-small": 1536,
      "text-embedding-3-large": 3072,
      "text-embedding-ada-002": 1536,
      // Transformers models
      "Xenova/all-MiniLM-L6-v2": 384,
      "Xenova/all-mpnet-base-v2": 768,
    };
    return knownDimensions[model] || null;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/embeddings/EmbeddingConfig.ts | TypeScript | /**
* Embedding Configuration Management
* Loads config from file and environment variables (env vars take precedence)
*/
import { readFileSync, existsSync } from "fs";
import { join } from "path";
import { homedir } from "os";
/** Supported embedding backends. */
export type EmbeddingProviderType = "ollama" | "transformers" | "openai";
/** Effective embedding settings after config-file/env merging. */
export interface EmbeddingConfig {
  provider: EmbeddingProviderType;
  model: string;
  dimensions?: number; // Optional - can be auto-detected
  baseUrl?: string; // For Ollama
  apiKey?: string; // For OpenAI
}
export class ConfigLoader {
  private static readonly CONFIG_FILENAME = ".claude-memory-config.json";
  /**
   * Load configuration with precedence: env vars > project config > home config > defaults.
   *
   * Fix: explicit settings are merged WITHOUT baked-in defaults first, and
   * provider-specific defaults are applied afterwards. Previously the
   * transformers defaults ("Xenova/all-MiniLM-L6-v2", 384) were seeded into
   * the config up front, so selecting e.g. provider=ollama without an
   * explicit model kept the transformers model name and dimensions instead
   * of the ollama defaults that applyProviderDefaults() clearly intends.
   */
  static load(): EmbeddingConfig {
    // Collect only explicitly-configured settings; defaults come last.
    const merged: Partial<EmbeddingConfig> = {};
    // Home-directory config (lowest-precedence file source)
    const homeConfigPath = join(homedir(), this.CONFIG_FILENAME);
    if (existsSync(homeConfigPath)) {
      const homeConfig = this.loadConfigFile(homeConfigPath);
      if (homeConfig?.embedding) {
        Object.assign(merged, homeConfig.embedding);
      }
    }
    // Project config overrides home config
    const projectConfigPath = join(process.cwd(), this.CONFIG_FILENAME);
    if (existsSync(projectConfigPath)) {
      const projectConfig = this.loadConfigFile(projectConfigPath);
      if (projectConfig?.embedding) {
        Object.assign(merged, projectConfig.embedding);
      }
    }
    // Environment variables override everything
    if (process.env.EMBEDDING_PROVIDER) {
      merged.provider = process.env.EMBEDDING_PROVIDER as EmbeddingProviderType;
    }
    if (process.env.EMBEDDING_MODEL) {
      merged.model = process.env.EMBEDDING_MODEL;
    }
    if (process.env.EMBEDDING_DIMENSIONS) {
      const dims = parseInt(process.env.EMBEDDING_DIMENSIONS, 10);
      // Ignore a non-numeric value instead of propagating NaN dimensions.
      if (!Number.isNaN(dims)) {
        merged.dimensions = dims;
      }
    }
    if (process.env.EMBEDDING_BASE_URL) {
      merged.baseUrl = process.env.EMBEDDING_BASE_URL;
    }
    if (process.env.OPENAI_API_KEY) {
      merged.apiKey = process.env.OPENAI_API_KEY;
    }
    const config: EmbeddingConfig = {
      provider: merged.provider ?? "transformers",
      model: merged.model ?? "", // empty = not configured; filled in below
      dimensions: merged.dimensions,
      baseUrl: merged.baseUrl,
      apiKey: merged.apiKey,
    };
    // Fill in provider-specific defaults for anything left unset
    return this.applyProviderDefaults(config);
  }
  /**
   * Load and parse a JSON config file.
   * Returns null (with a warning) when the file is unreadable or malformed.
   */
  private static loadConfigFile(path: string): { embedding?: Partial<EmbeddingConfig> } | null {
    try {
      const content = readFileSync(path, "utf-8");
      return JSON.parse(content);
    } catch (error) {
      console.error(`Warning: Could not load config file ${path}:`, error);
      return null;
    }
  }
  /**
   * Apply provider-specific defaults to any field not explicitly configured.
   * An empty model string means "not configured" and is replaced here.
   * An unknown provider is returned unchanged; validate() will flag it.
   */
  private static applyProviderDefaults(config: EmbeddingConfig): EmbeddingConfig {
    switch (config.provider) {
      case "ollama":
        return {
          ...config,
          baseUrl: config.baseUrl || "http://localhost:11434",
          model: config.model || "mxbai-embed-large",
          dimensions: config.dimensions || 1024, // mxbai-embed-large default
        };
      case "openai":
        return {
          ...config,
          model: config.model || "text-embedding-3-small",
          dimensions: config.dimensions || 1536, // text-embedding-3-small default
        };
      case "transformers":
        return {
          ...config,
          model: config.model || "Xenova/all-MiniLM-L6-v2",
          dimensions: config.dimensions || 384,
        };
      default:
        return config;
    }
  }
  /**
   * Validate configuration.
   * @returns valid=false plus human-readable error strings on any problem.
   */
  static validate(config: EmbeddingConfig): { valid: boolean; errors: string[] } {
    const errors: string[] = [];
    if (!["ollama", "transformers", "openai"].includes(config.provider)) {
      errors.push(
        `Invalid provider: ${config.provider}. Must be 'ollama', 'transformers', or 'openai'`
      );
    }
    if (!config.model || config.model.trim() === "") {
      errors.push("Model name is required");
    }
    if (config.provider === "openai" && !config.apiKey) {
      errors.push("OpenAI provider requires OPENAI_API_KEY environment variable or apiKey in config");
    }
    if (config.dimensions && (config.dimensions < 1 || config.dimensions > 10000)) {
      errors.push("Dimensions must be between 1 and 10000");
    }
    return {
      valid: errors.length === 0,
      errors,
    };
  }
  /** Example config file contents for documentation / `--help` output. */
  static getExampleConfig(): string {
    return JSON.stringify(
      {
        embedding: {
          provider: "ollama",
          model: "nomic-embed-text",
          baseUrl: "http://localhost:11434",
        },
      },
      null,
      2
    );
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/embeddings/EmbeddingGenerator.ts | TypeScript | /**
* Embedding Generator Factory
* Creates appropriate embedding provider based on configuration
*/
import type { EmbeddingProvider } from "./EmbeddingProvider.js";
import { ConfigLoader } from "./EmbeddingConfig.js";
import { OllamaEmbeddings } from "./providers/OllamaEmbeddings.js";
import { OpenAIEmbeddings } from "./providers/OpenAIEmbeddings.js";
import { TransformersEmbeddings } from "./providers/TransformersEmbeddings.js";
/**
* Factory class for creating embedding providers
*/
export class EmbeddingGenerator {
  // Cached singleton provider; null until first successful initialization.
  private static instance: EmbeddingProvider | null = null;
  // In-flight initialization promise, doubling as a mutex so concurrent
  // callers share one initialization instead of racing.
  private static initializationPromise: Promise<EmbeddingProvider> | null = null;
  /**
   * Get or create embedding provider based on configuration
   * Uses promise-based mutex to prevent concurrent initialization race condition
   */
  static async getProvider(): Promise<EmbeddingProvider> {
    // Fast path: already initialized
    if (this.instance) {
      return this.instance;
    }
    // If initialization is in progress, wait for it
    if (this.initializationPromise) {
      return this.initializationPromise;
    }
    // Start initialization with mutex
    this.initializationPromise = this.initializeProvider();
    try {
      const provider = await this.initializationPromise;
      return provider;
    } catch (error) {
      // Reset on failure so next call can retry
      this.initializationPromise = null;
      throw error;
    }
  }
  /**
   * Internal initialization logic (called once via mutex).
   * Flow: load + validate config -> create configured provider (or
   * auto-detect when config is invalid) -> initialize -> if unavailable,
   * fall back to auto-detection. The result is cached in `instance` even
   * when no provider is truly available (callers degrade to FTS).
   */
  private static async initializeProvider(): Promise<EmbeddingProvider> {
    // Double-check after acquiring mutex
    if (this.instance) {
      return this.instance;
    }
    // Load configuration
    const config = ConfigLoader.load();
    // Validate configuration
    const validation = ConfigLoader.validate(config);
    if (!validation.valid) {
      console.error("⚠️ Invalid embedding configuration:");
      validation.errors.forEach((error) => console.error(` - ${error}`));
      console.error(" Falling back to auto-detection...");
    }
    // Try to create provider based on config (or auto-detect)
    let provider: EmbeddingProvider;
    if (validation.valid) {
      console.error(`Attempting to use ${config.provider} embeddings...`);
      provider = this.createProvider(config.provider, config);
    } else {
      // Auto-detect: try providers in order of preference
      console.error("Auto-detecting available embedding provider...");
      provider = await this.autoDetectProvider();
    }
    // Initialize the provider
    await provider.initialize();
    // If provider is not available, try fallback
    if (!provider.isAvailable()) {
      // NOTE(review): this message names config.provider even when the
      // provider above came from auto-detection, so it can log the wrong name.
      console.error(`⚠️ ${config.provider} provider not available, trying fallback...`);
      provider = await this.autoDetectProvider();
      await provider.initialize();
    }
    this.instance = provider;
    return provider;
  }
  /**
   * Create specific provider instance.
   * Unknown provider types throw; callers gate on validated config.
   */
  private static createProvider(type: string, config: { model: string; baseUrl?: string; apiKey?: string; dimensions?: number }): EmbeddingProvider {
    switch (type) {
      case "ollama":
        return new OllamaEmbeddings(
          config.baseUrl || "http://localhost:11434",
          config.model,
          config.dimensions
        );
      case "openai":
        return new OpenAIEmbeddings(
          config.apiKey || "",
          config.model,
          config.dimensions
        );
      case "transformers":
        return new TransformersEmbeddings(
          config.model,
          config.dimensions
        );
      default:
        throw new Error(`Unknown provider type: ${type}`);
    }
  }
  /**
   * Auto-detect best available provider
   * Tries in order: Transformers.js (bundled, reliable) → Ollama (fast if running)
   */
  private static async autoDetectProvider(): Promise<EmbeddingProvider> {
    // Try Transformers.js first (bundled dependency, always works offline)
    const transformers = new TransformersEmbeddings();
    await transformers.initialize();
    if (transformers.isAvailable()) {
      console.error("✓ Auto-detected: Using Transformers.js embeddings");
      return transformers;
    }
    // Try Ollama as fallback (requires Ollama to be running)
    const ollama = new OllamaEmbeddings();
    await ollama.initialize();
    if (ollama.isAvailable()) {
      console.error("✓ Auto-detected: Using Ollama embeddings");
      return ollama;
    }
    // No provider available - return transformers as placeholder
    // It will fail gracefully when used, falling back to FTS
    console.error("⚠️ No embedding provider available");
    console.error("   Options:");
    console.error("   1. Ensure @xenova/transformers is properly installed");
    console.error("   2. Install Ollama: https://ollama.com");
    console.error("   3. Configure OpenAI: Set OPENAI_API_KEY environment variable");
    console.error("   Falling back to full-text search only.");
    return transformers; // Return uninitialized provider (will fail gracefully)
  }
  /**
   * Reset singleton (useful for testing)
   */
  static reset(): void {
    this.instance = null;
    this.initializationPromise = null;
  }
  /**
   * Get current provider info (if initialized).
   * Returns null until getProvider() has completed at least once.
   */
  static getInfo(): { provider: string; model: string; available: boolean } | null {
    if (!this.instance) {
      return null;
    }
    const info = this.instance.getModelInfo();
    return {
      provider: info.provider,
      model: info.model,
      available: info.available,
    };
  }
}
/**
 * Legacy API compatibility shim: resolves to the singleton embedding
 * provider managed by EmbeddingGenerator.
 */
export async function getEmbeddingGenerator(): Promise<EmbeddingProvider> {
  const provider = await EmbeddingGenerator.getProvider();
  return provider;
}
/**
 * Legacy API compatibility - resets singleton
 * Clears both the cached provider and any in-flight initialization;
 * primarily intended for tests.
 */
export function resetEmbeddingGenerator(): void {
  EmbeddingGenerator.reset();
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/embeddings/EmbeddingProvider.ts | TypeScript | /**
* Embedding Provider Interface
* Defines the contract for all embedding implementations
*/
/**
 * Metadata describing the active embedding model.
 * NOTE(review): a different, richer `ModelInfo` interface also exists in
 * ModelRegistry.ts — the shared name is confusing; consider renaming one.
 */
export interface ModelInfo {
  provider: string;
  model: string;
  dimensions: number;
  /** False when the provider failed to initialize or is unreachable. */
  available: boolean;
}
/** Contract implemented by every embedding backend (Ollama/OpenAI/Transformers). */
export interface EmbeddingProvider {
  /**
   * Initialize the embedding provider
   * Should handle graceful failure if provider unavailable
   */
  initialize(): Promise<void>;
  /**
   * Check if embeddings are available
   */
  isAvailable(): boolean;
  /**
   * Generate embedding for a single text
   */
  embed(text: string): Promise<Float32Array>;
  /**
   * Generate embeddings for multiple texts (batched for efficiency)
   */
  embedBatch(texts: string[], batchSize?: number): Promise<Float32Array[]>;
  /**
   * Get information about the model being used
   */
  getModelInfo(): ModelInfo;
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/embeddings/ModelRegistry.ts | TypeScript | /**
* Centralized registry of all supported embedding models
* Single source of truth for model information, dimensions, and metadata
*/
/** Describes one supported embedding model and its metadata. */
export interface ModelInfo {
  /** Model identifier as the provider knows it. */
  name: string;
  /** Length of the embedding vector the model produces. */
  dimensions: number;
  /** Backend that serves this model. */
  provider: "ollama" | "transformers" | "openai";
  /** Human-readable summary for CLI/help output. */
  description: string;
  /** Coarse quality tier; consumed by getRecommendedModel(). */
  quality: "low" | "medium" | "high" | "highest";
  /** Shell command to install the model, where applicable. */
  installation?: string;
  /** Pricing note for cloud models. */
  cost?: string;
}
/**
 * Complete registry of all supported embedding models.
 * Order matters: partial matching in getModelInfo() and the tier search in
 * getRecommendedModel() both return the first entry found.
 */
export const MODEL_REGISTRY: ModelInfo[] = [
  // Ollama models (local)
  {
    name: "mxbai-embed-large",
    dimensions: 1024,
    provider: "ollama",
    description: "High-quality local embeddings, balanced speed and quality",
    quality: "high",
    installation: "ollama pull mxbai-embed-large",
  },
  {
    name: "nomic-embed-text",
    dimensions: 768,
    provider: "ollama",
    description: "Fast, good quality for general use",
    quality: "medium",
    installation: "ollama pull nomic-embed-text",
  },
  {
    name: "all-minilm",
    dimensions: 384,
    provider: "ollama",
    description: "Lightweight, fast, lower quality",
    quality: "low",
    installation: "ollama pull all-minilm",
  },
  {
    name: "snowflake-arctic-embed",
    dimensions: 1024,
    provider: "ollama",
    description: "High-quality, optimized for retrieval tasks",
    quality: "high",
    installation: "ollama pull snowflake-arctic-embed",
  },
  // Transformers models (offline)
  {
    name: "Xenova/all-MiniLM-L6-v2",
    dimensions: 384,
    provider: "transformers",
    description: "Default model, no setup required, downloads on first use",
    quality: "low",
  },
  {
    name: "Xenova/all-mpnet-base-v2",
    dimensions: 768,
    provider: "transformers",
    description: "Better quality, larger size, no setup required",
    quality: "medium",
  },
  {
    name: "Xenova/bge-small-en-v1.5",
    dimensions: 384,
    provider: "transformers",
    description: "Fast, English-optimized",
    quality: "low",
  },
  {
    name: "Xenova/bge-base-en-v1.5",
    dimensions: 768,
    provider: "transformers",
    description: "Better quality, English-optimized",
    quality: "medium",
  },
  // OpenAI models (cloud)
  {
    name: "text-embedding-3-small",
    dimensions: 1536,
    provider: "openai",
    description: "Cost-effective, high quality cloud embeddings",
    quality: "highest",
    cost: "$0.020 per 1M tokens",
  },
  {
    name: "text-embedding-3-large",
    dimensions: 3072,
    provider: "openai",
    description: "Best quality, higher cost",
    quality: "highest",
    cost: "$0.130 per 1M tokens",
  },
  {
    name: "text-embedding-ada-002",
    dimensions: 1536,
    provider: "openai",
    description: "Legacy model, still supported",
    quality: "high",
    cost: "$0.100 per 1M tokens",
  },
];
/**
 * All registry entries served by the given provider, in registry order.
 */
export function getModelsByProvider(provider: string): ModelInfo[] {
  const matches: ModelInfo[] = [];
  for (const entry of MODEL_REGISTRY) {
    if (entry.provider === provider) {
      matches.push(entry);
    }
  }
  return matches;
}
/**
 * Look up a model by name. An exact match wins; otherwise the first entry
 * whose name contains, or is contained in, the query (Ollama-style tags
 * like "nomic-embed-text:latest" match this way). Null for blank input or
 * no match.
 */
export function getModelInfo(modelName: string): ModelInfo | null {
  if (!modelName || modelName.trim().length === 0) {
    return null;
  }
  for (const entry of MODEL_REGISTRY) {
    if (entry.name === modelName) {
      return entry;
    }
  }
  for (const entry of MODEL_REGISTRY) {
    if (modelName.includes(entry.name) || entry.name.includes(modelName)) {
      return entry;
    }
  }
  return null;
}
/**
 * Embedding dimensions for a model by name, or null when unknown.
 */
export function getModelDimensions(modelName: string): number | null {
  const info = getModelInfo(modelName);
  return info ? info.dimensions : null;
}
/**
 * The full registry (the shared array itself — do not mutate).
 */
export function getAllModels(): ModelInfo[] {
  return MODEL_REGISTRY;
}
/**
 * Whether a model name resolves to a registry entry (exact or partial).
 */
export function modelExists(modelName: string): boolean {
  return getModelInfo(modelName) !== null;
}
/**
 * All registry entries at the given quality tier.
 */
export function getModelsByQuality(quality: ModelInfo["quality"]): ModelInfo[] {
  const matches: ModelInfo[] = [];
  for (const entry of MODEL_REGISTRY) {
    if (entry.quality === quality) {
      matches.push(entry);
    }
  }
  return matches;
}
/**
 * Recommended model for a provider: the first entry at the best quality
 * tier available (highest > high > medium), else the provider's first
 * model, else null when the provider has none.
 */
export function getRecommendedModel(provider: string): ModelInfo | null {
  const candidates = getModelsByProvider(provider);
  for (const tier of ["highest", "high", "medium"] as const) {
    const match = candidates.find((m) => m.quality === tier);
    if (match) {
      return match;
    }
  }
  return candidates[0] ?? null;
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/embeddings/VectorStore.ts | TypeScript | /**
* Vector Store with sqlite-vec integration
* Dual-schema support (vector extension or BLOB fallback)
*/
import type { SQLiteManager } from "../storage/SQLiteManager.js";
import Database from "better-sqlite3";
import type { TextChunk } from "../chunking/index.js";
/** One hit from a message-level vector search. */
export interface VectorSearchResult {
  /** Message ID (numeric part of the stored "msg_<id>" key). */
  id: number;
  content: string;
  /** Cosine similarity, computed as 1 - cosine distance. */
  similarity: number;
  metadata?: Record<string, unknown>;
}
/** One hit from a chunk-level vector search. */
export interface ChunkSearchResult {
  chunkId: string;
  messageId: number;
  /** Position of this chunk within its message. */
  chunkIndex: number;
  totalChunks: number;
  content: string;
  startOffset: number;
  endOffset: number;
  similarity: number;
  /** Chunking strategy that produced this chunk. */
  strategy: string;
}
/** Payload for persisting one chunk's embedding. */
export interface ChunkEmbeddingData {
  messageId: number;
  chunk: TextChunk;
  embedding: Float32Array;
  modelName: string;
}
/**
 * Filter options for pre-filtering vector search
 */
export interface SearchFilterOptions {
  /** Filter by date range [start, end] as Unix timestamps */
  dateRange?: [number, number];
  /** Filter by conversation IDs (internal) */
  conversationIds?: number[];
  /** Filter by message types */
  messageTypes?: string[];
  /** Minimum similarity threshold */
  minSimilarity?: number;
}
export class VectorStore {
  // Raw better-sqlite3 handle (shared with SQLiteManager).
  private db: Database.Database;
  private sqliteManager: SQLiteManager;
  // True when the sqlite-vec extension (vec0 virtual tables) is usable.
  private hasVecExtension: boolean = false;
  // Guards one-time creation of the vec_* virtual tables (see ensureVecTables).
  private vecTablesInitialized: boolean = false;
  constructor(sqliteManager: SQLiteManager) {
    this.db = sqliteManager.getDatabase();
    this.sqliteManager = sqliteManager;
    // Probe for sqlite-vec up front so later reads/writes pick the right path.
    this.detectVecExtension();
  }
  /**
   * Detect if sqlite-vec extension is available.
   * Order of checks:
   *  1. Existing vec_* tables in sqlite_master (works in read-only mode).
   *  2. Create-and-drop of a throwaway vec0 virtual table (needs write access).
   *  3. On a read-only error, assume the extension IS present (SQLiteManager
   *     would have failed earlier if the extension could not be loaded).
   */
  private detectVecExtension(): void {
    try {
      // First, check if vec0 module is registered by querying sqlite_master
      // This works even in read-only mode
      const vecTables = this.db
        .prepare("SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'vec_%'")
        .all() as Array<{ name: string }>;
      // If vec tables exist, the extension was loaded successfully before
      if (vecTables.length > 0) {
        this.hasVecExtension = true;
        console.error("✓ sqlite-vec extension detected (existing tables found)");
        return;
      }
      // Try to create a test virtual table (requires write access)
      this.db.exec("CREATE VIRTUAL TABLE IF NOT EXISTS vec_test USING vec0(test float[1])");
      this.db.exec("DROP TABLE vec_test");
      this.hasVecExtension = true;
      console.error("✓ sqlite-vec extension detected");
    } catch (error) {
      const errorMessage = (error as Error).message;
      // Check if it's a read-only error vs actual missing extension
      // (error-string sniffing; SQLite reports "readonly"/"read-only" text)
      if (errorMessage.includes("readonly") || errorMessage.includes("read-only")) {
        // Database is read-only, assume vec is available if extension loaded
        // (SQLiteManager would have failed to load if it wasn't)
        this.hasVecExtension = true;
        console.error("✓ sqlite-vec extension assumed available (read-only mode)");
      } else {
        this.hasVecExtension = false;
        console.error("⚠ sqlite-vec not available:", errorMessage);
      }
    }
  }
  /**
   * Check if vec extension is enabled
   */
  isVecEnabled(): boolean {
    return this.hasVecExtension;
  }
  /**
   * Generic helper to get existing embedding IDs from both BLOB and vec tables.
   * Returns the UNION of IDs found in either table so callers can skip
   * re-embedding work that already exists in any storage mode.
   * @param blobTable - BLOB table name (e.g., "message_embeddings")
   * @param idColumn - Column name for the entity ID (e.g., "message_id")
   * @param vecTable - Vec table name (e.g., "vec_message_embeddings")
   * @param prefix - ID prefix in vec table (e.g., "msg_")
   */
  private getExistingEmbeddingIds(
    blobTable: string,
    idColumn: string,
    vecTable: string,
    prefix: string
  ): Set<number> {
    const ids = new Set<number>();
    // Query BLOB fallback table
    try {
      const rows = this.db
        .prepare(`SELECT ${idColumn} FROM ${blobTable}`)
        .all() as Array<Record<string, number>>;
      for (const row of rows) {
        ids.add(Number(row[idColumn]));
      }
    } catch (_e) {
      // Table might not exist yet
    }
    // Also query sqlite-vec table if extension is available
    if (this.hasVecExtension) {
      try {
        const vecRows = this.db
          .prepare(`SELECT id FROM ${vecTable}`)
          .all() as Array<{ id: string }>;
        for (const row of vecRows) {
          // Strip prefix to get actual entity ID
          if (row.id.startsWith(prefix)) {
            ids.add(Number(row.id.substring(prefix.length)));
          }
        }
      } catch (_e) {
        // Vec table might not exist yet
      }
    }
    return ids;
  }
  /**
   * Get set of message IDs that already have embeddings.
   */
  getExistingMessageEmbeddingIds(): Set<number> {
    return this.getExistingEmbeddingIds(
      "message_embeddings",
      "message_id",
      "vec_message_embeddings",
      "msg_"
    );
  }
  /**
   * Get set of decision IDs that already have embeddings.
   */
  getExistingDecisionEmbeddingIds(): Set<number> {
    return this.getExistingEmbeddingIds(
      "decision_embeddings",
      "decision_id",
      "vec_decision_embeddings",
      "dec_"
    );
  }
  /**
   * Get set of mistake IDs that already have embeddings.
   */
  getExistingMistakeEmbeddingIds(): Set<number> {
    return this.getExistingEmbeddingIds(
      "mistake_embeddings",
      "mistake_id",
      "vec_mistake_embeddings",
      "mst_"
    );
  }
  /**
   * Ensure vec tables exist with correct dimensions.
   * No-op when the extension is missing or the tables were already created;
   * the actual DDL is delegated to SQLiteManager.
   */
  private ensureVecTables(dimensions: number): void {
    if (!this.hasVecExtension || this.vecTablesInitialized) {
      return;
    }
    this.sqliteManager.createVecTablesWithDimensions(dimensions);
    this.vecTablesInitialized = true;
  }
  /**
   * Prepare vec tables for search when dimensions are known.
   * Public wrapper around ensureVecTables for callers that only search.
   */
  prepareVecTables(dimensions: number): void {
    this.ensureVecTables(dimensions);
  }
  /**
   * Store an embedding for a message.
   * Dual-write: the BLOB table is ALWAYS written first (it carries the
   * content used by search JOINs); the sqlite-vec virtual table is written
   * second, best-effort, when the extension is available.
   * When foreign keys are enforced, embeddings for messages that no longer
   * exist are silently skipped (the insert would violate the FK).
   * @param messageId - The message ID
   * @param content - The message content
   * @param embedding - The embedding vector
   * @param modelName - The model used to generate the embedding (default: all-MiniLM-L6-v2)
   */
  async storeMessageEmbedding(
    messageId: number,
    content: string,
    embedding: Float32Array,
    modelName: string = "all-MiniLM-L6-v2"
  ): Promise<void> {
    const foreignKeysEnabled = Boolean(
      this.db.pragma("foreign_keys", { simple: true }) as number
    );
    if (foreignKeysEnabled) {
      const messageExists = this.db
        .prepare("SELECT 1 FROM messages WHERE id = ?")
        .get(messageId);
      if (!messageExists) {
        return;
      }
    }
    const embedId = `msg_${messageId}`;
    // ALWAYS store content in BLOB table for JOINs and fallback
    // This ensures search can always retrieve content regardless of vec mode
    this.storeInBlobTable(messageId, content, embedding, modelName);
    if (this.hasVecExtension) {
      // Ensure vec tables exist with correct dimensions
      try {
        this.ensureVecTables(embedding.length);
      } catch (error) {
        console.error("Failed to ensure vec tables:", (error as Error).message);
        // Content already stored in BLOB table, so we can continue
        return;
      }
      // Also store in sqlite-vec virtual table for fast similarity search
      try {
        // Try to delete existing entry first (handles dimension mismatches)
        try {
          this.db
            .prepare("DELETE FROM vec_message_embeddings WHERE id = ?")
            .run(embedId);
        } catch (_deleteError) {
          // Ignore - entry might not exist
        }
        // Now insert the new embedding
        this.db
          .prepare(
            "INSERT INTO vec_message_embeddings (id, embedding) VALUES (?, ?)"
          )
          .run(embedId, this.float32ArrayToBuffer(embedding));
      } catch (error) {
        // Only log non-UNIQUE-constraint errors
        const errorMessage = (error as Error).message;
        if (!errorMessage.includes("UNIQUE constraint")) {
          console.error("Vec embedding storage failed, using BLOB only:", errorMessage);
        }
        // Content already stored in BLOB table, so search will still work
      }
    }
  }
  /**
   * Store embedding in BLOB table (fallback).
   * Upsert keyed on "msg_<messageId>"; refreshes content, vector bytes,
   * model name, and created_at on conflict.
   */
  private storeInBlobTable(
    messageId: number,
    content: string,
    embedding: Float32Array,
    modelName: string
  ): void {
    this.db
      .prepare(
        `INSERT INTO message_embeddings
         (id, message_id, content, embedding, model_name, created_at)
         VALUES (?, ?, ?, ?, ?, ?)
         ON CONFLICT(id) DO UPDATE SET
         message_id = excluded.message_id,
         content = excluded.content,
         embedding = excluded.embedding,
         model_name = excluded.model_name,
         created_at = excluded.created_at`
      )
      .run(
        `msg_${messageId}`,
        messageId,
        content,
        this.float32ArrayToBuffer(embedding),
        modelName,
        Date.now()
      );
  }
  /**
   * Generic helper to store embeddings for decisions/mistakes (simpler schema without content).
   * Same dual-write pattern as storeMessageEmbedding: BLOB upsert first,
   * then best-effort delete+insert into the vec virtual table. When foreign
   * keys are enforced, missing parent rows cause a silent skip.
   * @param entityId - The entity ID (decision or mistake)
   * @param embedding - The embedding vector
   * @param blobTable - BLOB table name (e.g., "decision_embeddings")
   * @param idColumn - Column name for entity ID (e.g., "decision_id")
   * @param vecTable - Vec table name (e.g., "vec_decision_embeddings")
   * @param prefix - ID prefix (e.g., "dec_")
   * @param entityType - For logging (e.g., "decision")
   * @param entityTable - Parent table checked for FK existence (e.g., "decisions")
   */
  private storeEntityEmbedding(
    entityId: number,
    embedding: Float32Array,
    blobTable: string,
    idColumn: string,
    vecTable: string,
    prefix: string,
    entityType: string,
    entityTable: string
  ): void {
    const foreignKeysEnabled = Boolean(
      this.db.pragma("foreign_keys", { simple: true }) as number
    );
    if (foreignKeysEnabled) {
      const entityExists = this.db
        .prepare(`SELECT 1 FROM ${entityTable} WHERE id = ?`)
        .get(entityId);
      if (!entityExists) {
        return;
      }
    }
    const embedId = `${prefix}${entityId}`;
    // Store in BLOB table
    this.db
      .prepare(
        `INSERT INTO ${blobTable}
         (id, ${idColumn}, embedding, created_at)
         VALUES (?, ?, ?, ?)
         ON CONFLICT(id) DO UPDATE SET
         ${idColumn} = excluded.${idColumn},
         embedding = excluded.embedding,
         created_at = excluded.created_at`
      )
      .run(
        embedId,
        entityId,
        this.float32ArrayToBuffer(embedding),
        Date.now()
      );
    // Also store in sqlite-vec if available
    if (this.hasVecExtension) {
      try {
        this.ensureVecTables(embedding.length);
        // Delete-then-insert handles stale entries with mismatched dimensions
        try {
          this.db.prepare(`DELETE FROM ${vecTable} WHERE id = ?`).run(embedId);
        } catch (_e) {
          // Ignore - entry might not exist
        }
        this.db
          .prepare(`INSERT INTO ${vecTable} (id, embedding) VALUES (?, ?)`)
          .run(embedId, this.float32ArrayToBuffer(embedding));
      } catch (error) {
        const errorMessage = (error as Error).message;
        if (!errorMessage.includes("UNIQUE constraint")) {
          console.error(`Vec ${entityType} embedding storage failed:`, errorMessage);
        }
      }
    }
  }
  /**
   * Store an embedding for a decision
   */
  async storeDecisionEmbedding(
    decisionId: number,
    embedding: Float32Array
  ): Promise<void> {
    this.storeEntityEmbedding(
      decisionId,
      embedding,
      "decision_embeddings",
      "decision_id",
      "vec_decision_embeddings",
      "dec_",
      "decision",
      "decisions"
    );
  }
  /**
   * Store an embedding for a mistake
   */
  async storeMistakeEmbedding(
    mistakeId: number,
    embedding: Float32Array
  ): Promise<void> {
    this.storeEntityEmbedding(
      mistakeId,
      embedding,
      "mistake_embeddings",
      "mistake_id",
      "vec_mistake_embeddings",
      "mst_",
      "mistake",
      "mistakes"
    );
  }
  /**
   * Filter options for vector search.
   * NOTE(review): this instance property is never read or written in this
   * class's visible code — filters are passed per-call to searchMessages().
   * Looks vestigial; confirm and consider removing.
   */
  searchFilterOptions?: SearchFilterOptions;
  /**
   * Search for similar messages.
   * Routes to the sqlite-vec accelerated path when the extension is
   * available, otherwise to the BLOB-table cosine fallback.
   */
  async searchMessages(
    queryEmbedding: Float32Array,
    limit: number = 10,
    filter?: SearchFilterOptions
  ): Promise<VectorSearchResult[]> {
    if (this.hasVecExtension) {
      return this.searchWithVecExtension(queryEmbedding, limit, filter);
    } else {
      return this.searchWithCosine(queryEmbedding, limit, filter);
    }
  }
/**
* Search using sqlite-vec extension with optional pre-filtering
*/
private searchWithVecExtension(
queryEmbedding: Float32Array,
limit: number,
filter?: SearchFilterOptions
): VectorSearchResult[] {
try {
this.ensureVecTables(queryEmbedding.length);
const queryBuffer = this.float32ArrayToBuffer(queryEmbedding);
// Build query with optional pre-filtering
let sql: string;
const params: (Buffer | number | string)[] = [queryBuffer];
if (filter && this.hasFilterConditions(filter)) {
// Use CTE with pre-filtering for better performance
sql = `
WITH filtered_messages AS (
SELECT me.id, me.message_id, me.content
FROM message_embeddings me
JOIN messages m ON me.message_id = m.id
WHERE 1=1
`;
// Add filter conditions
if (filter.dateRange) {
sql += " AND m.timestamp BETWEEN ? AND ?";
params.push(filter.dateRange[0], filter.dateRange[1]);
}
if (filter.conversationIds && filter.conversationIds.length > 0) {
sql += ` AND m.conversation_id IN (${filter.conversationIds.map(() => "?").join(",")})`;
params.push(...filter.conversationIds);
}
if (filter.messageTypes && filter.messageTypes.length > 0) {
sql += ` AND m.message_type IN (${filter.messageTypes.map(() => "?").join(",")})`;
params.push(...filter.messageTypes);
}
sql += `
)
SELECT
vec.id,
fm.content,
vec_distance_cosine(vec.embedding, ?) as distance
FROM vec_message_embeddings vec
JOIN filtered_messages fm ON vec.id = fm.id
ORDER BY distance
LIMIT ?
`;
// Add query buffer again for the vec_distance_cosine call
params.push(queryBuffer);
params.push(limit);
} else {
// No filters - use simple query
sql = `
SELECT
vec.id,
me.content,
vec_distance_cosine(vec.embedding, ?) as distance
FROM vec_message_embeddings vec
JOIN message_embeddings me ON vec.id = me.id
ORDER BY distance
LIMIT ?
`;
params.push(limit);
}
const results = this.db.prepare(sql).all(...params) as Array<{
id: string;
content: string;
distance: number;
}>;
// Apply minimum similarity filter if specified
let filteredResults = results;
if (filter?.minSimilarity) {
const minSim = filter.minSimilarity;
filteredResults = results.filter((r) => (1 - r.distance) >= minSim);
}
return filteredResults.map((r) => ({
id: Number(r.id.replace("msg_", "")),
content: r.content,
similarity: 1 - r.distance,
}));
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
if (!message.includes("no such table: vec_message_embeddings")) {
console.error("Error in vec search:", error);
}
// Fallback to cosine
return this.searchWithCosine(queryEmbedding, limit, filter);
}
}
/**
* Check if filter has any conditions
*/
private hasFilterConditions(filter: SearchFilterOptions): boolean {
return Boolean(
filter.dateRange ||
(filter.conversationIds && filter.conversationIds.length > 0) ||
(filter.messageTypes && filter.messageTypes.length > 0)
);
}
  /**
   * Search using manual cosine similarity (fallback) with optional pre-filtering
   *
   * Loads every stored embedding that survives the SQL pre-filter and
   * scores it in JS — O(n) in the number of embeddings, so this path is
   * only intended as the fallback when sqlite-vec is unavailable.
   */
  private searchWithCosine(
    queryEmbedding: Float32Array,
    limit: number,
    filter?: SearchFilterOptions
  ): VectorSearchResult[] {
    // Build query with optional pre-filtering
    let sql = "SELECT me.id, me.message_id, me.content, me.embedding FROM message_embeddings me";
    const params: (number | string)[] = [];
    if (filter && this.hasFilterConditions(filter)) {
      // "WHERE 1=1" lets every condition below be appended as "AND ...".
      sql += " JOIN messages m ON me.message_id = m.id WHERE 1=1";
      if (filter.dateRange) {
        sql += " AND m.timestamp BETWEEN ? AND ?";
        params.push(filter.dateRange[0], filter.dateRange[1]);
      }
      if (filter.conversationIds && filter.conversationIds.length > 0) {
        // IN-lists are built from '?' placeholders only; values are bound,
        // not interpolated, so this is injection-safe.
        sql += ` AND m.conversation_id IN (${filter.conversationIds.map(() => "?").join(",")})`;
        params.push(...filter.conversationIds);
      }
      if (filter.messageTypes && filter.messageTypes.length > 0) {
        sql += ` AND m.message_type IN (${filter.messageTypes.map(() => "?").join(",")})`;
        params.push(...filter.messageTypes);
      }
    }
    const allEmbeddings = this.db.prepare(sql).all(...params) as Array<{
      id: string;
      message_id: number;
      content: string;
      embedding: Buffer;
    }>;
    // Score every candidate in JS, then sort by descending similarity.
    let results = allEmbeddings
      .map((row) => {
        const embedding = this.bufferToFloat32Array(row.embedding);
        const similarity = this.cosineSimilarity(queryEmbedding, embedding);
        return {
          id: Number(row.message_id),
          content: row.content,
          similarity,
        };
      })
      .sort((a, b) => b.similarity - a.similarity);
    // Apply minimum similarity filter
    const minSim = filter?.minSimilarity;
    if (minSim) {
      results = results.filter((r) => r.similarity >= minSim);
    }
    return results.slice(0, limit);
  }
/**
* Calculate cosine similarity between two vectors
*/
private cosineSimilarity(a: Float32Array, b: Float32Array): number {
if (a.length !== b.length) {
throw new Error(`Vectors must have same length: got ${a.length} and ${b.length}`);
}
let dotProduct = 0;
let normA = 0;
let normB = 0;
for (let i = 0; i < a.length; i++) {
dotProduct += a[i] * b[i];
normA += a[i] * a[i];
normB += b[i] * b[i];
}
// Guard against division by zero (zero vectors)
if (normA === 0 || normB === 0) {
return 0;
}
return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));
}
/**
* Convert Float32Array to Buffer for storage
*/
private float32ArrayToBuffer(array: Float32Array): Buffer {
return Buffer.from(array.buffer, array.byteOffset, array.byteLength);
}
/**
* Convert Buffer to Float32Array for retrieval
*/
private bufferToFloat32Array(buffer: Buffer): Float32Array {
// Validate byte alignment (must be divisible by 4 for Float32)
if (buffer.byteLength % 4 !== 0) {
console.error(`Invalid embedding buffer size: ${buffer.byteLength} bytes (not divisible by 4)`);
return new Float32Array(0);
}
// Copy to ensure proper alignment (Node Buffers may not be aligned)
const aligned = new Float32Array(buffer.byteLength / 4);
const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
for (let i = 0; i < aligned.length; i++) {
aligned[i] = view.getFloat32(i * 4, true); // little-endian
}
return aligned;
}
/**
* Get embedding count
*/
getEmbeddingCount(): number {
const result = this.db
.prepare("SELECT COUNT(*) as count FROM message_embeddings")
.get() as { count: number };
return result.count;
}
/**
* Clear all embeddings
*/
clearAllEmbeddings(): void {
this.db.exec("DELETE FROM message_embeddings");
this.db.exec("DELETE FROM decision_embeddings");
try {
this.db.exec("DELETE FROM mistake_embeddings");
} catch (_e) {
// Table might not exist yet
}
try {
this.db.exec("DELETE FROM chunk_embeddings");
} catch (_e) {
// Table might not exist yet
}
if (this.hasVecExtension) {
try {
this.db.exec("DELETE FROM vec_message_embeddings");
} catch (_e) {
// Vector table might not exist
}
try {
this.db.exec("DELETE FROM vec_decision_embeddings");
} catch (_e) {
// Vector table might not exist
}
try {
this.db.exec("DELETE FROM vec_mistake_embeddings");
} catch (_e) {
// Vector table might not exist
}
try {
this.db.exec("DELETE FROM vec_chunk_embeddings");
} catch (_e) {
// Vector table might not exist
}
}
}
// ==================================================
// CHUNK EMBEDDINGS SUPPORT
// ==================================================
  /**
   * Store embedding for a text chunk
   *
   * Writes the raw vector into chunk_embeddings (upsert keyed on the
   * deterministic chunk id) and, when sqlite-vec is loaded, mirrors it
   * into vec_chunk_embeddings for fast ANN search.
   * Silently returns when foreign keys are enabled and the parent message
   * row is missing, or when the chunk table has not been migrated yet.
   */
  async storeChunkEmbedding(data: ChunkEmbeddingData): Promise<void> {
    const { messageId, chunk, embedding, modelName } = data;
    // Validate message exists if foreign keys are enabled
    const foreignKeysEnabled = Boolean(
      this.db.pragma("foreign_keys", { simple: true }) as number
    );
    if (foreignKeysEnabled) {
      const messageExists = this.db
        .prepare("SELECT 1 FROM messages WHERE id = ?")
        .get(messageId);
      if (!messageExists) {
        // Inserting would violate the FK constraint, so skip quietly.
        return;
      }
    }
    // Deterministic id so re-indexing the same chunk upserts in place.
    const chunkId = `chunk_${messageId}_${chunk.index}`;
    // Store in chunk_embeddings table
    try {
      this.db
        .prepare(
          `INSERT INTO chunk_embeddings
          (id, message_id, chunk_index, total_chunks, content, start_offset, end_offset,
           embedding, strategy, model_name, estimated_tokens, created_at)
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
          ON CONFLICT(id) DO UPDATE SET
            content = excluded.content,
            embedding = excluded.embedding,
            estimated_tokens = excluded.estimated_tokens,
            created_at = excluded.created_at`
        )
        .run(
          chunkId,
          messageId,
          chunk.index,
          chunk.totalChunks,
          chunk.content,
          chunk.startOffset,
          chunk.endOffset,
          this.float32ArrayToBuffer(embedding),
          chunk.strategy,
          modelName,
          chunk.estimatedTokens,
          Date.now()
        );
    } catch (error) {
      // Table might not exist yet (pre-migration)
      const errorMessage = (error as Error).message;
      if (!errorMessage.includes("no such table")) {
        console.error("Chunk embedding storage failed:", errorMessage);
      }
      // Without the base row there is no point mirroring into the vec table.
      return;
    }
    // Also store in vec table for fast ANN search
    if (this.hasVecExtension) {
      try {
        this.ensureVecChunkTable(embedding.length);
        // Delete existing entry first (delete-then-insert rather than an
        // upsert; presumably vec0 tables lack ON CONFLICT — TODO confirm)
        try {
          this.db
            .prepare("DELETE FROM vec_chunk_embeddings WHERE id = ?")
            .run(chunkId);
        } catch (_e) {
          // Entry might not exist
        }
        this.db
          .prepare(
            "INSERT INTO vec_chunk_embeddings (id, embedding) VALUES (?, ?)"
          )
          .run(chunkId, this.float32ArrayToBuffer(embedding));
      } catch (error) {
        const errorMessage = (error as Error).message;
        if (!errorMessage.includes("UNIQUE constraint")) {
          console.error("Vec chunk embedding storage failed:", errorMessage);
        }
      }
    }
  }
/**
* Store multiple chunk embeddings in batch
*/
async storeChunkEmbeddingsBatch(chunks: ChunkEmbeddingData[]): Promise<void> {
for (const chunk of chunks) {
await this.storeChunkEmbedding(chunk);
}
}
  /**
   * Ensure vec_chunk_embeddings virtual table exists
   *
   * Probes with a SELECT and creates the vec0 table on failure.
   * No-op when the sqlite-vec extension is not loaded.
   * @param dimensions vector length baked into the vec0 column definition
   */
  private ensureVecChunkTable(dimensions: number): void {
    if (!this.hasVecExtension) {
      return;
    }
    try {
      // Check if table exists
      this.db.prepare("SELECT 1 FROM vec_chunk_embeddings LIMIT 1").get();
    } catch (_e) {
      // Table doesn't exist, create it
      // SECURITY: dimensions is validated in createVecTablesWithDimensions
      // (and re-validated here) before being interpolated into the DDL.
      if (!Number.isInteger(dimensions) || dimensions <= 0 || dimensions > 10000) {
        throw new Error(`Invalid dimensions: must be a positive integer <= 10000`);
      }
      this.db.exec(`
      CREATE VIRTUAL TABLE IF NOT EXISTS vec_chunk_embeddings
      USING vec0(
        id TEXT PRIMARY KEY,
        embedding float[${dimensions}]
      )
    `);
    }
  }
/**
* Search chunk embeddings for similar content
*/
async searchChunks(
queryEmbedding: Float32Array,
limit: number = 30
): Promise<ChunkSearchResult[]> {
if (this.hasVecExtension) {
return this.searchChunksWithVec(queryEmbedding, limit);
} else {
return this.searchChunksWithCosine(queryEmbedding, limit);
}
}
  /**
   * Search chunks using sqlite-vec extension
   *
   * Orders by cosine distance and maps it to similarity = 1 - distance.
   * Any failure (e.g. missing virtual table) falls back to the cosine scan.
   */
  private searchChunksWithVec(
    queryEmbedding: Float32Array,
    limit: number
  ): ChunkSearchResult[] {
    try {
      this.ensureVecChunkTable(queryEmbedding.length);
      const queryBuffer = this.float32ArrayToBuffer(queryEmbedding);
      // Join back to chunk_embeddings for the chunk metadata/content.
      const results = this.db
        .prepare(
          `SELECT
            vec.id,
            ce.message_id,
            ce.chunk_index,
            ce.total_chunks,
            ce.content,
            ce.start_offset,
            ce.end_offset,
            ce.strategy,
            vec_distance_cosine(vec.embedding, ?) as distance
          FROM vec_chunk_embeddings vec
          JOIN chunk_embeddings ce ON vec.id = ce.id
          ORDER BY distance
          LIMIT ?`
        )
        .all(queryBuffer, limit) as Array<{
        id: string;
        message_id: number;
        chunk_index: number;
        total_chunks: number;
        content: string;
        start_offset: number;
        end_offset: number;
        strategy: string;
        distance: number;
      }>;
      return results.map((r) => ({
        chunkId: r.id,
        messageId: r.message_id,
        chunkIndex: r.chunk_index,
        totalChunks: r.total_chunks,
        content: r.content,
        startOffset: r.start_offset,
        endOffset: r.end_offset,
        similarity: 1 - r.distance,
        strategy: r.strategy,
      }));
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      // Missing-table errors are expected pre-migration; stay quiet then.
      if (!message.includes("no such table")) {
        console.error("Error in vec chunk search:", error);
      }
      return this.searchChunksWithCosine(queryEmbedding, limit);
    }
  }
  /**
   * Search chunks using manual cosine similarity (fallback)
   *
   * Loads every chunk embedding and scores it in JS — O(n) in the number
   * of chunks; only used when sqlite-vec is unavailable. Returns [] when
   * the chunk table does not exist yet.
   */
  private searchChunksWithCosine(
    queryEmbedding: Float32Array,
    limit: number
  ): ChunkSearchResult[] {
    try {
      const allChunks = this.db
        .prepare(
          `SELECT id, message_id, chunk_index, total_chunks, content,
                start_offset, end_offset, strategy, embedding
         FROM chunk_embeddings`
        )
        .all() as Array<{
        id: string;
        message_id: number;
        chunk_index: number;
        total_chunks: number;
        content: string;
        start_offset: number;
        end_offset: number;
        strategy: string;
        embedding: Buffer;
      }>;
      // Score, sort descending by similarity, and keep the top `limit`.
      const results = allChunks
        .map((row) => {
          const embedding = this.bufferToFloat32Array(row.embedding);
          const similarity = this.cosineSimilarity(queryEmbedding, embedding);
          return {
            chunkId: row.id,
            messageId: row.message_id,
            chunkIndex: row.chunk_index,
            totalChunks: row.total_chunks,
            content: row.content,
            startOffset: row.start_offset,
            endOffset: row.end_offset,
            similarity,
            strategy: row.strategy,
          };
        })
        .sort((a, b) => b.similarity - a.similarity)
        .slice(0, limit);
      return results;
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      if (!message.includes("no such table")) {
        console.error("Error in cosine chunk search:", error);
      }
      return [];
    }
  }
/**
* Get set of message IDs that already have chunk embeddings
*/
getExistingChunkEmbeddingMessageIds(): Set<number> {
const ids = new Set<number>();
try {
const rows = this.db
.prepare("SELECT DISTINCT message_id FROM chunk_embeddings")
.all() as Array<{ message_id: number }>;
for (const row of rows) {
ids.add(row.message_id);
}
} catch (_e) {
// Table might not exist yet
}
return ids;
}
/**
* Get chunk count for statistics
*/
getChunkEmbeddingCount(): number {
try {
const result = this.db
.prepare("SELECT COUNT(*) as count FROM chunk_embeddings")
.get() as { count: number };
return result.count;
} catch (_e) {
return 0;
}
}
/**
* Check if chunk embeddings table exists
*/
hasChunkEmbeddingsTable(): boolean {
try {
this.db.prepare("SELECT 1 FROM chunk_embeddings LIMIT 1").get();
return true;
} catch (_e) {
return false;
}
}
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/embeddings/providers/OllamaEmbeddings.ts | TypeScript | /**
* Ollama Embeddings Provider
* Uses local Ollama API for embeddings
*/
import type { EmbeddingProvider, ModelInfo } from "../EmbeddingProvider.js";
import { getModelDimensions } from "../ModelRegistry.js";
/** Shape of the JSON body returned by Ollama's /api/embeddings endpoint. */
interface OllamaEmbeddingResponse {
  embedding: number[];
}
export class OllamaEmbeddings implements EmbeddingProvider {
  // Normalized base URL of the Ollama server (trailing slash stripped).
  private baseUrl: string;
  // Model name sent with every request, e.g. "mxbai-embed-large".
  private model: string;
  // Vector length reported via getModelInfo(); not verified against
  // actual API output here.
  private dimensions: number;
  // Set by initialize(); gates embed()/embedBatch().
  private available: boolean = false;
  // Last initialization failure, surfaced in embed() error messages.
  private initializationError: Error | null = null;
  constructor(baseUrl: string = "http://localhost:11434", model: string = "mxbai-embed-large", dimensions?: number) {
    this.baseUrl = baseUrl.replace(/\/$/, ""); // Remove trailing slash
    this.model = model;
    this.dimensions = dimensions || this.getDefaultDimensions(model);
  }
  /**
   * Initialize by checking if Ollama is available
   *
   * Never throws: failures are recorded in initializationError and leave
   * the provider unavailable. NOTE(review): no request timeout is set on
   * the fetch calls — a hung server blocks initialization indefinitely.
   */
  async initialize(): Promise<void> {
    try {
      // Check if Ollama is running
      const response = await fetch(`${this.baseUrl}/api/tags`, {
        method: "GET",
      });
      if (!response.ok) {
        throw new Error(`Ollama API returned ${response.status}`);
      }
      // Check if model is available
      const data = (await response.json()) as { models: Array<{ name: string }> };
      // includes() (not ===) so tagged names like "model:latest" still match.
      const hasModel = data.models.some((m) => m.name.includes(this.model));
      if (!hasModel) {
        console.error(
          `⚠️ Model '${this.model}' not found in Ollama. Available models: ${data.models.map((m) => m.name).join(", ")}`
        );
        console.error(`   Pull the model with: ollama pull ${this.model}`);
        throw new Error(`Model ${this.model} not available in Ollama`);
      }
      this.available = true;
      console.error(`✓ Ollama embeddings ready (${this.model})`);
    } catch (error) {
      this.initializationError = error as Error;
      this.available = false;
      console.error("⚠️ Ollama not available:", (error as Error).message);
      console.error("   Make sure Ollama is running: ollama serve");
    }
  }
  /**
   * Check if embeddings are available
   */
  isAvailable(): boolean {
    return this.available;
  }
  /**
   * Generate embedding for a single text
   * @throws when the provider is unavailable or the API call fails
   */
  async embed(text: string): Promise<Float32Array> {
    if (!this.available) {
      throw new Error(
        `Ollama embeddings not available: ${this.initializationError?.message || "Not initialized"}`
      );
    }
    try {
      const response = await fetch(`${this.baseUrl}/api/embeddings`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          model: this.model,
          prompt: text,
        }),
      });
      if (!response.ok) {
        throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
      }
      const data = (await response.json()) as OllamaEmbeddingResponse;
      return new Float32Array(data.embedding);
    } catch (error) {
      // Log with context, then let the caller decide how to recover.
      console.error("Error generating embedding with Ollama:", error);
      throw error;
    }
  }
  /**
   * Generate embeddings for multiple texts (batched)
   *
   * Results are returned in input order. One failed request fails the
   * whole batch (Promise.all semantics).
   */
  async embedBatch(texts: string[], batchSize: number = 32): Promise<Float32Array[]> {
    if (!this.available) {
      throw new Error("Ollama embeddings not initialized");
    }
    const embeddings: Float32Array[] = [];
    // Process in batches
    for (let i = 0; i < texts.length; i += batchSize) {
      const batch = texts.slice(i, i + batchSize);
      console.error(
        `Generating embeddings for batch ${Math.floor(i / batchSize) + 1}/${Math.ceil(texts.length / batchSize)} (Ollama)`
      );
      // Ollama doesn't have batch endpoint, so we do parallel requests
      const batchEmbeddings = await Promise.all(
        batch.map((text) => this.embed(text))
      );
      embeddings.push(...batchEmbeddings);
    }
    return embeddings;
  }
  /**
   * Get model information
   */
  getModelInfo(): ModelInfo {
    return {
      provider: "ollama",
      model: this.model,
      dimensions: this.dimensions,
      available: this.available,
    };
  }
  /**
   * Get default dimensions for common Ollama models using ModelRegistry
   */
  private getDefaultDimensions(model: string): number {
    // Try to get dimensions from ModelRegistry
    const dimensions = getModelDimensions(model);
    if (dimensions) {
      return dimensions;
    }
    // Default to 768 if unknown (most common for Ollama models)
    console.error(
      `Unknown model dimensions for '${model}', defaulting to 768. Specify dimensions in config if different.`
    );
    return 768;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/embeddings/providers/OpenAIEmbeddings.ts | TypeScript | /**
* OpenAI Embeddings Provider
* Uses OpenAI API for high-quality embeddings
*/
import type { EmbeddingProvider, ModelInfo } from "../EmbeddingProvider.js";
import { getModelDimensions } from "../ModelRegistry.js";
// Type for OpenAI client (dynamic import)
// Structural type for the OpenAI client (SDK is loaded via dynamic import,
// so we avoid a hard compile-time dependency on its types).
type OpenAIClient = {
  embeddings: {
    create: (params: {
      model: string;
      input: string | string[];
    }) => Promise<{
      data: Array<{ embedding: number[] }>;
    }>;
  };
};
export class OpenAIEmbeddings implements EmbeddingProvider {
  // Lazily created in initialize(); null until then.
  private client: OpenAIClient | null = null;
  private model: string;
  // Reported via getModelInfo() only. NOTE(review): this value is never
  // sent to the API — text-embedding-3 models accept a `dimensions`
  // request parameter; consider forwarding it if a custom value is given.
  private dimensions: number;
  private apiKey: string;
  // Set by initialize(); gates embed()/embedBatch().
  private available: boolean = false;
  private initializationError: Error | null = null;
  constructor(apiKey: string, model: string = "text-embedding-3-small", dimensions?: number) {
    this.apiKey = apiKey;
    this.model = model;
    this.dimensions = dimensions || this.getDefaultDimensions(model);
  }
  /**
   * Initialize OpenAI client
   *
   * Never throws: failures are recorded and leave the provider
   * unavailable. Note: key validation issues a real (tiny, billable)
   * embeddings request with input "test".
   */
  async initialize(): Promise<void> {
    try {
      if (!this.apiKey) {
        throw new Error("OpenAI API key required. Set OPENAI_API_KEY environment variable.");
      }
      // Dynamic import of OpenAI SDK (optional dependency)
      const { default: OpenAI } = await import("openai");
      this.client = new OpenAI({
        apiKey: this.apiKey,
      }) as unknown as OpenAIClient;
      // Test the API key with a small request
      await this.client.embeddings.create({
        model: this.model,
        input: "test",
      });
      this.available = true;
      console.error(`✓ OpenAI embeddings ready (${this.model})`);
    } catch (error) {
      this.initializationError = error as Error;
      this.available = false;
      // Distinguish "SDK not installed" from genuine API failures.
      if ((error as Error).message.includes("Cannot find module")) {
        console.error("⚠️ OpenAI SDK not installed");
        console.error("   Install with: npm install openai");
      } else {
        console.error("⚠️ OpenAI initialization failed:", (error as Error).message);
      }
    }
  }
  /**
   * Check if embeddings are available
   */
  isAvailable(): boolean {
    return this.available;
  }
  /**
   * Generate embedding for a single text
   * @throws when the provider is unavailable or the API call fails
   */
  async embed(text: string): Promise<Float32Array> {
    if (!this.available || !this.client) {
      throw new Error(
        `OpenAI embeddings not available: ${this.initializationError?.message || "Not initialized"}`
      );
    }
    try {
      const response = await this.client.embeddings.create({
        model: this.model,
        input: text,
      });
      return new Float32Array(response.data[0].embedding);
    } catch (error) {
      console.error("Error generating embedding with OpenAI:", error);
      throw error;
    }
  }
  /**
   * Generate embeddings for multiple texts (batched)
   * OpenAI supports batch requests natively
   *
   * Results are returned in input order; a failed request fails the batch.
   */
  async embedBatch(texts: string[], batchSize: number = 100): Promise<Float32Array[]> {
    if (!this.available || !this.client) {
      throw new Error("OpenAI embeddings not initialized");
    }
    const embeddings: Float32Array[] = [];
    // OpenAI allows up to 2048 inputs per request, but we use smaller batches for reliability
    for (let i = 0; i < texts.length; i += batchSize) {
      const batch = texts.slice(i, i + batchSize);
      console.error(
        `Generating embeddings for batch ${Math.floor(i / batchSize) + 1}/${Math.ceil(texts.length / batchSize)} (OpenAI)`
      );
      try {
        const response = await this.client.embeddings.create({
          model: this.model,
          input: batch,
        });
        // Convert all embeddings in the batch
        const batchEmbeddings = response.data.map(
          (item) => new Float32Array(item.embedding)
        );
        embeddings.push(...batchEmbeddings);
      } catch (error) {
        console.error("Error in batch embedding:", error);
        throw error;
      }
    }
    return embeddings;
  }
  /**
   * Get model information
   */
  getModelInfo(): ModelInfo {
    return {
      provider: "openai",
      model: this.model,
      dimensions: this.dimensions,
      available: this.available,
    };
  }
  /**
   * Get default dimensions for OpenAI models using ModelRegistry
   */
  private getDefaultDimensions(model: string): number {
    // Try to get dimensions from ModelRegistry
    const dimensions = getModelDimensions(model);
    if (dimensions) {
      return dimensions;
    }
    // Default to 1536 if unknown (most common for OpenAI models)
    return 1536;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/embeddings/providers/TransformersEmbeddings.ts | TypeScript | /**
* Transformers.js Embeddings Provider
* Uses @xenova/transformers for local, offline embeddings
*/
import type { EmbeddingProvider, ModelInfo } from "../EmbeddingProvider.js";
import { getModelDimensions } from "../ModelRegistry.js";
// Type for the pipeline function from @xenova/transformers
// Type for the pipeline function from @xenova/transformers (loaded via
// dynamic import to keep it an optional dependency).
type EmbeddingPipeline = ((text: string, options?: Record<string, unknown>) => Promise<Record<string, unknown>>) | null;
export class TransformersEmbeddings implements EmbeddingProvider {
  // Created by initialize(); null until the model is loaded.
  private pipeline: EmbeddingPipeline = null;
  private model: string;
  // Reported via getModelInfo(); not verified against actual output.
  private dimensions: number;
  private available: boolean = false;
  private initializationError: Error | null = null;
  constructor(model: string = "Xenova/all-MiniLM-L6-v2", dimensions?: number) {
    this.model = model;
    this.dimensions = dimensions || this.getDefaultDimensions(model);
  }
  /**
   * Initialize the embedding pipeline
   *
   * Idempotent (early-returns when already available). Never throws:
   * failures are recorded and leave the provider unavailable.
   * NOTE(review): the first load typically fetches model weights to a
   * local cache — depends on transformers.js caching; confirm.
   */
  async initialize(): Promise<void> {
    if (this.available) {
      return;
    }
    try {
      // Try to import @xenova/transformers
      const { pipeline } = await import("@xenova/transformers");
      console.error(`Loading embedding model: ${this.model}...`);
      this.pipeline = (await pipeline("feature-extraction", this.model)) as unknown as EmbeddingPipeline;
      this.available = true;
      console.error(`✓ Transformers.js embeddings ready (${this.model})`);
    } catch (error) {
      this.initializationError = error as Error;
      this.available = false;
      // Distinguish "package not installed" from model-loading failures.
      if ((error as Error).message.includes("Cannot find module")) {
        console.error("⚠️ @xenova/transformers not installed");
        console.error("   Install with: npm install @xenova/transformers");
      } else {
        console.error("⚠️ Could not load embedding model:", (error as Error).message);
      }
    }
  }
  /**
   * Check if embeddings are available
   */
  isAvailable(): boolean {
    return this.available && this.pipeline !== null;
  }
  /**
   * Generate embedding for text
   *
   * Mean-pools and L2-normalizes the token embeddings.
   * @throws when the pipeline is not initialized or inference fails
   */
  async embed(text: string): Promise<Float32Array> {
    if (!this.isAvailable()) {
      throw new Error(
        `Transformers embeddings not available: ${this.initializationError?.message || "Not initialized"}`
      );
    }
    if (!this.pipeline) {
      throw new Error("Pipeline not initialized");
    }
    try {
      // Generate embedding
      const output = await this.pipeline(text, {
        pooling: "mean",
        normalize: true,
      });
      // Extract the embedding array
      // Assumes the pooled output exposes a numeric `data` field — the
      // tensor returned by transformers.js feature extraction.
      const embedding = (output as { data: number[] }).data;
      return new Float32Array(embedding);
    } catch (error) {
      console.error("Error generating embedding:", error);
      throw error;
    }
  }
  /**
   * Generate embeddings for multiple texts (batched)
   *
   * Results are returned in input order; a failed inference fails the batch.
   */
  async embedBatch(texts: string[], batchSize: number = 32): Promise<Float32Array[]> {
    if (!this.isAvailable()) {
      throw new Error("Transformers embeddings not initialized");
    }
    const embeddings: Float32Array[] = [];
    // Process in batches
    for (let i = 0; i < texts.length; i += batchSize) {
      const batch = texts.slice(i, i + batchSize);
      console.error(
        `Generating embeddings for batch ${Math.floor(i / batchSize) + 1}/${Math.ceil(texts.length / batchSize)} (Transformers.js)`
      );
      const batchEmbeddings = await Promise.all(
        batch.map((text) => this.embed(text))
      );
      embeddings.push(...batchEmbeddings);
    }
    return embeddings;
  }
  /**
   * Get model information
   */
  getModelInfo(): ModelInfo {
    return {
      provider: "transformers",
      model: this.model,
      dimensions: this.dimensions,
      available: this.available,
    };
  }
  /**
   * Get default dimensions for common transformer models using ModelRegistry
   */
  private getDefaultDimensions(model: string): number {
    // Try to get dimensions from ModelRegistry
    const dimensions = getModelDimensions(model);
    if (dimensions) {
      return dimensions;
    }
    // Default to 384 if unknown (most common for small models)
    return 384;
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/handoff/SessionHandoffStore.ts | TypeScript | /**
* Session Handoff Store
*
* Manages session handoff documents for transferring context between
* conversations when context fills up or when explicitly requested.
*/
import { nanoid } from "nanoid";
import type { Database } from "better-sqlite3";
import type {
SessionHandoff,
SessionHandoffRow,
HandoffDecision,
ActiveFile,
PendingTask,
WorkingMemoryItem,
} from "../memory/types.js";
import { WorkingMemoryStore } from "../memory/WorkingMemoryStore.js";
/**
 * Options for creating a handoff
 */
export interface PrepareHandoffOptions {
  sessionId?: string; // defaults to "current" when omitted
  projectPath: string;
  include?: Array<"decisions" | "files" | "tasks" | "memory">; // defaults to all four slices
}
/**
 * Options for resuming from a handoff
 */
export interface ResumeHandoffOptions {
  handoffId?: string; // when omitted, the newest unresumed handoff for the project is used
  projectPath: string;
  newSessionId?: string; // when provided, the handoff row is marked as resumed
  injectContext?: boolean; // default true: re-remember working memory into the new session
}
/**
 * Handoff data stored as JSON
 */
interface HandoffData {
  decisions: HandoffDecision[];
  activeFiles: ActiveFile[];
  pendingTasks: PendingTask[];
  workingMemory: WorkingMemoryItem[];
  contextSummary: string; // generated at prepare time via generateContextSummary()
}
export class SessionHandoffStore {
private db: Database;
  /** @param db Open better-sqlite3 database handle used for all queries. */
  constructor(db: Database) {
    this.db = db;
  }
  /**
   * Prepare a handoff document from the current session
   *
   * Gathers the requested context slices (decisions, files, tasks,
   * working memory), generates a summary, persists everything as one
   * JSON row in session_handoffs, and returns the full handoff.
   */
  prepareHandoff(options: PrepareHandoffOptions): SessionHandoff {
    const {
      sessionId = "current",
      projectPath,
      include = ["decisions", "files", "tasks", "memory"],
    } = options;
    const id = nanoid();
    const now = Date.now();
    // Collect data based on include options; omitted slices stay empty.
    const handoffData: HandoffData = {
      decisions: [],
      activeFiles: [],
      pendingTasks: [],
      workingMemory: [],
      contextSummary: "",
    };
    // Get recent decisions from the database
    if (include.includes("decisions")) {
      handoffData.decisions = this.getRecentDecisions(projectPath, sessionId);
    }
    // Get recent file activity
    if (include.includes("files")) {
      handoffData.activeFiles = this.getActiveFiles(projectPath, sessionId);
    }
    // Get pending tasks (from working memory tagged as "task")
    if (include.includes("tasks")) {
      handoffData.pendingTasks = this.getPendingTasks(projectPath);
    }
    // Get working memory items
    if (include.includes("memory")) {
      const memoryStore = new WorkingMemoryStore(this.db);
      handoffData.workingMemory = memoryStore.list(projectPath);
    }
    // Generate context summary
    handoffData.contextSummary = this.generateContextSummary(handoffData);
    // Store the handoff (resumed_* columns start NULL = not yet resumed).
    this.db
      .prepare(
        `INSERT INTO session_handoffs
         (id, from_session_id, project_path, created_at, handoff_data, resumed_by_session_id, resumed_at)
         VALUES (?, ?, ?, ?, ?, NULL, NULL)`
      )
      .run(id, sessionId, projectPath, now, JSON.stringify(handoffData));
    return {
      id,
      fromSessionId: sessionId,
      projectPath,
      createdAt: now,
      ...handoffData,
    };
  }
  /**
   * Resume from a handoff in a new session
   *
   * Looks up a handoff (by id, or the newest unresumed one for the
   * project), optionally marks it resumed and re-injects its working
   * memory, and returns the reconstructed document (null if none found).
   * NOTE(review): when handoffId is given, projectPath is NOT checked —
   * a handoff from another project can be resumed by id; confirm intended.
   */
  resumeFromHandoff(options: ResumeHandoffOptions): SessionHandoff | null {
    const { handoffId, projectPath, newSessionId, injectContext = true } = options;
    let row: SessionHandoffRow | undefined;
    if (handoffId) {
      // Get specific handoff
      row = this.db
        .prepare("SELECT * FROM session_handoffs WHERE id = ?")
        .get(handoffId) as SessionHandoffRow | undefined;
    } else {
      // Get most recent unresumed handoff for this project
      row = this.db
        .prepare(
          `SELECT * FROM session_handoffs
           WHERE project_path = ? AND resumed_by_session_id IS NULL
           ORDER BY created_at DESC
           LIMIT 1`
        )
        .get(projectPath) as SessionHandoffRow | undefined;
    }
    if (!row) {
      return null;
    }
    const handoffData = JSON.parse(row.handoff_data) as HandoffData;
    // Mark as resumed if newSessionId provided
    if (newSessionId) {
      this.db
        .prepare(
          `UPDATE session_handoffs
           SET resumed_by_session_id = ?, resumed_at = ?
           WHERE id = ?`
        )
        .run(newSessionId, Date.now(), row.id);
    }
    // Optionally inject working memory into new session
    if (injectContext && handoffData.workingMemory.length > 0) {
      const memoryStore = new WorkingMemoryStore(this.db);
      for (const item of handoffData.workingMemory) {
        // Re-remember each item in the new session
        // NOTE(review): sessionId may be undefined here when no
        // newSessionId was supplied — confirm that is intended.
        memoryStore.remember({
          key: item.key,
          value: item.value,
          context: item.context,
          tags: item.tags,
          sessionId: newSessionId,
          projectPath: item.projectPath,
        });
      }
    }
    return {
      id: row.id,
      fromSessionId: row.from_session_id,
      projectPath: row.project_path,
      createdAt: row.created_at,
      ...handoffData,
      resumedBy: row.resumed_by_session_id || newSessionId,
      // Second Date.now() call: may differ by a few ms from the stored
      // resumed_at written above.
      resumedAt: row.resumed_at || (newSessionId ? Date.now() : undefined),
    };
  }
/**
* List available handoffs for a project
*/
listHandoffs(
projectPath: string,
options?: { limit?: number; includeResumed?: boolean }
): Array<{
id: string;
fromSessionId: string;
createdAt: number;
resumedBy?: string;
resumedAt?: number;
summary: string;
}> {
const limit = options?.limit || 10;
const includeResumed = options?.includeResumed ?? false;
let sql = `SELECT * FROM session_handoffs WHERE project_path = ?`;
if (!includeResumed) {
sql += " AND resumed_by_session_id IS NULL";
}
sql += " ORDER BY created_at DESC LIMIT ?";
const rows = this.db.prepare(sql).all(projectPath, limit) as SessionHandoffRow[];
return rows.map((row) => {
const handoffData = JSON.parse(row.handoff_data) as HandoffData;
return {
id: row.id,
fromSessionId: row.from_session_id,
createdAt: row.created_at,
resumedBy: row.resumed_by_session_id || undefined,
resumedAt: row.resumed_at || undefined,
summary: handoffData.contextSummary,
};
});
}
/**
* Get a specific handoff by ID
*/
getHandoff(handoffId: string): SessionHandoff | null {
const row = this.db
.prepare("SELECT * FROM session_handoffs WHERE id = ?")
.get(handoffId) as SessionHandoffRow | undefined;
if (!row) {
return null;
}
const handoffData = JSON.parse(row.handoff_data) as HandoffData;
return {
id: row.id,
fromSessionId: row.from_session_id,
projectPath: row.project_path,
createdAt: row.created_at,
...handoffData,
resumedBy: row.resumed_by_session_id || undefined,
resumedAt: row.resumed_at || undefined,
};
}
/**
* Delete a handoff by ID
*/
deleteHandoff(handoffId: string): boolean {
const result = this.db
.prepare("DELETE FROM session_handoffs WHERE id = ?")
.run(handoffId);
return result.changes > 0;
}
  /**
   * Get recent decisions from the database
   *
   * Returns up to 20 decisions for the project, newest first; empty when
   * the decisions table has not been migrated yet. The _sessionId
   * parameter is currently unused (decisions are scoped by project only).
   */
  private getRecentDecisions(
    projectPath: string,
    _sessionId: string
  ): HandoffDecision[] {
    try {
      // Query recent decisions from the decisions table
      const rows = this.db
        .prepare(
          `SELECT d.id, d.decision_text, d.rationale, d.context, d.timestamp
           FROM decisions d
           JOIN messages m ON d.message_id = m.id
           JOIN conversations c ON m.conversation_id = c.id
           WHERE c.project_path = ?
           ORDER BY d.timestamp DESC
           LIMIT 20`
        )
        .all(projectPath) as Array<{
        id: string;
        decision_text: string;
        rationale: string | null;
        context: string | null;
        timestamp: number;
      }>;
      // Map SQL NULLs to undefined for the optional fields.
      return rows.map((row) => ({
        id: row.id,
        text: row.decision_text,
        rationale: row.rationale || undefined,
        context: row.context || undefined,
        timestamp: row.timestamp,
      }));
    } catch (_error) {
      // Table may not exist yet
      return [];
    }
  }
/**
* Get recent file activity from tool uses
*/
private getActiveFiles(
projectPath: string,
_sessionId: string
): ActiveFile[] {
try {
// Query recent file operations from tool_uses table
const rows = this.db
.prepare(
`SELECT tu.tool_name, tu.parameters, tu.timestamp
FROM tool_uses tu
JOIN messages m ON tu.message_id = m.id
JOIN conversations c ON m.conversation_id = c.id
WHERE c.project_path = ?
AND tu.tool_name IN ('Read', 'Edit', 'Write', 'Bash')
ORDER BY tu.timestamp DESC
LIMIT 50`
)
.all(projectPath) as Array<{
tool_name: string;
parameters: string;
timestamp: number;
}>;
// Extract unique files from tool parameters
const fileMap = new Map<string, ActiveFile>();
for (const row of rows) {
try {
const params = JSON.parse(row.parameters || "{}") as Record<string, unknown>;
let filePath: string | undefined;
let action: ActiveFile["lastAction"] = "read";
if (row.tool_name === "Read" && typeof params.file_path === "string") {
filePath = params.file_path;
action = "read";
} else if (row.tool_name === "Edit" && typeof params.file_path === "string") {
filePath = params.file_path;
action = "edit";
} else if (row.tool_name === "Write" && typeof params.file_path === "string") {
filePath = params.file_path;
action = "create";
}
if (filePath && !fileMap.has(filePath)) {
fileMap.set(filePath, {
path: filePath,
lastAction: action,
timestamp: row.timestamp,
});
}
} catch (_e) {
// Skip malformed parameters
}
}
return Array.from(fileMap.values()).slice(0, 20);
} catch (_error) {
// Table may not exist yet
return [];
}
}
/**
* Get pending tasks from working memory
*/
private getPendingTasks(projectPath: string): PendingTask[] {
try {
const memoryStore = new WorkingMemoryStore(this.db);
const items = memoryStore.recallMany({
projectPath,
tags: ["task", "pending", "in_progress", "blocked"],
});
return items.map((item) => ({
description: item.value,
status: this.inferTaskStatus(item.tags),
context: item.context,
}));
} catch (_error) {
return [];
}
}
/**
* Infer task status from tags
*/
private inferTaskStatus(tags: string[]): PendingTask["status"] {
if (tags.includes("blocked")) {
return "blocked";
}
if (tags.includes("in_progress")) {
return "in_progress";
}
return "pending";
}
/**
* Generate a context summary from handoff data
*/
private generateContextSummary(data: HandoffData): string {
const parts: string[] = [];
if (data.decisions.length > 0) {
parts.push(
`${data.decisions.length} decision(s): ${data.decisions
.slice(0, 3)
.map((d) => d.text.substring(0, 50))
.join("; ")}`
);
}
if (data.activeFiles.length > 0) {
parts.push(
`${data.activeFiles.length} file(s) active: ${data.activeFiles
.slice(0, 3)
.map((f) => f.path.split("/").pop())
.join(", ")}`
);
}
if (data.pendingTasks.length > 0) {
parts.push(`${data.pendingTasks.length} pending task(s)`);
}
if (data.workingMemory.length > 0) {
parts.push(`${data.workingMemory.length} memory item(s)`);
}
return parts.length > 0 ? parts.join(". ") + "." : "Empty handoff.";
}
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/index.ts | TypeScript | #!/usr/bin/env node
/**
* CCCMemory - Main Entry Point
* Supports both MCP server mode and interactive CLI mode
*/
import { readFileSync, existsSync } from "fs";
import { join, dirname } from "path";
import { fileURLToPath } from "url";
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
 * Get version from package.json.
 *
 * Resolves package.json relative to the compiled module location; returns
 * "unknown" if the file is missing or cannot be parsed.
 */
function getVersion(): string {
  const packageJsonPath = join(__dirname, "..", "package.json");
  try {
    const raw = readFileSync(packageJsonPath, "utf-8");
    const packageJson = JSON.parse(raw);
    return packageJson.version;
  } catch (_error) {
    return "unknown";
  }
}
/**
 * Abort startup when the recorded native-module ABI does not match the
 * current Node runtime's ABI (prevents confusing native-module load errors
 * later on). A missing or unreadable .node-abi.json means "no check".
 */
function checkNodeAbi(): void {
  const abiPath = join(__dirname, "..", ".node-abi.json");
  if (!existsSync(abiPath)) {
    return;
  }
  try {
    const payload = JSON.parse(readFileSync(abiPath, "utf-8"));
    const expectedModules = String(payload.modules || "");
    const currentModules = String(process.versions.modules || "");
    // Only fail when both sides are known AND they disagree.
    const mismatch =
      expectedModules !== "" &&
      currentModules !== "" &&
      expectedModules !== currentModules;
    if (!mismatch) {
      return;
    }
    console.error("❌ Native module ABI mismatch.");
    console.error(
      `   This install was built with ABI ${expectedModules} (Node ${payload.nodeVersion || "unknown"}).`
    );
    console.error(
      `   Current runtime ABI is ${currentModules} (Node ${process.versions.node}).`
    );
    console.error("   Reinstall with your runtime Node version, or use npx/volta/asdf to pin Node.");
    process.exit(1);
  } catch {
    // If the file is unreadable, skip ABI checks to avoid blocking startup.
  }
}
/**
 * Detect mode based on arguments and environment.
 *
 * Precedence: --version/-v  >  --server  >  any other args (single command)
 * >  piped stdin (MCP server)  >  interactive TTY (REPL).
 */
function detectMode(): "mcp" | "cli" | "single-command" | "version" {
  const args = process.argv.slice(2);
  const hasFlag = (flag: string) => args.includes(flag);

  if (hasFlag("--version") || hasFlag("-v")) {
    return "version";
  }
  if (hasFlag("--server")) {
    return "mcp";
  }
  if (args.length > 0) {
    // Any remaining argument is treated as a one-shot command.
    return "single-command";
  }
  // No args: a stdio pipe means an MCP client is driving us; a TTY means
  // a human wants the interactive REPL.
  return process.stdin.isTTY ? "cli" : "mcp";
}
/**
 * Main entry point.
 *
 * Routes execution to one of four run modes. The native-ABI guard runs
 * before anything that might load native modules, but is skipped for the
 * cheap --version path.
 */
async function main() {
  const mode = detectMode();
  const args = process.argv.slice(2).filter((arg) => arg !== "--server");

  if (mode !== "version") {
    checkNodeAbi();
  }

  if (mode === "version") {
    // Show version
    console.log(`cccmemory v${getVersion()}`);
    process.exit(0);
    return;
  }

  if (mode === "mcp") {
    // MCP Server Mode (for Claude Code CLI integration)
    const { ConversationMemoryServer } = await import("./mcp-server.js");
    const mcpServer = new ConversationMemoryServer();
    await mcpServer.start();
    return;
  }

  if (mode === "single-command") {
    // Single Command Mode
    const { ConversationMemoryCLI } = await import("./cli/index.js");
    const singleCLI = new ConversationMemoryCLI();
    await singleCLI.runSingleCommand(args.join(" "));
    return;
  }

  // Interactive REPL Mode (default)
  const { ConversationMemoryCLI } = await import("./cli/index.js");
  const repl = new ConversationMemoryCLI();
  await repl.start();
}
// Run main; surface fatal errors (full error object only when LOG_LEVEL=DEBUG).
main().catch((error: unknown) => {
  const debugEnabled = process.env.LOG_LEVEL?.toUpperCase() === "DEBUG";
  const printable = debugEnabled ? error : (error as Error).message;
  console.error("Fatal error:", printable);
  process.exit(1);
});
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/mcp-server.ts | TypeScript | /**
* CCCMemory - MCP Server
* MCP server implementation with stdio transport
*/
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListToolsRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { readFileSync } from "fs";
import { join, dirname } from "path";
import { fileURLToPath } from "url";
import { ConversationMemory } from "./ConversationMemory.js";
import { ToolHandlers } from "./tools/ToolHandlers.js";
import { TOOLS } from "./tools/ToolDefinitions.js";
import { getSQLiteManager } from "./storage/SQLiteManager.js";
// Read version from package.json with fallback.
// __filename/__dirname must be reconstructed manually under ESM.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const packageJsonPath = join(__dirname, "..", "package.json");
let VERSION = "0.0.0";
try {
  const raw = readFileSync(packageJsonPath, "utf-8");
  const packageJson = JSON.parse(raw) as { version?: string };
  VERSION = packageJson.version ?? "0.0.0";
} catch (err) {
  // Non-fatal: the server still starts, just reporting the fallback version.
  console.error(`[MCP] Warning: Could not read package.json version: ${(err as Error).message}`);
}
/**
 * Main MCP Server
 *
 * Wraps the MCP SDK Server with stdio transport, exposing every tool in
 * TOOLS via a name -> handler dispatch map. All diagnostic logging goes to
 * stderr (console.error) so it does not interfere with the MCP protocol
 * carried on stdout by the stdio transport.
 */
export class ConversationMemoryServer {
  private server: Server;
  private memory: ConversationMemory;
  private handlers: ToolHandlers;
  constructor() {
    this.server = new Server(
      {
        name: "cccmemory",
        version: VERSION,
      },
      {
        capabilities: {
          tools: {},
        },
      }
    );
    this.memory = new ConversationMemory();
    this.handlers = new ToolHandlers(this.memory, getSQLiteManager());
    this.setupHandlers();
  }
  /**
   * Get tool handler map for dynamic dispatch
   * Using a map prevents switch/case drift and makes it easy to add new tools
   *
   * Every entry must have a matching definition in TOOLS; this invariant is
   * enforced at startup by validateToolHandlers().
   */
  private getToolHandlerMap(): Record<string, (args: Record<string, unknown>) => Promise<unknown>> {
    return {
      index_conversations: (args) => this.handlers.indexConversations(args),
      search_conversations: (args) => this.handlers.searchConversations(args),
      search_project_conversations: (args) => this.handlers.searchProjectConversations(args),
      get_decisions: (args) => this.handlers.getDecisions(args),
      check_before_modify: (args) => this.handlers.checkBeforeModify(args),
      get_file_evolution: (args) => this.handlers.getFileEvolution(args),
      link_commits_to_conversations: (args) => this.handlers.linkCommitsToConversations(args),
      search_mistakes: (args) => this.handlers.searchMistakes(args),
      get_requirements: (args) => this.handlers.getRequirements(args),
      get_tool_history: (args) => this.handlers.getToolHistory(args),
      find_similar_sessions: (args) => this.handlers.findSimilarSessions(args),
      recall_and_apply: (args) => this.handlers.recallAndApply(args),
      generate_documentation: (args) => this.handlers.generateDocumentation(args),
      discover_old_conversations: (args) => this.handlers.discoverOldConversations(args),
      migrate_project: (args) => this.handlers.migrateProject(args),
      forget_by_topic: (args) => this.handlers.forgetByTopic(args),
      search_by_file: (args) => this.handlers.searchByFile(args),
      list_recent_sessions: (args) => this.handlers.listRecentSessions(args),
      get_latest_session_summary: (args) => this.handlers.getLatestSessionSummary(args),
      index_all_projects: (args) => this.handlers.indexAllProjects(args),
      search_all_conversations: (args) => this.handlers.searchAllConversations(args),
      get_all_decisions: (args) => this.handlers.getAllDecisions(args),
      search_all_mistakes: (args) => this.handlers.searchAllMistakes(args),
      // Live Context Layer: Working Memory
      remember: (args) => this.handlers.remember(args),
      recall: (args) => this.handlers.recall(args),
      recall_relevant: (args) => this.handlers.recallRelevant(args),
      list_memory: (args) => this.handlers.listMemory(args),
      forget: (args) => this.handlers.forget(args),
      // Live Context Layer: Session Handoff
      prepare_handoff: (args) => this.handlers.prepareHandoff(args),
      resume_from_handoff: (args) => this.handlers.resumeFromHandoff(args),
      list_handoffs: (args) => this.handlers.listHandoffs(args),
      // Live Context Layer: Context Injection
      get_startup_context: (args) => this.handlers.getStartupContext(args),
      inject_relevant_context: (args) => this.handlers.injectRelevantContext(args),
      // Phase 1: Tag Management
      list_tags: (args) => this.handlers.listTags(args),
      search_by_tags: (args) => this.handlers.searchByTags(args),
      rename_tag: (args) => this.handlers.renameTag(args),
      merge_tags: (args) => this.handlers.mergeTags(args),
      delete_tag: (args) => this.handlers.deleteTag(args),
      tag_item: (args) => this.handlers.tagItem(args),
      untag_item: (args) => this.handlers.untagItem(args),
      // Phase 1: Memory Confidence
      set_memory_confidence: (args) => this.handlers.setMemoryConfidence(args),
      set_memory_importance: (args) => this.handlers.setMemoryImportance(args),
      pin_memory: (args) => this.handlers.pinMemory(args),
      archive_memory: (args) => this.handlers.archiveMemory(args),
      unarchive_memory: (args) => this.handlers.unarchiveMemory(args),
      search_memory_by_quality: (args) => this.handlers.searchMemoryByQuality(args),
      get_memory_stats: (args) => this.handlers.getMemoryStats(args),
      // Phase 1: Cleanup/Maintenance
      get_storage_stats: (args) => this.handlers.getStorageStats(args),
      find_stale_items: (args) => this.handlers.findStaleItems(args),
      find_duplicates: (args) => this.handlers.findDuplicates(args),
      merge_duplicates: (args) => this.handlers.mergeDuplicates(args),
      cleanup_stale: (args) => this.handlers.cleanupStale(args),
      vacuum_database: (args) => this.handlers.vacuumDatabase(args),
      cleanup_orphans: (args) => this.handlers.cleanupOrphans(args),
      get_health_report: (args) => this.handlers.getHealthReport(args),
      run_maintenance: (args) => this.handlers.runMaintenance(args),
      get_maintenance_history: (args) => this.handlers.getMaintenanceHistory(args),
      // Phase 9: Methodology & Research Tracking
      get_methodologies: (args) => this.handlers.getMethodologies(args),
      get_research_findings: (args) => this.handlers.getResearchFindings(args),
      get_solution_patterns: (args) => this.handlers.getSolutionPatterns(args),
    };
  }
  /**
   * Validate that tool definitions match handler implementations
   * Fails fast at startup if there's a drift between TOOLS and handlers
   *
   * @throws Error listing the drifted tool names in either direction
   */
  private validateToolHandlers(toolHandlers: Record<string, unknown>): void {
    const definedTools = new Set(Object.keys(TOOLS));
    const implementedTools = new Set(Object.keys(toolHandlers));
    // Find tools defined but not implemented
    const missingHandlers = [...definedTools].filter(t => !implementedTools.has(t));
    if (missingHandlers.length > 0) {
      throw new Error(
        `Tool definition/handler drift: Tools defined but not implemented: ${missingHandlers.join(', ')}`
      );
    }
    // Find handlers without tool definitions
    const extraHandlers = [...implementedTools].filter(t => !definedTools.has(t));
    if (extraHandlers.length > 0) {
      throw new Error(
        `Tool definition/handler drift: Handlers without tool definitions: ${extraHandlers.join(', ')}`
      );
    }
  }
  /**
   * Setup MCP request handlers
   *
   * Registers the tools/list and tools/call handlers. Tool results and
   * errors are both returned as a single JSON text content item; errors are
   * flagged with isError rather than thrown, so the client always gets a
   * well-formed response.
   */
  private setupHandlers() {
    const toolHandlers = this.getToolHandlerMap();
    // Validate tool definitions match handlers at startup
    this.validateToolHandlers(toolHandlers);
    // List available tools
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: Object.values(TOOLS),
      };
    });
    // Handle tool execution
    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      // Track tool name for error reporting (may be undefined if params is malformed)
      let toolName: string | undefined;
      try {
        // Validate and extract params inside try block to catch malformed requests
        const params = request.params;
        if (!params || typeof params.name !== "string") {
          return {
            content: [
              {
                type: "text",
                text: JSON.stringify({ error: "Invalid request: missing or invalid tool name" }),
              },
            ],
            isError: true,
          };
        }
        toolName = params.name;
        // Ensure args is always an object, defaulting to empty
        const argsObj = (params.arguments ?? {}) as Record<string, unknown>;
        console.error(`[MCP] Executing tool: ${toolName}`);
        // Guard against prototype pollution: only allow own properties
        if (!Object.hasOwn(toolHandlers, toolName)) {
          return {
            content: [
              {
                type: "text",
                text: JSON.stringify({ error: `Unknown tool: ${toolName}` }),
              },
            ],
            isError: true,
          };
        }
        const handler = toolHandlers[toolName];
        const result = await handler(argsObj);
        // Use compact JSON for responses (no pretty-printing)
        return {
          content: [
            {
              type: "text",
              text: JSON.stringify(result),
            },
          ],
        };
      } catch (error: unknown) {
        // Safely handle non-Error throws
        const err = error instanceof Error ? error : new Error(String(error));
        // Log full error details server-side only
        console.error(`[MCP] Error executing tool ${toolName ?? "unknown"}:`, err.message);
        if (err.stack) {
          console.error(`[MCP] Stack trace:`, err.stack);
        }
        // SECURITY: Return only error message to client, not stack traces
        return {
          content: [
            {
              type: "text",
              text: JSON.stringify({ error: err.message }),
            },
          ],
          isError: true,
        };
      }
    });
  }
  /**
   * Start the server
   *
   * Connects the stdio transport; blocks until the transport is ready.
   * Startup banners go to stderr so stdout stays protocol-only.
   */
  async start() {
    const transport = new StdioServerTransport();
    console.error("[MCP] CCCMemory Server starting...");
    console.error(`[MCP] Database: ${getSQLiteManager().getStats().dbPath}`);
    await this.server.connect(transport);
    console.error("[MCP] Server ready - listening on stdio");
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
src/memory/WorkingMemoryStore.ts | TypeScript | /**
* Working Memory Store
*
* A key-value store for facts, decisions, and context that persists
* across conversation boundaries. Enables Claude to "remember" things
* explicitly and retrieve them later.
*/
import { nanoid } from "nanoid";
import type { Database } from "better-sqlite3";
import type {
WorkingMemoryItem,
WorkingMemoryRow,
RememberOptions,
RecallOptions,
SemanticRecallOptions,
SemanticRecallResult,
} from "./types.js";
export class WorkingMemoryStore {
  private db: Database;

  constructor(db: Database) {
    this.db = db;
  }

  /**
   * Store a fact/decision/context in working memory.
   *
   * Upsert semantics: if (projectPath, key) already exists the row is
   * updated in place (preserving its id and created_at), otherwise a new
   * row is inserted. `ttl` is in seconds; when set, the item expires at
   * now + ttl and is lazily purged by cleanupExpired().
   */
  remember(options: RememberOptions): WorkingMemoryItem {
    const now = Date.now();
    const id = nanoid();
    const expiresAt = options.ttl ? now + options.ttl * 1000 : null;
    // Check if key already exists for this project
    const existing = this.db
      .prepare(
        "SELECT id FROM working_memory WHERE project_path = ? AND key = ?"
      )
      .get(options.projectPath, options.key) as { id: string } | undefined;
    if (existing) {
      // Update existing
      this.db
        .prepare(
          `UPDATE working_memory
           SET value = ?, context = ?, tags = ?, session_id = ?, updated_at = ?, expires_at = ?
           WHERE id = ?`
        )
        .run(
          options.value,
          options.context || null,
          options.tags ? JSON.stringify(options.tags) : null,
          options.sessionId || null,
          now,
          expiresAt,
          existing.id
        );
      // Update FTS
      this.updateFts(existing.id, options.key, options.value, options.context);
      return this.getById(existing.id) as WorkingMemoryItem;
    }
    // Insert new
    this.db
      .prepare(
        `INSERT INTO working_memory
         (id, key, value, context, tags, session_id, project_path, created_at, updated_at, expires_at)
         VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
      )
      .run(
        id,
        options.key,
        options.value,
        options.context || null,
        options.tags ? JSON.stringify(options.tags) : null,
        options.sessionId || null,
        options.projectPath,
        now,
        now,
        expiresAt
      );
    // Insert into FTS
    this.insertFts(id, options.key, options.value, options.context);
    return {
      id,
      key: options.key,
      value: options.value,
      context: options.context,
      tags: options.tags || [],
      sessionId: options.sessionId,
      projectPath: options.projectPath,
      createdAt: now,
      updatedAt: now,
      expiresAt: expiresAt || undefined,
    };
  }

  /**
   * Recall a specific item by key. Returns null when no live (non-expired)
   * item exists for (projectPath, key).
   */
  recall(key: string, projectPath: string): WorkingMemoryItem | null {
    // First, clean up expired items
    this.cleanupExpired();
    const row = this.db
      .prepare(
        `SELECT * FROM working_memory
         WHERE project_path = ? AND key = ?
         AND (expires_at IS NULL OR expires_at > ?)`
      )
      .get(projectPath, key, Date.now()) as WorkingMemoryRow | undefined;
    if (!row) {
      return null;
    }
    return this.rowToItem(row);
  }

  /**
   * Recall items matching options.
   *
   * SQL filters handle project/key/session/expiry; tag matching is done in
   * JS (tags are stored as a JSON array, so an item matches if it carries
   * ANY of the requested tags). Results are ordered by updated_at DESC.
   */
  recallMany(options: RecallOptions): WorkingMemoryItem[] {
    // First, clean up expired items
    if (!options.includeExpired) {
      this.cleanupExpired();
    }
    let sql = "SELECT * FROM working_memory WHERE 1=1";
    const params: (string | number)[] = [];
    if (options.projectPath) {
      sql += " AND project_path = ?";
      params.push(options.projectPath);
    }
    if (options.key) {
      sql += " AND key = ?";
      params.push(options.key);
    }
    if (options.sessionId) {
      sql += " AND session_id = ?";
      params.push(options.sessionId);
    }
    if (!options.includeExpired) {
      sql += " AND (expires_at IS NULL OR expires_at > ?)";
      params.push(Date.now());
    }
    sql += " ORDER BY updated_at DESC";
    const rows = this.db.prepare(sql).all(...params) as WorkingMemoryRow[];
    // Filter by tags in JavaScript (JSON array in SQLite)
    let items = rows.map((row) => this.rowToItem(row));
    const filterTags = options.tags;
    if (filterTags && filterTags.length > 0) {
      items = items.filter((item) =>
        filterTags.some((tag) => item.tags.includes(tag))
      );
    }
    return items;
  }

  /**
   * Semantic search across working memory using FTS.
   *
   * Best matches come first (FTS5 bm25() yields more-negative scores for
   * better matches, so ORDER BY rank ascending is best-first); each result
   * carries a `similarity` in [0, 1] where 1 is the strongest match.
   */
  recallRelevant(options: SemanticRecallOptions): SemanticRecallResult[] {
    // First, clean up expired items
    this.cleanupExpired();
    const limit = options.limit || 10;
    // Use FTS5 for text search
    const ftsResults = this.db
      .prepare(
        `SELECT wm.*,
                bm25(working_memory_fts) as rank
         FROM working_memory_fts fts
         JOIN working_memory wm ON wm.id = fts.id
         WHERE working_memory_fts MATCH ?
         AND wm.project_path = ?
         AND (wm.expires_at IS NULL OR wm.expires_at > ?)
         ORDER BY rank
         LIMIT ?`
      )
      .all(
        this.escapeFtsQuery(options.query),
        options.projectPath,
        Date.now(),
        limit
      ) as Array<WorkingMemoryRow & { rank: number }>;
    return ftsResults.map((row) => ({
      ...this.rowToItem(row),
      similarity: this.normalizeRank(row.rank),
    }));
  }

  /**
   * Forget (delete) a memory item by key.
   *
   * @returns true when a live item existed and was removed
   */
  forget(key: string, projectPath: string): boolean {
    const item = this.recall(key, projectPath);
    if (!item) {
      return false;
    }
    // Delete from FTS first
    this.deleteFts(item.id);
    // Delete from main table
    const result = this.db
      .prepare("DELETE FROM working_memory WHERE project_path = ? AND key = ?")
      .run(projectPath, key);
    return result.changes > 0;
  }

  /**
   * Forget all items for a project.
   *
   * @returns number of rows deleted
   */
  forgetAll(projectPath: string): number {
    // Get all IDs first for FTS cleanup
    const items = this.db
      .prepare("SELECT id FROM working_memory WHERE project_path = ?")
      .all(projectPath) as Array<{ id: string }>;
    for (const item of items) {
      this.deleteFts(item.id);
    }
    const result = this.db
      .prepare("DELETE FROM working_memory WHERE project_path = ?")
      .run(projectPath);
    return result.changes;
  }

  /**
   * List all memory items for a project (paginated, newest-updated first).
   *
   * Note: tag filtering happens AFTER LIMIT/OFFSET, so a page may return
   * fewer than `limit` items when tags are specified.
   */
  list(
    projectPath: string,
    options?: { tags?: string[]; limit?: number; offset?: number }
  ): WorkingMemoryItem[] {
    // First, clean up expired items
    this.cleanupExpired();
    const limit = options?.limit || 100;
    const offset = options?.offset || 0;
    const rows = this.db
      .prepare(
        `SELECT * FROM working_memory
         WHERE project_path = ?
         AND (expires_at IS NULL OR expires_at > ?)
         ORDER BY updated_at DESC
         LIMIT ? OFFSET ?`
      )
      .all(projectPath, Date.now(), limit, offset) as WorkingMemoryRow[];
    let items = rows.map((row) => this.rowToItem(row));
    // Filter by tags if specified
    const listTags = options?.tags;
    if (listTags && listTags.length > 0) {
      items = items.filter((item) =>
        listTags.some((tag) => item.tags.includes(tag))
      );
    }
    return items;
  }

  /**
   * Get count of live (non-expired) items for a project.
   */
  count(projectPath: string): number {
    const result = this.db
      .prepare(
        `SELECT COUNT(*) as count FROM working_memory
         WHERE project_path = ?
         AND (expires_at IS NULL OR expires_at > ?)`
      )
      .get(projectPath, Date.now()) as { count: number };
    return result.count;
  }

  /**
   * Get a single item by ID (no expiry filtering).
   */
  private getById(id: string): WorkingMemoryItem | null {
    const row = this.db
      .prepare("SELECT * FROM working_memory WHERE id = ?")
      .get(id) as WorkingMemoryRow | undefined;
    if (!row) {
      return null;
    }
    return this.rowToItem(row);
  }

  /**
   * Convert database row to WorkingMemoryItem (NULL columns -> undefined,
   * tags JSON -> array).
   */
  private rowToItem(row: WorkingMemoryRow): WorkingMemoryItem {
    return {
      id: row.id,
      key: row.key,
      value: row.value,
      context: row.context || undefined,
      tags: row.tags ? JSON.parse(row.tags) : [],
      sessionId: row.session_id || undefined,
      projectPath: row.project_path,
      createdAt: row.created_at,
      updatedAt: row.updated_at,
      expiresAt: row.expires_at || undefined,
    };
  }

  /**
   * Clean up expired items (and their FTS entries).
   */
  private cleanupExpired(): void {
    const now = Date.now();
    // Get expired IDs for FTS cleanup
    const expired = this.db
      .prepare(
        "SELECT id FROM working_memory WHERE expires_at IS NOT NULL AND expires_at <= ?"
      )
      .all(now) as Array<{ id: string }>;
    for (const item of expired) {
      this.deleteFts(item.id);
    }
    // Delete expired items
    this.db
      .prepare(
        "DELETE FROM working_memory WHERE expires_at IS NOT NULL AND expires_at <= ?"
      )
      .run(now);
  }

  /**
   * Insert into FTS index.
   */
  private insertFts(
    id: string,
    key: string,
    value: string,
    context?: string
  ): void {
    try {
      this.db
        .prepare(
          `INSERT INTO working_memory_fts(id, key, value, context)
           VALUES (?, ?, ?, ?)`
        )
        .run(id, key, value, context || "");
    } catch (_error) {
      // FTS insert can fail if table doesn't exist yet
      // Silently ignore - search will fall back to non-FTS
    }
  }

  /**
   * Update FTS index (delete + reinsert).
   */
  private updateFts(
    id: string,
    key: string,
    value: string,
    context?: string
  ): void {
    try {
      // Delete old entry
      this.deleteFts(id);
      // Insert new entry
      this.insertFts(id, key, value, context);
    } catch (_error) {
      // Silently ignore FTS errors
    }
  }

  /**
   * Delete from FTS index.
   */
  private deleteFts(id: string): void {
    try {
      this.db
        .prepare("DELETE FROM working_memory_fts WHERE id = ?")
        .run(id);
    } catch (_error) {
      // Silently ignore FTS errors
    }
  }

  /**
   * Escape FTS query for safe matching.
   *
   * Strips FTS5 operator characters from each word, quotes each word, and
   * ORs them for broad matching. Returns '""' (matches nothing useful) when
   * no usable words remain, keeping MATCH syntactically valid.
   */
  private escapeFtsQuery(query: string): string {
    // Escape special FTS5 characters and wrap in quotes for phrase matching
    // Also handle simple word queries
    const words = query
      .split(/\s+/)
      .filter((w) => w.length > 0)
      .map((word) => {
        // Remove special characters that could break FTS
        const cleaned = word.replace(/['"(){}[\]:*^~\\-]/g, "");
        return cleaned;
      })
      .filter((w) => w.length > 0);
    if (words.length === 0) {
      return '""';
    }
    // Join with OR for broad matching
    return words.map((w) => `"${w}"`).join(" OR ");
  }

  /**
   * Normalize a BM25 rank to a similarity score in [0, 1].
   *
   * SQLite FTS5's bm25() returns negative scores where MORE negative means
   * a BETTER match (which is why recallRelevant's ORDER BY rank ascending
   * puts best matches first). The previous formula (1 + rank / 50) was
   * inverted: it mapped the best matches (~ -50) to 0 and no-match (0) to
   * 1. Negating the score maps best -> 1 and worst -> 0.
   */
  private normalizeRank(rank: number): number {
    // Typical BM25 scores range from -50 to 0; clamp into [0, 1].
    return Math.max(0, Math.min(1, -rank / 50));
  }
}
| xiaolai/cccmemory | 21 | MCP server for indexing and searching Claude Code conversation history with decision tracking and git integration | TypeScript | xiaolai | xiaolai | inblockchain |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.