{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# LightMem Example with code data\n", "\n", "Tutorial author: xubuqiang" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 0. Prepare the runtime environment" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/disk/disk_20T/xubuqiang/lightmem\n", "env: ALL_PROXY=\n", "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\n", "Obtaining file:///disk/disk_20T/xubuqiang/lightmem\n", " Installing build dependencies ... \u001b[?25ldone\n", "\u001b[?25h Checking if build backend supports build_editable ... \u001b[?25ldone\n", "\u001b[?25h Getting requirements to build editable ... \u001b[?25ldone\n", "\u001b[?25h Preparing editable metadata (pyproject.toml) ... \u001b[?25ldone\n", "\u001b[?25hCollecting torch==2.8.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/5a/63/4fdc45a0304536e75a5e1b1bbfb1b56dd0e2743c48ee83ca729f7ce44162/torch-2.8.0-cp311-cp311-manylinux_2_28_x86_64.whl (888.1 MB)\n", "Collecting transformers==4.57.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/e5/2b/4d2708ac1ff5cd708b6548f4c5812d0ae40d1c28591c4c1c762b6dbdef2d/transformers-4.57.0-py3-none-any.whl (12.0 MB)\n", "Collecting sentence-transformers==5.1.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/48/21/4670d03ab8587b0ab6f7d5fa02a95c3dd6b1f39d0e40e508870201f3d76c/sentence_transformers-5.1.1-py3-none-any.whl (486 kB)\n", "Collecting accelerate==1.10.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/5f/a0/d9ef19f780f319c21ee90ecfef4431cbeeca95bec7f14071785c17b6029b/accelerate-1.10.1-py3-none-any.whl (374 kB)\n", "Collecting openai==2.3.0 (from lightmem==0.1.0)\n", " Using cached 
https://pypi.tuna.tsinghua.edu.cn/packages/9c/5b/4be258ff072ed8ee15f6bfd8d5a1a4618aa4704b127c0c5959212ad177d6/openai-2.3.0-py3-none-any.whl (999 kB)\n", "Collecting tiktoken==0.12.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/6a/d0/3d9275198e067f8b65076a68894bb52fd253875f3644f0a321a720277b8a/tiktoken-0.12.0-cp311-cp311-manylinux_2_28_x86_64.whl (1.2 MB)\n", "Collecting llmlingua==0.2.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/6e/3e/221fe46a3338f2babdb2082ee42df88fcaa8ea0e639e832cbb1b93c5923a/llmlingua-0.2.2-py3-none-any.whl (30 kB)\n", "Collecting qdrant-client==1.15.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/ef/33/d8df6a2b214ffbe4138db9a1efe3248f67dc3c671f82308bea1582ecbbb7/qdrant_client-1.15.1-py3-none-any.whl (337 kB)\n", "Collecting pydantic==2.11.10 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/bd/1f/73c53fcbfb0b5a78f91176df41945ca466e71e9d9d836e5c522abda39ee7/pydantic-2.11.10-py3-none-any.whl (444 kB)\n", "Collecting pydantic_core==2.33.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.0 MB)\n", "Collecting numpy==2.2.6 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/b3/dd/2238b898e51bd6d389b7389ffb20d7f4c10066d80351187ec8e303a5a475/numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (16.8 MB)\n", "Collecting scipy==1.15.3 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/bd/37/89f19c8c05505d0601ed5650156e50eb881ae3918786c8fd7262b4ee66d3/scipy-1.15.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (37.7 MB)\n", "Collecting scikit-learn==1.7.2 (from lightmem==0.1.0)\n", " Using cached 
https://pypi.tuna.tsinghua.edu.cn/packages/ef/0e/97dbca66347b8cf0ea8b529e6bb9367e337ba2e8be0ef5c1a545232abfde/scikit_learn-1.7.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (9.7 MB)\n", "Collecting nltk==3.9.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/60/90/81ac364ef94209c100e12579629dc92bf7a709a84af32f8c551b02c07e94/nltk-3.9.2-py3-none-any.whl (1.5 MB)\n", "Collecting tokenizers==0.22.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.3 MB)\n", "Collecting huggingface-hub==0.35.3 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/31/a0/651f93d154cb72323358bf2bbae3e642bdb5d2f1bfc874d096f7cb159fa0/huggingface_hub-0.35.3-py3-none-any.whl (564 kB)\n", "Collecting safetensors==0.6.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/fe/5d/5a514d7b88e310c8b146e2404e0dc161282e78634d9358975fd56dfd14be/safetensors-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (485 kB)\n", "Collecting tqdm==4.67.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl (78 kB)\n", "Requirement already satisfied: PyYAML==6.0.3 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (6.0.3)\n", "Requirement already satisfied: requests==2.32.5 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (2.32.5)\n", "Collecting filelock==3.20.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/76/91/7216b27286936c16f5b4d0c530087e4a54eead683e6b0b73dd0c64844af6/filelock-3.20.0-py3-none-any.whl (16 kB)\n", "Collecting 
regex==2025.9.18 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/fe/d0/c51d1e6a80eab11ef96a4cbad17fc0310cf68994fb01a7283276b7e5bbd6/regex-2025.9.18-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (798 kB)\n", "Requirement already satisfied: packaging==25.0 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (25.0)\n", "Requirement already satisfied: httpx==0.28.1 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (0.28.1)\n", "Requirement already satisfied: httpcore==1.0.9 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (1.0.9)\n", "Requirement already satisfied: h11==0.16.0 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (0.16.0)\n", "Collecting h2==4.3.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/69/b2/119f6e6dcbd96f9069ce9a2665e0146588dc9f88f29549711853645e736a/h2-4.3.0-py3-none-any.whl (61 kB)\n", "Collecting anyio==4.11.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl (109 kB)\n", "Collecting certifi==2025.10.5 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/e4/37/af0d2ef3967ac0d6113837b44a4f0bfe1328c2b9763bd5b1744520e5cfed/certifi-2025.10.5-py3-none-any.whl (163 kB)\n", "Collecting charset-normalizer==3.4.3 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/87/df/b7737ff046c974b183ea9aa111b74185ac8c3a326c6262d413bd5a1b8c69/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (150 kB)\n", "Collecting idna==3.10 (from lightmem==0.1.0)\n", " Using cached 
https://pypi.tuna.tsinghua.edu.cn/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl (70 kB)\n", "Collecting click==8.3.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl (107 kB)\n", "Collecting joblib==1.5.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/1e/e8/685f47e0d754320684db4425a0967f7d3fa70126bffd76110b7009a0090f/joblib-1.5.2-py3-none-any.whl (308 kB)\n", "Requirement already satisfied: Jinja2==3.1.6 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (3.1.6)\n", "Requirement already satisfied: MarkupSafe==3.0.3 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (3.0.3)\n", "Collecting pillow==11.3.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/f2/2f/d7675ecae6c43e9f12aa8d58b6012683b20b6edfbdac7abcb4e6af7a3784/pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (6.6 MB)\n", "Collecting protobuf==6.32.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/5c/f6/88d77011b605ef979aace37b7703e4eefad066f7e84d935e5a696515c2dd/protobuf-6.32.1-cp39-abi3-manylinux2014_x86_64.whl (322 kB)\n", "Collecting psutil==7.1.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/9d/de/04c8c61232f7244aa0a4b9a9fbd63a89d5aeaf94b2fc9d1d16e2faa5cbb0/psutil-7.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (291 kB)\n", "Collecting fsspec==2025.9.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl (199 kB)\n", "Collecting grpcio==1.75.1 (from 
lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/3f/42/5f628abe360b84dfe8dd8f32be6b0606dc31dc04d3358eef27db791ea4d5/grpcio-1.75.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (6.5 MB)\n", "Collecting portalocker==3.2.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/4b/a6/38c8e2f318bf67d338f4d629e93b0b4b9af331f455f0390ea8ce4a099b26/portalocker-3.2.0-py3-none-any.whl (22 kB)\n", "Collecting annotated-types==0.7.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl (13 kB)\n", "Requirement already satisfied: typing_extensions==4.15.0 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (4.15.0)\n", "Collecting typing-inspection==0.4.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl (14 kB)\n", "Collecting networkx==3.4.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl (1.7 MB)\n", "Collecting sympy==1.14.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl (6.3 MB)\n", "Collecting mpmath==1.3.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl (536 kB)\n", "Collecting distro==1.9.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl (20 
kB)\n", "Collecting hf-xet==1.1.10 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/15/07/86397573efefff941e100367bbda0b21496ffcdb34db7ab51912994c32a2/hf_xet-1.1.10-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.2 MB)\n", "Collecting hpack==4.1.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl (34 kB)\n", "Collecting hyperframe==6.1.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl (13 kB)\n", "Collecting jiter==0.11.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/9f/d8/ec74886497ea393c29dbd7651ddecc1899e86404a6b1f84a3ddab0ab59fd/jiter-0.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (348 kB)\n", "Collecting sniffio==1.3.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl (10 kB)\n", "Collecting threadpoolctl==3.6.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl (18 kB)\n", "Collecting urllib3==2.5.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl (129 kB)\n", "Collecting nvidia-cuda-nvrtc-cu12==12.8.93 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl (88.0 MB)\n", "Collecting 
nvidia-cuda-runtime-cu12==12.8.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (954 kB)\n", "Collecting nvidia-cuda-cupti-cu12==12.8.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (10.2 MB)\n", "Collecting nvidia-cudnn-cu12==9.10.2.21 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl (706.8 MB)\n", "Collecting nvidia-cublas-cu12==12.8.4.1 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl (594.3 MB)\n", "Collecting nvidia-cufft-cu12==11.3.3.83 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (193.1 MB)\n", "Collecting nvidia-curand-cu12==10.3.9.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl (63.6 MB)\n", "Collecting nvidia-cusolver-cu12==11.7.3.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached 
https://pypi.tuna.tsinghua.edu.cn/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl (267.5 MB)\n", "Collecting nvidia-cusparse-cu12==12.5.8.93 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (288.2 MB)\n", "Collecting nvidia-cusparselt-cu12==0.7.1 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl (287.2 MB)\n", "Collecting nvidia-nccl-cu12==2.27.3 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/5c/5b/4e4fff7bad39adf89f735f2bc87248c81db71205b62bcc0d5ca5b606b3c3/nvidia_nccl_cu12-2.27.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (322.4 MB)\n", "Collecting nvidia-nvtx-cu12==12.8.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (89 kB)\n", "Collecting nvidia-nvjitlink-cu12==12.8.93 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl (39.3 MB)\n", "Collecting nvidia-cufile-cu12==1.13.1.3 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (1.2 MB)\n", 
"Collecting triton==3.4.0 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/7d/39/43325b3b651d50187e591eefa22e236b2981afcebaefd4f2fc0ea99df191/triton-3.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (155.5 MB)\n", "Requirement already satisfied: setuptools>=40.8.0 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from triton==3.4.0->torch==2.8.0->lightmem==0.1.0) (80.9.0)\n", "\u001b[33mWARNING: The candidate selected for download or install is a yanked version: 'transformers' candidate (version 4.57.0 at https://pypi.tuna.tsinghua.edu.cn/packages/e5/2b/4d2708ac1ff5cd708b6548f4c5812d0ae40d1c28591c4c1c762b6dbdef2d/transformers-4.57.0-py3-none-any.whl (from https://pypi.tuna.tsinghua.edu.cn/simple/transformers/) (requires-python:>=3.9.0))\n", "Reason for being yanked: \u001b[0m\u001b[33m\n", "\u001b[0mBuilding wheels for collected packages: lightmem\n", " Building editable for lightmem (pyproject.toml) ... 
\u001b[?25ldone\n", "\u001b[?25h Created wheel for lightmem: filename=lightmem-0.1.0-0.editable-py3-none-any.whl size=11716 sha256=b8bc5212fe5403e20a1808f7f1f0b7db4990d50b8699794f24852a5fa211cdd2\n", " Stored in directory: /tmp/pip-ephem-wheel-cache-nmptuqgp/wheels/6e/78/3e/ad25a8ed5245b53409d8ea7f1a62638ba248b738ed4cf419a3\n", "Successfully built lightmem\n", "Installing collected packages: nvidia-cusparselt-cu12, mpmath, urllib3, typing-inspection, triton, tqdm, threadpoolctl, sympy, sniffio, safetensors, regex, pydantic_core, psutil, protobuf, portalocker, pillow, nvidia-nvtx-cu12, nvidia-nvjitlink-cu12, nvidia-nccl-cu12, nvidia-curand-cu12, nvidia-cufile-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, numpy, networkx, joblib, jiter, idna, hyperframe, hpack, hf-xet, grpcio, fsspec, filelock, distro, click, charset-normalizer, certifi, annotated-types, scipy, pydantic, nvidia-cusparse-cu12, nvidia-cufft-cu12, nvidia-cudnn-cu12, nltk, h2, anyio, tiktoken, scikit-learn, nvidia-cusolver-cu12, huggingface-hub, torch, tokenizers, openai, transformers, qdrant-client, accelerate, sentence-transformers, llmlingua, lightmem\n", "\u001b[2K Attempting uninstall: urllib3━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/62\u001b[0m [mpmath]t-cu12]\n", "\u001b[2K Found existing installation: urllib3 2.6.0━━━━━━━\u001b[0m \u001b[32m 1/62\u001b[0m [mpmath]\n", "\u001b[2K Uninstalling urllib3-2.6.0:━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/62\u001b[0m [mpmath]\n", "\u001b[2K Successfully uninstalled urllib3-2.6.0━━━━━━━━━\u001b[0m \u001b[32m 1/62\u001b[0m [mpmath]\n", "\u001b[2K Attempting uninstall: psutil[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11/62\u001b[0m [pydantic_core]ion]\n", "\u001b[2K Found existing installation: psutil 7.1.3━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11/62\u001b[0m [pydantic_core]\n", "\u001b[2K Uninstalling psutil-7.1.3:━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11/62\u001b[0m 
[pydantic_core]\n", "\u001b[2K Successfully uninstalled psutil-7.1.3━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11/62\u001b[0m [pydantic_core]\n", "\u001b[2K Attempting uninstall: idnam\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m27/62\u001b[0m [joblib]x]blas-cu12]u12]2]\n", "\u001b[2K Found existing installation: idna 3.11━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m27/62\u001b[0m [joblib]\n", "\u001b[2K Uninstalling idna-3.11:90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m27/62\u001b[0m [joblib]\n", "\u001b[2K Successfully uninstalled idna-3.11━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m27/62\u001b[0m [joblib]\n", "\u001b[2K Attempting uninstall: charset-normalizer[0m\u001b[90m━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m37/62\u001b[0m [click]ck]e]\n", "\u001b[2K Found existing installation: charset-normalizer 3.4.4━━━━━\u001b[0m \u001b[32m37/62\u001b[0m [click]\n", "\u001b[2K Uninstalling charset-normalizer-3.4.4:[90m━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m37/62\u001b[0m [click]\n", "\u001b[2K Successfully uninstalled charset-normalizer-3.4.4━━━━━━━\u001b[0m \u001b[32m37/62\u001b[0m [click]\n", "\u001b[2K Attempting uninstall: certifi━\u001b[0m\u001b[91m╸\u001b[0m\u001b[90m━━━━━━━━━━━━━━━\u001b[0m \u001b[32m38/62\u001b[0m [charset-normalizer]\n", "\u001b[2K Found existing installation: certifi 2025.11.12━━━━━━━━━━━\u001b[0m \u001b[32m38/62\u001b[0m [charset-normalizer]\n", "\u001b[2K Uninstalling certifi-2025.11.12:m╸\u001b[0m\u001b[90m━━━━━━━━━━━━━━━\u001b[0m \u001b[32m38/62\u001b[0m [charset-normalizer]\n", "\u001b[2K Successfully uninstalled certifi-2025.11.12━━━━━━━━━━━━━\u001b[0m \u001b[32m38/62\u001b[0m [charset-normalizer]\n", "\u001b[2K Attempting uninstall: anyio━━━━━━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━\u001b[0m \u001b[32m47/62\u001b[0m [h2]k]a-cudnn-cu12]12]\n", "\u001b[2K Found existing installation: anyio 4.12.0[0m\u001b[90m━━━━━━━━━\u001b[0m \u001b[32m47/62\u001b[0m [h2]\n", "\u001b[2K Uninstalling 
anyio-4.12.0:━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━\u001b[0m \u001b[32m47/62\u001b[0m [h2]\n", "\u001b[2K Successfully uninstalled anyio-4.12.0╺\u001b[0m\u001b[90m━━━━━━━━━\u001b[0m \u001b[32m47/62\u001b[0m [h2]\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62/62\u001b[0m [lightmem]lightmem]llmlingua]ransformers]\n", "\u001b[1A\u001b[2KSuccessfully installed accelerate-1.10.1 annotated-types-0.7.0 anyio-4.11.0 certifi-2025.10.5 charset-normalizer-3.4.3 click-8.3.0 distro-1.9.0 filelock-3.20.0 fsspec-2025.9.0 grpcio-1.75.1 h2-4.3.0 hf-xet-1.1.10 hpack-4.1.0 huggingface-hub-0.35.3 hyperframe-6.1.0 idna-3.10 jiter-0.11.0 joblib-1.5.2 lightmem-0.1.0 llmlingua-0.2.2 mpmath-1.3.0 networkx-3.4.2 nltk-3.9.2 numpy-2.2.6 nvidia-cublas-cu12-12.8.4.1 nvidia-cuda-cupti-cu12-12.8.90 nvidia-cuda-nvrtc-cu12-12.8.93 nvidia-cuda-runtime-cu12-12.8.90 nvidia-cudnn-cu12-9.10.2.21 nvidia-cufft-cu12-11.3.3.83 nvidia-cufile-cu12-1.13.1.3 nvidia-curand-cu12-10.3.9.90 nvidia-cusolver-cu12-11.7.3.90 nvidia-cusparse-cu12-12.5.8.93 nvidia-cusparselt-cu12-0.7.1 nvidia-nccl-cu12-2.27.3 nvidia-nvjitlink-cu12-12.8.93 nvidia-nvtx-cu12-12.8.90 openai-2.3.0 pillow-11.3.0 portalocker-3.2.0 protobuf-6.32.1 psutil-7.1.0 pydantic-2.11.10 pydantic_core-2.33.2 qdrant-client-1.15.1 regex-2025.9.18 safetensors-0.6.2 scikit-learn-1.7.2 scipy-1.15.3 sentence-transformers-5.1.1 sniffio-1.3.1 sympy-1.14.0 threadpoolctl-3.6.0 tiktoken-0.12.0 tokenizers-0.22.1 torch-2.8.0 tqdm-4.67.1 transformers-4.57.0 triton-3.4.0 typing-inspection-0.4.2 urllib3-2.5.0\n" ] } ], "source": [ "# Set your LightMemory project path\n", "LIGHTMEM_PROJECT_PATH = '/disk/disk_20T/xubuqiang/lightmem'\n", "\n", "# Install in editable mode\n", "%cd {LIGHTMEM_PROJECT_PATH}\n", "%env ALL_PROXY=\n", "!pip install -e ." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. 
Import Dependencies" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import os\n", "import json\n", "import datetime\n", "from lightmem.memory.lightmem import LightMemory\n", "from typing import List, Dict, Any\n", "import pandas as pd\n", "from tqdm import tqdm" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "RUN_LOG_DIR: ./logs/20251206_193204\n", "DATA_FILE_PATH: /disk/disk_20T/xubuqiang/lightmem/dataset/longmemeval/code_single.json\n" ] } ], "source": [ "# logging setup\n", "LOGS_ROOT = \"./logs\"\n", "RUN_TIMESTAMP = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n", "RUN_LOG_DIR = os.path.join(LOGS_ROOT, RUN_TIMESTAMP)\n", "os.makedirs(RUN_LOG_DIR, exist_ok=True)\n", "\n", "# API\n", "API_KEY = ''\n", "API_BASE_URL = ''\n", "LLM_MODEL = 'gpt-4o-mini'\n", "\n", "LLMLINGUA_MODEL_PATH = '/models/llmlingua-2-bert-base-multilingual-cased-meetingbank'\n", "EMBEDDING_MODEL_PATH = '/models/all-MiniLM-L6-v2'\n", "DATA_FILE_PATH = '/code_single.json'\n", "\n", "print(f\"RUN_LOG_DIR: {RUN_LOG_DIR}\")\n", "print(f\"DATA_FILE_PATH: {DATA_FILE_PATH}\")" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "env: ALL_PROXY=\n" ] } ], "source": [ "%env ALL_PROXY=" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2. 
LightMemory Initial config" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Initial LightMem...\n", "pre_compressor:llmlingua-2\n", "pre_compressor:llmlingua_config={'model_name': '/disk/disk_20T/fangjizhan/models/llmlingua-2-bert-base-multilingual-cased-meetingbank', 'device_map': 'cuda', 'use_llmlingua2': True} llmlingua2_config={'max_batch_size': 50, 'max_force_token': 100} compress_config={'instruction': '', 'rate': 0.8, 'target_token': -1}\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:11:16 - LightMemory - INFO - Initializing LightMemory with provided configuration\n", "2025-12-06 19:11:16 - LightMemory - INFO - Token statistics tracking initialized\n", "2025-12-06 19:11:16 - LightMemory - INFO - Initializing pre-compressor\n", "`torch_dtype` is deprecated! Use `dtype` instead!\n", "2025-12-06 19:12:13 - LightMemory - INFO - Initializing topic segmenter\n", "2025-12-06 19:12:13 - LightMemory - INFO - Initializing memory manager\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "DEBUG: resolved to encoding o200k_base\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:12:13 - LightMemory - INFO - Initializing text embedder\n", "2025-12-06 19:12:13 - sentence_transformers.SentenceTransformer - INFO - Load pretrained SentenceTransformer: /disk/disk_20T/fangjizhan/models/all-MiniLM-L6-v2\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "ShortMemBufferManager initialized with max_tokens=512\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:12:13 - LightMemory - INFO - Initializing embedding retriever\n", "2025-12-06 19:12:14 - LightMemory - INFO - LightMemory initialization completed successfully\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "LightMem initialized!\n" ] } ], "source": [ "config_dict = {\n", " \"pre_compress\": True,\n", " 
\"pre_compressor\": {\n", " \"model_name\": \"llmlingua-2\",\n", " \"configs\": {\n", " \"llmlingua_config\": {\n", " \"model_name\": LLMLINGUA_MODEL_PATH,\n", " \"device_map\": \"cuda\",\n", " \"use_llmlingua2\": True,\n", " },\n", " }\n", " },\n", " \"topic_segment\": True,\n", " \"precomp_topic_shared\": True,\n", " \"topic_segmenter\": {\n", " \"model_name\": \"llmlingua-2\",\n", " },\n", " \"messages_use\": \"hybrid\",\n", " \"metadata_generate\": True,\n", " \"text_summary\": True,\n", " \"memory_manager\": {\n", " \"model_name\": 'openai',\n", " \"configs\": {\n", " \"model\": LLM_MODEL,\n", " \"api_key\": API_KEY,\n", " \"max_tokens\": 16000,\n", " \"openai_base_url\": API_BASE_URL\n", " }\n", " },\n", " \"extract_threshold\": 0.1,\n", " \"index_strategy\": \"embedding\",\n", " \"text_embedder\": {\n", " \"model_name\": \"huggingface\",\n", " \"configs\": {\n", " \"model\": EMBEDDING_MODEL_PATH,\n", " \"embedding_dims\": 384,\n", " \"model_kwargs\": {\"device\": \"cuda\"},\n", " },\n", " },\n", " \"retrieve_strategy\": \"embedding\",\n", " \"embedding_retriever\": {\n", " \"model_name\": \"qdrant\",\n", " \"configs\": {\n", " \"collection_name\": \"code_demo\",\n", " \"embedding_model_dims\": 384,\n", " \"path\": \"./code_demo_db\", \n", " }\n", " },\n", " \"update\": \"offline\",\n", " \"logging\": {\n", " \"level\": \"DEBUG\",\n", " \"file_enabled\": True,\n", " \"log_dir\": RUN_LOG_DIR,\n", " }\n", "}\n", "\n", "print(\"Initial LightMem...\")\n", "lightmem = LightMemory.from_config(config_dict)\n", "print(\"LightMem initialized!\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 3. 
Load dataset\n" ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Dataset statistics:\n", "- Number of questions: 3\n", "- Number of historical sessions: 3\n", "- Session ID list: ['session_0', 'session_1', 'session_2']\n", "\n", "Question preview:\n", " [q_faker_01] I'm reviewing the fake user data generation task we did previously. Can you remind me exactly how ma...\n", " [q_faker_02] Going back to the fake company data task, I remember the script initially failed when trying to save...\n", " [q_eparse_03] In our previous session using the 'Eparse' tool to convert Excel to JSON, the command failed when we...\n" ] } ], "source": [ "with open(DATA_FILE_PATH, 'r', encoding='utf-8') as f:\n", " data = json.load(f)\n", "\n", "if isinstance(data, list):\n", " data_item = data[0]\n", "else:\n", " data_item = data\n", "\n", "question_ids = data_item.get('question_id', [])\n", "question_types = data_item.get('question_type', [])\n", "questions = data_item.get('question', [])\n", "question_dates = data_item.get('question_date', [])\n", "answers = data_item.get('answer', [])\n", "answer_session_ids = data_item.get('answer_session_ids', [])\n", "haystack_session_ids = data_item.get('haystack_session_ids', [])\n", "haystack_dates = data_item.get('haystack_dates', [])\n", "haystack_sessions = data_item.get('haystack_sessions', [])\n", "\n", "print(f\"Dataset statistics:\")\n", "print(f\"- Number of questions: {len(questions)}\")\n", "print(f\"- Number of historical sessions: {len(haystack_sessions)}\")\n", "print(f\"- Session ID list: {haystack_session_ids}\")\n", "print(f\"\\nQuestion preview:\")\n", "for i, (qid, q) in enumerate(zip(question_ids[:3], questions[:3])):\n", " print(f\" [{qid}] {q[:100]}...\" if len(q) > 100 else f\" [{qid}] {q}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 4. 
ADD memory into LightMem\n" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "METADATA_GENERATE_PROMPT = \"\"\"\n", "You are a Technical Conversation Analyzer.\n", "Your task is to extract **all technical operations, errors, and solutions** from a conversation.\n", "\n", "Input format:\n", "--- Topic X ---\n", "[timestamp, weekday] source_id.SpeakerName: message\n", "...\n", "\n", "Critical Instructions:\n", "1. **Process messages strictly in ascending source_id order** (one by one)\n", "2. **Extract ALL technical information** including:\n", " - Commands executed (preserve EXACT command syntax)\n", " - Error messages (preserve EXACT error text and codes)\n", " - File paths and directories (preserve COMPLETE paths)\n", " - Solutions and fixes applied\n", " - Tool/package names and versions\n", " - Configuration changes\n", " - Problem-solution pairs (link errors with their fixes)\n", "\n", "3. **Preserve Specificity - DO NOT generalize**:\n", " - ✓ \"OSError: directory `/disk/disk_20T/user/GitTaskBench/prompt/Faker_02` does not exist\"\n", " - ✗ \"encountered a file system error\"\n", " \n", " - ✓ \"Fixed by running `mkdir -p /disk/disk_20T/user/GitTaskBench/prompt/Faker_02`\"\n", " - ✗ \"created the directory\"\n", "\n", "4. **Link problems with solutions**:\n", " When a problem is mentioned and later solved, create entries for both:\n", " - The error/problem with full details\n", " - The solution/fix with full details\n", " - Optionally, a combined entry linking them\n", "\n", "5. **Time Handling**:\n", " - Always include timestamp reference: \"on 2025-12-05\" or \"at [timestamp]\"\n", " - For sequences: note which action happened first\n", "\n", "6. 
Output format:\n", "Please return your response in JSON format.\n", " {\n", " \"data\": [\n", " {\n", " \"source_id\": \"\",\n", " \"fact\": \"\"\n", " }\n", " ]\n", " }\n", "\n", "Example:\n", "--- Topic 1 ---\n", "[2025-12-05T09:00:00.000, Fri] 0.User: python generate_users.py --count 100 --output ./data/users.csv\n", "[2025-12-05T09:00:01.000, Fri] 0.Assistant: Error OSError: [Errno 2] No such file or directory: './data/users.csv'\n", "[2025-12-05T09:00:02.000, Fri] 1.User: mkdir -p ./data\n", "[2025-12-05T09:00:03.000, Fri] 2.User: python generate_users.py --count 100 --output ./data/users.csv\n", "[2025-12-05T09:00:04.000, Fri] 2.Assistant: Successfully generated 100 user records to ./data/users.csv\n", "\n", "{\"data\": [\n", " {\"source_id\": \"0\", \"fact\": \"User executed command `python generate_users.py --count 100 --output ./data/users.csv` on 2025-12-05T09:00:00.\"},\n", " {\"source_id\": \"0\", \"fact\": \"Command failed with OSError: [Errno 2] No such file or directory: './data/users.csv' on 2025-12-05T09:00:01.\"},\n", " {\"source_id\": \"1\", \"fact\": \"User created directory by running `mkdir -p ./data` on 2025-12-05T09:00:02.\"},\n", " {\"source_id\": \"2\", \"fact\": \"User re-executed command `python generate_users.py --count 100 --output ./data/users.csv` on 2025-12-05T09:00:03.\"},\n", " {\"source_id\": \"2\", \"fact\": \"Command successfully generated 100 user records to ./data/users.csv on 2025-12-05T09:00:04.\"},\n", " {\"source_id\": \"0\", \"fact\": \"The CSV generation initially failed because directory './data' did not exist (OSError), and was fixed by creating the directory with `mkdir -p ./data` before re-running the script.\"}\n", "]}\n", "\n", "Reminder: \n", "- Preserve EXACT commands, error messages, file paths\n", "- DO NOT paraphrase technical terms or simplify details\n", "- Link errors with their solutions explicitly\n", "\"\"\"" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ 
"def convert_timestamp(timestamp: str) -> str:\n", " \"\"\"\n", " Convert a timestamp like '2025/12/02 (Tue) 17:06' to '2025-12-02 17:06:00'.\n", " \n", " Args:\n", " timestamp: Original timestamp string containing a parenthesised weekday\n", " \n", " Returns:\n", " Converted timestamp string in format '%Y-%m-%d %H:%M:%S'\n", " \"\"\"\n", " from datetime import datetime\n", " \n", " # Drop the weekday token, e.g. \"(Tue)\": keep the text before '(' and after ')'\n", " date_part = timestamp.split('(')[0].strip()\n", " time_part = timestamp.split(')')[1].strip()\n", " \n", " # '2025/12/02 17:06' -> datetime -> normalised string\n", " parsed = datetime.strptime(f\"{date_part} {time_part}\", '%Y/%m/%d %H:%M')\n", " return parsed.strftime('%Y-%m-%d %H:%M:%S')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Starting to add historical sessions to memory repository...\n", "Converting timestamps to standard format...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Adding turns: 0%| | 0/20 [00:00 512). 
Running this sequence through the model will result in indexing errors\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191234_747423] Restored visual contexts after compression\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191234_747423] Target compression rate: 0.8\n", "Adding turns: 5%|▌ | 1/20 [00:00<00:10, 1.89it/s]2025-12-06 19:12:35 - LightMemory - INFO - ========== START add_memory_20251206_191235_278208 ==========\n", "2025-12-06 19:12:35 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_278208] Extracted 0 visual contexts\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_278208] Restored visual contexts after compression\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_278208] Target compression rate: 0.8\n", "2025-12-06 19:12:35 - LightMemory - INFO - ========== START add_memory_20251206_191235_308053 ==========\n", "2025-12-06 19:12:35 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_308053] Extracted 0 visual contexts\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_308053] Restored visual contexts after compression\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_308053] Target compression rate: 0.8\n", "BertSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation=\"eager\"` when loading the model.\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_308053] Generated 1 segments\n", "Adding turns: 15%|█▌ | 3/20 [00:00<00:03, 5.21it/s]2025-12-06 19:12:35 - LightMemory - INFO - ========== START add_memory_20251206_191235_425848 ==========\n", "2025-12-06 19:12:35 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_425848] Extracted 0 visual contexts\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_425848] Restored visual contexts after compression\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_425848] Target compression rate: 0.8\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_425848] Generated 1 segments\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_425848] Assigned global topic IDs: total=1, mapping=[[0]]\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_425848] Extraction triggered 1 times, extract_list length: 1\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_425848] Batch max_source_ids: [1]\n", "2025-12-06 19:12:35 - LightMemory - INFO - [add_memory_20251206_191235_425848] Starting metadata generation\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "User prompt for API call 0:\n", "--- Topic 0 ---\n", "[2025-12-02T17:06:00.000, Tue] 0.User: Task Task Description repository content generate 100 fake data entries save CSV file two columns Username Email Repository Faker Path disk / disk _ 20T user GitTaskBench code _ base Faker Repository URL https github. 
com joke2k faker Understanding Guide Read README understand basic functions usage File Paths Input File Description Directory disk / disk _ 20T user GitTaskBench prompt Faker _ 01 file name output xxx start naming output 01 file format determined task requirements Supplementary Instructions understand analyze code generate execute code call tools complete user - specified task Workflow & Standards Task analyze user - provided task description task working directory work _ dir repository information repo code importance Plan Solution formulate execution steps read code library README file understand structure usage insufficient information require writing code rely language understanding tool invocation paths code generation execution paths avoid errors Repository Analysis Explore Structure understand file directory structure paths Identify Key Files Prioritize README configuration files main entry scripts Dependency Management Check requirements txt files determine installation Include installation commands code blocks pip install - r requirements txt avoid installation Configuration Python Conda pre - set no extra configuration ensure code library path PYTHONPATH generate export remote _ repo _ path } commandCode Implementation Execution detailed code implementation steps complete function class definitions parameters return values comments docstrings checkpoint model files first check download first automatic download required multiple files - O Error Handling Iteration Check code execution results errors analyze cause fix code regenerate complete script retrial task resolved completed analyze cause alternative solutions Tool Priority checkpoint model files needed first download automatic download required 7 Task Completion task completed confirmed clear summaryConstraints Mandatory Requirements Paths handling files data loading code Provided Repository Code tasks completed existing repository code prohibited rewrite code implementation Read README. 
md code library file understand structure usage If no README md insufficient read code\n", "[2025-12-02T17:06:00.500, Tue] 0.Assistant: Command : cat / disk disk _ 20T user GitTaskBench code _ base Faker / README.\n", "[2025-12-02T17:06:01.000, Tue] 1.User: Output cat : disk / disk _ 20T user / GitTaskBench / code _ base / Faker / README. md No file\n", "[2025-12-02T17:06:01.500, Tue] 1.Assistant: Command : ls - l disk disk _ 20T user / GitTaskBench code _ base /\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:12:45 - LightMemory - INFO - [add_memory_20251206_191235_425848] API Call 0 tokens - Prompt: 1410, Completion: 662, Total: 2072\n", "2025-12-06 19:12:45 - LightMemory - INFO - [add_memory_20251206_191235_425848] Metadata generation completed with 1 API calls\n", "2025-12-06 19:12:45 - LightMemory - INFO - [add_memory_20251206_191235_425848] Created 15 MemoryEntry objects\n", "2025-12-06 19:12:45 - LightMemory - INFO - ========== START offline_update_20251206_191245_198353 ==========\n", "2025-12-06 19:12:45 - LightMemory - INFO - [offline_update_20251206_191245_198353] Received 15 memory entries\n", "2025-12-06 19:12:45 - LightMemory - INFO - [offline_update_20251206_191245_198353] construct_update_queue_trigger=False, offline_update_trigger=False\n", "2025-12-06 19:12:45 - LightMemory - INFO - [offline_update_20251206_191245_198353] Starting embedding and insertion to vector database\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "7a270a7760204839ac7a2b5332ec531f", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Batches: 0%| | 0/1 [00:00 = 24. 8. 0 in disk / disk _ 20T / user anaconda3 lib / python3. 13 site - packages from - r disk 20T line 1 ( 24. 10. 0 ) Requirement check - manifest in disk / disk _ 20T user anaconda3 lib python3. 13 site - packages from - r disk 20T line 2 ( 0. 51 ) Requirement coverage > = 5. 2 in disk / disk _ 20T user anaconda3 lib python3. 
13 site - packages from - r disk 20T line 3 ( 7. 12. 0 ) Requirement doc8 > = 1. 1. 1 in disk disk _ 20T anaconda3 python3. 13 site - packages from - disk _ 20T line 4 ( 2. 0. 0 flake8 - comprehensions disk 20T anaconda3 python3 13 site - packages 20T line 5 3. 17. 0 Requirement flake8 = 4. 0. 0 in disk 20T anaconda3 python3. 13 site - packages - r line 6 ( 7. 1. 1 Requirement freezegun > = 1. 5. 1 in disk / disk _ 20T anaconda3 python3. 13 site - packages - r disk 20T GitTaskBench code base dev - requirements txt line 7 ) 1. 5. 5 Requirement isort > = 5. 13. 2 in disk / disk _ 20T anaconda3 lib python3. 13 site - packages - 20T code base dev - requirements txt line 8 ) 6. 0. 1 mypy - extensions > = 1. 0.disk disk _ 20T user anaconda3 lib python3. 13 site packages r disk disk 20T GitTaskBench code base dev - requirements. txt line 9\n", "[2025-12-02T17:06:00.500, Tue] 0.Assistant: import pandas pd from faker Faker Initialize Faker fake = Faker ( ) Generate fake user data user data = Username fake. user name ( Email fake. email ( } for _ in range ( 100 ) Create DataFrame df = pd. DataFrame ( user _ data ) Save to CSV output _ path = ' disk / disk _ 20T / user / GitTaskBench / prompt / Faker _ 01 / output. csv ' df. 
to _ csv ( output _ path index = False\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:13:23 - LightMemory - INFO - [add_memory_20251206_191304_997206] API Call 0 tokens - Prompt: 1511, Completion: 674, Total: 2185\n", "2025-12-06 19:13:23 - LightMemory - INFO - [add_memory_20251206_191304_997206] Metadata generation completed with 1 API calls\n", "2025-12-06 19:13:23 - LightMemory - INFO - [add_memory_20251206_191304_997206] Created 8 MemoryEntry objects\n", "2025-12-06 19:13:23 - LightMemory - INFO - ========== START offline_update_20251206_191323_569092 ==========\n", "2025-12-06 19:13:23 - LightMemory - INFO - [offline_update_20251206_191323_569092] Received 8 memory entries\n", "2025-12-06 19:13:23 - LightMemory - INFO - [offline_update_20251206_191323_569092] construct_update_queue_trigger=False, offline_update_trigger=False\n", "2025-12-06 19:13:23 - LightMemory - INFO - [offline_update_20251206_191323_569092] Starting embedding and insertion to vector database\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "08782d37b66844bca1714551f029848f", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Batches: 0%| | 0/1 [00:00 23 df. to csv ( output file path index = False File disk / disk _ 20T user / anaconda3 envs gittaskbench lib / python3. 12 site - packages pandas / util / decorators. py : 333 deprecate nonkeyword arguments locals > decorate locals wrapper ( * args, kwargs ) 327 if len ( args ) > num allow args : 328 warnings. warn ( 329 msg. format ( arguments = format argument list allow args ) ) 330 FutureWarning 331 stacklevel = find _ stack _ level ( ) 332 ) - > 333 return func ( * args, kwargs ) File disk / disk _ 20T user anaconda3 envs gittaskbench lib / python3. 12 site - packages / pandas / core / generic. 
py : 3967 NDFrameto _ csv ( self path buf sep na rep float format columns header index index label mode encoding compression quoting quotechar lineterminator chunksize date _ format doublequote escapechar decimal errors storage options 3956 df = self isinstance self ABCDataFrame self frame 3958 formatter = DataFrameFormatter 3959 frame = df 3960 header header 3964 decimal = decimal 3965 - > 3967 return DataFrameRenderer formatter to _ csv 3968 path _ buf 3969 lineterminator = 3970 sep = sep 3971 encoding 3972 errors = errors 3973 compression = compression 3974 quoting = quoting 3975 columns = columns 3976 index _ label = index 3977 mode = mode 3978 chunksize = 3979 quotechar = quotechar 3980 date _ format = date _ format 3981 doublequote 3982 escapechar = escapechar 3983 storage options = storage options 3984 File / disk / disk _ 20T / user... (\n", "[2025-12-03T15:01:00.500, Wed] 0.Assistant: Command mkdir - p disk disk _ 20T user / GitTaskBench prompt / Faker _\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:13:54 - LightMemory - INFO - [add_memory_20251206_191350_114036] API Call 0 tokens - Prompt: 1418, Completion: 308, Total: 1726\n", "2025-12-06 19:13:54 - LightMemory - INFO - [add_memory_20251206_191350_114036] Metadata generation completed with 1 API calls\n", "2025-12-06 19:13:54 - LightMemory - INFO - [add_memory_20251206_191350_114036] Created 4 MemoryEntry objects\n", "2025-12-06 19:13:54 - LightMemory - INFO - ========== START offline_update_20251206_191354_128284 ==========\n", "2025-12-06 19:13:54 - LightMemory - INFO - [offline_update_20251206_191354_128284] Received 4 memory entries\n", "2025-12-06 19:13:54 - LightMemory - INFO - [offline_update_20251206_191354_128284] construct_update_queue_trigger=False, offline_update_trigger=False\n", "2025-12-06 19:13:54 - LightMemory - INFO - [offline_update_20251206_191354_128284] Starting embedding and insertion to vector database\n" ] }, { "data": { 
"application/vnd.jupyter.widget-view+json": { "model_id": "ab153645b7334281b6f504ae676ddb19", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Batches: 0%| | 0/1 [00:00 _ package optional use other interfaces SQLite3 interface without install psycopg2 install psycopg2 package environment may install pre - compiled binary driver follows :.code - block : : $ pip install psycopg2 - binary error use postgres endpoint user : pass @ host : port / my _ db mentions postgres driver missing know haven ' t properly installed compiled ) psycopg2. U... ( truncated\n", "[2025-12-03T15:32:00.500, Wed] 0.Assistant: Command pip install\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:14:14 - LightMemory - INFO - [add_memory_20251206_191409_044607] API Call 0 tokens - Prompt: 1310, Completion: 397, Total: 1707\n", "2025-12-06 19:14:14 - LightMemory - INFO - [add_memory_20251206_191409_044607] Metadata generation completed with 1 API calls\n", "2025-12-06 19:14:14 - LightMemory - INFO - [add_memory_20251206_191409_044607] Created 9 MemoryEntry objects\n", "2025-12-06 19:14:14 - LightMemory - INFO - ========== START offline_update_20251206_191414_458377 ==========\n", "2025-12-06 19:14:14 - LightMemory - INFO - [offline_update_20251206_191414_458377] Received 9 memory entries\n", "2025-12-06 19:14:14 - LightMemory - INFO - [offline_update_20251206_191414_458377] construct_update_queue_trigger=False, offline_update_trigger=False\n", "2025-12-06 19:14:14 - LightMemory - INFO - [offline_update_20251206_191414_458377] Starting embedding and insertion to vector database\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "e42d7e50d0a4440ca7bbb72ed8ae4ec1", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Batches: 0%| | 0/1 [00:00 8. 0. 0 disk 20T user anaconda3 lib python3. 13 site - packages eparse 8. 1. 8 openpyxl 3. 0. 0 20T 13 packages. 5 lxml 4. 9. 3 pandas. 3 peewee 3. 16. 0. 18. 3 unstructured 0. 8. 
5 Downloading pypi. tuna. tsinghua. educn / packages / c2 / 98 / e8ddcfadd762f8f69d84e14498c28adefdd8e2008f443077495984405c45 / unstructured - 0. 18. 21 - py3 - none - any. whl ( 1. 8 MB ) MB 5. 9 MB s 0 : 00 : 00 Requirement : et - xmlfile disk disk _ 20T user anaconda3 lib / python3. 13 site - packages openpyxl > 3. 0 eparse 2. 0. 0 numpy 1. 26. 0 disk 20T anaconda3. 13 site - packages pandas 2. 0 eparse 2. 1. 3 python - dateutil 2. 8. 2 disk disk 20T anaconda3 python3. 13 site - packages pandas 2. 0. 0 eparse 2. 9. 0post0 pytz 2020. 1 disk disk 20T user anaconda3 lib python3. 13 site packages pandas 2. 0. 0 eparse 2024. 1 tzdata 2022. 7 disk disk 20T user anaconda3 lib python3. 13 site packages pandas 2 0 0 eparse 2025. 2\n", "[2025-12-03T15:32:00.500, Wed] 0.Assistant: Command mkdir - p disk disk _ 20T user / GitTaskBench prompt / Eparse _\n", "[2025-12-03T15:32:01.000, Wed] 1.User: Output\n", "[2025-12-03T15:32:01.500, Wed] 1.Assistant: Command eparse - v - f disk disk _ 20T user / GitTaskBench queries / Eparse _ 03 input Eparse 03 _ input. xlsx - o json disk / disk _ 20T user / GitTaskBench prompt / Eparse _ 03 / output. json\n", "User prompt for API call 1:\n", "--- Topic 13 ---\n", "[2025-12-03T15:32:02.000, Wed] 2.User: Output found 1 files output error - json disk / disk _ 20T user / GitTaskBench / prompt / Eparse _ 03 / output. json not recognized\n", "[2025-12-03T15:32:02.500, Wed] 2.Assistant: Command eparse - v - f disk / disk _ 20T user GitTaskBench queries / Eparse _ 03 input Eparse _ 03 _ input. 
xlsx - o stdout parse - z > disk / disk _ 20T user GitTaskBench prompt / Eparse _ 03 / output.\n", "[2025-12-03T15:32:03.000, Wed] 3.User: Output\n", "[2025-12-03T15:32:03.500, Wed] 3.Assistant: Command cat disk disk _ 20T user / GitTaskBench prompt / Eparse _ 03 / output.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:14:23 - LightMemory - INFO - [add_memory_20251206_191415_893489] API Call 0 tokens - Prompt: 1596, Completion: 344, Total: 1940\n", "2025-12-06 19:14:23 - LightMemory - INFO - [add_memory_20251206_191415_893489] API Call 1 tokens - Prompt: 1127, Completion: 241, Total: 1368\n", "2025-12-06 19:14:23 - LightMemory - INFO - [add_memory_20251206_191415_893489] Metadata generation completed with 2 API calls\n", "2025-12-06 19:14:23 - LightMemory - WARNING - LLM returned invalid source_id=2 (valid range: [0, 1]) in batch 1. Auto-corrected to source_id=1. Fact: User encountered an output error stating '1 files output error - json disk / disk _ 20T user / GitTa...\n", "2025-12-06 19:14:23 - LightMemory - WARNING - LLM returned invalid source_id=2 (valid range: [0, 1]) in batch 1. Auto-corrected to source_id=1. Fact: Assistant provided command `eparse - v - f disk / disk _ 20T user GitTaskBench queries / Eparse _ 03...\n", "2025-12-06 19:14:23 - LightMemory - WARNING - LLM returned invalid source_id=3 (valid range: [0, 1]) in batch 1. Auto-corrected to source_id=1. 
Fact: User executed command `cat disk disk _ 20T user / GitTaskBench prompt / Eparse _ 03 / output.` on 20...\n", "2025-12-06 19:14:23 - LightMemory - INFO - [add_memory_20251206_191415_893489] Created 9 MemoryEntry objects\n", "2025-12-06 19:14:23 - LightMemory - INFO - ========== START offline_update_20251206_191423_160631 ==========\n", "2025-12-06 19:14:23 - LightMemory - INFO - [offline_update_20251206_191423_160631] Received 9 memory entries\n", "2025-12-06 19:14:23 - LightMemory - INFO - [offline_update_20251206_191423_160631] construct_update_queue_trigger=False, offline_update_trigger=False\n", "2025-12-06 19:14:23 - LightMemory - INFO - [offline_update_20251206_191423_160631] Starting embedding and insertion to vector database\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "2d201059bd9d49f8b66e2ba003a2ab1f", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Batches: 0%| | 0/1 [00:00 None:\n", " \"\"\"\n", " Add historical sessions to the LightMemory system.\n", " Sessions are added turn by turn (each turn contains a user message and an assistant message).\n", " \n", " Args:\n", " lightmem: LightMemory instance\n", " sessions: List of sessions, each session contains multiple conversation turns\n", " session_ids: List of session IDs\n", " dates: List of session timestamps (will be converted to standard format)\n", " \"\"\"\n", " print(\"Starting to add historical sessions to memory repository...\")\n", " \n", " # Convert all timestamps to standard format\n", " print(\"Converting timestamps to standard format...\")\n", " converted_dates = [convert_timestamp(date) for date in dates]\n", " \n", " # Calculate total number of turns for progress bar\n", " total_turns = 0\n", " for session in sessions:\n", " # Ensure first message is from user\n", " session_copy = session.copy()\n", " while session_copy and session_copy[0][\"role\"] != \"user\":\n", " session_copy.pop(0)\n", " num_turns = len(session_copy) // 2\n", " 
total_turns += num_turns\n", " \n", " progress_bar = tqdm(total=total_turns, desc=\"Adding turns\")\n", " \n", " for session_idx, (session, session_id, date) in enumerate(zip(sessions, session_ids, converted_dates)):\n", " # Ensure the first message is from user\n", " while session and session[0][\"role\"] != \"user\":\n", " session.pop(0)\n", " \n", " num_turns = len(session) // 2\n", " \n", " for turn_idx in range(num_turns):\n", " # Extract one turn (user + assistant messages)\n", " turn_messages = session[turn_idx*2 : turn_idx*2 + 2]\n", " \n", " # Validate turn structure\n", " if len(turn_messages) < 2 or turn_messages[0][\"role\"] != \"user\" or turn_messages[1][\"role\"] != \"assistant\":\n", " continue\n", " \n", " # Add timestamp and speaker information to each message\n", " for msg in turn_messages:\n", " msg[\"time_stamp\"] = date\n", " # Add default speaker information if not present\n", " if \"speaker_name\" not in msg:\n", " msg[\"speaker_name\"] = \"User\" if msg[\"role\"] == \"user\" else \"Assistant\"\n", " if \"speaker_id\" not in msg:\n", " msg[\"speaker_id\"] = \"speaker_a\" if msg[\"role\"] == \"user\" else \"speaker_b\"\n", " \n", " # Only force_segment and force_extract on the last turn of the last session\n", " is_last_turn = (session_idx == len(sessions) - 1 and turn_idx == num_turns - 1)\n", " \n", " # Add turn to memory system\n", " try:\n", " lightmem.add_memory(\n", " messages=turn_messages,\n", " METADATA_GENERATE_PROMPT=METADATA_GENERATE_PROMPT,\n", " force_segment=is_last_turn,\n", " force_extract=is_last_turn,\n", " )\n", " progress_bar.update(1)\n", " except Exception as e:\n", " print(f\"\\nWarning: Failed to add turn {turn_idx} from session {session_id}: {str(e)}\")\n", " continue\n", " \n", " progress_bar.close()\n", " print(\"\\nAll historical sessions have been added!\")\n", " \n", "add_sessions_to_memory(lightmem, haystack_sessions, haystack_session_ids, haystack_dates)" ] }, { "cell_type": "markdown", "metadata": {}, 
"source": [ "## 5. Offline update" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:14:35 - LightMemory - INFO - ========== START construct_queue_20251206_191435_771858 ==========\n", "2025-12-06 19:14:35 - LightMemory - INFO - [construct_queue_20251206_191435_771858] Parameters: top_k=20, keep_top_n=10, max_workers=8\n", "2025-12-06 19:14:35 - LightMemory - INFO - [construct_queue_20251206_191435_771858] Retrieved 89 entries from vector database\n", "2025-12-06 19:14:35 - LightMemory - INFO - [construct_queue_20251206_191435_771858] Starting parallel queue construction with 8 workers\n", "2025-12-06 19:14:48 - LightMemory - INFO - [construct_queue_20251206_191435_771858] Queue construction completed: 89 updated, 0 skipped, nonempty_queues=89, empty_queues=0\n", "2025-12-06 19:14:48 - LightMemory - INFO - ========== END construct_queue_20251206_191435_771858 ==========\n", "2025-12-06 19:14:48 - LightMemory - INFO - ========== START offline_update_all_20251206_191448_699374 ==========\n", "2025-12-06 19:14:48 - LightMemory - INFO - [offline_update_all_20251206_191448_699374] Parameters: score_threshold=0.8, max_workers=5\n", "2025-12-06 19:14:48 - LightMemory - INFO - [offline_update_all_20251206_191448_699374] Retrieved 89 entries from vector database\n", "2025-12-06 19:14:48 - LightMemory - INFO - [offline_update_all_20251206_191448_699374] Starting parallel offline update with 5 workers\n", "2025-12-06 19:15:07 - LightMemory - INFO - [offline_update_all_20251206_191448_699374] Offline update completed:\n", "2025-12-06 19:15:07 - LightMemory - INFO - [offline_update_all_20251206_191448_699374] - Processed: 41 entries\n", "2025-12-06 19:15:07 - LightMemory - INFO - [offline_update_all_20251206_191448_699374] - Updated: 17 entries\n", "2025-12-06 19:15:07 - LightMemory - INFO - [offline_update_all_20251206_191448_699374] - Deleted: 19 entries\n", "2025-12-06 19:15:07 
- LightMemory - INFO - [offline_update_all_20251206_191448_699374] - Skipped (no candidates): 48 entries\n", "2025-12-06 19:15:07 - LightMemory - INFO - [offline_update_all_20251206_191448_699374] - Update API calls: 41, Total tokens: 32401\n", "2025-12-06 19:15:07 - LightMemory - INFO - ========== END offline_update_all_20251206_191448_699374 ==========\n" ] } ], "source": [ "lightmem.construct_update_queue_all_entries()\n", "lightmem.offline_update_all_entries(score_threshold=0.8)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 6. Retrieval and answer" ] }, { "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [], "source": [ "def test_retrieval_and_answer(lightmem: LightMemory, \n", " questions: List[str], \n", " question_ids: List[str],\n", " question_types: List[str],\n", " question_dates: List[str],\n", " answers: List[str],\n", " top_k: int = 20) -> pd.DataFrame:\n", " \"\"\"\n", " Perform memory retrieval, generate answers using LLM, and evaluate correctness.\n", " \n", " Args:\n", " lightmem: LightMemory instance\n", " questions: List of questions\n", " question_ids: List of question IDs\n", " question_types: List of question types\n", " question_dates: List of question dates\n", " answers: List of expected answers\n", " top_k: Number of memory entries to retrieve\n", " \n", " Returns:\n", " DataFrame containing retrieval and evaluation results\n", " \"\"\"\n", " results = []\n", " \n", " print(f\"Starting memory retrieval and answer generation for {len(questions)} questions...\\n\")\n", " \n", " # Initialize LLM for answer generation (using the same config as LightMemory)\n", " from openai import OpenAI\n", " \n", " llm_client = OpenAI(\n", " api_key=API_KEY,\n", " base_url=API_BASE_URL\n", " )\n", " \n", " # LLM for judging (can be the same)\n", " llm_judge = llm_client\n", " \n", " for idx, (qid, question, qtype, qdate, expected_answer) in enumerate(\n", " zip(question_ids, questions, question_types, question_dates, 
answers), 1\n", " ):\n", " print(f\"\\n{'='*80}\")\n", " print(f\"Question {idx}/{len(questions)} [ID: {qid}]\")\n", " print(f\"{'='*80}\")\n", " print(f\"Question: {question}\")\n", " print(f\"Question Date: {qdate}\")\n", " print(f\"Question Type: {qtype}\")\n", " print(f\"Expected Answer: {expected_answer}\")\n", " \n", " try:\n", " # Step 1: Retrieve relevant memories\n", " result_string = lightmem.retrieve(question, limit=top_k)\n", " related_memories = [m.strip() for m in result_string.split('\\n') if m.strip()]\n", " \n", " print(f\"\\nRetrieved {len(related_memories)} relevant memories\")\n", " print(\"-\" * 80)\n", " \n", " # Display first few memories\n", " for mem_idx, memory in enumerate(related_memories, 1):\n", " print(f\"Memory {mem_idx}: {memory}\")\n", " \n", " # Step 2: Generate answer using LLM\n", " print(\"\\nGenerating answer...\")\n", " messages = [\n", " {\"role\": \"system\", \"content\": \"You can ONLY answer based on the provided memories.\"},\n", " {\n", " \"role\": \"user\",\n", " \"content\": f\"Question: {question}\\n\\nPlease answer the question based on the following memories:\\n{result_string}\"\n", " }\n", " ]\n", " \n", " response = llm_client.chat.completions.create(\n", " model=LLM_MODEL,\n", " messages=messages,\n", " max_tokens=1024,\n", " temperature=0.0\n", " )\n", " \n", " generated_answer = response.choices[0].message.content\n", " print(f\"\\nGenerated Answer: {generated_answer}\")\n", " \n", " # Step 3: Evaluate answer correctness\n", " print(\"\\nEvaluating answer...\")\n", " \n", " # Build evaluation prompt\n", "\n", " eval_prompt = f\"\"\"You are an expert evaluator. 
Compare the generated answer with the expected answer.\n", " Question: {question}\n", " Expected Answer: {expected_answer}\n", " Generated Answer: {generated_answer}\n", "\n", " Determine if the generated answer is correct compared to the expected answer.\n", " Answer only \"True\" or \"False\".\"\"\"\n", " \n", " eval_messages = [{\"role\": \"user\", \"content\": eval_prompt}]\n", " \n", " eval_response = llm_judge.chat.completions.create(\n", " model=LLM_MODEL,\n", " messages=eval_messages,\n", " max_tokens=10,\n", " temperature=0.0\n", " )\n", " \n", " eval_result = eval_response.choices[0].message.content.strip()\n", " correct = 1 if \"true\" in eval_result.lower() else 0\n", " \n", " print(f\"Evaluation Result: {eval_result} ({'✓ Correct' if correct else '✗ Incorrect'})\")\n", " \n", " # Record results\n", " results.append({\n", " 'question_id': qid,\n", " 'question_type': qtype,\n", " 'question': question,\n", " 'question_date': qdate,\n", " 'expected_answer': expected_answer,\n", " 'retrieved_count': len(related_memories),\n", " 'retrieved_memories': related_memories,\n", " 'generated_answer': generated_answer,\n", " 'eval_result': eval_result,\n", " 'correct': correct\n", " })\n", " \n", " except Exception as e:\n", " print(f\"\\nError: Processing failed - {str(e)}\")\n", " import traceback\n", " traceback.print_exc()\n", " \n", " results.append({\n", " 'question_id': qid,\n", " 'question_type': qtype,\n", " 'question': question,\n", " 'question_date': qdate,\n", " 'expected_answer': expected_answer,\n", " 'retrieved_count': 0,\n", " 'retrieved_memories': [],\n", " 'generated_answer': '',\n", " 'eval_result': '',\n", " 'correct': 0,\n", " 'error': str(e)\n", " })\n", " \n", " print(f\"\\n{'='*80}\")\n", " print(\"Retrieval and answer generation completed!\")\n", " print(f\"{'='*80}\\n\")\n", " \n", " df = pd.DataFrame(results)\n", " \n", " # Print summary statistics\n", " if len(df) > 0 and 'correct' in df.columns:\n", " accuracy = df['correct'].mean() * 
100\n", " print(f\"Overall Accuracy: {accuracy:.2f}% ({df['correct'].sum()}/{len(df)})\")\n", " \n", " return df\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [] }, { "cell_type": "code", "execution_count": 31, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2025-12-06 19:32:13 - LightMemory - INFO - ========== START retrieve_20251206_193213_714931 ==========\n", "2025-12-06 19:32:13 - LightMemory - INFO - [retrieve_20251206_193213_714931] Query: I'm reviewing the fake user data generation task we did previously. Can you remind me exactly how many user records were generated and what were the column headers in the output CSV?\n", "2025-12-06 19:32:13 - LightMemory - INFO - [retrieve_20251206_193213_714931] Parameters: limit=20, filters=None\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Starting memory retrieval and answer generation for 3 questions...\n", "\n", "\n", "================================================================================\n", "Question 1/3 [ID: q_faker_01]\n", "================================================================================\n", "Question: I'm reviewing the fake user data generation task we did previously. Can you remind me exactly how many user records were generated and what were the column headers in the output CSV?\n", "Question Date: 2025/12/05 (Fri) 09:00\n", "Question Type: single-session-assistant\n", "Expected Answer: We generated 100 fake user records. 
The column headers in the output CSV were 'Username' and 'Email'.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "fd5d166fbb134d06afe33a8cb6f9bc8f", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Batches: 0%| | 0/1 [00:00 disk / disk _ 20T user GitTaskBench prompt / Eparse _ 03 / output.` on 2025-12-03T15:32:02.500.\n", "Memory 5: 2025-12-03T15:01:00.000 Wed Assistant provided code snippet for saving a CSV file after creating a directory with the file path '/disk/disk_20T/user/GitTaskBench/prompt/Faker_02/output.csv' using 'df.to_csv(output_file_path, index=False)' on 2025-12-03T15:01:00.500.\n", "Memory 6: 2025-12-03T15:01:00.000 Wed User included a command to save the DataFrame to CSV: `df.to_csv(output_file_path, index=False)`.\n", "Memory 7: 2025-12-03T15:32:00.000 Wed User mentioned the output file name as 'xxx' with a suffix '01' and indicated that the output file format is determined by task requirements on 2025-12-03T15:32:00.\n", "Memory 8: 2025-12-03T15:01:00.000 Wed The specific line causing the error is indicated as 'df.to_csv(output_file_path, index=False)' on line 23 of the user's code.\n", "Memory 9: 2025-12-03T15:32:00.000 Wed User mentioned the command to install eparse using pip: `$ pip install eparse`, and also provided a command to install eparse from source: `$ pip install.`.\n", "Memory 10: 2025-12-03T15:01:00.000 Wed The error traceback includes the following details: 'File disk/disk_20T/user/anaconda3/envs/gittaskbench/lib/python3.12/site-packages/pandas/core/generic.py:3967 NDFrame.to_csv(self, path_or_buf, sep, na_rep, float_format, columns, header, index, index_label, mode, encoding, compression, quoting, quotechar, lineterminator, chunksize, date_format, doublequote, escapechar, decimal, errors, storage_options)'\n", "Memory 11: 2025-12-03T15:32:00.000 Wed User provided a command to change directory into the eparse folder: `$ cd eparse`.\n", "Memory 12: 2025-12-03T15:01:00.000 Wed User indicated 
that the Python Conda environment is pre-set with no extra configuration needed and that the PYTHONPATH should be generated and exported on 2025-12-03T15:01:00.\n", "Memory 13: 2025-12-03T15:01:00.000 Wed The task to generate company data entries could not proceed due to the missing README.md file, which was confirmed by the output error from the `cat` command.\n", "Memory 14: 2025-12-02T17:06:00.000 Tue User executed code in Jupyter with the current working directory set to 'disk/disk_20T/user/GitTaskBench' and Python interpreter located at 'disk/disk_20T/user/anaconda3/envs/gittaskbench/bin/python' on 2025-12-02T17:06:00.\n", "Memory 15: 2025-12-03T15:32:00.000 Wed User mentioned adding the latest PyPI version of eparse to the requirements.txt file: `eparse==0.8.0`.\n", "Memory 16: 2025-12-03T15:32:00.000 Wed User attempted to output indexes from pypi.tuna.tsinghua.edu.cn and downloaded packages including eparse-0.7.3 and unstructured-0.18.21.\n", "Memory 17: 2025-12-03T15:01:00.000 Wed User requested output on 2025-12-03T15:01:00.\n", "Memory 18: 2025-12-03T15:32:00.000 Wed User emphasized the importance of reading the README file to understand basic functions and usage, and mentioned paths like 'disk/disk_20T/GitTaskBench/queries/Eparse_03' on 2025-12-03T15:32:00.\n", "Memory 19: 2025-12-03T15:32:00.000 Wed User provided a command to clone the eparse repository: `$ git clone https://github.com/ChrisPappalardo/eparse.git`.\n", "Memory 20: 2025-12-03T15:32:00.000 Wed User provided information about the eparse package, including installation instructions and features on 2025-12-03T15:32:00.\n", "\n", "Generating answer...\n", "\n", "Generated Answer: In our previous session, instead of using the 'json://' endpoint, we successfully saved the output using the command line argument `-o stdout`.\n", "\n", "Evaluating answer...\n", "Evaluation Result: True (✓ Correct)\n", "\n", "================================================================================\n", 
"Retrieval and answer generation completed!\n", "================================================================================\n", "\n", "Overall Accuracy: 100.00% (3/3)\n" ] } ], "source": [ "# Execute retrieval, answer generation, and evaluation\n", "retrieval_results = test_retrieval_and_answer(\n", " lightmem, \n", " questions, \n", " question_ids,\n", " question_types,\n", " question_dates,\n", " answers, \n", " top_k=20\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", "metadata": {}, "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", "metadata": {}, "source": [] } ], "metadata": { "kernelspec": { "display_name": "LightMem", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.14" } }, "nbformat": 4, "nbformat_minor": 4 }