{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# LightMem Example with longmemeval data\n", "\n", "Tutorial author: xubuqiang" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 0. Prepare the runtime environment" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/disk/disk_20T/xubuqiang/lightmem\n", "env: ALL_PROXY=\n", "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\n", "Obtaining file:///disk/disk_20T/xubuqiang/lightmem\n", " Installing build dependencies ... \u001b[?25ldone\n", "\u001b[?25h Checking if build backend supports build_editable ... \u001b[?25ldone\n", "\u001b[?25h Getting requirements to build editable ... \u001b[?25ldone\n", "\u001b[?25h Preparing editable metadata (pyproject.toml) ... \u001b[?25ldone\n", "\u001b[?25hCollecting torch==2.8.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/5a/63/4fdc45a0304536e75a5e1b1bbfb1b56dd0e2743c48ee83ca729f7ce44162/torch-2.8.0-cp311-cp311-manylinux_2_28_x86_64.whl (888.1 MB)\n", "Collecting transformers==4.57.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/e5/2b/4d2708ac1ff5cd708b6548f4c5812d0ae40d1c28591c4c1c762b6dbdef2d/transformers-4.57.0-py3-none-any.whl (12.0 MB)\n", "Collecting sentence-transformers==5.1.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/48/21/4670d03ab8587b0ab6f7d5fa02a95c3dd6b1f39d0e40e508870201f3d76c/sentence_transformers-5.1.1-py3-none-any.whl (486 kB)\n", "Collecting accelerate==1.10.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/5f/a0/d9ef19f780f319c21ee90ecfef4431cbeeca95bec7f14071785c17b6029b/accelerate-1.10.1-py3-none-any.whl (374 kB)\n", "Collecting openai==2.3.0 (from lightmem==0.1.0)\n", " Using cached 
https://pypi.tuna.tsinghua.edu.cn/packages/9c/5b/4be258ff072ed8ee15f6bfd8d5a1a4618aa4704b127c0c5959212ad177d6/openai-2.3.0-py3-none-any.whl (999 kB)\n", "Collecting tiktoken==0.12.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/6a/d0/3d9275198e067f8b65076a68894bb52fd253875f3644f0a321a720277b8a/tiktoken-0.12.0-cp311-cp311-manylinux_2_28_x86_64.whl (1.2 MB)\n", "Collecting llmlingua==0.2.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/6e/3e/221fe46a3338f2babdb2082ee42df88fcaa8ea0e639e832cbb1b93c5923a/llmlingua-0.2.2-py3-none-any.whl (30 kB)\n", "Collecting qdrant-client==1.15.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/ef/33/d8df6a2b214ffbe4138db9a1efe3248f67dc3c671f82308bea1582ecbbb7/qdrant_client-1.15.1-py3-none-any.whl (337 kB)\n", "Collecting pydantic==2.11.10 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/bd/1f/73c53fcbfb0b5a78f91176df41945ca466e71e9d9d836e5c522abda39ee7/pydantic-2.11.10-py3-none-any.whl (444 kB)\n", "Collecting pydantic_core==2.33.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.0 MB)\n", "Collecting numpy==2.2.6 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/b3/dd/2238b898e51bd6d389b7389ffb20d7f4c10066d80351187ec8e303a5a475/numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (16.8 MB)\n", "Collecting scipy==1.15.3 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/bd/37/89f19c8c05505d0601ed5650156e50eb881ae3918786c8fd7262b4ee66d3/scipy-1.15.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (37.7 MB)\n", "Collecting scikit-learn==1.7.2 (from lightmem==0.1.0)\n", " Using cached 
https://pypi.tuna.tsinghua.edu.cn/packages/ef/0e/97dbca66347b8cf0ea8b529e6bb9367e337ba2e8be0ef5c1a545232abfde/scikit_learn-1.7.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (9.7 MB)\n", "Collecting nltk==3.9.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/60/90/81ac364ef94209c100e12579629dc92bf7a709a84af32f8c551b02c07e94/nltk-3.9.2-py3-none-any.whl (1.5 MB)\n", "Collecting tokenizers==0.22.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.3 MB)\n", "Collecting huggingface-hub==0.35.3 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/31/a0/651f93d154cb72323358bf2bbae3e642bdb5d2f1bfc874d096f7cb159fa0/huggingface_hub-0.35.3-py3-none-any.whl (564 kB)\n", "Collecting safetensors==0.6.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/fe/5d/5a514d7b88e310c8b146e2404e0dc161282e78634d9358975fd56dfd14be/safetensors-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (485 kB)\n", "Collecting tqdm==4.67.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl (78 kB)\n", "Requirement already satisfied: PyYAML==6.0.3 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (6.0.3)\n", "Requirement already satisfied: requests==2.32.5 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (2.32.5)\n", "Collecting filelock==3.20.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/76/91/7216b27286936c16f5b4d0c530087e4a54eead683e6b0b73dd0c64844af6/filelock-3.20.0-py3-none-any.whl (16 kB)\n", "Collecting 
regex==2025.9.18 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/fe/d0/c51d1e6a80eab11ef96a4cbad17fc0310cf68994fb01a7283276b7e5bbd6/regex-2025.9.18-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (798 kB)\n", "Requirement already satisfied: packaging==25.0 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (25.0)\n", "Requirement already satisfied: httpx==0.28.1 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (0.28.1)\n", "Requirement already satisfied: httpcore==1.0.9 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (1.0.9)\n", "Requirement already satisfied: h11==0.16.0 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (0.16.0)\n", "Collecting h2==4.3.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/69/b2/119f6e6dcbd96f9069ce9a2665e0146588dc9f88f29549711853645e736a/h2-4.3.0-py3-none-any.whl (61 kB)\n", "Collecting anyio==4.11.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl (109 kB)\n", "Collecting certifi==2025.10.5 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/e4/37/af0d2ef3967ac0d6113837b44a4f0bfe1328c2b9763bd5b1744520e5cfed/certifi-2025.10.5-py3-none-any.whl (163 kB)\n", "Collecting charset-normalizer==3.4.3 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/87/df/b7737ff046c974b183ea9aa111b74185ac8c3a326c6262d413bd5a1b8c69/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (150 kB)\n", "Collecting idna==3.10 (from lightmem==0.1.0)\n", " Using cached 
https://pypi.tuna.tsinghua.edu.cn/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl (70 kB)\n", "Collecting click==8.3.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl (107 kB)\n", "Collecting joblib==1.5.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/1e/e8/685f47e0d754320684db4425a0967f7d3fa70126bffd76110b7009a0090f/joblib-1.5.2-py3-none-any.whl (308 kB)\n", "Requirement already satisfied: Jinja2==3.1.6 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (3.1.6)\n", "Requirement already satisfied: MarkupSafe==3.0.3 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (3.0.3)\n", "Collecting pillow==11.3.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/f2/2f/d7675ecae6c43e9f12aa8d58b6012683b20b6edfbdac7abcb4e6af7a3784/pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (6.6 MB)\n", "Collecting protobuf==6.32.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/5c/f6/88d77011b605ef979aace37b7703e4eefad066f7e84d935e5a696515c2dd/protobuf-6.32.1-cp39-abi3-manylinux2014_x86_64.whl (322 kB)\n", "Collecting psutil==7.1.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/9d/de/04c8c61232f7244aa0a4b9a9fbd63a89d5aeaf94b2fc9d1d16e2faa5cbb0/psutil-7.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (291 kB)\n", "Collecting fsspec==2025.9.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl (199 kB)\n", "Collecting grpcio==1.75.1 (from 
lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/3f/42/5f628abe360b84dfe8dd8f32be6b0606dc31dc04d3358eef27db791ea4d5/grpcio-1.75.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (6.5 MB)\n", "Collecting portalocker==3.2.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/4b/a6/38c8e2f318bf67d338f4d629e93b0b4b9af331f455f0390ea8ce4a099b26/portalocker-3.2.0-py3-none-any.whl (22 kB)\n", "Collecting annotated-types==0.7.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl (13 kB)\n", "Requirement already satisfied: typing_extensions==4.15.0 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from lightmem==0.1.0) (4.15.0)\n", "Collecting typing-inspection==0.4.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl (14 kB)\n", "Collecting networkx==3.4.2 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl (1.7 MB)\n", "Collecting sympy==1.14.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl (6.3 MB)\n", "Collecting mpmath==1.3.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl (536 kB)\n", "Collecting distro==1.9.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl (20 
kB)\n", "Collecting hf-xet==1.1.10 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/15/07/86397573efefff941e100367bbda0b21496ffcdb34db7ab51912994c32a2/hf_xet-1.1.10-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.2 MB)\n", "Collecting hpack==4.1.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl (34 kB)\n", "Collecting hyperframe==6.1.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl (13 kB)\n", "Collecting jiter==0.11.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/9f/d8/ec74886497ea393c29dbd7651ddecc1899e86404a6b1f84a3ddab0ab59fd/jiter-0.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (348 kB)\n", "Collecting sniffio==1.3.1 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl (10 kB)\n", "Collecting threadpoolctl==3.6.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl (18 kB)\n", "Collecting urllib3==2.5.0 (from lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl (129 kB)\n", "Collecting nvidia-cuda-nvrtc-cu12==12.8.93 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl (88.0 MB)\n", "Collecting 
nvidia-cuda-runtime-cu12==12.8.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (954 kB)\n", "Collecting nvidia-cuda-cupti-cu12==12.8.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (10.2 MB)\n", "Collecting nvidia-cudnn-cu12==9.10.2.21 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl (706.8 MB)\n", "Collecting nvidia-cublas-cu12==12.8.4.1 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl (594.3 MB)\n", "Collecting nvidia-cufft-cu12==11.3.3.83 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (193.1 MB)\n", "Collecting nvidia-curand-cu12==10.3.9.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl (63.6 MB)\n", "Collecting nvidia-cusolver-cu12==11.7.3.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached 
https://pypi.tuna.tsinghua.edu.cn/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl (267.5 MB)\n", "Collecting nvidia-cusparse-cu12==12.5.8.93 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (288.2 MB)\n", "Collecting nvidia-cusparselt-cu12==0.7.1 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl (287.2 MB)\n", "Collecting nvidia-nccl-cu12==2.27.3 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/5c/5b/4e4fff7bad39adf89f735f2bc87248c81db71205b62bcc0d5ca5b606b3c3/nvidia_nccl_cu12-2.27.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (322.4 MB)\n", "Collecting nvidia-nvtx-cu12==12.8.90 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (89 kB)\n", "Collecting nvidia-nvjitlink-cu12==12.8.93 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl (39.3 MB)\n", "Collecting nvidia-cufile-cu12==1.13.1.3 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (1.2 MB)\n", 
"Collecting triton==3.4.0 (from torch==2.8.0->lightmem==0.1.0)\n", " Using cached https://pypi.tuna.tsinghua.edu.cn/packages/7d/39/43325b3b651d50187e591eefa22e236b2981afcebaefd4f2fc0ea99df191/triton-3.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (155.5 MB)\n", "Requirement already satisfied: setuptools>=40.8.0 in /disk/disk_20T/xubuqiang/anaconda3/envs/LightMem/lib/python3.11/site-packages (from triton==3.4.0->torch==2.8.0->lightmem==0.1.0) (80.9.0)\n", "\u001b[33mWARNING: The candidate selected for download or install is a yanked version: 'transformers' candidate (version 4.57.0 at https://pypi.tuna.tsinghua.edu.cn/packages/e5/2b/4d2708ac1ff5cd708b6548f4c5812d0ae40d1c28591c4c1c762b6dbdef2d/transformers-4.57.0-py3-none-any.whl (from https://pypi.tuna.tsinghua.edu.cn/simple/transformers/) (requires-python:>=3.9.0))\n", "Reason for being yanked: \u001b[0m\u001b[33m\n", "\u001b[0mBuilding wheels for collected packages: lightmem\n", " Building editable for lightmem (pyproject.toml) ... 
\u001b[?25ldone\n", "\u001b[?25h Created wheel for lightmem: filename=lightmem-0.1.0-0.editable-py3-none-any.whl size=11716 sha256=b8bc5212fe5403e20a1808f7f1f0b7db4990d50b8699794f24852a5fa211cdd2\n", " Stored in directory: /tmp/pip-ephem-wheel-cache-nmptuqgp/wheels/6e/78/3e/ad25a8ed5245b53409d8ea7f1a62638ba248b738ed4cf419a3\n", "Successfully built lightmem\n", "Installing collected packages: nvidia-cusparselt-cu12, mpmath, urllib3, typing-inspection, triton, tqdm, threadpoolctl, sympy, sniffio, safetensors, regex, pydantic_core, psutil, protobuf, portalocker, pillow, nvidia-nvtx-cu12, nvidia-nvjitlink-cu12, nvidia-nccl-cu12, nvidia-curand-cu12, nvidia-cufile-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, numpy, networkx, joblib, jiter, idna, hyperframe, hpack, hf-xet, grpcio, fsspec, filelock, distro, click, charset-normalizer, certifi, annotated-types, scipy, pydantic, nvidia-cusparse-cu12, nvidia-cufft-cu12, nvidia-cudnn-cu12, nltk, h2, anyio, tiktoken, scikit-learn, nvidia-cusolver-cu12, huggingface-hub, torch, tokenizers, openai, transformers, qdrant-client, accelerate, sentence-transformers, llmlingua, lightmem\n", "\u001b[2K Attempting uninstall: urllib3━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/62\u001b[0m [mpmath]t-cu12]\n", "\u001b[2K Found existing installation: urllib3 2.6.0━━━━━━━\u001b[0m \u001b[32m 1/62\u001b[0m [mpmath]\n", "\u001b[2K Uninstalling urllib3-2.6.0:━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m 1/62\u001b[0m [mpmath]\n", "\u001b[2K Successfully uninstalled urllib3-2.6.0━━━━━━━━━\u001b[0m \u001b[32m 1/62\u001b[0m [mpmath]\n", "\u001b[2K Attempting uninstall: psutil[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11/62\u001b[0m [pydantic_core]ion]\n", "\u001b[2K Found existing installation: psutil 7.1.3━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11/62\u001b[0m [pydantic_core]\n", "\u001b[2K Uninstalling psutil-7.1.3:━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11/62\u001b[0m 
[pydantic_core]\n", "\u001b[2K Successfully uninstalled psutil-7.1.3━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11/62\u001b[0m [pydantic_core]\n", "\u001b[2K Attempting uninstall: idnam\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m27/62\u001b[0m [joblib]x]blas-cu12]u12]2]\n", "\u001b[2K Found existing installation: idna 3.11━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m27/62\u001b[0m [joblib]\n", "\u001b[2K Uninstalling idna-3.11:90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m27/62\u001b[0m [joblib]\n", "\u001b[2K Successfully uninstalled idna-3.11━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m27/62\u001b[0m [joblib]\n", "\u001b[2K Attempting uninstall: charset-normalizer[0m\u001b[90m━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m37/62\u001b[0m [click]ck]e]\n", "\u001b[2K Found existing installation: charset-normalizer 3.4.4━━━━━\u001b[0m \u001b[32m37/62\u001b[0m [click]\n", "\u001b[2K Uninstalling charset-normalizer-3.4.4:[90m━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m37/62\u001b[0m [click]\n", "\u001b[2K Successfully uninstalled charset-normalizer-3.4.4━━━━━━━\u001b[0m \u001b[32m37/62\u001b[0m [click]\n", "\u001b[2K Attempting uninstall: certifi━\u001b[0m\u001b[91m╸\u001b[0m\u001b[90m━━━━━━━━━━━━━━━\u001b[0m \u001b[32m38/62\u001b[0m [charset-normalizer]\n", "\u001b[2K Found existing installation: certifi 2025.11.12━━━━━━━━━━━\u001b[0m \u001b[32m38/62\u001b[0m [charset-normalizer]\n", "\u001b[2K Uninstalling certifi-2025.11.12:m╸\u001b[0m\u001b[90m━━━━━━━━━━━━━━━\u001b[0m \u001b[32m38/62\u001b[0m [charset-normalizer]\n", "\u001b[2K Successfully uninstalled certifi-2025.11.12━━━━━━━━━━━━━\u001b[0m \u001b[32m38/62\u001b[0m [charset-normalizer]\n", "\u001b[2K Attempting uninstall: anyio━━━━━━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━\u001b[0m \u001b[32m47/62\u001b[0m [h2]k]a-cudnn-cu12]12]\n", "\u001b[2K Found existing installation: anyio 4.12.0[0m\u001b[90m━━━━━━━━━\u001b[0m \u001b[32m47/62\u001b[0m [h2]\n", "\u001b[2K Uninstalling 
anyio-4.12.0:━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━\u001b[0m \u001b[32m47/62\u001b[0m [h2]\n", "\u001b[2K Successfully uninstalled anyio-4.12.0╺\u001b[0m\u001b[90m━━━━━━━━━\u001b[0m \u001b[32m47/62\u001b[0m [h2]\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62/62\u001b[0m [lightmem]lightmem]llmlingua]ransformers]\n", "\u001b[1A\u001b[2KSuccessfully installed accelerate-1.10.1 annotated-types-0.7.0 anyio-4.11.0 certifi-2025.10.5 charset-normalizer-3.4.3 click-8.3.0 distro-1.9.0 filelock-3.20.0 fsspec-2025.9.0 grpcio-1.75.1 h2-4.3.0 hf-xet-1.1.10 hpack-4.1.0 huggingface-hub-0.35.3 hyperframe-6.1.0 idna-3.10 jiter-0.11.0 joblib-1.5.2 lightmem-0.1.0 llmlingua-0.2.2 mpmath-1.3.0 networkx-3.4.2 nltk-3.9.2 numpy-2.2.6 nvidia-cublas-cu12-12.8.4.1 nvidia-cuda-cupti-cu12-12.8.90 nvidia-cuda-nvrtc-cu12-12.8.93 nvidia-cuda-runtime-cu12-12.8.90 nvidia-cudnn-cu12-9.10.2.21 nvidia-cufft-cu12-11.3.3.83 nvidia-cufile-cu12-1.13.1.3 nvidia-curand-cu12-10.3.9.90 nvidia-cusolver-cu12-11.7.3.90 nvidia-cusparse-cu12-12.5.8.93 nvidia-cusparselt-cu12-0.7.1 nvidia-nccl-cu12-2.27.3 nvidia-nvjitlink-cu12-12.8.93 nvidia-nvtx-cu12-12.8.90 openai-2.3.0 pillow-11.3.0 portalocker-3.2.0 protobuf-6.32.1 psutil-7.1.0 pydantic-2.11.10 pydantic_core-2.33.2 qdrant-client-1.15.1 regex-2025.9.18 safetensors-0.6.2 scikit-learn-1.7.2 scipy-1.15.3 sentence-transformers-5.1.1 sniffio-1.3.1 sympy-1.14.0 threadpoolctl-3.6.0 tiktoken-0.12.0 tokenizers-0.22.1 torch-2.8.0 tqdm-4.67.1 transformers-4.57.0 triton-3.4.0 typing-inspection-0.4.2 urllib3-2.5.0\n" ] } ], "source": [ "# Set your LightMemory project path\n", "LIGHTMEM_PROJECT_PATH = '/disk/disk_20T/xubuqiang/lightmem'\n", "\n", "# Install in editable mode\n", "%cd {LIGHTMEM_PROJECT_PATH}\n", "%env ALL_PROXY=\n", "!pip install -e .\n", "%cd tutorial-notebooks" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. 
import os
import json
import datetime
from lightmem.memory.lightmem import LightMemory
from typing import List, Dict, Any
import pandas as pd
from tqdm import tqdm

# --- logging setup: one timestamped directory per run ---
LOGS_ROOT = "./logs"
RUN_TIMESTAMP = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
RUN_LOG_DIR = os.path.join(LOGS_ROOT, RUN_TIMESTAMP)
os.makedirs(RUN_LOG_DIR, exist_ok=True)

# --- API configuration ---
# Read credentials from the environment instead of hardcoding them in the
# notebook: committed notebooks (and their outputs) can leak secrets.
# Falls back to '' when unset, matching the original placeholder behavior.
API_KEY = os.environ.get('OPENAI_API_KEY', '')
API_BASE_URL = os.environ.get('OPENAI_BASE_URL', '')
LLM_MODEL = 'gpt-4o-mini'

# --- local model / data paths ---
LLMLINGUA_MODEL_PATH = '/disk/disk_20T/fangjizhan/models/llmlingua-2-bert-base-multilingual-cased-meetingbank'
EMBEDDING_MODEL_PATH = '/disk/disk_20T/fangjizhan/models/all-MiniLM-L6-v2'
# NOTE(review): the recorded output of this cell shows
# '/disk/disk_20T/xubuqiang/lightmem/dataset/longmemeval/longmemeval_single.json';
# the bare '/longmemeval_single.json' below looks like a truncated placeholder —
# point this at your actual copy of the dataset before running.
DATA_FILE_PATH = '/longmemeval_single.json'

print(f"RUN_LOG_DIR: {RUN_LOG_DIR}")
print(f"DATA_FILE_PATH: {DATA_FILE_PATH}")
# LightMemory pipeline configuration.
# Stages (in order): prompt pre-compression (LLMLingua-2) -> topic segmentation
# (shared with the pre-compressor) -> LLM-based metadata/summary extraction ->
# embedding index (MiniLM, 384 dims) -> Qdrant retrieval.
config_dict = {
    # Compress incoming messages before they reach the memory manager.
    "pre_compress": True,
    "pre_compressor": {
        "model_name": "llmlingua-2",
        "configs": {
            "llmlingua_config": {
                "model_name": LLMLINGUA_MODEL_PATH,  # local checkpoint path
                "device_map": "cuda",
                "use_llmlingua2": True,
            },
        }
    },
    # Segment the dialogue into topics; reuse the pre-compressor's model.
    "topic_segment": True,
    "precomp_topic_shared": True,
    "topic_segmenter": {
        "model_name": "llmlingua-2",
    },
    # Only user turns are fed into memory extraction.
    "messages_use": "user_only",
    "metadata_generate": True,
    "text_summary": True,
    # LLM backend used for metadata generation / summarization.
    "memory_manager": {
        "model_name": 'openai',
        "configs": {
            "model": LLM_MODEL,
            "api_key": API_KEY,
            "max_tokens": 16000,
            "openai_base_url": API_BASE_URL
        }
    },
    "extract_threshold": 0.1,
    # Dense-vector index over extracted memories.
    "index_strategy": "embedding",
    "text_embedder": {
        "model_name": "huggingface",
        "configs": {
            "model": EMBEDDING_MODEL_PATH,  # all-MiniLM-L6-v2 -> 384-dim vectors
            "embedding_dims": 384,
            "model_kwargs": {"device": "cuda"},
        },
    },
    "retrieve_strategy": "embedding",
    "embedding_retriever": {
        "model_name": "qdrant",
        "configs": {
            "collection_name": "longmemeval_demo",
            "embedding_model_dims": 384,  # must match text_embedder dims
            "path": "./longmemeval_demo_db", 
        }
    },
    # Memory consolidation runs offline (batched) rather than per-turn.
    "update": "offline",
    "logging": {
        "level": "DEBUG",
        "file_enabled": True,
        "log_dir": RUN_LOG_DIR,  # per-run directory created in the config cell
    }
}

print("Initial LightMem...")
lightmem = LightMemory.from_config(config_dict)
print("LightMem initialized!")
# Load the longmemeval question file and collect the per-question fields plus
# the de-duplicated set of haystack sessions shared across questions.
from collections import Counter

with open(DATA_FILE_PATH, 'r', encoding='utf-8') as f:
    data = json.load(f)

# Normalize to a list of question records. The original branch only picked an
# unused `data_item`; the comprehensions below iterate `data` directly and
# would crash on dict keys if the file held a single JSON object.
if not isinstance(data, list):
    data = [data]
data_item = data[0]  # first record, kept for interactive inspection

question_ids = [item.get('question_id', '') for item in data]
question_types = [item.get('question_type', '') for item in data]
questions = [item.get('question', '') for item in data]
question_dates = [item.get('question_date', '') for item in data]
answers = [item.get('answer', '') for item in data]
answer_session_ids = [item.get('answer_session_ids', []) for item in data]

# Collect all unique haystack sessions (avoid duplicates): the same session id
# can appear in several questions' haystacks; first occurrence wins.
haystack_sessions_dict = {}
haystack_dates_dict = {}

for item in data:
    sessions = item.get('haystack_sessions', [])
    dates = item.get('haystack_dates', [])
    session_ids = item.get('haystack_session_ids', [])

    for session, date, sid in zip(sessions, dates, session_ids):
        if sid not in haystack_sessions_dict:
            haystack_sessions_dict[sid] = session
            haystack_dates_dict[sid] = date

haystack_session_ids = list(haystack_sessions_dict.keys())
haystack_sessions = list(haystack_sessions_dict.values())
haystack_dates = list(haystack_dates_dict.values())

print(f"\nDataset Statistics:")
print(f"- Number of questions: {len(questions)}")
print(f"- Number of unique haystack sessions: {len(haystack_sessions)}")
print(f"- Haystack session IDs: {haystack_session_ids[:5]}..." if len(haystack_session_ids) > 5 else f"- Haystack session IDs: {haystack_session_ids}")

print(f"\nQuestion Preview:")
for i, (qid, q) in enumerate(zip(question_ids[:3], questions[:3])):
    preview = q[:100] + '...' if len(q) > 100 else q
    print(f"  [{qid}] {preview}")

print(f"\nQuestion Types Distribution:")
type_counts = Counter(question_types)
for qtype, count in type_counts.items():
    print(f"  {qtype}: {count}")
sessions: {len(haystack_sessions)}\")\n", "print(f\"- Haystack session IDs: {haystack_session_ids[:5]}...\" if len(haystack_session_ids) > 5 else f\"- Haystack session IDs: {haystack_session_ids}\")\n", "\n", "print(f\"\\nQuestion Preview:\")\n", "for i, (qid, q) in enumerate(zip(question_ids[:3], questions[:3])):\n", " preview = q[:100] + '...' if len(q) > 100 else q\n", " print(f\" [{qid}] {preview}\")\n", "\n", "print(f\"\\nQuestion Types Distribution:\")\n", "from collections import Counter\n", "type_counts = Counter(question_types)\n", "for qtype, count in type_counts.items():\n", " print(f\" {qtype}: {count}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 4. ADD memory into LightMem\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "METADATA_GENERATE_PROMPT = \"\"\"\n", "You are a Personal Information Extractor. \n", "Your task is to extract **all possible facts or information** about the user from a conversation, \n", "where the dialogue is organized into topic segments separated by markers like:\n", "\n", "Input format:\n", "--- Topic X ---\n", "[timestamp, weekday] source_id.SpeakerName: message\n", "...\n", "\n", "Important Instructions:\n", "0. You MUST process messages **strictly in ascending sequence_number order** (lowest → highest). For each message, stop and **carefully** evaluate its content before moving to the next. Do NOT reorder, batch-skip, or skip ahead — treat messages one-by-one.\n", "1. You MUST process every user message in order, one by one. \n", " For each message, decide whether it contains any factual information.\n", " - If yes → extract it and rephrase into a standalone sentence.\n", " - If no (pure greeting, filler, or irrelevant remark) → skip it.\n", " - Do NOT skip just because the information looks minor, trivial, or unimportant. \n", " Even small details (e.g., \"User drank coffee this morning\") must be kept. 
# Extraction prompt handed to lightmem.add_memory(METADATA_GENERATE_PROMPT=...).
# It instructs the extractor LLM to walk each topic-segmented conversation in
# sequence_number order and emit {"data": [{"source_id": ..., "fact": ...}]}
# JSON, one rephrased standalone fact per piece of user information.
# NOTE: this text is sent verbatim to the model at runtime — edit with care.
METADATA_GENERATE_PROMPT = """
You are a Personal Information Extractor. 
Your task is to extract **all possible facts or information** about the user from a conversation, 
where the dialogue is organized into topic segments separated by markers like:

Input format:
--- Topic X ---
[timestamp, weekday] source_id.SpeakerName: message
...

Important Instructions:
0. You MUST process messages **strictly in ascending sequence_number order** (lowest → highest). For each message, stop and **carefully** evaluate its content before moving to the next. Do NOT reorder, batch-skip, or skip ahead — treat messages one-by-one.
1. You MUST process every user message in order, one by one. 
   For each message, decide whether it contains any factual information.
   - If yes → extract it and rephrase into a standalone sentence.
   - If no (pure greeting, filler, or irrelevant remark) → skip it.
   - Do NOT skip just because the information looks minor, trivial, or unimportant. 
     Even small details (e.g., "User drank coffee this morning") must be kept. 
     Only skip if it is *completely* meaningless (e.g., "Hi", "lol", "thanks").
2. Perform light contextual completion so that each fact is a clear standalone statement.
   Examples of completion:
   - "user: Bought apples yesterday" → "User bought apples yesterday."
   - "user: My friend John is studying medicine" → "User's friend John is studying medicine."
3. Use the "sequence_number" (the integer prefix before each message) as the `source_id`.
4. Output format:
Please return your response in JSON format.
  {
    "data": [
      {
        "source_id": "",
        "fact": ""
      }
    ]
  }


Examples:

--- Topic 1 ---
[2022-03-20T13:21:00.000, Sun] 0.User: My name is Alice and I work as a teacher.
[2022-03-20T13:21:00.500, Sun] 2.User: My favourite movies are Inception and Interstellar.
--- Topic 2 ---
[2022-03-20T13:21:01.000, Sun] 4.User: I visited Paris last summer.
{"data": [
  {"source_id": 0, "fact": "User's name is Alice."},
  {"source_id": 0, "fact": "User works as a teacher."},
  {"source_id": 2, "fact": "User's favourite movies are Inception and Interstellar."},
  {"source_id": 4, "fact": "User visited Paris last summer."}
]}

Reminder: Be exhaustive. Unless a message is purely meaningless, extract and output it as a fact.
"""
def convert_timestamp(timestamp: str) -> str:
    """
    Convert a timestamp like '2025/12/02 (Tue) 17:06' to '2025-12-02 17:06:00'.

    The parenthesised weekday (e.g. "(Tue)") is optional: inputs such as
    '2025/12/02 17:06' are also accepted. The original implementation split on
    '(' / ')' and raised IndexError when the weekday part was missing.

    Args:
        timestamp: Original timestamp string, with or without a '(Day)' part.

    Returns:
        Converted timestamp string in format '%Y-%m-%d %H:%M:%S'.

    Raises:
        ValueError: If the cleaned text does not match the '%Y/%m/%d %H:%M' format.
    """
    import re
    from datetime import datetime

    # Remove an optional parenthesised weekday, e.g. "(Tue)", collapsing the
    # surrounding whitespace to a single space.
    timestamp_clean = re.sub(r'\s*\([^)]*\)\s*', ' ', timestamp).strip()
    # Now it's like: '2025/12/02 17:06'

    # Parse the timestamp
    dt = datetime.strptime(timestamp_clean, '%Y/%m/%d %H:%M')

    # Convert to target format (seconds are always zero-padded in).
    return dt.strftime('%Y-%m-%d %H:%M:%S')
Running this sequence through the model will result in indexing errors\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_291645] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_291645] Target compression rate: 0.8\n", "2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_362393 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_362393] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_362393] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_362393] Target compression rate: 0.8\n", "Adding turns: 1%| | 3/273 [00:00<00:43, 6.27it/s]2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_440390 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_440390] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_440390] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_440390] Target compression rate: 0.8\n", "2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_479335 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_479335] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_479335] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_479335] Target 
compression rate: 0.8\n", "2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_526890 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_526890] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_526890] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_526890] Target compression rate: 0.8\n", "Adding turns: 2%|▏ | 6/273 [00:00<00:23, 11.40it/s]2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_567625 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_567625] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_567625] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_567625] Target compression rate: 0.8\n", "2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_607559 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_607559] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_607559] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_607559] Target compression rate: 0.8\n", "2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_642700 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - 
[add_memory_20251206_202308_642700] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_642700] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_642700] Target compression rate: 0.8\n", "Adding turns: 3%|▎ | 9/273 [00:00<00:16, 15.58it/s]2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_679553 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_679553] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_679553] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_679553] Target compression rate: 0.8\n", "2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_752148 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_752148] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_752148] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_752148] Target compression rate: 0.8\n", "2025-12-06 20:23:08 - LightMemory - INFO - ========== START add_memory_20251206_202308_798026 ==========\n", "2025-12-06 20:23:08 - LightMemory - INFO - force_segment=False, force_extract=False\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_798026] Extracted 0 visual contexts\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_798026] Restored visual contexts after compression\n", "2025-12-06 20:23:08 - LightMemory - INFO - [add_memory_20251206_202308_798026] Target compression 
rate: 0.8\n", "BertSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation=\"eager\"` when loading the model.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "b2e46e8890bc430bb7e34083aeb32f23", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Batches: 0%| | 0/1 [00:00 I m thinking of getting a smart plug for my floor lamp in living room can turn it on and off with my phone. know if those easy to set up and use? by replaced the bulb in my bedside lamp with LED one about three weeks ago it ' s been a game - changer\n", "[2023-05-26T04:52:04.000, Fri] 10.User: No, haven ' t noticed a difference in my energy bill yet, but ' hoping it will make a difference over time. been thinking about replacing other bulbs in my house with LEDs too, especially in dining room where it ' s a bit dim. Do think worth it to get some under - cabinet lighting in there\n", "[2023-05-26T04:52:05.000, Fri] 11.User: I ' ve been thinking about getting new pendant light above table, something modern and sleek. seen some cool designs on Pinterest, but not sure what would work best in my space. 
def add_sessions_to_memory(lightmem: LightMemory,
                           sessions,
                           session_ids,
                           dates) -> None:
    """
    Add historical sessions to the LightMemory system.

    Sessions are added turn by turn (each turn contains a user message and an
    assistant message). Unlike the original version, neither ``sessions`` nor
    the message dicts inside them are mutated: leading non-user messages are
    skipped on trimmed copies, and timestamp/speaker annotations are added to
    per-turn copies of the messages.

    Args:
        lightmem: LightMemory instance
        sessions: List of sessions, each session contains multiple conversation turns
        session_ids: List of session IDs
        dates: List of session timestamps (will be converted to standard format)
    """
    print("Starting to add historical sessions to memory repository...")

    # Convert all timestamps to standard format
    print("Converting timestamps to standard format...")
    converted_dates = [convert_timestamp(date) for date in dates]

    def _trim_to_user_start(session):
        # Drop leading non-user messages so every turn starts with a user message.
        start = 0
        while start < len(session) and session[start]["role"] != "user":
            start += 1
        return session[start:]

    # Trim once, up front; this also gives an exact turn total for the bar
    # (the original duplicated the trimming logic for counting and iterating).
    trimmed_sessions = [_trim_to_user_start(session) for session in sessions]
    total_turns = sum(len(session) // 2 for session in trimmed_sessions)

    progress_bar = tqdm(total=total_turns, desc="Adding turns")

    for session_idx, (session, session_id, date) in enumerate(
        zip(trimmed_sessions, session_ids, converted_dates)
    ):
        num_turns = len(session) // 2

        for turn_idx in range(num_turns):
            # Extract one turn (user + assistant messages)
            turn_messages = session[turn_idx * 2: turn_idx * 2 + 2]

            # Validate turn structure
            if (len(turn_messages) < 2
                    or turn_messages[0]["role"] != "user"
                    or turn_messages[1]["role"] != "assistant"):
                continue

            # Annotate shallow copies so the caller's message dicts stay untouched.
            annotated_messages = []
            for msg in turn_messages:
                msg = dict(msg)
                msg["time_stamp"] = date
                # Add default speaker information if not present
                if "speaker_name" not in msg:
                    msg["speaker_name"] = "User" if msg["role"] == "user" else "Assistant"
                if "speaker_id" not in msg:
                    msg["speaker_id"] = "speaker_a" if msg["role"] == "user" else "speaker_b"
                annotated_messages.append(msg)

            # Only force_segment and force_extract on the last turn of the last session
            is_last_turn = (session_idx == len(trimmed_sessions) - 1
                            and turn_idx == num_turns - 1)

            # Add turn to memory system
            try:
                lightmem.add_memory(
                    messages=annotated_messages,
                    METADATA_GENERATE_PROMPT=METADATA_GENERATE_PROMPT,
                    force_segment=is_last_turn,
                    force_extract=is_last_turn,
                )
                progress_bar.update(1)
            except Exception as e:
                print(f"\nWarning: Failed to add turn {turn_idx} from session {session_id}: {str(e)}")
                continue

    progress_bar.close()
    print("\nAll historical sessions have been added!")
def test_retrieval_and_answer(lightmem: LightMemory, 
                              questions: List[str], 
                              question_ids: List[str],
                              question_types: List[str],
                              question_dates: List[str],
                              answers: List[str],
                              top_k: int = 20) -> pd.DataFrame:
    """
    Perform memory retrieval, generate answers using LLM, and evaluate correctness.
    
    Args:
        lightmem: LightMemory instance
        questions: List of questions
        question_ids: List of question IDs
        question_types: List of question types
        question_dates: List of question dates
        answers: List of expected answers
        top_k: Number of memory entries to retrieve
    
    Returns:
        DataFrame containing retrieval and evaluation results
    """
    results = []
    
    print(f"Starting memory retrieval and answer generation for {len(questions)} questions...\n")
    
    # Initialize LLM for answer generation (using the same config as LightMemory)
    # Relies on module-level API_KEY / API_BASE_URL / LLM_MODEL being set earlier.
    from openai import OpenAI
    
    llm_client = OpenAI(
        api_key=API_KEY,
        base_url=API_BASE_URL
    )
    
    # LLM for judging (can be the same)
    llm_judge = llm_client
    
    # One iteration per question; enumerate starts at 1 for display only.
    for idx, (qid, question, qtype, qdate, expected_answer) in enumerate(
        zip(question_ids, questions, question_types, question_dates, answers), 1
    ):
        print(f"\n{'='*80}")
        print(f"Question {idx}/{len(questions)} [ID: {qid}]")
        print(f"{'='*80}")
        print(f"Question: {question}")
        print(f"Question Date: {qdate}")
        print(f"Question Type: {qtype}")
        print(f"Expected Answer: {expected_answer}")
        
        try:
            # Step 1: Retrieve relevant memories
            # retrieve() returns one string; split on newlines to get individual
            # memory entries, dropping blanks.
            result_string = lightmem.retrieve(question, limit=top_k)
            related_memories = [m.strip() for m in result_string.split('\n') if m.strip()]
            
            print(f"\nRetrieved {len(related_memories)} relevant memories")
            print("-" * 80)
            
            # Display first few memories
            for mem_idx, memory in enumerate(related_memories, 1):
                print(f"Memory {mem_idx}: {memory}")
            
            # Step 2: Generate answer using LLM
            print("\nGenerating answer...")
            messages = [
                {"role": "system", "content": "You can ONLY answer based on the provided memories."},
                {
                    "role": "user",
                    "content": f"Question: {question}\n\nPlease answer the question based on the following memories:\n{result_string}"
                }
            ]
            
            # temperature=0.0 for deterministic answers where the backend supports it.
            response = llm_client.chat.completions.create(
                model=LLM_MODEL,
                messages=messages,
                max_tokens=1024,
                temperature=0.0
            )
            
            generated_answer = response.choices[0].message.content
            print(f"\nGenerated Answer: {generated_answer}")
            
            # Step 3: Evaluate answer correctness
            print("\nEvaluating answer...")
            
            # Build evaluation prompt

            eval_prompt = f"""You are an expert evaluator. Compare the generated answer with the expected answer.
            Question: {question}
            Expected Answer: {expected_answer}
            Generated Answer: {generated_answer}

            Determine if the generated answer is correct compared to the expected answer.
            Answer only "True" or "False"."""
            
            eval_messages = [{"role": "user", "content": eval_prompt}]
            
            eval_response = llm_judge.chat.completions.create(
                model=LLM_MODEL,
                messages=eval_messages,
                max_tokens=10,
                temperature=0.0
            )
            
            eval_result = eval_response.choices[0].message.content.strip()
            # Substring check: accepts variants like "True" / "true."; "False"
            # contains no "true", so it maps to 0 as intended.
            correct = 1 if "true" in eval_result.lower() else 0
            
            print(f"Evaluation Result: {eval_result} ({'✓ Correct' if correct else '✗ Incorrect'})")
            
            # Record results
            results.append({
                'question_id': qid,
                'question_type': qtype,
                'question': question,
                'question_date': qdate,
                'expected_answer': expected_answer,
                'retrieved_count': len(related_memories),
                'retrieved_memories': related_memories,
                'generated_answer': generated_answer,
                'eval_result': eval_result,
                'correct': correct
            })
            
        except Exception as e:
            # Any failure (retrieval or either LLM call) is recorded as an
            # incorrect row with an 'error' field, so one bad question does
            # not abort the whole evaluation run.
            print(f"\nError: Processing failed - {str(e)}")
            import traceback
            traceback.print_exc()
            
            results.append({
                'question_id': qid,
                'question_type': qtype,
                'question': question,
                'question_date': qdate,
                'expected_answer': expected_answer,
                'retrieved_count': 0,
                'retrieved_memories': [],
                'generated_answer': '',
                'eval_result': '',
                'correct': 0,
                'error': str(e)
            })
    
    print(f"\n{'='*80}")
    print("Retrieval and answer generation completed!")
    print(f"{'='*80}\n")
    
    df = pd.DataFrame(results)
    
    # Print summary statistics
    if len(df) > 0 and 'correct' in df.columns:
        accuracy = df['correct'].mean() * 100
        print(f"Overall Accuracy: {accuracy:.2f}% ({df['correct'].sum()}/{len(df)})")
    
    return df