# This file was autogenerated by uv via the following command:
# uv export --frozen --no-hashes -o requirements.txt
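#
# A minimal sketch of regenerating and consuming this export (assumes uv is
# installed and the project's pyproject.toml / uv.lock are present; a plain
# pip install also works on the exported file):
#
#   uv export --frozen --no-hashes -o requirements.txt
#   pip install -r requirements.txt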
aiofiles==24.1.0
# via gradio
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.12.15
# via
# huggingface-hub
# langchain-community
# llama-index-core
aiosignal==1.4.0
# via aiohttp
aiosqlite==0.21.0
# via llama-index-core
annotated-types==0.7.0
# via pydantic
anyio==4.10.0
# via
# gradio
# httpx
# openai
# starlette
attrs==25.3.0
# via aiohttp
audioop-lts==0.2.2 ; python_full_version >= '3.13'
# via gradio
banks==2.2.0
# via llama-index-core
beautifulsoup4==4.13.4
# via llama-index-readers-file
black==25.1.0
# via rag-w-binary-quant
brotli==1.1.0
# via gradio
certifi==2025.8.3
# via
# httpcore
# httpx
# llama-cloud
# requests
cffi==1.17.1 ; platform_python_implementation == 'PyPy'
# via zstandard
charset-normalizer==3.4.2
# via requests
click==8.2.1
# via
# black
# llama-cloud-services
# nltk
# typer
# uvicorn
colorama==0.4.6
# via
# click
# griffe
# tqdm
dataclasses-json==0.6.7
# via
# langchain-community
# llama-index-core
defusedxml==0.7.1
# via llama-index-readers-file
deprecated==1.2.18
# via
# banks
# llama-index-core
# llama-index-instrumentation
dirtyjson==1.0.8
# via llama-index-core
distro==1.9.0
# via openai
docx2txt==0.9
# via rag-w-binary-quant
dotenv==0.9.9
# via rag-w-binary-quant
fastapi==0.116.1
# via gradio
ffmpy==0.6.1
# via gradio
filelock==3.18.0
# via
# huggingface-hub
# torch
# transformers
filetype==1.2.0
# via llama-index-core
frozenlist==1.7.0
# via
# aiohttp
# aiosignal
fsspec==2025.7.0
# via
# gradio-client
# huggingface-hub
# llama-index-core
# torch
gradio==5.41.0
# via rag-w-binary-quant
gradio-client==1.11.0
# via gradio
greenlet==3.2.3
# via sqlalchemy
griffe==1.9.0
# via banks
groovy==0.1.2
# via gradio
grpcio==1.67.1
# via pymilvus
h11==0.16.0
# via
# httpcore
# uvicorn
hf-xet==1.1.5 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
# via huggingface-hub
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via
# gradio
# gradio-client
# langsmith
# llama-cloud
# llama-index-core
# openai
# safehttpx
httpx-sse==0.4.1
# via langchain-community
huggingface-hub==0.34.3
# via
# gradio
# gradio-client
# llama-index-embeddings-huggingface
# sentence-transformers
# tokenizers
# transformers
idna==3.10
# via
# anyio
# httpx
# requests
# yarl
isort==6.0.1
# via rag-w-binary-quant
jinja2==3.1.6
# via
# banks
# gradio
# torch
jiter==0.10.0
# via openai
joblib==1.5.1
# via
# nltk
# scikit-learn
jsonpatch==1.33
# via langchain-core
jsonpointer==3.0.0
# via jsonpatch
langchain==0.3.27
# via
# langchain-community
# rag-w-binary-quant
langchain-community==0.3.27
# via rag-w-binary-quant
langchain-core==0.3.72
# via
# langchain
# langchain-community
# langchain-openai
# langchain-text-splitters
langchain-openai==0.3.28
# via rag-w-binary-quant
langchain-text-splitters==0.3.9
# via langchain
langsmith==0.4.10
# via
# langchain
# langchain-community
# langchain-core
llama-cloud==0.1.35
# via
# llama-cloud-services
# llama-index-indices-managed-llama-cloud
llama-cloud-services==0.6.54
# via llama-parse
llama-index==0.13.0
# via rag-w-binary-quant
llama-index-cli==0.5.0
# via llama-index
llama-index-core==0.13.0
# via
# llama-cloud-services
# llama-index
# llama-index-cli
# llama-index-embeddings-huggingface
# llama-index-embeddings-openai
# llama-index-indices-managed-llama-cloud
# llama-index-llms-openai
# llama-index-readers-file
# llama-index-readers-llama-parse
llama-index-embeddings-huggingface==0.6.0
# via rag-w-binary-quant
llama-index-embeddings-openai==0.5.0
# via
# llama-index
# llama-index-cli
llama-index-indices-managed-llama-cloud==0.9.0
# via llama-index
llama-index-instrumentation==0.4.0
# via llama-index-workflows
llama-index-llms-openai==0.5.0
# via
# llama-index
# llama-index-cli
llama-index-readers-file==0.5.0
# via llama-index
llama-index-readers-llama-parse==0.5.0
# via llama-index
llama-index-workflows==1.2.0
# via llama-index-core
llama-parse==0.6.54
# via llama-index-readers-llama-parse
logging==0.4.9.6
# via rag-w-binary-quant
markdown-it-py==3.0.0 ; sys_platform != 'emscripten'
# via rich
markupsafe==3.0.2
# via
# gradio
# jinja2
marshmallow==3.26.1
# via dataclasses-json
mdurl==0.1.2 ; sys_platform != 'emscripten'
# via markdown-it-py
milvus-lite==2.5.1 ; sys_platform != 'win32'
# via pymilvus
mpmath==1.3.0
# via sympy
multidict==6.6.3
# via
# aiohttp
# yarl
mypy-extensions==1.1.0
# via
# black
# typing-inspect
nest-asyncio==1.6.0
# via llama-index-core
networkx==3.5
# via
# llama-index-core
# torch
nltk==3.9.1
# via
# llama-index
# llama-index-core
numpy==2.3.2
# via
# gradio
# langchain-community
# llama-index-core
# pandas
# rag-w-binary-quant
# scikit-learn
# scipy
# transformers
nvidia-cublas-cu12==12.6.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12==12.6.80 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cuda-nvrtc-cu12==12.6.77 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cuda-runtime-cu12==12.6.77 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cudnn-cu12==9.5.1.17 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cufft-cu12==11.3.0.4 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cufile-cu12==1.11.1.6 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-curand-cu12==10.3.7.77 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cusolver-cu12==11.7.1.2 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cusparse-cu12==12.5.4.2 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via
# nvidia-cusolver-cu12
# torch
nvidia-cusparselt-cu12==0.6.3 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-nccl-cu12==2.26.2 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-nvjitlink-cu12==12.6.85 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
# torch
nvidia-nvtx-cu12==12.6.77 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
openai==1.98.0
# via
# langchain-openai
# llama-index-embeddings-openai
# llama-index-llms-openai
orjson==3.11.1
# via
# gradio
# langsmith
packaging==25.0
# via
# black
# gradio
# gradio-client
# huggingface-hub
# langchain-core
# langsmith
# marshmallow
# transformers
pandas==2.2.3
# via
# gradio
# llama-index-readers-file
# pymilvus
pathspec==0.12.1
# via black
pillow==11.3.0
# via
# gradio
# llama-index-core
# sentence-transformers
platformdirs==4.3.8
# via
# banks
# black
# llama-cloud-services
# llama-index-core
propcache==0.3.2
# via
# aiohttp
# yarl
protobuf==6.31.1
# via pymilvus
pycparser==2.22 ; platform_python_implementation == 'PyPy'
# via cffi
pydantic==2.11.7
# via
# banks
# fastapi
# gradio
# langchain
# langchain-core
# langsmith
# llama-cloud
# llama-cloud-services
# llama-index-core
# llama-index-instrumentation
# llama-index-workflows
# openai
# pydantic-settings
pydantic-core==2.33.2
# via pydantic
pydantic-settings==2.10.1
# via langchain-community
pydub==0.25.1
# via gradio
pygments==2.19.2 ; sys_platform != 'emscripten'
# via rich
pymilvus==2.5.14
# via rag-w-binary-quant
pypdf==5.9.0
# via llama-index-readers-file
python-dateutil==2.9.0.post0
# via pandas
python-dotenv==1.1.1
# via
# dotenv
# llama-cloud-services
# pydantic-settings
# pymilvus
python-multipart==0.0.20
# via gradio
pytz==2025.2
# via pandas
pyyaml==6.0.2
# via
# gradio
# huggingface-hub
# langchain
# langchain-community
# langchain-core
# llama-index-core
# transformers
regex==2025.7.34
# via
# nltk
# tiktoken
# transformers
requests==2.32.4
# via
# huggingface-hub
# langchain
# langchain-community
# langsmith
# llama-index-core
# requests-toolbelt
# tiktoken
# transformers
requests-toolbelt==1.0.0
# via langsmith
rich==14.1.0 ; sys_platform != 'emscripten'
# via typer
ruff==0.12.7 ; sys_platform != 'emscripten'
# via gradio
safehttpx==0.1.6
# via gradio
safetensors==0.5.3
# via transformers
scikit-learn==1.7.1
# via sentence-transformers
scipy==1.16.1
# via
# scikit-learn
# sentence-transformers
semantic-version==2.10.0
# via gradio
sentence-transformers==5.0.0
# via llama-index-embeddings-huggingface
setuptools==80.9.0
# via
# llama-index-core
# pymilvus
# torch
# triton
shellingham==1.5.4 ; sys_platform != 'emscripten'
# via typer
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via
# anyio
# openai
soupsieve==2.7
# via beautifulsoup4
sqlalchemy==2.0.42
# via
# langchain
# langchain-community
# llama-index-core
starlette==0.47.2
# via
# fastapi
# gradio
striprtf==0.0.26
# via llama-index-readers-file
sympy==1.14.0
# via torch
tenacity==9.1.2
# via
# langchain-community
# langchain-core
# llama-cloud-services
# llama-index-core
threadpoolctl==3.6.0
# via scikit-learn
tiktoken==0.9.0
# via
# langchain-openai
# llama-index-core
tokenizers==0.21.4
# via transformers
tomlkit==0.13.3
# via gradio
torch==2.7.1
# via sentence-transformers
tqdm==4.67.1
# via
# huggingface-hub
# llama-index-core
# milvus-lite
# nltk
# openai
# sentence-transformers
# transformers
transformers==4.54.1
# via sentence-transformers
triton==3.3.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
typer==0.16.0 ; sys_platform != 'emscripten'
# via gradio
typing-extensions==4.14.1
# via
# aiosignal
# aiosqlite
# anyio
# beautifulsoup4
# fastapi
# gradio
# gradio-client
# huggingface-hub
# langchain-core
# llama-index-core
# openai
# pydantic
# pydantic-core
# sentence-transformers
# sqlalchemy
# starlette
# torch
# typer
# typing-inspect
# typing-inspection
typing-inspect==0.9.0
# via
# dataclasses-json
# llama-index-core
typing-inspection==0.4.1
# via
# pydantic
# pydantic-settings
tzdata==2025.2
# via pandas
ujson==5.10.0
# via pymilvus
urllib3==2.5.0
# via
# gradio
# requests
uvicorn==0.35.0 ; sys_platform != 'emscripten'
# via gradio
websockets==15.0.1
# via gradio-client
wrapt==1.17.2
# via
# deprecated
# llama-index-core
yarl==1.20.1
# via aiohttp
zstandard==0.23.0
# via langsmith