eddddyy committed on
Commit
2ca0aae
·
verified ·
1 Parent(s): 17ffe56

Update requirements.txt

Browse files
Files changed (1) hide show
  1. requirements.txt +18 -23
requirements.txt CHANGED
@@ -1,28 +1,23 @@
1
- # Transformers library by Hugging Face for LLMs
2
- transformers>=4.40.0 # Required for LLaMA 3.1 and PEFT support
 
3
 
4
- # Hugging Face Hub integration
5
- huggingface_hub>=0.21.4 # To download private models and access secrets via token
6
 
7
- # Gradio for UI in Hugging Face Spaces
8
- gradio>=4.19 # Provides web-based interface to interact with the assistant
 
 
 
9
 
10
- # BitsAndBytes for 4-bit and 8-bit quantized model loading (LLaMA 3.1 needs this for memory-efficient inference)
11
- bitsandbytes>=0.42.0 # Required for loading large models in 4-bit/8-bit
12
 
13
- # Accelerate for device mapping and optimized model training/inference
14
- accelerate>=0.27.0 # Makes it easy to run across CPU/GPU with minimal changes
 
15
 
16
- # PEFT (Parameter-Efficient Fine-Tuning) for QLoRA / LoRA-based fine-tuning
17
- peft>=0.10.0 # Used to fine-tune large language models efficiently
18
-
19
- # Datasets library to load and process your JSON dataset
20
- datasets>=2.19.0 # Useful for loading dataset.json and mapping instructions/responses
21
-
22
- # Tokenizers backend (implicitly used by transformers, included for stability)
23
- tokenizers>=0.19.1
24
-
25
- # Optional utilities (used for progress bars, etc.)
26
- tqdm>=4.66.0
27
- gradio>=4.30.0
28
- torch>=2.2.0
 
1
+ # 🧠 Core LLM + Model Interaction
2
+ transformers>=4.41.0 # Qwen2.5 support, multi-modal support, trust_remote_code fix
3
+ huggingface_hub>=0.21.4 # Hugging Face Hub access (private models, secrets)
4
 
5
+ # 🖼️ User Interface (Web App)
6
+ gradio>=4.30.0 # Web UI for interaction
7
 
8
+ # 🧠 Model Inference Optimization
9
+ torch>=2.2.0 # PyTorch core
10
+ accelerate>=0.27.0 # Simplified GPU/CPU device mapping
11
+ bitsandbytes>=0.42.0 # Quantized model (4-bit / 8-bit) inference support
12
+ peft>=0.10.0 # Parameter-efficient fine-tuning (QLoRA / LoRA)
13
 
14
+ # 📊 Dataset handling (optional, useful for training/fine-tuning)
15
+ datasets>=2.19.0 # Load JSON/CSV datasets
16
 
17
+ # 🧮 Tokenization / Utilities
18
+ tokenizers>=0.19.1 # Tokenization backend for transformers
19
+ tqdm>=4.66.0 # Nice progress bars in CLI
20
 
21
+ # (Optional) Dependency for screenshot/capture or system utilities if needed later
22
+ # pillow>=10.0.0 # Image handling (for future vision support)
23
+ # pyautogui>=0.9.54 # GUI automation (e.g., for screenshots or cursor actions)