# AsyncRAG / download_models.py
# Author: Zubaish (commit 2194516, "update")
# download_models.py
from transformers import AutoModelForCausalLM, AutoTokenizer
from langchain_huggingface import HuggingFaceEmbeddings
from config import EMBEDDING_MODEL, LLM_MODEL
def main() -> None:
    """Pre-download the embedding model and LLM into the local Hugging Face cache.

    Run once (e.g. at build/deploy time) so that later application startups
    can load both models offline without a network fetch.
    """
    print("⏳ Downloading Embedding Model...")
    # Instantiating the wrapper is enough to trigger the download into the cache;
    # the object itself is intentionally discarded.
    HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)

    print(f"⏳ Downloading LLM: {LLM_MODEL}...")
    # Direct download to cache — tokenizer and weights are fetched separately.
    AutoTokenizer.from_pretrained(LLM_MODEL)
    AutoModelForCausalLM.from_pretrained(LLM_MODEL)
    print("✅ Models cached successfully")


if __name__ == "__main__":
    # Guard so importing this module does NOT kick off multi-GB downloads;
    # side effects now run only when executed as a script.
    main()