# (removed pasted file-viewer metadata — file size, commit hash, line-number gutter)
# ai-service/scripts/download_llm_model.py
import os
from huggingface_hub import hf_hub_download
# --- Configuration for the NEW, FAST Language Model ---
# Repository of the model on Hugging Face.
REPO_ID = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
# File name of the model artifact within the repo.
FILENAME = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
# Where to save the model (in the 'llm_model' folder next to this script's parent).
SAVE_DIRECTORY = os.path.join(os.path.dirname(__file__), '..', 'llm_model')
def download_language_model():
    """
    Download the configured GGUF language model from Hugging Face.

    Skips the download when the target file already exists; otherwise
    creates SAVE_DIRECTORY and fetches FILENAME from REPO_ID.

    Raises:
        SystemExit: with status 1 if the download fails, so callers /
            CI pipelines see a non-zero exit code instead of a silent
            success.
    """
    print(f"--- Starting download for Language Model: {FILENAME} ---")

    # Full path of the target file.
    file_path = os.path.join(SAVE_DIRECTORY, FILENAME)

    # Skip the download if the model is already present on disk.
    if os.path.exists(file_path):
        print(f"✅ Model '{FILENAME}' already exists at: {SAVE_DIRECTORY}")
        print("Skipping download.")
        return

    # Create the target folder if it does not exist yet.
    os.makedirs(SAVE_DIRECTORY, exist_ok=True)

    print(f"Downloading model to: {SAVE_DIRECTORY}")
    print("This may take a moment (approx 700-800MB)...")

    try:
        # Fetch the model file from Hugging Face.
        # NOTE(review): local_dir_use_symlinks is deprecated/ignored in
        # recent huggingface_hub releases — confirm the pinned version
        # before removing it. Kept here because it matters on Windows
        # with older versions.
        hf_hub_download(
            repo_id=REPO_ID,
            filename=FILENAME,
            local_dir=SAVE_DIRECTORY,
            local_dir_use_symlinks=False  # Important for Windows
        )
        print("\n" + "="*50)
        print(f"✅ Language Model '{FILENAME}' downloaded successfully!")
        print("="*50 + "\n")
    except Exception as e:
        # Broad catch is deliberate for a CLI script: report any failure
        # in a friendly way, but exit non-zero instead of swallowing the
        # error (the original returned normally, so scripts/CI saw exit 0
        # even when the download failed).
        print(f"🚨 An error occurred during download: {e}")
        raise SystemExit(1) from e
# Script entry point: download the model when run directly.
# (Removed a stray " |" paste artifact that made this line a syntax error.)
if __name__ == "__main__":
    download_language_model()