# Source: Hugging Face Space "phi_inference" — download_model.py
# (uploaded by Leon4gr45, commit 302a3ec, 392 bytes). Header above the
# original raw view converted to a comment so the file parses as Python.
# download_model.py
"""Pre-fetch a Hugging Face checkpoint into the local cache.

Running this script downloads the model weights and tokenizer for the
InternVL2.5-2B checkpoint into the default Hugging Face cache directory,
so a later inference process can load them without waiting on the network.
"""
import torch  # noqa: F401 — kept from original; ensures torch backend is importable before transformers loads weights
from transformers import AutoModel, AutoTokenizer

# Default checkpoint; main() accepts any other repo id as an argument.
DEFAULT_MODEL_ID = "OpenGVLab/InternVL2_5-2B"


def main(model_id: str = DEFAULT_MODEL_ID) -> None:
    """Download *model_id*'s model and tokenizer to the default HF cache.

    SECURITY NOTE(review): ``trust_remote_code=True`` executes Python code
    bundled with the checkpoint repository. It is required for InternVL's
    custom architecture, but only use it with repositories you trust.
    """
    print(f"Downloading {model_id}...")
    # Download model and tokenizer to the default cache directory
    model = AutoModel.from_pretrained(model_id, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    print("Download complete.")


# Guard the download behind __main__ so importing this module does not
# trigger a multi-gigabyte network fetch as a side effect (the original
# ran everything at module top level).
if __name__ == "__main__":
    main()