|
|
from huggingface_hub import hf_hub_download |
|
|
import os |
|
|
|
|
|
|
|
|
# Make sure the local target directory exists before downloading into it.
os.makedirs("models", exist_ok=True)

print("Downloading model from tatendachirume/zems repository...")
print("This may take some time depending on your internet connection and the model size.")

try:
    # Fetch the quantized GGUF model from the Hub into ./models.
    # hf_hub_download returns the absolute path of the downloaded file.
    model_path = hf_hub_download(
        repo_id="tatendachirume/zems",
        filename="MedGenius_LLaMA-3.2B.Q8_0.gguf",
        local_dir="./models",
    )

    # NOTE: the original success/error messages contained mojibake
    # (mis-encoded emoji) and a string literal broken across two lines,
    # which was a SyntaxError; restored here as single-line prints.
    print("\n✅ Model downloaded successfully!")
    print(f"📁 File saved to: {model_path}")
    print("\nYou can now run the application with:")
    print("streamlit run app.py")

except Exception as e:
    # Best-effort error report: the download can fail for many reasons
    # (network, missing file, auth), so a broad catch with guidance is
    # appropriate at this top-level script boundary.
    print(f"\n❌ Error downloading the model: {e}")
    print("\nIf you're encountering issues, try the following:")
    print("1. Make sure you have a stable internet connection")
    print("2. Verify that the model file exists at: https://huggingface.co/tatendachirume/zems/blob/main/MedGenius_LLaMA-3.2B.Q8_0.gguf")
    print("3. Try logging in to Hugging Face: huggingface-cli login")