Update app.py
app.py CHANGED
@@ -11,17 +11,18 @@ from llama_cpp import Llama
 import requests
 from pathlib import Path
 
-# ---------------------- Download Model ----------------------
 
-
-
+# ---------------------- Download Model ----------------------
+MODEL_URL = "https://huggingface.co/unsloth/Llama-3.2-1B-Instruct-GGUF/resolve/main/llama-3.2-1b-instruct.Q8_0.gguf"
+MODEL_PATH = "llama-3.2-1b-instruct.Q8_0.gguf"
 
 if not Path(MODEL_PATH).exists():
-    print("📥 Downloading LLaMA model...")
+    print("📥 Downloading Unsloth LLaMA 3.2 1B Q8_0 model...")
     response = requests.get(MODEL_URL, stream=True)
     with open(MODEL_PATH, "wb") as f:
         for chunk in response.iter_content(chunk_size=8192):
-
+            if chunk:
+                f.write(chunk)
     print("✅ Download complete!")
 
 # ---------------------- Load Data ----------------------
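
The hunk header shows this block sits just below "from llama_cpp import Llama", so the downloaded GGUF is presumably loaded with llama-cpp-python later in app.py. A minimal sketch of that load step, assuming stock llama-cpp-python; the n_ctx value and the prompt are illustrative, not taken from the commit:

from pathlib import Path

from llama_cpp import Llama

MODEL_PATH = "llama-3.2-1b-instruct.Q8_0.gguf"

# Assumes the streaming download above has already written the file.
assert Path(MODEL_PATH).exists(), "run the download step first"

# n_ctx is an illustrative choice, not a value from this commit.
llm = Llama(model_path=MODEL_PATH, n_ctx=2048)

# llama-cpp-python returns an OpenAI-style completion dict.
out = llm("Q: What is the capital of France? A:", max_tokens=32, stop=["\n"])
print(out["choices"][0]["text"].strip())

One caveat on the download step itself: requests.get(MODEL_URL, stream=True) is never checked for success, so a failed request would silently write an HTML error page into the .gguf file; adding response.raise_for_status() before the write loop would surface that early.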