Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -13,7 +13,7 @@ except Exception as e:
|
|
| 13 |
|
| 14 |
# ---------- Configuration Area ----------
|
| 15 |
# ⚠️⚠️⚠️ Please change this to your model repository ⚠️⚠️⚠️
|
| 16 |
-
MODEL_REPO = "Marcus719/Llama-3.2-
|
| 17 |
# Specify to download only the q4_k_m file to prevent running out of disk space
|
| 18 |
GGUF_FILENAME = "unsloth.Q4_K_M.gguf"
|
| 19 |
DEFAULT_N_CTX = 2048 # Context length
|
|
@@ -141,7 +141,7 @@ with gr.Blocks(title="Llama 3.2 Lab2 Project") as demo:
|
|
| 141 |
# Header
|
| 142 |
with gr.Row():
|
| 143 |
with gr.Column(scale=1):
|
| 144 |
-
gr.Markdown("# 🦙 Llama 3.2 (
|
| 145 |
gr.Markdown(
|
| 146 |
f"""
|
| 147 |
**ID2223 Lab 2 Project** | Fine-tuned on **FineTome-100k**.
|
|
|
|
| 13 |
|
| 14 |
# ---------- Configuration Area ----------
|
| 15 |
# ⚠️⚠️⚠️ Please change this to your model repository ⚠️⚠️⚠️
|
| 16 |
+
MODEL_REPO = "Marcus719/Llama-3.2-1B-Instruct-Lab2-GGUF"
|
| 17 |
# Specify to download only the q4_k_m file to prevent running out of disk space
|
| 18 |
GGUF_FILENAME = "unsloth.Q4_K_M.gguf"
|
| 19 |
DEFAULT_N_CTX = 2048 # Context length
|
|
|
|
| 141 |
# Header
|
| 142 |
with gr.Row():
|
| 143 |
with gr.Column(scale=1):
|
| 144 |
+
gr.Markdown("# 🦙 Llama 3.2 (1B) Fine-Tuned Chatbot")
|
| 145 |
gr.Markdown(
|
| 146 |
f"""
|
| 147 |
**ID2223 Lab 2 Project** | Fine-tuned on **FineTome-100k**.
|