Spaces: Build error

gusreinaos committed · Commit 7b8ee94 · 1 Parent(s): 1a7c573

Fixed

app.py CHANGED
@@ -3,7 +3,7 @@ import subprocess
 import sys
 import os
 
-# === RUNTIME INSTALL OF llama-cpp-python
+# === RUNTIME INSTALL OF llama-cpp-python ===
 try:
     from llama_cpp import Llama
     print("llama-cpp-python already installed.")
@@ -15,30 +15,30 @@ except ImportError:
             "https://github.com/yownas/llama-cpp-python-wheels/releases/download/v0.3.16/llama_cpp_python-0.3.16+cpuavx-cp310-cp310-linux_x86_64.whl"
         ])
         print("llama-cpp-python installed from wheel.")
-    except Exception as e:
-        print("Wheel failed → falling back to PyPI
+    except Exception as e:
+        print("Wheel failed → falling back to PyPI...")
         subprocess.check_call([
             sys.executable, "-m", "pip", "install", "--no-cache-dir",
             "llama-cpp-python==0.3.16", "--force-reinstall"
         ])
-    from llama_cpp import Llama  #
+    from llama_cpp import Llama  # ← INDENTED CORRECTLY
 
 from huggingface_hub import hf_hub_download
 
-# ===
-MODEL_REPO = "
-MODEL_FILE = "
+# === WORKING PUBLIC MODEL ===
+MODEL_REPO = "TheBloke/Llama-3.2-3B-Instruct-GGUF"
+MODEL_FILE = "llama-3.2-3b-instruct-q4_k_m.gguf"
 
-print("Downloading
+print("Downloading Llama 3.2 3B Instruct (Q4_K_M)...")
 model_path = hf_hub_download(
     repo_id=MODEL_REPO,
     filename=MODEL_FILE,
     local_dir="./models",
     local_dir_use_symlinks=False
 )
-print(f"Model
+print(f"Model downloaded: {model_path}")
 
-print("Loading model (
+print("Loading model into memory (20–40 sec)...")
 llm = Llama(
     model_path=model_path,
     n_ctx=8192,
@@ -47,7 +47,7 @@ llm = Llama(
     n_gpu_layers=0,
     verbose=False
 )
-print("Model loaded!")
+print("Model loaded → ready to chat!")
 
 def chat(message, history):
     if not message.strip():
@@ -65,7 +65,7 @@ def chat(message, history):
         max_tokens=512,
         temperature=0.7,
         top_p=0.9,
-        stop=["<|eot_id|>", "<|end_of_text|>"],
+        stop=["<|eot_id|>", "<|end_of_text|>"],
         stream=False
     )
 
@@ -73,7 +73,7 @@ def chat(message, history):
     history.append((message, bot_response))
     return history, ""
 
-# === CSS &
+# === CSS & UI (perfect) ===
 custom_css = """
 @import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@400;700&family=Source+Code+Pro:wght@400;600&display=swap');
 body, .gradio-container { background: #0c0c0c !important; font-family: 'JetBrains Mono', monospace !important; }
@@ -86,38 +86,19 @@ button { background: #1a1a1a !important; border: 1px solid #00ff00 !important; c
 button:hover { background: #00ff00 !important; color: #000 !important; }
 .primary { background: #00ff00 !important; color: #000 !important; }
 footer { display: none !important; }
-::-webkit-scrollbar { width: 8px; background: #0c0c0c; }
-::-webkit-scrollbar-thumb { background: #00ff00; }
 """
 
 with gr.Blocks(theme=gr.themes.Base(primary_hue="green"), css=custom_css, title="$ LLAMA TERMINAL") as demo:
-    gr.Markdown("# $ LLAMA TERMINAL\n```\n> System Online |
+    gr.Markdown("# $ LLAMA TERMINAL\n```\n> System Online | Llama 3.2 3B Ready\n> Type your query below...\n```")
     chatbot = gr.Chatbot(height=600)
-
     with gr.Row():
         msg = gr.Textbox(placeholder="$ Enter command...", show_label=False, scale=8, container=False)
         submit = gr.Button("SEND", scale=1, variant="primary")
-
-    gr.Examples(
-        examples=[
-            "What is the capital of France?",
-            "Explain quantum computing",
-            "Write fibonacci in Python",
-            "Write a haiku about AI",
-        ],
-        inputs=msg
-    )
-
+    gr.Examples(["What is the capital of France?", "Write a haiku about AI"], inputs=msg)
     gr.ClearButton([msg, chatbot], value="CLEAR")
-
     submit.click(chat, [msg, chatbot], [chatbot, msg])
     msg.submit(chat, [msg, chatbot], [chatbot, msg])
 
 if __name__ == "__main__":
     demo.queue(max_size=20)
-    demo.launch(
-        share=True,
-        server_name="0.0.0.0",
-        server_port=7860,
-        show_error=True
-    )
+    demo.launch(server_name="0.0.0.0", server_port=7860, share=True, show_error=True)
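Since the Space still shows "Build error" after this commit, a standalone smoke test of the new model constants is a reasonable next step. This is a sketch under the commit's own assumptions: it reuses the repo id and filename hard-coded above, and whether that GGUF repo actually resolves on the Hub is exactly what the test checks.

# smoke_test.py: sketch; requires huggingface_hub and llama-cpp-python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Repo id and filename taken verbatim from the commit; their existence
# on the Hub is the assumption this test is meant to verify.
model_path = hf_hub_download(
    repo_id="TheBloke/Llama-3.2-3B-Instruct-GGUF",
    filename="llama-3.2-3b-instruct-q4_k_m.gguf",
    local_dir="./models",  # local_dir_use_symlinks is deprecated in recent huggingface_hub and can be dropped
)

# Same load settings as app.py: CPU-only, 8K context.
llm = Llama(model_path=model_path, n_ctx=8192, n_gpu_layers=0, verbose=False)
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Reply with one short sentence."}],
    max_tokens=32,
)
print(out["choices"][0]["message"]["content"])

If hf_hub_download raises huggingface_hub's RepositoryNotFoundError (or a 404), the MODEL_REPO constant is the remaining bug rather than the install fallback this commit fixed.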