# llm-tuning-ui / app.py
import gradio as gr
from llama_cpp import Llama

# 1. Load the GGUF model from the Hub once at startup
# (the file is downloaded and cached from the model repo below).
# MODEL_PATH = "simonper/fine-tuned-gguf-modal1/Llama-3.2-1B.Q8_0.gguf"  # <- only needed for the local-path variant kept below
llm = Llama.from_pretrained(
    repo_id="simonper/fine-tuned-gguf-modal1",
    filename="Llama-3.2-1B.Q8_0.gguf",
)
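
# Optional smoke test (a minimal sketch, commented out so it never runs in the
# Space): a raw completion call against the loaded model. The prompt text here
# is purely illustrative and not part of the app.
# out = llm("Q: What is 2 + 2?\nA:", max_tokens=16, stop=["\n"])
# print(out["choices"][0]["text"])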
"""
# 2. Load the GGUF model once at startup
llm = Llama(
model_path=MODEL_PATH,
n_ctx=4096, # context length, adjust if needed
n_threads=8, # tweak based on CPU in the Space
n_gpu_layers=0, # 0 = pure CPU, >0 if GPU layers are available
)
"""

def build_prompt(system_message: str, history: list[dict], user_message: str) -> str:
    """
    Simple instruction-style prompt builder for GGUF/llama.cpp.
    You can make this fancier or closer to Llama 3's official chat format if you want.
    """
    lines = []
    if system_message:
        lines.append(f"System: {system_message}\n")
    for turn in history:
        role = turn["role"]
        content = turn["content"]
        if role == "user":
            lines.append(f"User: {content}")
        elif role == "assistant":
            lines.append(f"Assistant: {content}")
    lines.append(f"User: {user_message}")
    lines.append("Assistant:")
    return "\n".join(lines)
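
# Alternative prompt handling (a sketch, not wired into the UI): llama-cpp-python
# can apply the model's own chat template via create_chat_completion, which
# avoids the hand-rolled "User:/Assistant:" framing above. The name respond_chat
# is hypothetical; swap it in for `respond` below to try it.
# def respond_chat(message, history, system_message, max_tokens, temperature, top_p):
#     messages = [{"role": "system", "content": system_message}, *history,
#                 {"role": "user", "content": message}]
#     out = llm.create_chat_completion(
#         messages=messages,
#         max_tokens=int(max_tokens),
#         temperature=float(temperature),
#         top_p=float(top_p),
#     )
#     return out["choices"][0]["message"]["content"]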

def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # 2. Build a text prompt from system + history + new message
    prompt = build_prompt(system_message, history, message)

    # 3. Call the llama.cpp model
    output = llm(
        prompt,
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        stop=["User:", "System:"],  # stop when the next user/system turn would start
    )
    reply = output["choices"][0]["text"].strip()
    return reply
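
# Streaming variant (a sketch; respond_stream is a hypothetical name): calling
# llm(..., stream=True) yields chunks whose choices[0]["text"] holds the newly
# generated piece, and gr.ChatInterface accepts a generator, so yielding the
# growing reply streams tokens to the UI. This mirrors the old InferenceClient
# loop kept at the bottom of this file.
# def respond_stream(message, history, system_message, max_tokens, temperature, top_p):
#     prompt = build_prompt(system_message, history, message)
#     reply = ""
#     for chunk in llm(prompt, max_tokens=int(max_tokens), temperature=float(temperature),
#                      top_p=float(top_p), stop=["User:", "System:"], stream=True):
#         reply += chunk["choices"][0]["text"]
#         yield reply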

# 4. Gradio UI
chatbot = gr.ChatInterface(
    respond,
    type="messages",  # history comes in as [{"role": "...", "content": "..."}]
    additional_inputs=[
        gr.Textbox(
            value="You are a friendly chatbot.",
            label="System message",
        ),
        gr.Slider(
            minimum=1,
            maximum=2048,
            value=512,
            step=1,
            label="Max new tokens",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=4.0,
            value=0.7,
            step=0.1,
            label="Temperature",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

demo = chatbot

if __name__ == "__main__":
    demo.launch()
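
# To run this app outside the Space (assuming the two imports above are the
# only dependencies, which matches this file):
#   pip install gradio llama-cpp-python
#   python app.py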

# Old UI implementation, kept for reference
'''
import gradio as gr
from huggingface_hub import InferenceClient


def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken,
):
    """
    For more information on `huggingface_hub` Inference API support, please
    check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
    """
    client = InferenceClient(token=hf_token.token, model="meta-llama/Meta-Llama-3-8B")

    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream the completion, yielding the partial response as chunks arrive.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = chunk.choices
        token = ""
        if len(choices) and choices[0].delta.content:
            token = choices[0].delta.content
        response += token
        yield response


"""
For information on how to customize the ChatInterface, peruse the gradio docs:
https://www.gradio.app/docs/chatinterface
"""
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.LoginButton()
    chatbot.render()

if __name__ == "__main__":
    demo.launch()
'''