Update app.py
app.py CHANGED

@@ -2,7 +2,6 @@ import gradio as gr
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Only transformer-loadable models
 MODEL_OPTIONS = {
     "Llama-3.2-3B": "meta-llama/Llama-3.2-3B-Instruct",
     "Llama-3.2-1B": "meta-llama/Llama-3.2-1B-Instruct",
@@ -13,7 +12,7 @@ MODEL_OPTIONS = {
 }
 
 loaded = {}
-SYSTEM_PROMPT = "You are HugginGPT — a helpful assistant
+SYSTEM_PROMPT = "You are HugginGPT — a helpful assistant that remembers context."
 
 def load_model(model_key):
     model_id = MODEL_OPTIONS[model_key]
@@ -32,7 +31,6 @@ def load_model(model_key):
 def generate_response(message, history, model_choice):
     tokenizer, model = load_model(model_choice)
 
-    # build prompt with system + memory
     context = f"system: {SYSTEM_PROMPT}\n"
     if history:
         for u, a in history:
@@ -52,10 +50,17 @@ def generate_response(message, history, model_choice):
     return reply
 
 with gr.Blocks() as demo:
-    gr.
-
-
-
+    gr.Markdown("## HugginGPT")
+
+    model_selector = gr.Dropdown(
+        choices=list(MODEL_OPTIONS.keys()),
+        value="Llama-3.2-3B",
+        label="Select model"
+    )
+
+    chat = gr.ChatInterface(
+        fn=lambda message, history: generate_response(message, history, model_selector.value),
+        title="HugginGPT"
     )
 
 demo.launch()
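The diff collapses the body of generate_response between the history loop and return reply, so the step from the accumulated context string to reply is not shown here and stays elided. For orientation only, a generic transformers pattern for that step could look like the sketch below; every line of it is an assumption, not the commit's actual code, and max_new_tokens=256 is an arbitrary choice.

# Assumed sketch of the elided generation step; tokenizer, model, and
# context are the names already in scope in generate_response.
inputs = tokenizer(context, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=256)
# Decode only the tokens produced after the prompt.
reply = tokenizer.decode(output_ids[0][inputs["input_ids"].shape[-1]:],
                         skip_special_tokens=True)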
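One caveat in the new UI wiring: model_selector.value read inside the lambda is the dropdown's value at build time, so changing the dropdown in the browser will not change which model answers. A minimal sketch of an alternative, using gr.ChatInterface's additional_inputs parameter so the live selection reaches generate_response as its third argument:

# Sketch (not part of the commit): pass the dropdown component itself so
# Gradio feeds the user's current selection into generate_response.
chat = gr.ChatInterface(
    fn=generate_response,                # called as fn(message, history, model_choice)
    additional_inputs=[model_selector],  # live dropdown value -> model_choice
    title="HugginGPT"
)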