Update app.py
app.py CHANGED
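The commit adds progress prints around tokenizer/model loading and fills in the respond() history loop so previous chat turns are replayed into the prompt before the new message.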
@@ -2,9 +2,11 @@ import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer

-# Load model once
 model_name = "fla-hub/rwkv7-2.9B-world"
+
+print("Loading tokenizer...")
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+print("Loading model...")
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
     trust_remote_code=True,
@@ -12,12 +14,13 @@ model = AutoModelForCausalLM.from_pretrained(
     low_cpu_mem_usage=True,
     device_map="cpu"
 )
+print("Model loaded!")

 def respond(message, history, system_message, max_tokens, temperature, top_p):
     messages = [{"role": "system", "content": system_message}]
-    for
-    messages.append({"role": "user", "content":
-    messages.append({"role": "assistant", "content":
+    for human, assistant in history:
+        messages.append({"role": "user", "content": human})
+        messages.append({"role": "assistant", "content": assistant})
     messages.append({"role": "user", "content": message})

     text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
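The second hunk stops right after apply_chat_template, so the actual generation step is not visible in this diff. Below is a minimal sketch of how the rest of respond() and the Gradio wiring plausibly look; everything past the template call (the generate arguments, the decode slicing, and the gr.ChatInterface setup with its slider defaults) is an assumption, not part of this commit.

# Sketch of the assumed remainder of app.py; `tokenizer` and `model` are the
# globals loaded at the top of the file, as shown in the diff above.
import gradio as gr
import torch

def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for human, assistant in history:  # tuple-format history, as in the diff
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # Assumed continuation: tokenize, generate on CPU, decode only the new tokens.
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
        )
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

# Assumed UI wiring: respond's signature is the standard gr.ChatInterface shape,
# with the extra parameters supplied through additional_inputs.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(1, 2048, value=256, step=1, label="Max new tokens"),
        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)

if __name__ == "__main__":
    demo.launch()

One caveat: `for human, assistant in history` unpacks Gradio's legacy tuple-format history. On a ChatInterface created with type="messages", history already arrives as a list of {"role": ..., "content": ...} dicts and could be spliced into messages directly.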