Update app.py
app.py
@@ -26,22 +26,42 @@ def load_model():
 def respond(message, history):
     load_model()
 
-    prompt = ""
+    prompt = "<bos>"
     for user, bot in history:
-        prompt +=
-
+        prompt += (
+            "<start_of_turn>user\n"
+            f"{user}\n"
+            "<end_of_turn>\n"
+            "<start_of_turn>model\n"
+            f"{bot}\n"
+            "<end_of_turn>\n"
+        )
+
+    prompt += (
+        "<start_of_turn>user\n"
+        f"{message}\n"
+        "<end_of_turn>\n"
+        "<start_of_turn>model\n"
+    )
+
+    inputs = tokenizer(
+        prompt,
+        return_tensors="pt",
+        truncation=True,
+        max_length=2048
+    )
 
-    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
     outputs = model.generate(
         **inputs,
-        max_new_tokens=
-        do_sample=True,
+        max_new_tokens=200,
         temperature=0.7,
-        top_p=0.9
+        top_p=0.9,
+        do_sample=True,
+        eos_token_id=tokenizer.eos_token_id
     )
 
-
-    reply =
+    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    reply = decoded.split("<start_of_turn>model")[-1].strip()
     return reply
 
 gr.ChatInterface(
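For context, the hunk assumes module-level tokenizer and model globals that load_model() populates on first call, and the truncated gr.ChatInterface( at the bottom presumably wires respond into the chat UI. A minimal sketch of that scaffolding, assuming the standard Transformers loading API; the checkpoint ID is a placeholder, since the commit never names it:

# Sketch of the surrounding app.py scaffolding (not shown in this commit).
# MODEL_ID and the lazy-load pattern are assumptions for illustration only.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "google/gemma-2b-it"  # placeholder, chosen to match the prompt template

tokenizer = None
model = None

def load_model():
    # Load lazily on the first request so the Space starts quickly.
    global tokenizer, model
    if model is None:
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

Under this sketch the file would end with something like gr.ChatInterface(respond).launch().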
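One caveat on the new extraction step: if the tokenizer registers <start_of_turn> as a special token (the official Gemma tokenizers do), skip_special_tokens=True removes it from decoded, the split never matches, and reply ends up containing the entire conversation. A sketch of a variant that avoids the marker by decoding only the tokens generate() appended, slotting in for the decoded/reply lines above; this is an alternative, not what the commit does:

# Sketch, not part of this commit: decode only the newly generated tokens
# rather than splitting on a marker that decoding may already have stripped.
prompt_len = inputs["input_ids"].shape[-1]
reply = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()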