Commit 6774d89
Haofei Yu committed
1 Parent(s): 3bb389b

Bug/fix instruction showing (#8)

* add the issue and pr template
* only show generated conversation

app.py CHANGED
@@ -37,12 +37,13 @@ model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1
 model = PeftModel.from_pretrained(model, MODEL_NAME, config=config).to(COMPUTE_DTYPE).to("cuda")
 according_visible = True
 
+
 def introduction():
     with gr.Column(scale=2):
         gr.Image("images/sotopia.jpeg", elem_id="banner-image", show_label=False)
     with gr.Column(scale=5):
         gr.Markdown(
-            """# Sotopia-Pi Demo
+            """# Sotopia-Pi Demo Test
             **Chat with [Sotopia-Pi](https://github.com/sotopia-lab/sotopia-pi), brainstorm ideas, discuss your holiday plans, and more!**
 
             ➡️️ **Intended Use**: this demo is intended to showcase an early finetuning of [sotopia-pi-mistral-7b-BC_SR](https://huggingface.co/cmu-lti/sotopia-pi-mistral-7b-BC_SR)/
@@ -53,6 +54,7 @@ def introduction():
         """
         )
 
+
 def chat_accordion():
     with gr.Accordion("Parameters", open=False, visible=according_visible):
         temperature = gr.Slider(
@@ -63,7 +65,6 @@ def chat_accordion():
             interactive=True,
             label="Temperature",
         )
-
         max_tokens = gr.Slider(
             minimum=1024,
             maximum=4096,
@@ -72,13 +73,11 @@
             interactive=True,
             label="Max Tokens",
         )
-
         session_id = gr.Textbox(
             value=uuid4,
             interactive=False,
             visible=False,
         )
-
     with gr.Accordion("Instructions", open=False, visible=False):
         instructions = gr.Textbox(
             placeholder="The Instructions",
@@ -114,30 +113,33 @@
     return temperature, instructions, user_name, bot_name, session_id, max_tokens
 
 
-def
-    [... remaining 22 removed lines not recoverable from the extracted page ...]
+def run_chat(
+    message: str,
+    history,
+    instructions: str,
+    user_name: str,
+    bot_name: str,
+    temperature: float,
+    top_p: float,
+    max_tokens: int
+):
+    prompt = format_chat_prompt(message, history, instructions, user_name, bot_name)
+    input_tokens = tokenizer(prompt, return_tensors="pt", padding="do_not_pad").input_ids.to("cuda")
+    input_length = input_tokens.shape[-1]
+    output_tokens = model.generate(
+        input_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        max_length=max_tokens,
+        pad_token_id=tokenizer.eos_token_id,
+        num_return_sequences=1
+    )
+    output_tokens = output_tokens[:, input_length:]
+    text_output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
+    return text_output
 
+
+def chat_tab():
     with gr.Column():
         with gr.Row():
             (
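The substantive change behind "only show generated conversation" is in the new `run_chat`: `model.generate` returns the prompt ids followed by the newly generated ids, so the function now slices `output_tokens[:, input_length:]` before decoding and only the model's reply reaches the chat window. A minimal sketch of that pattern, assuming a generic Hugging Face causal LM ("gpt2" and the prompt text are placeholders, not the Space's model):

```python
# Sketch of the prompt-stripping pattern from the new run_chat. Assumes a
# placeholder model ("gpt2"); the Space itself uses a PEFT-tuned Mistral-7B.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "USER: Any plans for the holidays?\nBOT:"  # placeholder prompt
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
input_length = input_ids.shape[-1]

output_ids = model.generate(
    input_ids,
    max_length=input_length + 40,  # max_length counts the prompt tokens too
    pad_token_id=tokenizer.eos_token_id,
)

# Decoding output_ids[0] in full would echo the prompt back into the chat
# window, which is the bug this commit fixes. Slicing off the first
# input_length ids keeps only the generated reply.
reply = tokenizer.decode(output_ids[0, input_length:], skip_special_tokens=True)
print(reply)
```

One consequence worth noting: since `generate`'s `max_length` (unlike `max_new_tokens`) includes the prompt tokens, the `max_length=max_tokens` call in `run_chat` bounds the total sequence length, so a long conversation history eats into the reply budget.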
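A smaller detail preserved as context in the diff is `session_id = gr.Textbox(value=uuid4, ...)`: the function `uuid4` is passed uncalled. Gradio treats a callable default as a factory and invokes it on each page load, so every visitor gets a fresh session id, whereas `value=uuid4()` would be evaluated once at startup and shared by all sessions. A standalone sketch of the same idiom (illustrative component names, not the Space's code):

```python
# Callable default: Gradio calls uuid4 on every page load, giving each
# session its own hidden id. Illustrative sketch, not the Space's code.
from uuid import uuid4
import gradio as gr

with gr.Blocks() as demo:
    session_id = gr.Textbox(value=uuid4, interactive=False, visible=False)
    show = gr.Button("Show my session id")
    out = gr.Textbox(label="session id")
    show.click(lambda sid: sid, inputs=session_id, outputs=out)

# demo.launch()  # each browser tab would show a different id
```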