Jn-Huang
committed on
Commit
·
d298fc0
1
Parent(s):
77a4a60
Update default prompt and fix chat interface inputs
Browse files- app.py +1 -1
- app_vllm.py +3 -3
app.py
CHANGED
|
@@ -103,7 +103,7 @@ def generate_response(messages, max_new_tokens=512, temperature=0.7) -> str:
|
|
| 103 |
generated_text = tokenizer.decode(out[0][input_length:], skip_special_tokens=True)
|
| 104 |
return generated_text.strip()
|
| 105 |
|
| 106 |
-
def chat_fn(message, history, system_prompt, max_new_tokens, temperature):
|
| 107 |
# Build conversation in Llama 3.1 chat format
|
| 108 |
messages = []
|
| 109 |
|
|
|
|
| 103 |
generated_text = tokenizer.decode(out[0][input_length:], skip_special_tokens=True)
|
| 104 |
return generated_text.strip()
|
| 105 |
|
| 106 |
+
def chat_fn(message, history, system_prompt, _prompt_reference, max_new_tokens, temperature):
|
| 107 |
# Build conversation in Llama 3.1 chat format
|
| 108 |
messages = []
|
| 109 |
|
app_vllm.py
CHANGED
|
@@ -88,7 +88,7 @@ def generate_response(messages, max_new_tokens=512, temperature=0.7) -> str:
|
|
| 88 |
|
| 89 |
return outputs[0].outputs[0].text
|
| 90 |
|
| 91 |
-
def chat_fn(message, history, system_prompt, max_new_tokens, temperature):
|
| 92 |
# Build conversation in Llama 3.1 chat format
|
| 93 |
messages = []
|
| 94 |
|
|
@@ -117,8 +117,8 @@ def chat_fn(message, history, system_prompt, max_new_tokens, temperature):
|
|
| 117 |
return reply
|
| 118 |
|
| 119 |
demo = gr.ChatInterface(
|
| 120 |
-
fn=lambda message, history, system_prompt, max_new_tokens, temperature:
|
| 121 |
-
chat_fn(message, history, system_prompt, max_new_tokens, temperature),
|
| 122 |
additional_inputs=[
|
| 123 |
gr.Textbox(
|
| 124 |
label="System prompt (optional)",
|
|
|
|
| 88 |
|
| 89 |
return outputs[0].outputs[0].text
|
| 90 |
|
| 91 |
+
def chat_fn(message, history, system_prompt, _prompt_reference, max_new_tokens, temperature):
|
| 92 |
# Build conversation in Llama 3.1 chat format
|
| 93 |
messages = []
|
| 94 |
|
|
|
|
| 117 |
return reply
|
| 118 |
|
| 119 |
demo = gr.ChatInterface(
|
| 120 |
+
fn=lambda message, history, system_prompt, prompt_reference, max_new_tokens, temperature:
|
| 121 |
+
chat_fn(message, history, system_prompt, prompt_reference, max_new_tokens, temperature),
|
| 122 |
additional_inputs=[
|
| 123 |
gr.Textbox(
|
| 124 |
label="System prompt (optional)",
|