Spaces · Runtime error
Commit 3c712c1 · Parent: c4b7399 · "test"

app.py CHANGED
@@ -12,6 +12,20 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
 
 template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
 
+def gpt3(prompt):
+    response = openai.ChatCompletion.create(
+        model='gpt3.5', messages=[{"role": "user", "content": prompt}])
+    return response['choices'][0]['message']['content']
+
+def respond(message, chat_history):
+    input_ids = tokenizer.encode(message, return_tensors="pt")
+    output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
+    bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+    chat_history.append((message, bot_message))
+    time.sleep(2)
+    return "", chat_history
+
 def interface():
     gr.Markdown(" Description ")
 
@@ -80,20 +94,6 @@ with gr.Blocks() as demo:
         interface()
     with gr.Tab("T-units"):
         interface()
-
-def gpt3(prompt):
-    response = openai.ChatCompletion.create(
-        model='gpt3.5', messages=[{"role": "user", "content": prompt}])
-    return response['choices'][0]['message']['content']
-
-def respond(message, chat_history):
-    input_ids = tokenizer.encode(message, return_tensors="pt")
-    output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
-    bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-
-    chat_history.append((message, bot_message))
-    time.sleep(2)
-    return "", chat_history
 
 prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
 prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
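
The moved gpt3() helper passes model='gpt3.5', which is not a valid OpenAI model identifier and would be rejected by the API at call time; the chat model's actual id is gpt-3.5-turbo. A minimal corrected sketch, assuming the legacy openai<1.0 SDK that exposes ChatCompletion (matching the call in the commit) and an API key supplied via the OPENAI_API_KEY environment variable, which the diff never configures:

import os
import openai

# Legacy openai<1.0 SDK, matching the ChatCompletion call in the commit.
openai.api_key = os.environ["OPENAI_API_KEY"]

def gpt3(prompt):
    # "gpt-3.5-turbo" is the real chat model id; "gpt3.5" is not.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
    )
    return response["choices"][0]["message"]["content"]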
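
Finally, the two prompt_CHUNK.submit() calls attach two listeners to the same textbox event, each running a full generation and each returning "" to clear the box. If the intent is to update both chatbots on one submission, a single handler makes the ordering explicit and clears the textbox exactly once; a sketch, assuming the vicuna_S1_chatbot_CHUNK and vicuna_S2_chatbot_CHUNK components defined elsewhere in app.py:

def respond_both(message, history_s1, history_s2):
    # Reuse respond() once per chatbot; its first return value ("")
    # is what clears the textbox, so return it only once here.
    _, history_s1 = respond(message, history_s1)
    _, history_s2 = respond(message, history_s2)
    return "", history_s1, history_s2

prompt_CHUNK.submit(
    respond_both,
    [prompt_CHUNK, vicuna_S1_chatbot_CHUNK, vicuna_S2_chatbot_CHUNK],
    [prompt_CHUNK, vicuna_S1_chatbot_CHUNK, vicuna_S2_chatbot_CHUNK],
)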