Spaces: Runtime error

Commit 738a5f6 · Parent(s): 7045d9d
Commit message: test

app.py CHANGED
@@ -1,103 +1,109 @@

Old version (removed lines marked "-"; "…" marks removed lines whose content the diff view does not render):

 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import time

 # Load the Vicuna 7B v1.3 LMSys model and tokenizer
 model_name = "lmsys/vicuna-7b-v1.3"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)

-template_single = '''

-    …

 with gr.Blocks() as demo:
     gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")


-    gr.

-    …
-    entity_dropdown = gr.Dropdown(linguistic_entities, label="Select Linguistic Entity")
-    prompt_POS = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
-    submit_btn = gr.Button(label="Submit")

-    …
-    clear = gr.ClearButton([prompt_POS, vicuna_S3_chatbot_POS])


-    …
-    # gr.Markdown("Strategy 1 QA")
-    # with gr.Row():
-    # vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
-    # llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
-    # gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-    # clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
-    # gr.Markdown("Strategy 2 Instruction")
-    # with gr.Row():
-    # vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
-    # llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
-    # gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-    # clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
-    # gr.Markdown("Strategy 3 Structured Prompting")
-    # with gr.Row():
-    # vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
-    # llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
-    # gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-    # clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

-    …
-    # output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
-    # bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-    …
-    # chat_history.append((message, bot_message))
-    # time.sleep(2)
-    # return "", chat_history
-    …
-    def respond_entities(entity, message, chat_history):
-        prompt = template_single.format(entity, message)
-        input_ids = tokenizer.encode(prompt, return_tensors="pt")
         output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
         bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)

         chat_history.append((message, bot_message))
         time.sleep(2)
-        return
-    …
-    submit_btn.click(respond_entities, [entity_dropdown, prompt_POS, vicuna_S1_chatbot_POS], [entity_dropdown, prompt_POS, vicuna_S1_chatbot_POS])
-    submit_btn.click(respond_entities, [entity_dropdown, prompt_POS, vicuna_S2_chatbot_POS], [entity_dropdown, prompt_POS, vicuna_S2_chatbot_POS])
-    submit_btn.click(respond_entities, [entity_dropdown, prompt_POS, vicuna_S3_chatbot_POS], [entity_dropdown, prompt_POS, vicuna_S3_chatbot_POS])

-    …

 demo.launch()
New version (added lines marked "+"):

 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import time
+import openai
+
+openai.api_key = "OPENAI_API_KEY"

 # Load the Vicuna 7B v1.3 LMSys model and tokenizer
 model_name = "lmsys/vicuna-7b-v1.3"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)

+template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
+
+Noun
+Determiner
+Noun phrase
+Verb phrase
+Dependent Clause
+T-units
+
+def interface():
+    gr.Markdown(" Description ")

+    prompt_POS = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
+    openai_key = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")
+
+    gr.Markdown("Strategy 1 QA-Based Prompting")
+    with gr.Row():
+        vicuna_S1_chatbot_POS = gr.Chatbot(label="vicuna-7b")
+        llama_S1_chatbot_POS = gr.Chatbot(label="llama-7b")
+        gpt_S1_chatbot_POS = gr.Chatbot(label="gpt-3.5")
+    clear = gr.ClearButton([prompt_POS, vicuna_S1_chatbot_POS])
+    gr.Markdown("Strategy 2 Instruction-Based Prompting")
+    with gr.Row():
+        vicuna_S2_chatbot_POS = gr.Chatbot(label="vicuna-7b")
+        llama_S2_chatbot_POS = gr.Chatbot(label="llama-7b")
+        gpt_S2_chatbot_POS = gr.Chatbot(label="gpt-3.5")
+    clear = gr.ClearButton([prompt_POS, vicuna_S2_chatbot_POS])
+    gr.Markdown("Strategy 3 Structured Prompting")
+    with gr.Row():
+        vicuna_S3_chatbot_POS = gr.Chatbot(label="vicuna-7b")
+        llama_S3_chatbot_POS = gr.Chatbot(label="llama-7b")
+        gpt_S3_chatbot_POS = gr.Chatbot(label="gpt-3.5")
+    clear = gr.ClearButton([prompt_POS, vicuna_S3_chatbot_POS])
+
+    prompt_POS.submit(respond, [prompt_POS, vicuna_S1_chatbot_POS], [prompt_POS, vicuna_S1_chatbot_POS])
+    prompt_POS.submit(respond, [prompt_POS, vicuna_S2_chatbot_POS], [prompt_POS, vicuna_S2_chatbot_POS])
+    prompt_POS.submit(respond, [prompt_POS, vicuna_S3_chatbot_POS], [prompt_POS, vicuna_S3_chatbot_POS])
+

 with gr.Blocks() as demo:
     gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")

+    with gr.Tab("Noun"):
+        interface()

+    with gr.Tab("Determiner"):
+        gr.Markdown(" Description ")

+        prompt_CHUNK = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")

+        gr.Markdown("Strategy 1 QA")
+        with gr.Row():
+            vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
+            llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
+            gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
+        clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
+        gr.Markdown("Strategy 2 Instruction")
+        with gr.Row():
+            vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
+            llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
+            gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
+        clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
+        gr.Markdown("Strategy 3 Structured Prompting")
+        with gr.Row():
+            vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
+            llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
+            gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
+        clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

+    with gr.Tab("Noun phrase"):
+        interface()
+    with gr.Tab("Verb phrase"):
+        interface()
+    with gr.Tab("Dependent clause"):
+        interface()
+    with gr.Tab("T-units"):
+        interface()

+    def gpt3(prompt):
+        response = openai.ChatCompletion.create(
+            model='gpt3.5', messages=[{"role": "user", "content": prompt}])
+        return response['choices'][0]['message']['content']

+    def respond(message, chat_history):
+        input_ids = tokenizer.encode(message, return_tensors="pt")
         output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
         bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)

         chat_history.append((message, bot_message))
         time.sleep(2)
+        return "", chat_history

+    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
+    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
+    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

 demo.launch()
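Note on the added entity names: the new version leaves Noun, Determiner, Noun phrase, Verb phrase, Dependent Clause, and T-units as bare words at module level. That is not valid Python ("Noun phrase" on its own line is a syntax error), so app.py would fail to load at all, which is consistent with the Space's "Runtime error" status. The removed code built its dropdown from a linguistic_entities variable, so these names were presumably meant to be collected into a list. A minimal sketch of that assumption (the variable name comes from the removed gr.Dropdown call and does not appear in the new file):

# Hypothetical reconstruction of the entity list the removed dropdown expected.
linguistic_entities = [
    "Noun",
    "Determiner",
    "Noun phrase",
    "Verb phrase",
    "Dependent Clause",
    "T-units",
]

With a list like this in place, the hard-coded gr.Tab labels in the Blocks section could also be generated from the same values.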
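Note on the new gpt3() helper: it passes model='gpt3.5' to openai.ChatCompletion.create, but that is not a valid OpenAI model identifier (the 3.5-series chat model is "gpt-3.5-turbo"), and the module-level openai.api_key = "OPENAI_API_KEY" placeholder is never replaced by the key typed into the openai_key textbox. A corrected sketch of the helper, assuming the legacy openai<1.0 ChatCompletion interface this commit already uses:

def gpt3(prompt):
    # "gpt-3.5-turbo" is the chat-completion model id; "gpt3.5" is rejected by the API.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
    )
    return response["choices"][0]["message"]["content"]

Feeding the openai_key textbox value into openai.api_key (or passing it per request) would still be a separate change; the sketch does not cover that.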
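Note on handler ordering: interface() registers respond via prompt_POS.submit(...), but interface() is called while the gr.Blocks context is being built, before the line that defines respond has executed, so the first gr.Tab("Noun") would raise a NameError even once the syntax problem above is fixed. Defining the handlers before the UI code avoids this; a sketch of that ordering, with the bodies taken unchanged from the commit:

def respond(message, chat_history):
    # Same body as in the commit, just defined before any UI code runs.
    input_ids = tokenizer.encode(message, return_tensors="pt")
    output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
    bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    chat_history.append((message, bot_message))
    time.sleep(2)
    return "", chat_history

def interface():
    ...  # widgets exactly as in the commit; respond is already bound when submit() registers it

with gr.Blocks() as demo:
    gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
    with gr.Tab("Noun"):
        interface()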