Fixed
app.py CHANGED
@@ -20,6 +20,8 @@ def query_model(model_choice, user_question, max_length, preprompt, api_key=None
         response = model_opt(full_prompt, max_length=max_length, num_return_sequences=1)
         return response[0]['generated_text']
     elif model_choice == "OpenAI":
+        if not api_key:
+            return "API key is required for OpenAI."
         client = OpenAI(api_key=api_key)
         completion = client.chat.completions.create(
             model="gpt-3.5-turbo",
@@ -30,19 +32,23 @@ def query_model(model_choice, user_question, max_length, preprompt, api_key=None
         )
         return completion.choices[0].message.content
     elif model_choice == "Anthropic":
+        if not api_key:
+            return "API key is required for Anthropic."
         client = anthropic.Anthropic(api_key=api_key)
         message = client.messages.create(
             model="claude-2.1",
             max_tokens=1024,
             messages=[
-                {"role": "user", "content": full_prompt or "Hello"}
+                {"role": "user", "content": full_prompt or "Hello"}
             ]
         )
         return message.content[0].text
     elif model_choice == "Gemini":
+        if not api_key:
+            return "API key is required for Gemini."
         genai.configure(api_key=api_key)
         model = genai.GenerativeModel('gemini-pro')
-        response = model.generate_content(full_prompt or "Hello")
+        response = model.generate_content(full_prompt or "Hello")
         return response.text
     else:
         return "Invalid model selected."
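
Note on the guard clauses added above: all three API branches now follow the same shape, check the key before constructing a client and fail soft with a string the UI can render. A minimal standalone sketch of that pattern, with hypothetical stub handlers in place of the real SDK calls:

# Sketch of the validate-then-dispatch pattern; the lambda handlers are
# hypothetical stand-ins, not the Space's actual SDK calls.
def query_api_model(provider, prompt, api_key=None):
    if not api_key:
        # Fail soft: Gradio renders whatever string comes back, so a
        # readable message beats an uncaught exception.
        return f"API key is required for {provider}."
    handlers = {
        "OpenAI": lambda p: f"[openai stub] {p}",
        "Anthropic": lambda p: f"[anthropic stub] {p}",
        "Gemini": lambda p: f"[gemini stub] {p}",
    }
    handler = handlers.get(provider)
    # Mirror the diff's `or "Hello"` fallback so an empty question still works.
    return handler(prompt or "Hello") if handler else "Invalid model selected."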
@@ -60,11 +66,6 @@ def compete_models(user_question, max_length, preprompt, selected_models, api_ke
 
     with concurrent.futures.ThreadPoolExecutor() as executor:
         future_to_model = {
-            executor.submit(
-                model_map[model][0],
-                full_prompt,
-                max_length
-            ) if model in ["GPT-2", "OPT-350M"] else
             executor.submit(
                 model_map[model][0],
                 model,
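
The hunk above drops the special-cased submit for the local models, so every selected model now goes through one uniform call signature. A self-contained sketch of that fan-out shape (the stub handler stands in for the Space's model_map entries):

import concurrent.futures

def stub_handler(model, prompt):
    # Illustrative stand-in: each model_map entry takes (model, prompt).
    return f"{model} -> {prompt}"

model_map = {"GPT-2": (stub_handler,), "OpenAI": (stub_handler,)}

def fan_out(prompt, selected_models):
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # One submit signature for every model keeps the comprehension flat,
        # which is what the simplification above achieves.
        future_to_model = {
            executor.submit(model_map[m][0], m, prompt): m
            for m in selected_models
        }
        return {future_to_model[f]: f.result()
                for f in concurrent.futures.as_completed(future_to_model)}

print(fan_out("hello", ["GPT-2", "OpenAI"]))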
@@ -128,13 +129,24 @@ with gr.Blocks() as demo:
             outputs=[api_key, api_key_info]
         )
 
-
+        def query_single_model(model_choice, user_question, max_length, preprompt, api_key, history):
+            if not model_choice:
+                response = "Please select a model."
+            else:
+                response = query_model(model_choice, user_question, max_length, preprompt, api_key)
+            history.append((user_question, response))
+            return history, ""
+
+        query_button.click(
+            query_single_model,
+            inputs=[model_choice, user_question, max_length, preprompt, api_key, conversation_history],
+            outputs=[conversation_history, user_question]
+        )
 
         with gr.TabItem("Compete Mode"):
-
-            compete_max_length = gr.Slider(minimum=10, maximum=500, value=100, step=10, label="Max Length")
+            compete_max_length = gr.Slider(minimum=10, maximum=500, value=100, step=10, label="Max Length")
             compete_preprompt = gr.Textbox(label="Preprompt (optional)", lines=2, placeholder="Enter a preprompt here...")
-            compete_question = gr.Textbox(label="Enter your question", lines=2, placeholder="Type your question here...")
+            compete_question = gr.Textbox(label="Enter your question", lines=2, placeholder="Type your question here...")
 
             with gr.Row():
                 with gr.Column():
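
The new query_single_model handler assumes conversation_history holds a list of (question, answer) pairs, the shape a gr.Chatbot-style component accepts in older Gradio releases. A minimal, runnable sketch of the same click wiring (component names here are illustrative):

import gradio as gr

def respond(question, history):
    answer = f"echo: {question}"               # stand-in for query_model
    history = history + [(question, answer)]   # list of (user, bot) pairs
    return history, ""                         # second output clears the textbox

with gr.Blocks() as demo:
    chat = gr.Chatbot()
    box = gr.Textbox(label="Enter your question")
    btn = gr.Button("Query")
    # Same pattern as the diff: the history component is both an input and
    # an output, and the question box is reset on return.
    btn.click(respond, inputs=[box, chat], outputs=[chat, box])

demo.launch()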
@@ -165,8 +177,8 @@ with gr.Blocks() as demo:
                 selected_models = compete_models_local + compete_models_api
                 results = compete_models(compete_question, compete_max_length, compete_preprompt, selected_models, api_key_openai, api_key_anthropic, api_key_gemini)
                 new_row = [compete_question] + results
-                history = history.
-                history.append(new_row)
+                history = history.to_dict('records') if history is not None else []
+                history.append(dict(zip(["Question", "GPT-2", "OPT-350M", "OpenAI", "Anthropic", "Gemini"], new_row)))
                 return history, ""
 
             compete_button.click(
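
The history handling in the last hunk assumes compete mode's history arrives as a pandas DataFrame (the default value type of a gr.Dataframe component), or None before the first run. A sketch of that records round-trip, using the same column order as the diff:

import pandas as pd

COLUMNS = ["Question", "GPT-2", "OPT-350M", "OpenAI", "Anthropic", "Gemini"]

def append_row(history, new_row):
    # Convert the DataFrame to a list of dicts (or start fresh), append the
    # new row keyed by column name, and return; Gradio rebuilds the table.
    records = history.to_dict('records') if history is not None else []
    records.append(dict(zip(COLUMNS, new_row)))
    return records

df = pd.DataFrame([["q1", "a", "b", "c", "d", "e"]], columns=COLUMNS)
print(append_row(df, ["q2", "a2", "b2", "c2", "d2", "e2"]))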
|