Alihamas212 commited on
Commit
45879f1
·
verified ·
1 Parent(s): 7692494

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -0
app.py CHANGED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ from openrouter import Client
4
+
5
+ # --- Setup the OpenRouter Client ---
6
+ # IMPORTANT: This gets the API key from Hugging Face "Secrets"
7
+ # DO NOT paste your key here.
8
+ api_key = os.environ.get("OPENROUTER_API_KEY")
9
+
10
+ # Check if the API key is available
11
+ if not api_key:
12
+ # If running locally, you might use a different way to load keys
13
+ # For this example, we'll raise an error if it's not set in the HF Space
14
+ print("OPENROUTER_API_KEY not found in environment secrets!")
15
+ # We can still run the app but show an error message
16
+ client = None
17
+ else:
18
+ client = Client(api_key=api_key)
19
+
20
+ # --- The Core Function ---
21
+ # This is what Gradio will run when the user clicks "Submit"
22
def compare_models(user_prompt):
    """Send one prompt to three OpenRouter models and return their replies.

    Args:
        user_prompt: The text the user typed into the Gradio textbox.

    Returns:
        A 3-tuple of strings (one per model, in fixed order). Each element
        is either the model's reply or an inline error message — a failure
        on one model never aborts the other calls.
    """
    # `client` is configured at module import; None means the API key secret
    # was missing, so every output slot gets the same configuration error.
    if not client:
        err = "Error: API Key not configured."
        return err, err, err

    def _ask(model):
        # One chat completion per model; exceptions are converted to an
        # inline error string so the other two results still render.
        try:
            response = client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": user_prompt}]
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"Error calling {model}: {e}"

    # The models to compare (order matches the three output textboxes).
    model_1 = "mistralai/mistral-7b-instruct"   # Fast and cheap
    model_2 = "meta-llama/llama-3-8b-instruct"  # Good all-rounder
    model_3 = "google/gemini-flash-1.5"         # Fast and capable

    return _ask(model_1), _ask(model_2), _ask(model_3)
62
+
63
# --- Create the Gradio Interface ---
# One prompt in, three model answers out, rendered side-by-side.
prompt_box = gr.Textbox(label="Enter your prompt", lines=3)
result_boxes = [
    gr.Textbox(label="Model 1: Mistral 7B"),
    gr.Textbox(label="Model 2: LLaMA 3 8B"),
    gr.Textbox(label="Model 3: Gemini 1.5 Flash"),
]

demo = gr.Interface(
    fn=compare_models,
    inputs=prompt_box,
    outputs=result_boxes,
    title="🤖 OpenRouter Model Arena",
    description="Enter one prompt and see the results from three different AI models side-by-side. (Powered by OpenRouter)"
)

# Start the web app.
demo.launch()