WeVi committed on
Commit
06653ca
·
verified ·
1 Parent(s): e91a2de

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +291 -0
app.py ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File: README.md
2
+
3
+ ## Side‑by‑Side AI — One prompt, answers from many models
4
+
5
+ This Space shows Grok 4, ChatGPT (OpenAI), Gemini 2.5 Pro, DeepSeek, Claude Sonnet 4, and Perplexity Sonar Pro side‑by‑side in a single chat window. No tab‑hopping. You enter one prompt and compare answers instantly.
6
+
7
+ ### Live features
8
+ - One prompt → parallel calls to selected providers
9
+ - Models shown in columns, updated as responses arrive
10
+ - Optional system prompt, temperature, and per‑provider model names
11
+ - Works with API keys set as **Space Secrets** (recommended) or typed at runtime
12
+ - Clean, mobile‑friendly Gradio UI
13
+
14
+ ### Supported providers
15
+ - **OpenAI** (e.g., `gpt-4o`, `o4-mini`, etc.)
16
+ - **Anthropic** (Claude Sonnet 3.7/4, etc.)
17
+ - **Google Gemini** (e.g., `gemini-2.0-pro`, `gemini-2.0-flash`)
18
+ - **xAI Grok** (via OpenAI‑compatible API, e.g., `grok-2-latest`)
19
+ - **DeepSeek** (`deepseek-chat`) via OpenAI‑compatible API
20
+ - **Perplexity** (`sonar-pro`) via OpenAI‑compatible API
21
+
22
+ > You can enable or disable any provider from the sidebar.
23
+
24
+ ### How to run on Hugging Face Spaces
25
+ 1. Create a **Gradio** Space.
26
+ 2. Add these files (`app.py`, `requirements.txt`, optional `Procfile`).
27
+ 3. In **Settings → Secrets**, add any keys you have (you don’t need all of them):
28
+ - `OPENAI_API_KEY`
29
+ - `ANTHROPIC_API_KEY`
30
+ - `GEMINI_API_KEY`
31
+ - `XAI_API_KEY` (Grok)
32
+ - `DEEPSEEK_API_KEY`
33
+ - `PPLX_API_KEY` (Perplexity)
34
+ 4. Deploy. The app auto‑detects which providers have keys.
35
+
36
+ ### Notes
37
+ - Some providers have usage limits and content rules. Your account is responsible for API costs.
38
+ - If a provider is missing a key, its column will show a helpful message instead of failing.
39
+ - Perplexity supports retrieval/web by default; this app sends plain prompts. You can add custom params in code if you need web search.
40
+
41
+ ---
42
+
43
+ # File: requirements.txt
44
+
45
+ gradio>=4.40.0
46
+ openai>=1.40.0
47
+ anthropic>=0.34.2
48
+ google-generativeai>=0.7.2
49
+ aiohttp>=3.9.5
50
+ pydantic>=2.8.2
51
+ uvicorn==0.30.6
52
+
53
+ ---
54
+
55
+ # File: app.py
56
+
57
+ import os
58
+ import asyncio
59
+ from typing import Dict, Any, Optional
60
+
61
+ import gradio as gr
62
+
63
+ # OpenAI‑compatible SDK (also used for Grok, DeepSeek, Perplexity)
64
+ from openai import OpenAI
65
+
66
+ # Anthropic SDK
67
+ import anthropic
68
+
69
+ # Google Gemini SDK
70
+ import google.generativeai as genai
71
+
72
+ DEFAULT_SYSTEM_PROMPT = "You are a helpful, concise assistant."
73
+
74
+ # -------- Provider adapters -------- #
75
+
76
async def call_openai(prompt: str, system: str, model: str, api_key: Optional[str]) -> str:
    """Query OpenAI's chat-completions API and return the answer text.

    Args:
        prompt: The user message.
        system: System prompt; falls back to DEFAULT_SYSTEM_PROMPT when empty.
        model: Model name; falls back to "gpt-4o-mini" when empty.
        api_key: Explicit key; falls back to the OPENAI_API_KEY env var.

    Returns:
        The model's reply, or a human-readable warning/error string.
        Never raises: SDK/network errors are caught and reported as text.
    """
    key = api_key or os.getenv("OPENAI_API_KEY")
    if not key:
        return "⚠️ OpenAI key not set. Add OPENAI_API_KEY in Secrets or enter in sidebar."

    def _invoke() -> str:
        # The OpenAI SDK is synchronous; this runs in a worker thread below.
        client = OpenAI(api_key=key)
        resp = client.chat.completions.create(
            model=model or "gpt-4o-mini",
            messages=[{"role": "system", "content": system or DEFAULT_SYSTEM_PROMPT},
                      {"role": "user", "content": prompt}],
            temperature=0.7,
        )
        return resp.choices[0].message.content

    try:
        # Off-load the blocking call so asyncio.gather() in run_all() can
        # actually overlap providers; previously the coroutine blocked the
        # event loop and the "parallel" calls ran one after another.
        # Client construction is now also inside the try, so a bad key/config
        # error surfaces as a message instead of an unhandled exception.
        return await asyncio.to_thread(_invoke)
    except Exception as e:
        return f"❌ OpenAI error: {e}"
91
+
92
async def call_openai_compatible(prompt: str, system: str, model: str, base_url: str, key_env: str, fallback_name: str, api_key_override: Optional[str] = None) -> str:
    """Query any OpenAI-compatible endpoint (Grok, DeepSeek, Perplexity).

    Args:
        prompt: The user message.
        system: System prompt; falls back to DEFAULT_SYSTEM_PROMPT when empty.
        model: Model name passed through verbatim (wrappers supply defaults).
        base_url: Provider's OpenAI-compatible API base URL.
        key_env: Environment variable holding the provider's API key.
        fallback_name: Display name used in warning/error messages.
        api_key_override: Explicit key; falls back to the key_env env var.

    Returns:
        The model's reply, or a human-readable warning/error string.
        Never raises: SDK/network errors are caught and reported as text.
    """
    key = api_key_override or os.getenv(key_env)
    if not key:
        return f"⚠️ {fallback_name} key not set. Add {key_env} in Secrets or enter in sidebar."

    def _invoke() -> str:
        # Synchronous SDK call; executed in a worker thread below.
        client = OpenAI(api_key=key, base_url=base_url)
        resp = client.chat.completions.create(
            model=model,
            messages=[{"role": "system", "content": system or DEFAULT_SYSTEM_PROMPT},
                      {"role": "user", "content": prompt}],
        )
        return resp.choices[0].message.content

    try:
        # to_thread keeps the event loop free so providers overlap in run_all().
        return await asyncio.to_thread(_invoke)
    except Exception as e:
        return f"❌ {fallback_name} error: {e}"
106
+
107
async def call_anthropic(prompt: str, system: str, model: str, api_key: Optional[str]) -> str:
    """Query Anthropic's Messages API and return the answer text.

    Args:
        prompt: The user message.
        system: System prompt; falls back to DEFAULT_SYSTEM_PROMPT when empty.
        model: Model name; falls back to "claude-3-5-sonnet-latest" when empty.
        api_key: Explicit key; falls back to the ANTHROPIC_API_KEY env var.

    Returns:
        The concatenated text blocks of the reply, or a warning/error string.
        Never raises: SDK/network errors are caught and reported as text.
    """
    key = api_key or os.getenv("ANTHROPIC_API_KEY")
    if not key:
        return "⚠️ Anthropic key not set. Add ANTHROPIC_API_KEY in Secrets or enter in sidebar."

    def _invoke() -> str:
        # Synchronous SDK call; executed in a worker thread below.
        client = anthropic.Anthropic(api_key=key)
        msg = client.messages.create(
            model=model or "claude-3-5-sonnet-latest",
            system=system or DEFAULT_SYSTEM_PROMPT,
            max_tokens=1200,
            messages=[{"role": "user", "content": prompt}],
        )
        # A reply may contain non-text blocks (e.g. tool use); keep text only.
        return "".join([b.text for b in msg.content if getattr(b, 'type', '') == 'text'])

    try:
        # to_thread keeps the event loop free so providers overlap in run_all().
        return await asyncio.to_thread(_invoke)
    except Exception as e:
        return f"❌ Anthropic error: {e}"
122
+
123
async def call_gemini(prompt: str, system: str, model: str, api_key: Optional[str]) -> str:
    """Query Google Gemini and return the answer text.

    Args:
        prompt: The user message.
        system: System instruction; falls back to DEFAULT_SYSTEM_PROMPT when empty.
        model: Model name; falls back to "gemini-2.0-pro" when empty.
        api_key: Explicit key; falls back to the GEMINI_API_KEY env var.

    Returns:
        The reply text, or a human-readable warning/error string.
        Never raises: SDK/network errors are caught and reported as text.

    NOTE(review): genai.configure() sets a process-global key, so concurrent
    calls with different keys could interfere — confirm if that matters here.
    """
    key = api_key or os.getenv("GEMINI_API_KEY")
    if not key:
        return "⚠️ Gemini key not set. Add GEMINI_API_KEY in Secrets or enter in sidebar."

    def _invoke() -> str:
        # Synchronous SDK calls; executed in a worker thread below.
        genai.configure(api_key=key)
        mname = model or "gemini-2.0-pro"
        model_obj = genai.GenerativeModel(mname, system_instruction=(system or DEFAULT_SYSTEM_PROMPT))
        resp = model_obj.generate_content(prompt)
        return resp.text

    try:
        # to_thread keeps the event loop free so providers overlap in run_all().
        return await asyncio.to_thread(_invoke)
    except Exception as e:
        return f"❌ Gemini error: {e}"
135
+
136
+ # Wrappers for specific OpenAI‑compatible providers
137
async def call_grok(prompt: str, system: str, model: str, api_key: Optional[str]) -> str:
    """Delegate to the OpenAI-compatible adapter configured for xAI Grok."""
    chosen_model = model or "grok-2-latest"
    return await call_openai_compatible(
        prompt,
        system,
        chosen_model,
        "https://api.x.ai/v1",
        "XAI_API_KEY",
        "Grok",
        api_key,
    )
139
+
140
async def call_deepseek(prompt: str, system: str, model: str, api_key: Optional[str]) -> str:
    """Delegate to the OpenAI-compatible adapter configured for DeepSeek."""
    chosen_model = model or "deepseek-chat"
    return await call_openai_compatible(
        prompt,
        system,
        chosen_model,
        "https://api.deepseek.com",
        "DEEPSEEK_API_KEY",
        "DeepSeek",
        api_key,
    )
142
+
143
async def call_perplexity(prompt: str, system: str, model: str, api_key: Optional[str]) -> str:
    """Delegate to the OpenAI-compatible adapter configured for Perplexity.

    Perplexity exposes an OpenAI-compatible API; model names include
    sonar, sonar-pro, sonar-reasoning, etc.
    """
    chosen_model = model or "sonar-pro"
    return await call_openai_compatible(
        prompt,
        system,
        chosen_model,
        "https://api.perplexity.ai",
        "PPLX_API_KEY",
        "Perplexity",
        api_key,
    )
146
+
147
# Registry mapping UI display names to their async provider adapters.
# NOTE(review): run_all() below appears to call the adapter functions
# directly rather than looking them up here — confirm whether this
# registry is still needed or is dead code.
PROVIDERS = {
    "OpenAI": call_openai,
    "Claude": call_anthropic,
    "Gemini": call_gemini,
    "Grok": call_grok,
    "DeepSeek": call_deepseek,
    "Perplexity": call_perplexity,
}
155
+
156
+ # -------- App logic -------- #
157
+
158
+ async def run_all(prompt: str,
159
+ system: str,
160
+ temperature: float,
161
+ use_openai: bool, use_claude: bool, use_gemini: bool, use_grok: bool, use_deepseek: bool, use_perplexity: bool,
162
+ openai_model: str, claude_model: str, gemini_model: str, grok_model: str, deepseek_model: str, pplx_model: str,
163
+ openai_key: str, claude_key: str, gemini_key: str, grok_key: str, deepseek_key: str, pplx_key: str
164
+ ) -> Dict[str, Any]:
165
+
166
+ if not prompt or not prompt.strip():
167
+ return {"status": "Please enter a prompt."}
168
+
169
+ tasks = []
170
+ results_map = {}
171
+
172
+ async def add_task(flag: bool, name: str, func, model: str, key: str):
173
+ if flag:
174
+ tasks.append(asyncio.create_task(func(prompt, system, model, key or None)))
175
+ results_map[name] = None
176
+
177
+ await add_task(use_openai, "OpenAI", call_openai, openai_model, openai_key)
178
+ await add_task(use_claude, "Claude", call_anthropic, claude_model, claude_key)
179
+ await add_task(use_gemini, "Gemini", call_gemini, gemini_model, gemini_key)
180
+ await add_task(use_grok, "Grok", call_grok, grok_model, grok_key)
181
+ await add_task(use_deepseek, "DeepSeek", call_deepseek, deepseek_model, deepseek_key)
182
+ await add_task(use_perplexity, "Perplexity", call_perplexity, pplx_model, pplx_key)
183
+
184
+ if not tasks:
185
+ return {"status": "Select at least one provider in the sidebar."}
186
+
187
+ # Run all providers in parallel
188
+ responses = await asyncio.gather(*tasks)
189
+
190
+ # Map results back to names in order
191
+ i = 0
192
+ for name in list(results_map.keys()):
193
+ results_map[name] = responses[i]
194
+ i += 1
195
+
196
+ return results_map
197
+
198
+ # -------- UI -------- #
199
+
200
def build_ui():
    """Construct the Gradio Blocks UI.

    Layout: a settings sidebar (system prompt, temperature, and per-provider
    enable/model/key inputs) beside the prompt box and six Markdown panes,
    one per provider, arranged in two rows of three.

    Returns:
        gr.Blocks: the assembled (not yet launched) demo.

    Fix: the status banner was created with visible=False and then only ever
    assigned a plain string, so validation messages ("Please enter a
    prompt.", etc.) were never shown. on_click now toggles visibility via
    gr.update().
    """
    with gr.Blocks(fill_height=True, theme=gr.themes.Soft()) as demo:
        gr.Markdown("""
# Side‑by‑Side AI
Compare answers from Grok 4, ChatGPT, Gemini 2.5 Pro, DeepSeek, Claude Sonnet 4, and Perplexity Sonar Pro — in one chat window.
""")

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Settings")
                system = gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, label="System prompt", lines=3)
                temp = gr.Slider(0.0, 1.0, value=0.7, step=0.05, label="Temperature (some SDKs ignore this)")
                gr.Markdown("**Providers** (enable and set model names as you like)")

                use_openai = gr.Checkbox(value=True, label="OpenAI (ChatGPT)")
                openai_model = gr.Textbox(value="gpt-4o-mini", label="OpenAI model")
                openai_key = gr.Textbox(value="", label="OpenAI API key (optional — uses env if empty)", type="password")

                use_claude = gr.Checkbox(value=True, label="Anthropic Claude")
                claude_model = gr.Textbox(value="claude-3-5-sonnet-latest", label="Claude model")
                claude_key = gr.Textbox(value="", label="Anthropic API key (optional)", type="password")

                use_gemini = gr.Checkbox(value=True, label="Google Gemini")
                gemini_model = gr.Textbox(value="gemini-2.0-pro", label="Gemini model")
                gemini_key = gr.Textbox(value="", label="Gemini API key (optional)", type="password")

                use_grok = gr.Checkbox(value=True, label="xAI Grok")
                grok_model = gr.Textbox(value="grok-2-latest", label="Grok model")
                grok_key = gr.Textbox(value="", label="xAI API key (optional)", type="password")

                use_deepseek = gr.Checkbox(value=True, label="DeepSeek")
                deepseek_model = gr.Textbox(value="deepseek-chat", label="DeepSeek model")
                deepseek_key = gr.Textbox(value="", label="DeepSeek API key (optional)", type="password")

                use_perplexity = gr.Checkbox(value=True, label="Perplexity Sonar")
                pplx_model = gr.Textbox(value="sonar-pro", label="Perplexity model")
                pplx_key = gr.Textbox(value="", label="Perplexity API key (optional)", type="password")

            with gr.Column(scale=2):
                prompt = gr.Textbox(placeholder="Ask anything once. All providers will answer side‑by‑side.", label="Your prompt", lines=6)
                ask = gr.Button("Ask all")
                # Hidden until there is a validation message to show.
                status = gr.Markdown(visible=False)

                with gr.Row():
                    out_openai = gr.Markdown(label="OpenAI")
                    out_claude = gr.Markdown(label="Claude")
                    out_gemini = gr.Markdown(label="Gemini")

                with gr.Row():
                    out_grok = gr.Markdown(label="Grok")
                    out_deepseek = gr.Markdown(label="DeepSeek")
                    out_pplx = gr.Markdown(label="Perplexity")

        def on_click(prompt, system, temp,
                     use_openai, use_claude, use_gemini, use_grok, use_deepseek, use_perplexity,
                     openai_model, claude_model, gemini_model, grok_model, deepseek_model, pplx_model,
                     openai_key, claude_key, gemini_key, grok_key, deepseek_key, pplx_key):
            """Sync click handler: bridge into the async fan-out and map the
            result dict onto (status, six provider panes)."""
            # Gradio runs sync handlers off the event loop, so asyncio.run is
            # safe here to drive the async fan-out.
            results = asyncio.run(run_all(prompt, system, temp,
                                          use_openai, use_claude, use_gemini, use_grok, use_deepseek, use_perplexity,
                                          openai_model, claude_model, gemini_model, grok_model, deepseek_model, pplx_model,
                                          openai_key, claude_key, gemini_key, grok_key, deepseek_key, pplx_key))

            if "status" in results:
                # Surface the validation message (the banner starts hidden).
                return (gr.update(value=results["status"], visible=True),
                        "", "", "", "", "", "")

            return (gr.update(value="", visible=False),  # hide the status banner again
                    results.get("OpenAI", ""),
                    results.get("Claude", ""),
                    results.get("Gemini", ""),
                    results.get("Grok", ""),
                    results.get("DeepSeek", ""),
                    results.get("Perplexity", ""))

        ask.click(on_click,
                  [prompt, system, temp,
                   use_openai, use_claude, use_gemini, use_grok, use_deepseek, use_perplexity,
                   openai_model, claude_model, gemini_model, grok_model, deepseek_model, pplx_model,
                   openai_key, claude_key, gemini_key, grok_key, deepseek_key, pplx_key],
                  [status, out_openai, out_claude, out_gemini, out_grok, out_deepseek, out_pplx])

        gr.Markdown("Built with ❤️ using Gradio. Keys are stored only in your Space runtime.")

    return demo
284
+
285
if __name__ == "__main__":
    # Gradio 4 removed queue(concurrency_count=...); with the pinned
    # gradio>=4.40.0 (see requirements) the old keyword raises a TypeError
    # at startup. The replacement is default_concurrency_limit.
    build_ui().queue(default_concurrency_limit=8).launch()
287
+
288
+ ---
289
+
290
+ # (Optional) File: Procfile
291
+ web: python app.py