SHIKARICHACHA committed on
Commit
b0b8066
·
verified ·
1 Parent(s): 7add57b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -24
app.py CHANGED
@@ -1,16 +1,16 @@
1
-
2
-
3
  import gradio as gr
4
  import os
5
  from openai import OpenAI
 
 
6
 
7
  # OpenRouter API key
8
  OPENROUTER_API_KEY = "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"
9
 
10
- # Available models
11
  TEXT_MODELS = {
12
  "Mistral Small": "mistralai/mistral-small-3.2-24b-instruct:free",
13
- "Claude 3 Haiku": "anthropic/claude-3-haiku:free",
14
  "Gemini Pro": "google/gemini-2.5-pro-exp-03-25",
15
  "Qwen": "qwen/qwen2.5-32b-instruct:free",
16
  "Mistral 3.1": "mistralai/mistral-small-3.1-24b-instruct:free",
@@ -21,19 +21,23 @@ TEXT_MODELS = {
21
  # Available image models
22
  IMAGE_MODELS = {
23
  "Kimi Vision": "moonshotai/kimi-vl-a3b-thinking:free",
24
- "Claude 3 Opus Vision": "anthropic/claude-3-opus-vision:free",
25
- "Claude 3 Sonnet Vision": "anthropic/claude-3-sonnet-vision:free",
26
- "Gemini Pro Vision": "google/gemini-pro-vision:free",
27
- "GPT-4 Vision": "openai/gpt-4-vision:free",
28
- "Llava": "llava/llava-1.6-34b-vision:free",
29
  }
30
 
31
- # Combined models for dropdown
32
- MODELS = {**TEXT_MODELS, **IMAGE_MODELS}
 
 
 
 
33
 
34
  # Initialize chat history
35
  history = []
36
 
 
37
  import base64
38
  from PIL import Image
39
  import io
@@ -90,15 +94,45 @@ def chat_with_ai(message, model_name, history, image=None):
90
  # Add the current message (text only)
91
  messages.append({"role": "user", "content": message})
92
 
 
 
 
93
  # Create the completion request
 
94
  completion = client.chat.completions.create(
95
  extra_headers={
96
  "HTTP-Referer": "https://gradio-openrouter-interface.com",
97
  "X-Title": "Gradio OpenRouter Interface",
98
  },
99
- model=MODELS[model_name],
100
  messages=messages
101
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
  # Return the model's response
104
  return completion.choices[0].message.content
@@ -106,6 +140,21 @@ def chat_with_ai(message, model_name, history, image=None):
106
  except Exception as e:
107
  return f"Error: {str(e)}"
108
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  # Create the Gradio interface
110
  with gr.Blocks(title="OpenRouter AI Multi-Modal Interface", css="style.css") as demo:
111
  gr.Markdown(
@@ -206,15 +255,18 @@ with gr.Blocks(title="OpenRouter AI Multi-Modal Interface", css="style.css") as
206
  """
207
  ### Available Image Models
208
  - **Kimi Vision**: Moonshot AI's vision-language model
209
- - **Claude 3 Opus Vision**: Anthropic's premium vision model
210
- - **Claude 3 Sonnet Vision**: Mid-tier vision model from Anthropic
211
  - **Gemini Pro Vision**: Google's multimodal vision model
212
- - **GPT-4 Vision**: OpenAI's vision-enabled GPT model
213
- - **Llava**: Open-source vision-language model
 
214
 
215
  *Note: All responses will be provided in English*
216
  """
217
  )
 
 
 
 
218
 
219
  # Set up the submit action
220
  def respond(message, chat_history, model, image, model_type):
@@ -231,16 +283,10 @@ with gr.Blocks(title="OpenRouter AI Multi-Modal Interface", css="style.css") as
231
  # Process with or without image
232
  if use_image:
233
  bot_message = chat_with_ai(message, model, chat_history, image)
234
- # Add a note that the response is in English
235
- if not bot_message.startswith("Error:"):
236
- display_message = f"{bot_message}\n\n*Response provided in English*"
237
- else:
238
- display_message = bot_message
239
  else:
240
  bot_message = chat_with_ai(message, model, chat_history)
241
- display_message = bot_message
242
 
243
- chat_history.append((message, display_message))
244
  return "", chat_history
245
 
246
  # Connect the components
@@ -260,6 +306,9 @@ with gr.Blocks(title="OpenRouter AI Multi-Modal Interface", css="style.css") as
260
  return None, None
261
 
262
  clear_btn.click(clear_all, None, [chatbot, image_input], queue=False)
 
 
 
263
 
264
  # For Hugging Face Spaces compatibility
265
  if __name__ == "__main__":
@@ -267,4 +316,5 @@ if __name__ == "__main__":
267
  demo.launch(share=True)
268
  else:
269
  # For Hugging Face Spaces, we need to expose the app
270
- app = demo.launch(share=False, show_api=False)
 
 
 
 
1
  import gradio as gr
2
  import os
3
  from openai import OpenAI
4
+ import json
5
+ import time
6
 
7
# OpenRouter API key.
# SECURITY: a real-looking secret was previously hardcoded here (and remains in
# the git history of this file — it must be revoked and rotated). Read the key
# from the environment instead; an empty string keeps the module importable
# when the variable is unset, and the API call will then fail with a clear
# authentication error.
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
9
 
10
+ # Available models categorized by type
11
  TEXT_MODELS = {
12
  "Mistral Small": "mistralai/mistral-small-3.2-24b-instruct:free",
13
+ "Claude 3 Haiku": "anthropic/claude-3-haiku",
14
  "Gemini Pro": "google/gemini-2.5-pro-exp-03-25",
15
  "Qwen": "qwen/qwen2.5-32b-instruct:free",
16
  "Mistral 3.1": "mistralai/mistral-small-3.1-24b-instruct:free",
 
21
# Vision-capable models offered in the dropdown, keyed by display name.
# Kept as (name, OpenRouter model id) pairs so the dropdown order is explicit.
_IMAGE_MODEL_ENTRIES = (
    ("Kimi Vision", "moonshotai/kimi-vl-a3b-thinking:free"),
    ("Gemini Pro Vision", "google/gemini-2.5-pro-exp-03-25"),
    ("Qwen Vision", "qwen/qwen2.5-vl-32b-instruct:free"),
    ("Gemma Vision", "google/gemma-3-4b-it:free"),
    ("Llama 3 Vision", "meta-llama/llama-3.2-11b-vision-instruct:free"),
)
IMAGE_MODELS = dict(_IMAGE_MODEL_ENTRIES)
29
 
30
# Session-wide token accounting, updated after every completion call.
# "model_usage" maps display name -> per-model stats dict.
token_usage = dict(
    total_input_tokens=0,
    total_output_tokens=0,
    model_usage={},
)

# Conversation history shared by the UI callbacks.
history = []
39
 
40
+ # Helper function to convert image to base64
41
  import base64
42
  from PIL import Image
43
  import io
 
94
  # Add the current message (text only)
95
  messages.append({"role": "user", "content": message})
96
 
97
+ # Get the model ID based on the selected model name
98
+ model_id = TEXT_MODELS.get(model_name) or IMAGE_MODELS.get(model_name)
99
+
100
  # Create the completion request
101
+ start_time = time.time()
102
  completion = client.chat.completions.create(
103
  extra_headers={
104
  "HTTP-Referer": "https://gradio-openrouter-interface.com",
105
  "X-Title": "Gradio OpenRouter Interface",
106
  },
107
+ model=model_id,
108
  messages=messages
109
  )
110
+ end_time = time.time()
111
+
112
+ # Update token usage statistics
113
+ input_tokens = completion.usage.prompt_tokens
114
+ output_tokens = completion.usage.completion_tokens
115
+
116
+ token_usage["total_input_tokens"] += input_tokens
117
+ token_usage["total_output_tokens"] += output_tokens
118
+
119
+ if model_name not in token_usage["model_usage"]:
120
+ token_usage["model_usage"][model_name] = {
121
+ "input_tokens": 0,
122
+ "output_tokens": 0,
123
+ "requests": 0,
124
+ "avg_response_time": 0
125
+ }
126
+
127
+ # Update model-specific usage
128
+ model_stats = token_usage["model_usage"][model_name]
129
+ model_stats["input_tokens"] += input_tokens
130
+ model_stats["output_tokens"] += output_tokens
131
+ model_stats["requests"] += 1
132
+
133
+ # Calculate running average of response time
134
+ response_time = end_time - start_time
135
+ model_stats["avg_response_time"] = ((model_stats["avg_response_time"] * (model_stats["requests"] - 1)) + response_time) / model_stats["requests"]
136
 
137
  # Return the model's response
138
  return completion.choices[0].message.content
 
140
  except Exception as e:
141
  return f"Error: {str(e)}"
142
 
143
# Function to generate token usage report
def generate_usage_report():
    """Render the session's `token_usage` globals as a Markdown report.

    Returns a string with the overall input/output token totals followed by a
    per-model table (tokens, request count, average response time in seconds).
    """
    parts = [
        "### Token Usage Statistics\n\n",
        f"**Total Input Tokens:** {token_usage['total_input_tokens']}\n",
        f"**Total Output Tokens:** {token_usage['total_output_tokens']}\n\n",
        "### Model-Specific Usage\n\n",
        "| Model | Input Tokens | Output Tokens | Requests | Avg Response Time (s) |\n",
        "|-------|--------------|---------------|----------|----------------------|\n",
    ]
    # One table row per model, in insertion order of token_usage["model_usage"].
    parts.extend(
        f"| {name} | {stats['input_tokens']} | {stats['output_tokens']} | "
        f"{stats['requests']} | {stats['avg_response_time']:.2f} |\n"
        for name, stats in token_usage["model_usage"].items()
    )
    return "".join(parts)
157
+
158
  # Create the Gradio interface
159
  with gr.Blocks(title="OpenRouter AI Multi-Modal Interface", css="style.css") as demo:
160
  gr.Markdown(
 
255
  """
256
  ### Available Image Models
257
  - **Kimi Vision**: Moonshot AI's vision-language model
 
 
258
  - **Gemini Pro Vision**: Google's multimodal vision model
259
+ - **Qwen Vision**: Alibaba's vision-language model
260
+ - **Gemma Vision**: Google's lightweight vision model
261
+ - **Llama 3 Vision**: Meta's vision-language model
262
 
263
  *Note: All responses will be provided in English*
264
  """
265
  )
266
+
267
+ # Token usage statistics
268
+ usage_stats = gr.Markdown("### Token Usage Statistics\n\nNo usage data yet.")
269
+ refresh_stats_btn = gr.Button("Refresh Usage Stats")
270
 
271
  # Set up the submit action
272
  def respond(message, chat_history, model, image, model_type):
 
283
  # Process with or without image
284
  if use_image:
285
  bot_message = chat_with_ai(message, model, chat_history, image)
 
 
 
 
 
286
  else:
287
  bot_message = chat_with_ai(message, model, chat_history)
 
288
 
289
+ chat_history.append((message, bot_message))
290
  return "", chat_history
291
 
292
  # Connect the components
 
306
  return None, None
307
 
308
  clear_btn.click(clear_all, None, [chatbot, image_input], queue=False)
309
+
310
+ # Update usage statistics
311
+ refresh_stats_btn.click(generate_usage_report, None, usage_stats)
312
 
313
  # For Hugging Face Spaces compatibility
314
  if __name__ == "__main__":
 
316
  demo.launch(share=True)
317
  else:
318
  # For Hugging Face Spaces, we need to expose the app
319
+ app = demo.launch(share=False, show_api=False)
320
+