add GPT-5
Browse files
app.py
CHANGED
|
@@ -24,27 +24,26 @@ AUTHORIZED_USER_IDS = set(authorized_users_str.split(',') if authorized_users_st
|
|
| 24 |
|
| 25 |
# Define model pricing information (approximate costs per 100 image API calls)
|
| 26 |
MODEL_PRICING = {
|
| 27 |
-
"google/gemini-2.
|
| 28 |
"gpt-4.1-mini": "$0.07",
|
| 29 |
"gpt-4.1": "$0.35",
|
| 30 |
-
"anthropic/claude-
|
| 31 |
-
"google/gemini-2.5-pro
|
| 32 |
-
"google/gemini-2.5-flash-preview-05-20:thinking": "$0.35",
|
| 33 |
"gpt-4.1-nano": "$0.02",
|
| 34 |
"openai/chatgpt-4o-latest": "$0.75",
|
| 35 |
"meta-llama/llama-4-maverick": "$0.04",
|
| 36 |
"meta-llama/llama-4-maverick:free": "Free",
|
| 37 |
-
"openai/gpt-5-chat": "N/A"
|
|
|
|
| 38 |
}
|
| 39 |
|
| 40 |
# Define preferred and additional models directly in the function
|
| 41 |
preferred_models_auth = [
|
| 42 |
-
("Gemini 2.
|
| 43 |
("GPT-4.1 Mini", "gpt-4.1-mini"),
|
| 44 |
-
("GPT-4.1
|
| 45 |
-
("Claude
|
| 46 |
-
("Gemini 2.5 Pro", "google/gemini-2.5-pro
|
| 47 |
-
("Gemini 2.5 Flash Thinking (Recommended)","google/gemini-2.5-flash-preview-05-20:thinking"),
|
| 48 |
("openai/gpt-5-chat", "GPT-5-chat")
|
| 49 |
]
|
| 50 |
|
|
@@ -52,6 +51,7 @@ additional_models = [
|
|
| 52 |
("GPT-4.1 Nano", "gpt-4.1-nano"),
|
| 53 |
("ChatGPT Latest", "openai/chatgpt-4o-latest"),
|
| 54 |
("Llama 4 Maverick", "meta-llama/llama-4-maverick")
|
|
|
|
| 55 |
]
|
| 56 |
|
| 57 |
# Calculate all models once
|
|
@@ -167,10 +167,10 @@ def create_demo():
|
|
| 167 |
is_authorized = profile.username in AUTHORIZED_USER_IDS
|
| 168 |
if is_authorized:
|
| 169 |
|
| 170 |
-
text = f"""**Current Model**: Gemini 2.
|
| 171 |
-
**Estimated cost per 100 Images**: {MODEL_PRICING["google/gemini-2.
|
| 172 |
|
| 173 |
-
return gr.update(choices=preferred_models_auth, label="Select Model",value="google/gemini-2.
|
| 174 |
else:
|
| 175 |
# Default model value
|
| 176 |
default_model = "meta-llama/llama-4-maverick:free"#preferred_models[0][1] # get free model
|
|
@@ -296,7 +296,7 @@ def create_demo():
|
|
| 296 |
return gr.Dropdown(choices=preferred_choices, value=current_model)
|
| 297 |
else:
|
| 298 |
# Reset to default model if current model is not in preferred models
|
| 299 |
-
return gr.Dropdown(choices=preferred_choices, value="google/gemini-2.
|
| 300 |
|
| 301 |
# Update model info when model selection changes
|
| 302 |
def update_model_info(model_value):
|
|
|
|
| 24 |
|
| 25 |
# Define model pricing information (approximate costs per 100 image API calls)
# Values are human-readable display strings for the UI (not numbers), keyed by
# the model identifier used with the API. "Free" / "N/A" are shown verbatim.
MODEL_PRICING = {
    "google/gemini-2.5-flash": "$0.08",
    "gpt-4.1-mini": "$0.07",
    "gpt-4.1": "$0.35",
    "anthropic/claude-sonnet-4": "$0.70",
    "google/gemini-2.5-pro": "$1.20",
    "gpt-4.1-nano": "$0.02",
    "openai/chatgpt-4o-latest": "$0.75",
    "meta-llama/llama-4-maverick": "$0.04",
    "meta-llama/llama-4-maverick:free": "Free",
    "openai/gpt-5-chat": "N/A",
    "openai/gpt-5-mini": "N/A",
}
|
| 39 |
|
| 40 |
# Define preferred and additional models directly in the function
# Each entry is a (display label, model identifier) pair, as consumed by the
# Gradio dropdown. The identifier (second element) must be a MODEL_PRICING key,
# otherwise the cost lookup in the UI raises KeyError.
preferred_models_auth = [
    ("Gemini 2.5 Flash", "google/gemini-2.5-flash"),
    ("GPT-4.1 Mini", "gpt-4.1-mini"),
    ("GPT-4.1", "gpt-4.1"),
    ("Claude Sonnet 4", "anthropic/claude-sonnet-4"),
    ("Gemini 2.5 Pro", "google/gemini-2.5-pro"),
    # Fixed: label and value were swapped ("openai/gpt-5-chat", "GPT-5-chat"),
    # which made the dropdown value the non-model string "GPT-5-chat" and
    # broke the MODEL_PRICING lookup for the GPT-5 entry.
    ("GPT-5 Chat", "openai/gpt-5-chat"),
]
|
| 49 |
|
|
|
|
| 51 |
("GPT-4.1 Nano", "gpt-4.1-nano"),
|
| 52 |
("ChatGPT Latest", "openai/chatgpt-4o-latest"),
|
| 53 |
("Llama 4 Maverick", "meta-llama/llama-4-maverick"),
|
| 54 |
+
("GPT-5-mini", "openai/gpt-5-mini")
|
| 55 |
]
|
| 56 |
|
| 57 |
# Calculate all models once
|
|
|
|
| 167 |
is_authorized = profile.username in AUTHORIZED_USER_IDS
|
| 168 |
if is_authorized:
|
| 169 |
|
| 170 |
+
text = f"""**Current Model**: Gemini 2.5 Flash
|
| 171 |
+
**Estimated cost per 100 Images**: {MODEL_PRICING["google/gemini-2.5-flash"]}"""
|
| 172 |
|
| 173 |
+
return gr.update(choices=preferred_models_auth, label="Select Model",value="google/gemini-2.5-flash"),text,f"Logged in as: {profile.username}"
|
| 174 |
else:
|
| 175 |
# Default model value
|
| 176 |
default_model = "meta-llama/llama-4-maverick:free"#preferred_models[0][1] # get free model
|
|
|
|
| 296 |
return gr.Dropdown(choices=preferred_choices, value=current_model)
|
| 297 |
else:
|
| 298 |
# Reset to default model if current model is not in preferred models
|
| 299 |
+
return gr.Dropdown(choices=preferred_choices, value="google/gemini-2.5-flash")
|
| 300 |
|
| 301 |
# Update model info when model selection changes
|
| 302 |
def update_model_info(model_value):
|