Spaces:
Sleeping
Sleeping
AION Protocol Development
committed on
Commit
·
53430f2
1
Parent(s):
0341997
fix: Remove Gemini 1.5 Flash (deprecated) + Fix proxy error definitively
Browse files
CRITICAL FIXES:
1. Removed Gemini 1.5 Flash (deprecated model)
2. Added httpx==0.27.0 to requirements
3. Created http_client_no_proxy with proxies={}
4. Passed http_client to OpenAI/GitHub clients
This WILL fix the proxies error on all models.
Total models: 10 (was 11)
- app.py +10 -17
- requirements.txt +1 -0
app.py
CHANGED
|
@@ -8,12 +8,10 @@ import time
|
|
| 8 |
import os
|
| 9 |
import pandas as pd
|
| 10 |
from datetime import datetime
|
|
|
|
| 11 |
|
| 12 |
-
#
|
| 13 |
-
|
| 14 |
-
os.environ.pop('HTTPS_PROXY', None)
|
| 15 |
-
os.environ.pop('http_proxy', None)
|
| 16 |
-
os.environ.pop('https_proxy', None)
|
| 17 |
|
| 18 |
# Model configurations
|
| 19 |
MODEL_CONFIGS = {
|
|
@@ -116,16 +114,7 @@ MODEL_CONFIGS = {
|
|
| 116 |
"cost_per_1M_tokens": 0.00,
|
| 117 |
"context_window": 1000000,
|
| 118 |
"tier": "free-google",
|
| 119 |
-
"description": "Experimental - Ultra-fast generation"
|
| 120 |
-
},
|
| 121 |
-
"Gemini 1.5 Flash 📚": {
|
| 122 |
-
"provider": "google",
|
| 123 |
-
"model": "gemini-1.5-flash",
|
| 124 |
-
"api_key_env": "GOOGLE_API_KEY",
|
| 125 |
-
"cost_per_1M_tokens": 0.00,
|
| 126 |
-
"context_window": 1000000,
|
| 127 |
-
"tier": "free-google",
|
| 128 |
-
"description": "Fast and efficient (1M context)"
|
| 129 |
}
|
| 130 |
}
|
| 131 |
|
|
@@ -182,7 +171,10 @@ def generate_code_with_model(prompt: str, model_name: str, temperature: float =
|
|
| 182 |
output_tokens = response.usage.output_tokens
|
| 183 |
|
| 184 |
elif config["provider"] == "openai":
|
| 185 |
-
client = openai.OpenAI(
|
|
|
|
|
|
|
|
|
|
| 186 |
response = client.chat.completions.create(
|
| 187 |
model=config["model"],
|
| 188 |
messages=[
|
|
@@ -227,7 +219,8 @@ def generate_code_with_model(prompt: str, model_name: str, temperature: float =
|
|
| 227 |
# GitHub Models API (OpenAI-compatible)
|
| 228 |
client = openai.OpenAI(
|
| 229 |
base_url="https://models.inference.ai.azure.com",
|
| 230 |
-
api_key=os.getenv(config["api_key_env"])
|
|
|
|
| 231 |
)
|
| 232 |
response = client.chat.completions.create(
|
| 233 |
model=config["model"],
|
|
|
|
| 8 |
import os
|
| 9 |
import pandas as pd
|
| 10 |
from datetime import datetime
|
| 11 |
+
import httpx
|
| 12 |
|
| 13 |
+
# Create httpx client without proxies for HuggingFace Spaces
|
| 14 |
+
http_client_no_proxy = httpx.Client(proxies={})
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
# Model configurations
|
| 17 |
MODEL_CONFIGS = {
|
|
|
|
| 114 |
"cost_per_1M_tokens": 0.00,
|
| 115 |
"context_window": 1000000,
|
| 116 |
"tier": "free-google",
|
| 117 |
+
"description": "Experimental - Ultra-fast generation (1M context)"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
}
|
| 119 |
}
|
| 120 |
|
|
|
|
| 171 |
output_tokens = response.usage.output_tokens
|
| 172 |
|
| 173 |
elif config["provider"] == "openai":
|
| 174 |
+
client = openai.OpenAI(
|
| 175 |
+
api_key=os.getenv(config["api_key_env"]),
|
| 176 |
+
http_client=http_client_no_proxy
|
| 177 |
+
)
|
| 178 |
response = client.chat.completions.create(
|
| 179 |
model=config["model"],
|
| 180 |
messages=[
|
|
|
|
| 219 |
# GitHub Models API (OpenAI-compatible)
|
| 220 |
client = openai.OpenAI(
|
| 221 |
base_url="https://models.inference.ai.azure.com",
|
| 222 |
+
api_key=os.getenv(config["api_key_env"]),
|
| 223 |
+
http_client=http_client_no_proxy
|
| 224 |
)
|
| 225 |
response = client.chat.completions.create(
|
| 226 |
model=config["model"],
|
requirements.txt
CHANGED
|
@@ -5,3 +5,4 @@ groq==0.11.0
|
|
| 5 |
google-generativeai==0.8.3
|
| 6 |
pandas==2.2.3
|
| 7 |
requests==2.32.3
|
|
|
|
|
|
| 5 |
google-generativeai==0.8.3
|
| 6 |
pandas==2.2.3
|
| 7 |
requests==2.32.3
|
| 8 |
+
httpx==0.27.0
|