Spaces:
Sleeping
Sleeping
AION Protocol Development
committed on
Commit
·
0341997
1
Parent(s):
21e097f
fix: Remove http_client workaround + disable proxies via env vars
Browse files- Remove http_client=DefaultHttpxClient (not available in openai SDK)
- Disable proxies via os.environ.pop() (cleaner approach)
- Fix GitHub Models base_url (remove /chat/completions suffix)
This should fix all 10 models affected by proxy errors.
app.py
CHANGED
|
@@ -9,6 +9,12 @@ import os
|
|
| 9 |
import pandas as pd
|
| 10 |
from datetime import datetime
|
| 11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
# Model configurations
|
| 13 |
MODEL_CONFIGS = {
|
| 14 |
# === TIER 1: PREMIUM (Highest Quality) ===
|
|
@@ -176,11 +182,7 @@ def generate_code_with_model(prompt: str, model_name: str, temperature: float =
|
|
| 176 |
output_tokens = response.usage.output_tokens
|
| 177 |
|
| 178 |
elif config["provider"] == "openai":
|
| 179 |
-
|
| 180 |
-
client = openai.OpenAI(
|
| 181 |
-
api_key=os.getenv(config["api_key_env"]),
|
| 182 |
-
http_client=openai.DefaultHttpxClient(proxies=None)
|
| 183 |
-
)
|
| 184 |
response = client.chat.completions.create(
|
| 185 |
model=config["model"],
|
| 186 |
messages=[
|
|
@@ -224,9 +226,8 @@ def generate_code_with_model(prompt: str, model_name: str, temperature: float =
|
|
| 224 |
elif config["provider"] == "github":
|
| 225 |
# GitHub Models API (OpenAI-compatible)
|
| 226 |
client = openai.OpenAI(
|
| 227 |
-
base_url="https://models.inference.ai.azure.com/chat/completions",
|
| 228 |
-
api_key=os.getenv(config["api_key_env"]),
|
| 229 |
-
http_client=openai.DefaultHttpxClient(proxies=None)
|
| 230 |
)
|
| 231 |
response = client.chat.completions.create(
|
| 232 |
model=config["model"],
|
|
|
|
| 9 |
import pandas as pd
|
| 10 |
from datetime import datetime
|
| 11 |
|
| 12 |
+
# Disable proxies for HuggingFace Spaces compatibility
|
| 13 |
+
os.environ.pop('HTTP_PROXY', None)
|
| 14 |
+
os.environ.pop('HTTPS_PROXY', None)
|
| 15 |
+
os.environ.pop('http_proxy', None)
|
| 16 |
+
os.environ.pop('https_proxy', None)
|
| 17 |
+
|
| 18 |
# Model configurations
|
| 19 |
MODEL_CONFIGS = {
|
| 20 |
# === TIER 1: PREMIUM (Highest Quality) ===
|
|
|
|
| 182 |
output_tokens = response.usage.output_tokens
|
| 183 |
|
| 184 |
elif config["provider"] == "openai":
|
| 185 |
+
client = openai.OpenAI(api_key=os.getenv(config["api_key_env"]))
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
response = client.chat.completions.create(
|
| 187 |
model=config["model"],
|
| 188 |
messages=[
|
|
|
|
| 226 |
elif config["provider"] == "github":
|
| 227 |
# GitHub Models API (OpenAI-compatible)
|
| 228 |
client = openai.OpenAI(
|
| 229 |
+
base_url="https://models.inference.ai.azure.com",
|
| 230 |
+
api_key=os.getenv(config["api_key_env"])
|
|
|
|
| 231 |
)
|
| 232 |
response = client.chat.completions.create(
|
| 233 |
model=config["model"],
|