Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -15,7 +15,9 @@ OPENAI_MODEL_NAME = "gpt-4o-mini"
|
|
| 15 |
# 2. Google Gemini Configuration (Direct Google API)
|
| 16 |
# You need to set GOOGLE_API_KEY in your HF Space secrets
|
| 17 |
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
|
| 18 |
-
|
|
|
|
|
|
|
| 19 |
GEMINI_API_URL = f"https://generativelanguage.googleapis.com/v1beta/models/gemma-3-27b-it:generateContent?key={GOOGLE_API_KEY}"
|
| 20 |
|
| 21 |
app = FastAPI(
|
|
@@ -26,7 +28,7 @@ app = FastAPI(
|
|
| 26 |
# --- MODELS ---
|
| 27 |
class AnalyzeRequest(BaseModel):
|
| 28 |
filename: str
|
| 29 |
-
model_provider: str = "openai" # 'openai' or 'gemma'
|
| 30 |
|
| 31 |
# --- HELPERS ---
|
| 32 |
def get_headers(token):
|
|
@@ -130,10 +132,11 @@ def call_google_gemini(filename):
|
|
| 130 |
"generationConfig": {
|
| 131 |
"temperature": 0.1,
|
| 132 |
"maxOutputTokens": 100,
|
| 133 |
-
"responseMimeType": "application/json" #
|
| 134 |
}
|
| 135 |
}
|
| 136 |
|
|
|
|
| 137 |
response = requests.post(GEMINI_API_URL, headers={"Content-Type": "application/json"}, json=payload, timeout=30)
|
| 138 |
|
| 139 |
if response.status_code != 200:
|
|
@@ -165,6 +168,7 @@ def analyze_filename(request: AnalyzeRequest):
|
|
| 165 |
|
| 166 |
# Parse JSON output from either provider
|
| 167 |
if raw_content:
|
|
|
|
| 168 |
clean_content = raw_content.replace("```json", "").replace("```", "").strip()
|
| 169 |
return json.loads(clean_content)
|
| 170 |
|
|
|
|
| 15 |
# 2. Google Gemini Configuration (Direct Google API)
|
| 16 |
# You need to set GOOGLE_API_KEY in your HF Space secrets
|
| 17 |
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
|
| 18 |
+
|
| 19 |
+
# NOTE: This targets the Gemma 3 open model (gemma-3-27b-it), which is served via the Gemini API endpoint.
|
| 20 |
+
# NOTE: While this uses the "Gemini" API endpoint, it calls the Gemma 2 open model.
|
| 21 |
GEMINI_API_URL = f"https://generativelanguage.googleapis.com/v1beta/models/gemma-3-27b-it:generateContent?key={GOOGLE_API_KEY}"
|
| 22 |
|
| 23 |
app = FastAPI(
|
|
|
|
| 28 |
# --- MODELS ---
|
| 29 |
class AnalyzeRequest(BaseModel):
|
| 30 |
filename: str
|
| 31 |
+
model_provider: str = "openai" # 'openai' or 'gemma'
|
| 32 |
|
| 33 |
# --- HELPERS ---
|
| 34 |
def get_headers(token):
|
|
|
|
| 132 |
"generationConfig": {
|
| 133 |
"temperature": 0.1,
|
| 134 |
"maxOutputTokens": 100,
|
| 135 |
+
"responseMimeType": "application/json" # Hints the model to output JSON
|
| 136 |
}
|
| 137 |
}
|
| 138 |
|
| 139 |
+
# Note: The URL here uses the global GEMINI_API_URL defined at the top
|
| 140 |
response = requests.post(GEMINI_API_URL, headers={"Content-Type": "application/json"}, json=payload, timeout=30)
|
| 141 |
|
| 142 |
if response.status_code != 200:
|
|
|
|
| 168 |
|
| 169 |
# Parse JSON output from either provider
|
| 170 |
if raw_content:
|
| 171 |
+
# Clean up markdown code blocks if the model includes them
|
| 172 |
clean_content = raw_content.replace("```json", "").replace("```", "").strip()
|
| 173 |
return json.loads(clean_content)
|
| 174 |
|