Update utils/inference.py

utils/inference.py   +17 -6   CHANGED
@@ -12,13 +12,15 @@ headers = {
 
 def call_model(prompt: str) -> str:
     response = requests.post(
-        f"{API_URL}/
+        f"{API_URL}/generate",  # <-- use /generate for HF endpoints
         headers=headers,
         json={
-            "
-            "
-
-
+            "inputs": prompt,
+            "parameters": {
+                "max_new_tokens": 2048,
+                "temperature": 0.3,
+                "do_sample": False
+            }
         }
     )
 
@@ -26,4 +28,13 @@ def call_model(prompt: str) -> str:
         raise RuntimeError(f"Inference error: {response.status_code} - {response.text}")
 
     result = response.json()
-
+
+    # Handle variations in response format
+    if isinstance(result, dict) and "generated_text" in result:
+        return result["generated_text"]
+    elif isinstance(result, list) and "generated_text" in result[0]:
+        return result[0]["generated_text"]
+    elif "text" in result:
+        return result["text"]
+    else:
+        return "⚠️ No output generated."