Spaces:
Running
Running
Add detailed error logging to trace API failures.
Browse files
Will show the exact exception type and response on HuggingFace.
Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
- providers/openai_compat.py +14 -0
providers/openai_compat.py
CHANGED
|
@@ -210,7 +210,14 @@ class OpenAIChatTransport(BaseProvider):
|
|
| 210 |
|
| 211 |
async def _create_stream(self, body: dict) -> tuple[Any, dict]:
|
| 212 |
"""Create a streaming chat completion, optionally retrying once."""
|
|
|
|
|
|
|
| 213 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 214 |
stream = await self._global_rate_limiter.execute_with_retry(
|
| 215 |
self._client.chat.completions.create,
|
| 216 |
**body,
|
|
@@ -219,6 +226,13 @@ class OpenAIChatTransport(BaseProvider):
|
|
| 219 |
)
|
| 220 |
return stream, body
|
| 221 |
except Exception as error:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 222 |
retry_body = self._get_retry_request_body(error, body)
|
| 223 |
if retry_body is None:
|
| 224 |
raise
|
|
|
|
| 210 |
|
| 211 |
async def _create_stream(self, body: dict) -> tuple[Any, dict]:
|
| 212 |
"""Create a streaming chat completion, optionally retrying once."""
|
| 213 |
+
from loguru import logger
|
| 214 |
+
|
| 215 |
try:
|
| 216 |
+
logger.info(
|
| 217 |
+
"{}_CREATE_STREAM: calling API with model={}",
|
| 218 |
+
self._provider_name,
|
| 219 |
+
body.get("model"),
|
| 220 |
+
)
|
| 221 |
stream = await self._global_rate_limiter.execute_with_retry(
|
| 222 |
self._client.chat.completions.create,
|
| 223 |
**body,
|
|
|
|
| 226 |
)
|
| 227 |
return stream, body
|
| 228 |
except Exception as error:
|
| 229 |
+
logger.error(
|
| 230 |
+
"{}_CREATE_STREAM_ERROR: {} - {} - response={}",
|
| 231 |
+
self._provider_name,
|
| 232 |
+
type(error).__name__,
|
| 233 |
+
str(error),
|
| 234 |
+
getattr(error, "response", None),
|
| 235 |
+
)
|
| 236 |
retry_body = self._get_retry_request_body(error, body)
|
| 237 |
if retry_body is None:
|
| 238 |
raise
|