James Edmunds committed on
Commit ·
f229c91
1
Parent(s): e10d2ca
feat: add detailed OpenAI API error handling
Browse files- src/generator/generator.py +23 -12
src/generator/generator.py
CHANGED
|
@@ -201,16 +201,11 @@ class LyricGenerator:
|
|
| 201 |
chat_history = []
|
| 202 |
|
| 203 |
try:
|
| 204 |
-
print("Starting lyrics generation...")
|
| 205 |
-
print(f"OpenAI API Key present: {bool(Settings.OPENAI_API_KEY)}")
|
| 206 |
-
|
| 207 |
# Get source documents with scores first
|
| 208 |
-
print("Searching for similar documents...")
|
| 209 |
docs_and_scores = self.vector_store.similarity_search_with_score(
|
| 210 |
prompt,
|
| 211 |
k=20
|
| 212 |
)
|
| 213 |
-
print(f"Found {len(docs_and_scores)} similar documents")
|
| 214 |
|
| 215 |
# Sort by similarity (convert distance to similarity)
|
| 216 |
docs_and_scores.sort(key=lambda x: x[1], reverse=False)
|
|
@@ -226,22 +221,38 @@ class LyricGenerator:
|
|
| 226 |
'content': doc.page_content[:200] + "..." # First 200 chars
|
| 227 |
})
|
| 228 |
|
| 229 |
-
print("Generating response using QA chain...")
|
| 230 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 231 |
# Generate response using invoke
|
|
|
|
| 232 |
response = self.qa_chain.invoke({
|
| 233 |
"question": prompt,
|
| 234 |
"chat_history": chat_history
|
| 235 |
})
|
| 236 |
-
print("Successfully generated
|
|
|
|
| 237 |
except Exception as e:
|
| 238 |
-
|
| 239 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 240 |
raise RuntimeError(
|
| 241 |
-
"
|
| 242 |
-
"connection and
|
| 243 |
)
|
| 244 |
-
|
|
|
|
| 245 |
|
| 246 |
# Add detailed context to response
|
| 247 |
response["source_documents_with_scores"] = docs_and_scores
|
|
|
|
| 201 |
chat_history = []
|
| 202 |
|
| 203 |
try:
|
|
|
|
|
|
|
|
|
|
| 204 |
# Get source documents with scores first
|
|
|
|
| 205 |
docs_and_scores = self.vector_store.similarity_search_with_score(
|
| 206 |
prompt,
|
| 207 |
k=20
|
| 208 |
)
|
|
|
|
| 209 |
|
| 210 |
# Sort by similarity (convert distance to similarity)
|
| 211 |
docs_and_scores.sort(key=lambda x: x[1], reverse=False)
|
|
|
|
| 221 |
'content': doc.page_content[:200] + "..." # First 200 chars
|
| 222 |
})
|
| 223 |
|
|
|
|
| 224 |
try:
|
| 225 |
+
# Test OpenAI connection first
|
| 226 |
+
print("Testing OpenAI connection...")
|
| 227 |
+
test_response = self.embeddings.embed_query("test")
|
| 228 |
+
print("OpenAI connection successful")
|
| 229 |
+
|
| 230 |
# Generate response using invoke
|
| 231 |
+
print("Generating lyrics with QA chain...")
|
| 232 |
response = self.qa_chain.invoke({
|
| 233 |
"question": prompt,
|
| 234 |
"chat_history": chat_history
|
| 235 |
})
|
| 236 |
+
print("Successfully generated lyrics")
|
| 237 |
+
|
| 238 |
except Exception as e:
|
| 239 |
+
error_msg = str(e)
|
| 240 |
+
print(f"OpenAI API error: {error_msg}")
|
| 241 |
+
if "auth" in error_msg.lower():
|
| 242 |
+
raise RuntimeError(
|
| 243 |
+
"OpenAI API authentication failed. Please check your API key."
|
| 244 |
+
)
|
| 245 |
+
elif "rate" in error_msg.lower():
|
| 246 |
+
raise RuntimeError(
|
| 247 |
+
"OpenAI API rate limit exceeded. Please try again in a moment."
|
| 248 |
+
)
|
| 249 |
+
elif "connect" in error_msg.lower():
|
| 250 |
raise RuntimeError(
|
| 251 |
+
"Connection to OpenAI failed. Please check your internet "
|
| 252 |
+
"connection and try again."
|
| 253 |
)
|
| 254 |
+
else:
|
| 255 |
+
raise RuntimeError(f"OpenAI API error: {error_msg}")
|
| 256 |
|
| 257 |
# Add detailed context to response
|
| 258 |
response["source_documents_with_scores"] = docs_and_scores
|