Update app.py
Browse files
app.py
CHANGED
|
@@ -141,33 +141,28 @@ def init_db():
|
|
| 141 |
conn.commit()
|
| 142 |
|
| 143 |
def initialize_llm():
|
| 144 |
-
"""Initialize the
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
api_token
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
except Exception as e:
|
| 167 |
-
print(f"Failed to initialize {model}: {str(e)}")
|
| 168 |
-
continue
|
| 169 |
-
|
| 170 |
-
raise ValueError("Failed to initialize any model")
|
| 171 |
|
| 172 |
|
| 173 |
class ChatSession:
|
|
@@ -476,43 +471,38 @@ def get_chat_list():
|
|
| 476 |
@app.route("/api/chat", methods=["POST"])
|
| 477 |
def chat():
|
| 478 |
try:
|
|
|
|
| 479 |
data = request.json
|
| 480 |
user_input = data.get("message", "")
|
| 481 |
-
session_id = data.get("sessionId", "default")
|
| 482 |
-
|
| 483 |
print(f"Received message: {user_input}")
|
| 484 |
|
| 485 |
-
#
|
| 486 |
-
prompt = f"
|
| 487 |
-
|
| 488 |
-
Guidelines:
|
| 489 |
-
1. If code is needed, use ```python for code blocks
|
| 490 |
-
2. Be clear and concise
|
| 491 |
-
3. If you don't know something, say so
|
| 492 |
-
"""
|
| 493 |
|
| 494 |
-
# Get response from LLM
|
| 495 |
try:
|
|
|
|
| 496 |
response = llm(prompt)
|
| 497 |
-
print(f"
|
| 498 |
|
|
|
|
| 499 |
return jsonify({
|
| 500 |
"success": True,
|
| 501 |
"response": response
|
| 502 |
})
|
| 503 |
|
| 504 |
except Exception as e:
|
| 505 |
-
print(f"
|
|
|
|
| 506 |
return jsonify({
|
| 507 |
"success": False,
|
| 508 |
-
"response": "
|
| 509 |
})
|
| 510 |
|
| 511 |
except Exception as e:
|
| 512 |
-
print(f"
|
| 513 |
return jsonify({
|
| 514 |
"success": False,
|
| 515 |
-
"response": "
|
| 516 |
})
|
| 517 |
|
| 518 |
|
|
|
|
| 141 |
conn.commit()
|
| 142 |
|
| 143 |
def initialize_llm(repo_id="google/flan-t5-base"):
    """Initialize and smoke-test a HuggingFace Hub LLM.

    Reads the API token from the ``HF_TOKEN`` environment variable, builds a
    ``HuggingFaceHub`` client for *repo_id*, and performs a one-shot test call
    so that configuration problems surface at startup rather than on the
    first user request.

    Args:
        repo_id: Hub model to load. Defaults to the previously hard-coded
            ``"google/flan-t5-base"`` so existing callers are unaffected.

    Returns:
        The initialized LLM callable.

    Raises:
        ValueError: If ``HF_TOKEN`` is not set.
        Exception: Re-raised from the client constructor or the test call.
    """
    # Check the token up front, outside the try: a missing token is a
    # configuration error, not a runtime failure of the Hub client.
    api_token = os.environ.get('HF_TOKEN')
    if not api_token:
        raise ValueError("No API token found")

    try:
        # Initialize with minimal settings
        llm = HuggingFaceHub(
            repo_id=repo_id,
            huggingfacehub_api_token=api_token,
        )

        # Quick smoke test: fail fast if the token/model is misconfigured.
        test = llm("Say hello")
        print(f"Test response: {test}")

        return llm

    except Exception as e:
        # Log for visibility, then propagate so the caller decides whether
        # to retry with another model or abort startup.
        print(f"LLM initialization error: {str(e)}")
        raise
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 166 |
|
| 167 |
|
| 168 |
class ChatSession:
|
|
|
|
| 471 |
@app.route("/api/chat", methods=["POST"])
def chat():
    """Handle a chat POST request by forwarding the user's message to the LLM.

    Expects a JSON body like ``{"message": "..."}``. Always responds with a
    JSON object of shape ``{"success": bool, "response": str}`` so the
    frontend has a uniform contract on both success and failure.
    """
    try:
        # get_json(silent=True) returns None instead of raising when the
        # body is missing or not application/json — unlike bare
        # `request.json`, which would abort before our error handling runs.
        data = request.get_json(silent=True) or {}
        user_input = data.get("message", "")
        print(f"Received message: {user_input}")

        # Very simple prompt for testing
        prompt = f"You are a helpful assistant. Keep your response short and simple. User says: {user_input}"

        try:
            # Get response directly from model
            response = llm(prompt)
            print(f"Raw response received: {response}")

            # Return whatever response we get
            return jsonify({
                "success": True,
                "response": response,
            })

        except Exception as e:
            # Model-level failure: log it and surface a friendly message.
            # HTTP status stays 200 to preserve the existing frontend contract.
            print(f"Model error: {str(e)}")
            return jsonify({
                "success": False,
                "response": "Sorry, I couldn't generate a response right now.",
            })

    except Exception as e:
        # Anything else that went wrong while handling the request itself.
        print(f"Request error: {str(e)}")
        return jsonify({
            "success": False,
            "response": "Sorry, there was a problem with your request.",
        })