Update app.py
app.py (CHANGED)

@@ -141,23 +141,33 @@ def init_db():
     conn.commit()

 def initialize_llm():
-    """Initialize the language model"""
-    ...  # remaining removed lines not shown
+    """Initialize the language model with fallbacks"""
+    models = [
+        "google/flan-t5-large",  # Smaller, reliable model
+        "facebook/opt-350m",     # Another reliable option
+        "gpt2",                  # Basic but stable
+    ]
+
+    for model in models:
+        try:
+            print(f"Trying to initialize model: {model}")
+            api_token = os.environ.get('HF_TOKEN')
+
+            llm = HuggingFaceHub(
+                repo_id=model,
+                huggingfacehub_api_token=api_token
+            )
+
+            # Test the model
+            test_response = llm("Hello, this is a test.")
+            print(f"Successfully initialized {model}")
+            return llm
+
+        except Exception as e:
+            print(f"Failed to initialize {model}: {str(e)}")
+            continue
+
+    raise ValueError("Failed to initialize any model")


 class ChatSession:
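
The fallback loop above depends on pieces that sit outside this hunk: `os` and LangChain's `HuggingFaceHub` wrapper must already be imported at the top of app.py, the `HF_TOKEN` secret must be set in the Space's environment, and the second hunk calls `llm(prompt)`, presumably a module-level handle set at startup. A minimal sketch of that surrounding wiring, assuming the older `langchain.llms` import path and a module-level assignment (neither is shown in this commit):

import os

# Assumed import: older LangChain releases expose the wrapper as
# langchain.llms.HuggingFaceHub; newer ones move it to langchain_community.
from langchain.llms import HuggingFaceHub

# Hypothetical startup wiring: initialize_llm() is the function defined in
# the hunk above; bind the first model that initializes successfully to a
# module-level name that the /chat route can call later.
llm = initialize_llm()

Probing each candidate with a short test call means a bad token or an unavailable repo is caught once at startup rather than on the first user request, and the final `raise ValueError` makes a total failure visible instead of silently leaving the model unset.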
@@ -473,17 +483,17 @@ def chat():
     print(f"Received message: {user_input}")

     # Prepare the prompt
-    ...  # removed prompt construction not shown
+    prompt = f"""Please provide a helpful response to: {user_input}

+    Guidelines:
+    1. If code is needed, use ```python for code blocks
+    2. Be clear and concise
+    3. If you don't know something, say so
+    """

     # Get response from LLM
     try:
-        response = llm(
+        response = llm(prompt)
         print(f"Generated response: {response[:100]}...")

         return jsonify({
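
Only fragments of the handler are visible in this hunk: the `try:` opens and `return jsonify({` begins, but the `except` branch and the response payload lie outside the diff context. A rough sketch of how the changed lines presumably sit inside the Flask route; the route path, request field, payload keys, and error branch are assumptions, not part of this commit:

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/chat", methods=["POST"])  # route path and method assumed
def chat():
    user_input = request.json.get("message", "")  # field name assumed
    print(f"Received message: {user_input}")

    # Prompt construction added in this commit (full guideline text is in the hunk above)
    prompt = f"Please provide a helpful response to: {user_input}"

    # Get response from LLM; `llm` is the module-level model from initialize_llm()
    try:
        response = llm(prompt)
        print(f"Generated response: {response[:100]}...")
        return jsonify({"response": response})  # payload shape assumed
    except Exception as e:
        # The error branch is not visible in the diff; returning a 500 is one option.
        return jsonify({"error": str(e)}), 500

Asking the model to fence code with ```python suggests the front end renders Markdown, though that part of the app is not shown in this commit.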