roshnn24 committed on
Commit
08ab3b5
·
verified ·
1 Parent(s): efafd33

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -43
app.py CHANGED
@@ -141,33 +141,28 @@ def init_db():
141
  conn.commit()
142
 
143
  def initialize_llm():
144
- """Initialize the language model with fallbacks"""
145
- models = [
146
- "google/flan-t5-large", # Smaller, reliable model
147
- "facebook/opt-350m", # Another reliable option
148
- "gpt2", # Basic but stable
149
- ]
150
-
151
- for model in models:
152
- try:
153
- print(f"Trying to initialize model: {model}")
154
- api_token = os.environ.get('HF_TOKEN')
155
-
156
- llm = HuggingFaceHub(
157
- repo_id=model,
158
- huggingfacehub_api_token=api_token
159
- )
160
-
161
- # Test the model
162
- test_response = llm("Hello, this is a test.")
163
- print(f"Successfully initialized {model}")
164
- return llm
165
-
166
- except Exception as e:
167
- print(f"Failed to initialize {model}: {str(e)}")
168
- continue
169
-
170
- raise ValueError("Failed to initialize any model")
171
 
172
 
173
  class ChatSession:
@@ -476,43 +471,38 @@ def get_chat_list():
476
  @app.route("/api/chat", methods=["POST"])
477
  def chat():
478
  try:
 
479
  data = request.json
480
  user_input = data.get("message", "")
481
- session_id = data.get("sessionId", "default")
482
-
483
  print(f"Received message: {user_input}")
484
 
485
- # Prepare the prompt
486
- prompt = f"""Please provide a helpful response to: {user_input}
487
-
488
- Guidelines:
489
- 1. If code is needed, use ```python for code blocks
490
- 2. Be clear and concise
491
- 3. If you don't know something, say so
492
- """
493
 
494
- # Get response from LLM
495
  try:
 
496
  response = llm(prompt)
497
- print(f"Generated response: {response[:100]}...")
498
 
 
499
  return jsonify({
500
  "success": True,
501
  "response": response
502
  })
503
 
504
  except Exception as e:
505
- print(f"Error generating response: {str(e)}")
 
506
  return jsonify({
507
  "success": False,
508
- "response": "Error generating response. Please try again."
509
  })
510
 
511
  except Exception as e:
512
- print(f"Error in chat endpoint: {str(e)}")
513
  return jsonify({
514
  "success": False,
515
- "response": "An error occurred. Please try again."
516
  })
517
 
518
 
 
141
  conn.commit()
142
 
143
  def initialize_llm():
144
+ """Initialize the model in the simplest way possible"""
145
+ try:
146
+ # Get API token
147
+ api_token = os.environ.get('HF_TOKEN')
148
+ if not api_token:
149
+ raise ValueError("No API token found")
150
+
151
+ # Initialize with minimal settings
152
+ llm = HuggingFaceHub(
153
+ repo_id="google/flan-t5-base", # Using a simpler model for testing
154
+ huggingfacehub_api_token=api_token
155
+ )
156
+
157
+ # Quick test
158
+ test = llm("Say hello")
159
+ print(f"Test response: {test}")
160
+
161
+ return llm
162
+
163
+ except Exception as e:
164
+ print(f"LLM initialization error: {str(e)}")
165
+ raise
 
 
 
 
 
166
 
167
 
168
  class ChatSession:
 
471
  @app.route("/api/chat", methods=["POST"])
472
  def chat():
473
  try:
474
+ # Get the user's message
475
  data = request.json
476
  user_input = data.get("message", "")
 
 
477
  print(f"Received message: {user_input}")
478
 
479
+ # Very simple prompt for testing
480
+ prompt = f"You are a helpful assistant. Keep your response short and simple. User says: {user_input}"
 
 
 
 
 
 
481
 
 
482
  try:
483
+ # Get response directly from model
484
  response = llm(prompt)
485
+ print(f"Raw response received: {response}")
486
 
487
+ # Return whatever response we get
488
  return jsonify({
489
  "success": True,
490
  "response": response
491
  })
492
 
493
  except Exception as e:
494
+ print(f"Model error: {str(e)}")
495
+ # Return a simple error message
496
  return jsonify({
497
  "success": False,
498
+ "response": "Sorry, I couldn't generate a response right now."
499
  })
500
 
501
  except Exception as e:
502
+ print(f"Request error: {str(e)}")
503
  return jsonify({
504
  "success": False,
505
+ "response": "Sorry, there was a problem with your request."
506
  })
507
 
508