Chia Woon Yap committed on
Commit
d4b5be8
·
verified ·
1 Parent(s): 86c9106

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +86 -0
app.py CHANGED
@@ -370,6 +370,9 @@ def handle_small_talk(query):
370
  else:
371
  return "I'm here to help with HDB price predictions and information. How can I assist you today?"
372
 
 
 
 
373
  def chat_with_llm(query, chat_history):
374
  """Handle chat queries about HDB pricing and small talk"""
375
  if not groq_api_key or client is None:
@@ -421,7 +424,90 @@ def chat_with_llm(query, chat_history):
421
  response = f"I need more information to make a prediction. Please provide: {missing_list}."
422
  chat_history.append((query, response))
423
  return response, chat_history
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
424
 
 
 
 
 
 
 
 
 
 
 
 
 
 
425
  # If we have all parameters, make a prediction
426
  try:
427
  # Convert string numbers to appropriate types
 
370
  else:
371
  return "I'm here to help with HDB price predictions and information. How can I assist you today?"
372
 
373
+
374
+
375
+ """
376
  def chat_with_llm(query, chat_history):
377
  """Handle chat queries about HDB pricing and small talk"""
378
  if not groq_api_key or client is None:
 
424
  response = f"I need more information to make a prediction. Please provide: {missing_list}."
425
  chat_history.append((query, response))
426
  return response, chat_history
427
+ """
428
+ #start here
429
+
430
+ def chat_with_llm(query, chat_history):
431
+ """Handle chat queries about HDB pricing and small talk"""
432
+ if not groq_api_key or client is None:
433
+ return "Please set GROQ_API_KEY...", chat_history
434
+
435
+ # 1. First, check for small talk
436
+ if is_small_talk(query):
437
+ response = handle_small_talk(query)
438
+ chat_history.append((query, response))
439
+ return response, chat_history
440
+
441
+ # 2. Check if the query is a clear request for a general explanation/trend (not a specific price)
442
+ is_general_query = any(keyword in query.lower() for keyword in [
443
+ 'trend', 'overview', 'how are', 'what are', 'like in', 'average',
444
+ 'over the years', 'market', 'compare'
445
+ ])
446
+
447
+ # 3. If it's a general query, use the LLM to answer it directly
448
+ if is_general_query:
449
+ try:
450
+ completion = client.chat.completions.create(
451
+ model="llama-3.3-70b-versatile",
452
+ messages=[
453
+ {
454
+ "role": "system",
455
+ "content": "You are a helpful assistant specialized in HDB (Housing & Development Board) properties in Singapore. Provide accurate, helpful information about HDB prices, policies, and market trends. Use the provided context if available."
456
+ },
457
+ {
458
+ "role": "user",
459
+ "content": f"Based on general HDB market knowledge, answer this question: {query}"
460
+ }
461
+ ],
462
+ temperature=0.3,
463
+ max_tokens=500
464
+ )
465
+ response = completion.choices[0].message.content
466
+ chat_history.append((query, response))
467
+ return response, chat_history
468
+ except Exception as e:
469
+ error_msg = f"I encountered an error. Please try again later."
470
+ chat_history.append((query, error_msg))
471
+ return error_msg, chat_history
472
+
473
+ # 4. If it's not clearly general, try to extract parameters for a specific prediction
474
+ params = extract_parameters_from_query(query)
475
+
476
+ if "error" in params:
477
+ # If extraction failed, fall back to general Q&A
478
+ return answer_general_hdb_question(query, chat_history)
479
+
480
+ # 5. Check what we got back from parameter extraction
481
+ extracted_params = {k: v for k, v in params.items() if v is not None}
482
+ required_for_prediction = ['town', 'flat_type', 'floor_area_sqm', 'storey_level', 'flat_age']
483
+
484
+ # 6. If the user only provided a town or one other parameter, it's likely a general question.
485
+ if len(extracted_params) < 3: # e.g., if only 'town' and 'flat_type' are provided
486
+ # Ask a clarifying question or provide a general overview
487
+ if 'town' in extracted_params:
488
+ town = extracted_params['town']
489
+ # You could add a pre-generated fact here, e.g., average price for that town from the dataset
490
+ response = f"You asked about {town}. HDB prices can vary widely based on flat type, size, age, and specific location within the town. "
491
+ response += f"For example, are you interested in 4-Room or 5-Room flats? What's your budget or preferred size? "
492
+ response += "Alternatively, I can give you a prediction if you provide more details like flat type, size, and age."
493
+ else:
494
+ response = "I specialize in HDB price predictions and information. Could you provide more details about the property you're interested in (e.g., town, flat type, size) so I can give you a accurate estimate or information?"
495
+ chat_history.append((query, response))
496
+ return response, chat_history
497
 
498
+ # 7. If we have most parameters, ask for the missing ones specifically
499
+ missing_params = [param for param in required_for_prediction if params.get(param) is None]
500
+ if missing_params:
501
+ missing_list = ", ".join(missing_params)
502
+ response = f"I'd be happy to predict a price for you. I just need a few more details: {missing_list}."
503
+ chat_history.append((query, response))
504
+ return response, chat_history
505
+
506
+ # 8. If we have all parameters, make the prediction!
507
+ # ... (rest of the prediction code remains the same)
508
+
509
+
510
+
511
  # If we have all parameters, make a prediction
512
  try:
513
  # Convert string numbers to appropriate types