Fred808 committed on
Commit
dba6fba
·
verified ·
1 Parent(s): 1d6af17

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -98
app.py CHANGED
@@ -235,29 +235,28 @@ def stream_text_completion(prompt: str):
235
  except Exception as e:
236
  yield f"Error: {str(e)}" # Handle errors gracefully
237
 
238
- def stream_image_completion(image_b64: str):
239
- invoke_url = "https://ai.api.nvidia.com/v1/gr/meta/llama-3.2-90b-vision-instruct/chat/completions"
240
- headers = {
241
- "Authorization": f"Bearer {NVIDIA_API_KEY}",
242
- "Accept": "text/event-stream"
243
- }
244
- payload = {
245
- "model": "meta/llama-3.2-90b-vision-instruct",
246
- "messages": [
247
- {
248
- "role": "user",
249
- "content": f'What is in this image? <img src="data:image/png;base64,{image_b64}" />'
250
- }
251
- ],
252
- "max_tokens": 512,
253
- "temperature": 1.00,
254
- "top_p": 1.00,
255
- "stream": True
256
- }
257
- response = requests.post(invoke_url, headers=headers, json=payload, stream=True)
258
- for line in response.iter_lines():
259
- if line:
260
- yield line.decode("utf-8") + "\n"
261
 
262
  # --- Helper Function for Order Tracking ---
263
  async def log_order_tracking(order_id: str, status: str, message: str = None):
@@ -706,6 +705,7 @@ app = FastAPI()
706
  async def on_startup():
707
  await init_db()
708
 
 
709
  @app.post("/chatbot")
710
  async def chatbot_response(request: Request, background_tasks: BackgroundTasks):
711
  data = await request.json()
@@ -731,38 +731,10 @@ async def chatbot_response(request: Request, background_tasks: BackgroundTasks):
731
  background_tasks.add_task(log_sentiment, user_id, user_message, sentiment_score)
732
  sentiment_modifier = "Great to hear from you! " if sentiment_score > 0.3 else ""
733
 
734
- # --- Order Flow Handling ---
735
- # In the chatbot_response function
736
- if is_order_intent(user_message) or (user_id in user_state and user_state[user_id].flow == "order"):
737
- order_response = await process_order_flow(user_id, user_message)
738
- if order_response:
739
- background_tasks.add_task(log_chat_to_db, user_id, "outbound", order_response)
740
- conversation_context[user_id].append({
741
- "timestamp": datetime.utcnow().isoformat(),
742
- "role": "bot",
743
- "message": order_response
744
- })
745
- return JSONResponse(content={"response": sentiment_modifier + order_response})
746
-
747
- # --- Fallback to NVIDIA LLM ---
748
- recent_context = conversation_context.get(user_id, [])[-5:] # Get the last 5 messages
749
- response_stream = stream_text_completion(user_message, recent_context)
750
- fallback_response = "".join([chunk for chunk in response_stream])
751
-
752
- # Log the bot's response
753
- background_tasks.add_task(log_chat_to_db, user_id, "outbound", fallback_response)
754
- conversation_context[user_id].append({
755
- "timestamp": datetime.utcnow().isoformat(),
756
- "role": "bot",
757
- "message": fallback_response
758
- })
759
-
760
- return JSONResponse(content={"response": sentiment_modifier + fallback_response})
761
-
762
- # --- Menu Display ---
763
- if "menu" in user_message.lower():
764
  if user_id in user_state:
765
- del user_state[user_id]
766
  menu_with_images = []
767
  for index, item in enumerate(menu_items, start=1):
768
  image_url = google_image_scrape(item["name"])
@@ -790,57 +762,34 @@ async def chatbot_response(request: Request, background_tasks: BackgroundTasks):
790
  })
791
  return JSONResponse(content=response_payload)
792
 
793
- # --- Dish Selection via Menu ---
794
- if any(item["name"].lower() in user_message.lower() for item in menu_items) or \
795
- any(str(index) == user_message.strip() for index, item in enumerate(menu_items, start=1)):
796
- selected_dish = None
797
- if user_message.strip().isdigit():
798
- dish_number = int(user_message.strip())
799
- if 1 <= dish_number <= len(menu_items):
800
- selected_dish = menu_items[dish_number - 1]["name"]
801
- else:
802
- for item in menu_items:
803
- if item["name"].lower() in user_message.lower():
804
- selected_dish = item["name"]
805
- break
806
- if selected_dish:
807
- state = ConversationState()
808
- state.flow = "order"
809
- # Set step to 2 since the dish is already selected
810
- state.step = 2
811
- state.data["dish"] = selected_dish
812
- state.update_last_active()
813
- user_state[user_id] = state
814
- response_text = f"You selected {selected_dish}. How many servings would you like?"
815
- background_tasks.add_task(log_chat_to_db, user_id, "outbound", response_text)
816
- conversation_context[user_id].append({
817
- "timestamp": datetime.utcnow().isoformat(),
818
- "role": "bot",
819
- "message": response_text
820
- })
821
- return JSONResponse(content={"response": sentiment_modifier + response_text})
822
- else:
823
- response_text = "Sorry, I couldn't find that dish in the menu. Please try again."
824
- background_tasks.add_task(log_chat_to_db, user_id, "outbound", response_text)
825
  conversation_context[user_id].append({
826
  "timestamp": datetime.utcnow().isoformat(),
827
  "role": "bot",
828
- "message": response_text
829
  })
830
- return JSONResponse(content={"response": sentiment_modifier + response_text})
831
-
832
-
833
 
834
- # --- Fallback: LLM Response Streaming with Conversation Context ---
835
- recent_context = conversation_context.get(user_id, [])[-5:]
836
  context_str = "\n".join([f"{entry['role'].capitalize()}: {entry['message']}" for entry in recent_context])
837
  prompt = f"Conversation context:\n{context_str}\nUser query: {user_message}\nGenerate a helpful, personalized response for a restaurant chatbot."
838
- def stream_response():
839
- for chunk in stream_text_completion(prompt):
840
- yield chunk
841
- fallback_log = f"LLM fallback response for prompt: {prompt}"
842
- background_tasks.add_task(log_chat_to_db, user_id, "outbound", fallback_log)
843
- return StreamingResponse(stream_response(), media_type="text/plain")
 
 
 
 
 
 
844
 
845
  # --- Other Endpoints (Chat History, Order Details, User Profile, Analytics, Voice, Payment Callback) ---
846
  @app.get("/chat_history/{user_id}")
 
235
  except Exception as e:
236
  yield f"Error: {str(e)}" # Handle errors gracefully
237
 
238
def stream_text_completion(prompt: str):
    """Stream a chat completion for *prompt* from NVIDIA's OpenAI-compatible API.

    Yields response text chunks as they arrive. On any failure a single
    ``"Error: ..."`` string is yielded instead of raising, so callers can
    treat errors as ordinary stream content.
    """
    from openai import OpenAI  # local import: only needed when streaming is used

    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=NVIDIA_API_KEY,
    )
    # SECURITY: never log the raw API key — the previous revision printed
    # NVIDIA_API_KEY verbatim for debugging, which leaks the credential
    # into stdout/log aggregation. That print has been removed.

    try:
        completion = client.chat.completions.create(
            model="meta/llama-3.1-405b-instruct",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.2,
            top_p=0.7,
            max_tokens=1024,
            stream=True,
        )
        for chunk in completion:
            # Some stream events carry no text (e.g. role-only deltas); skip them.
            if chunk.choices[0].delta.content is not None:
                yield chunk.choices[0].delta.content
    except Exception as e:
        yield f"Error: {str(e)}"  # surface the failure in-stream rather than raising
 
260
 
261
  # --- Helper Function for Order Tracking ---
262
  async def log_order_tracking(order_id: str, status: str, message: str = None):
 
705
  async def on_startup():
706
  await init_db()
707
 
708
+ @app.post("/chatbot")
709
  @app.post("/chatbot")
710
  async def chatbot_response(request: Request, background_tasks: BackgroundTasks):
711
  data = await request.json()
 
731
  background_tasks.add_task(log_sentiment, user_id, user_message, sentiment_score)
732
  sentiment_modifier = "Great to hear from you! " if sentiment_score > 0.3 else ""
733
 
734
+ # --- Handle Menu Selection ---
735
+ if user_message.strip() == "1" or "menu" in user_message.lower():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
736
  if user_id in user_state:
737
+ del user_state[user_id] # Clear any existing state
738
  menu_with_images = []
739
  for index, item in enumerate(menu_items, start=1):
740
  image_url = google_image_scrape(item["name"])
 
762
  })
763
  return JSONResponse(content=response_payload)
764
 
765
+ # --- Order Flow Handling ---
766
+ if is_order_intent(user_message) or (user_id in user_state and user_state[user_id].flow == "order"):
767
+ order_response = await process_order_flow(user_id, user_message)
768
+ if order_response:
769
+ background_tasks.add_task(log_chat_to_db, user_id, "outbound", order_response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
770
  conversation_context[user_id].append({
771
  "timestamp": datetime.utcnow().isoformat(),
772
  "role": "bot",
773
+ "message": order_response
774
  })
775
+ return JSONResponse(content={"response": sentiment_modifier + order_response})
 
 
776
 
777
+ # --- Fallback to NVIDIA LLM ---
778
+ recent_context = conversation_context.get(user_id, [])[-5:] # Get the last 5 messages
779
  context_str = "\n".join([f"{entry['role'].capitalize()}: {entry['message']}" for entry in recent_context])
780
  prompt = f"Conversation context:\n{context_str}\nUser query: {user_message}\nGenerate a helpful, personalized response for a restaurant chatbot."
781
+ response_stream = stream_text_completion(prompt) # Pass only the prompt
782
+ fallback_response = "".join([chunk for chunk in response_stream])
783
+
784
+ # Log the bot's response
785
+ background_tasks.add_task(log_chat_to_db, user_id, "outbound", fallback_response)
786
+ conversation_context[user_id].append({
787
+ "timestamp": datetime.utcnow().isoformat(),
788
+ "role": "bot",
789
+ "message": fallback_response
790
+ })
791
+
792
+ return JSONResponse(content={"response": sentiment_modifier + fallback_response})
793
 
794
  # --- Other Endpoints (Chat History, Order Details, User Profile, Analytics, Voice, Payment Callback) ---
795
  @app.get("/chat_history/{user_id}")