vrindagopinath committed on
Commit
a3f2307
·
verified ·
1 Parent(s): 09edfea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -161
app.py CHANGED
@@ -1,6 +1,3 @@
1
- # Complete Malayalam Hospital Booking Chatbot using Llama 3.1-8B-Instruct
2
- # with HuggingFace Transformers Library in Google Colab
3
-
4
  import os
5
  import json
6
  import gradio as gr
@@ -11,23 +8,17 @@ import uuid
11
  import re
12
  import time
13
  import random
 
 
14
  from transformers import AutoModelForCausalLM, AutoTokenizer
15
- from google.colab import auth
16
- from googleapiclient.discovery import build
17
 
18
  # Set up timezone for India
19
  IST = pytz.timezone('Asia/Kolkata')
20
 
21
  # ===== CONFIGURATION =====
22
 
23
- # Path to store the model locally (to avoid re-downloading)
24
- MODEL_PATH = "/content/llama-3.1-8b-instruct"
25
-
26
- # Replace with your actual Hugging Face token
27
- HF_TOKEN = "" # Will be set via Colab input
28
-
29
- # Google Calendar API scopes
30
- SCOPES = ['https://www.googleapis.com/auth/calendar']
31
 
32
  # Available doctors and departments for booking
33
  available_doctors = {
@@ -78,7 +69,7 @@ function_definitions = [
78
  },
79
  {
80
  "name": "book_appointment",
81
- "description": "Book an appointment with a doctor and add it to Google Calendar",
82
  "parameters": {
83
  "type": "object",
84
  "properties": {
@@ -171,8 +162,8 @@ def check_time_slots(doctor_name, date):
171
  "available_slots": available_slots
172
  }
173
 
174
- def book_appointment(appointment_details, calendar_service):
175
- """Book an appointment with a doctor and add it to Google Calendar"""
176
  try:
177
  # Validate the appointment details first
178
  doctor_exists = False
@@ -205,44 +196,8 @@ def book_appointment(appointment_details, calendar_service):
205
  start_datetime = datetime.datetime(year, month, day, hours, minutes, 0, tzinfo=IST)
206
  end_datetime = start_datetime + datetime.timedelta(minutes=30) # 30 minutes appointment
207
 
208
- # Create the calendar event
209
- event = {
210
- 'summary': f"Medical appointment with {appointment_details['doctor_name']}",
211
- 'location': 'City Hospital, Kochi, Kerala',
212
- 'description': appointment_details.get('description', 'Regular checkup'),
213
- 'start': {
214
- 'dateTime': start_datetime.isoformat(),
215
- 'timeZone': 'Asia/Kolkata',
216
- },
217
- 'end': {
218
- 'dateTime': end_datetime.isoformat(),
219
- 'timeZone': 'Asia/Kolkata',
220
- },
221
- 'attendees': [
222
- {'email': 'doctor@cityhospital.com'},
223
- {'email': 'patient@example.com'} # In a real app, use actual email
224
- ],
225
- 'reminders': {
226
- 'useDefault': False,
227
- 'overrides': [
228
- {'method': 'email', 'minutes': 24 * 60},
229
- {'method': 'popup', 'minutes': 60},
230
- ],
231
- },
232
- }
233
-
234
- # Add to Google Calendar
235
- if calendar_service:
236
- try:
237
- event = calendar_service.events().insert(calendarId='primary', body=event).execute()
238
- appointment_id = event['id']
239
- except Exception as e:
240
- print(f"Calendar service error: {e}")
241
- # Generate a mock ID if calendar service fails
242
- appointment_id = str(uuid.uuid4())
243
- else:
244
- # If no calendar service, generate a mock ID
245
- appointment_id = str(uuid.uuid4())
246
 
247
  # Store in local database
248
  appointments_db[appointment_id] = {
@@ -253,6 +208,8 @@ def book_appointment(appointment_details, calendar_service):
253
  "date": appointment_details["date"],
254
  "time": appointment_details["time"],
255
  "description": appointment_details.get("description", ""),
 
 
256
  }
257
 
258
  return {
@@ -274,7 +231,7 @@ def book_appointment(appointment_details, calendar_service):
274
  "message": f"Failed to book appointment: {str(e)}"
275
  }
276
 
277
- def cancel_appointment(appointment_id, patient_phone, calendar_service):
278
  """Cancel an existing appointment"""
279
  try:
280
  # Check if appointment exists in our database
@@ -291,14 +248,6 @@ def cancel_appointment(appointment_id, patient_phone, calendar_service):
291
  "message": "Patient phone number does not match our records"
292
  }
293
 
294
- # Delete from Google Calendar
295
- if calendar_service:
296
- try:
297
- calendar_service.events().delete(calendarId='primary', eventId=appointment_id).execute()
298
- except Exception as e:
299
- print(f"Error deleting from calendar: {e}")
300
- # Continue anyway to delete from local database
301
-
302
  # Remove from local database
303
  del appointments_db[appointment_id]
304
 
@@ -312,62 +261,21 @@ def cancel_appointment(appointment_id, patient_phone, calendar_service):
312
  "message": f"Failed to cancel appointment: {str(e)}"
313
  }
314
 
315
- # ===== GOOGLE CALENDAR AUTHENTICATION =====
316
-
317
- def get_calendar_service():
318
- """Authenticate and return the Google Calendar service"""
319
- creds = None
320
-
321
- try:
322
- # Authenticate using Colab's auth helper
323
- auth.authenticate_user()
324
-
325
- # Get credentials from the authenticated Colab user
326
- from google.auth import default
327
- creds, _ = default()
328
-
329
- # Build and return the service
330
- service = build('calendar', 'v3', credentials=creds)
331
- return service
332
- except Exception as e:
333
- print(f"Error authenticating with Google Calendar: {e}")
334
- print("Continuing without Google Calendar integration.")
335
- return None
336
-
337
- # ===== LLAMA 3.1 MODEL SETUP =====
338
 
339
  def load_llama_model():
340
  """Load the Llama 3.1 model and tokenizer"""
341
- model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"
342
-
343
  print("Loading Llama 3.1 model and tokenizer...")
344
 
345
  try:
346
- # Check if model is already downloaded
347
- if os.path.exists(MODEL_PATH):
348
- print(f"Loading model from local path: {MODEL_PATH}")
349
- tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
350
- model = AutoModelForCausalLM.from_pretrained(
351
- MODEL_PATH,
352
- torch_dtype=torch.bfloat16,
353
- device_map="auto",
354
- low_cpu_mem_usage=True
355
- )
356
- else:
357
- print(f"Downloading model from Hugging Face Hub")
358
- tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN)
359
- model = AutoModelForCausalLM.from_pretrained(
360
- model_name,
361
- torch_dtype=torch.bfloat16,
362
- device_map="auto",
363
- low_cpu_mem_usage=True,
364
- token=HF_TOKEN
365
- )
366
-
367
- # Save model locally to avoid re-downloading
368
- print(f"Saving model to: {MODEL_PATH}")
369
- tokenizer.save_pretrained(MODEL_PATH)
370
- model.save_pretrained(MODEL_PATH)
371
 
372
  print("Model loaded successfully!")
373
  return model, tokenizer
@@ -440,9 +348,9 @@ def extract_function_call(response_text):
440
 
441
  return None
442
 
443
- def process_chat(message, chat_history, language, model_tokenizer_calendar):
444
  """Process a chat message, calling functions when necessary"""
445
- model, tokenizer, calendar_service = model_tokenizer_calendar
446
 
447
  try:
448
  # Create system prompt based on language preference
@@ -505,9 +413,9 @@ Current language preference: {language}"""
505
  elif function_name == "check_time_slots" and "doctor_name" in function_args and "date" in function_args:
506
  function_result = check_time_slots(function_args["doctor_name"], function_args["date"])
507
  elif function_name == "book_appointment":
508
- function_result = book_appointment(function_args, calendar_service)
509
  elif function_name == "cancel_appointment" and "appointment_id" in function_args and "patient_phone" in function_args:
510
- function_result = cancel_appointment(function_args["appointment_id"], function_args["patient_phone"], calendar_service)
511
  else:
512
  function_result = {"error": "Invalid function call or missing parameters"}
513
 
@@ -562,7 +470,7 @@ Current language preference: {language}"""
562
 
563
  # ===== GRADIO INTERFACE =====
564
 
565
- def create_gradio_interface(model, tokenizer, calendar_service):
566
  """Create the Gradio interface for the chatbot"""
567
 
568
  with gr.Blocks(css="""
@@ -627,7 +535,7 @@ def create_gradio_interface(model, tokenizer, calendar_service):
627
  # Set up event handlers
628
  submit.click(
629
  process_chat,
630
- inputs=[msg, chat_history, language, gr.State((model, tokenizer, calendar_service))],
631
  outputs=[chatbot, chat_history]
632
  ).then(
633
  lambda: "",
@@ -637,7 +545,7 @@ def create_gradio_interface(model, tokenizer, calendar_service):
637
 
638
  msg.submit(
639
  process_chat,
640
- inputs=[msg, chat_history, language, gr.State((model, tokenizer, calendar_service))],
641
  outputs=[chatbot, chat_history]
642
  ).then(
643
  lambda: "",
@@ -674,8 +582,8 @@ def create_gradio_interface(model, tokenizer, calendar_service):
674
 
675
  # Initial welcome message
676
  demo.load(
677
- lambda: ([("", "Hello! I'm the online assistant for City Hospital. How can I help you today?")],
678
- [("", "Hello! I'm the online assistant for City Hospital. How can I help you today?")]),
679
  inputs=None,
680
  outputs=[chatbot, chat_history]
681
  )
@@ -684,44 +592,15 @@ def create_gradio_interface(model, tokenizer, calendar_service):
684
 
685
  # ===== MAIN EXECUTION =====
686
 
687
- def main():
688
- global HF_TOKEN
689
 
690
- print("===== Malayalam Hospital Booking Chatbot =====")
691
- print("Using Llama 3.1-8B-Instruct with Google Calendar integration")
692
 
693
- # Install required packages in Colab
694
- try:
695
- import IPython
696
- print("Installing required packages...")
697
- IPython.get_ipython().system('pip install transformers>=4.37.0')
698
- IPython.get_ipython().system('pip install accelerate>=0.25.0')
699
- IPython.get_ipython().system('pip install bitsandbytes>=0.41.0')
700
- IPython.get_ipython().system('pip install sentencepiece>=0.1.99')
701
- IPython.get_ipython().system('pip install gradio==3.50.2')
702
- IPython.get_ipython().system('pip install google-auth google-auth-oauthlib google-auth-httplib2')
703
- IPython.get_ipython().system('pip install google-api-python-client')
704
- IPython.get_ipython().system('pip install pytz')
705
- print("All packages installed successfully!")
706
- except:
707
- print("Not running in IPython environment or packages already installed.")
708
-
709
- # Get HF token from user input
710
- HF_TOKEN = input("Enter your Hugging Face token with access to meta-llama models: ")
711
-
712
- # Load the Llama model and tokenizer
713
- model, tokenizer = load_llama_model()
714
-
715
- if model is None or tokenizer is None:
716
- print("Failed to load the model. Please check your Hugging Face token and try again.")
717
- return
718
-
719
- # Get calendar service
720
- calendar_service = get_calendar_service()
721
-
722
- # Create and launch the Gradio interface
723
- demo = create_gradio_interface(model, tokenizer, calendar_service)
724
- demo.launch(share=True, debug=True)
725
-
726
- if __name__ == "__main__":
727
- main()
 
 
 
 
1
  import os
2
  import json
3
  import gradio as gr
 
8
  import re
9
  import time
10
  import random
11
+ import pandas as pd
12
+ import numpy as np
13
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
 
14
 
15
  # Set up timezone for India
16
  IST = pytz.timezone('Asia/Kolkata')
17
 
18
  # ===== CONFIGURATION =====
19
 
20
+ # Model ID on Hugging Face
21
+ MODEL_ID = "meta-llama/Meta-Llama-3.1-8B-Instruct"
 
 
 
 
 
 
22
 
23
  # Available doctors and departments for booking
24
  available_doctors = {
 
69
  },
70
  {
71
  "name": "book_appointment",
72
+ "description": "Book an appointment with a doctor",
73
  "parameters": {
74
  "type": "object",
75
  "properties": {
 
162
  "available_slots": available_slots
163
  }
164
 
165
+ def book_appointment(appointment_details):
166
+ """Book an appointment with a doctor"""
167
  try:
168
  # Validate the appointment details first
169
  doctor_exists = False
 
196
  start_datetime = datetime.datetime(year, month, day, hours, minutes, 0, tzinfo=IST)
197
  end_datetime = start_datetime + datetime.timedelta(minutes=30) # 30 minutes appointment
198
 
199
+ # Generate a unique appointment ID
200
+ appointment_id = str(uuid.uuid4())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
 
202
  # Store in local database
203
  appointments_db[appointment_id] = {
 
208
  "date": appointment_details["date"],
209
  "time": appointment_details["time"],
210
  "description": appointment_details.get("description", ""),
211
+ "start_datetime": start_datetime,
212
+ "end_datetime": end_datetime
213
  }
214
 
215
  return {
 
231
  "message": f"Failed to book appointment: {str(e)}"
232
  }
233
 
234
+ def cancel_appointment(appointment_id, patient_phone):
235
  """Cancel an existing appointment"""
236
  try:
237
  # Check if appointment exists in our database
 
248
  "message": "Patient phone number does not match our records"
249
  }
250
 
 
 
 
 
 
 
 
 
251
  # Remove from local database
252
  del appointments_db[appointment_id]
253
 
 
261
  "message": f"Failed to cancel appointment: {str(e)}"
262
  }
263
 
264
+ # ===== LLAMA MODEL SETUP =====
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
265
 
266
  def load_llama_model():
267
  """Load the Llama 3.1 model and tokenizer"""
 
 
268
  print("Loading Llama 3.1 model and tokenizer...")
269
 
270
  try:
271
+ # Use 4-bit quantization for smaller memory footprint
272
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
273
+ model = AutoModelForCausalLM.from_pretrained(
274
+ MODEL_ID,
275
+ torch_dtype=torch.bfloat16,
276
+ device_map="auto",
277
+ load_in_4bit=True
278
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
279
 
280
  print("Model loaded successfully!")
281
  return model, tokenizer
 
348
 
349
  return None
350
 
351
+ def process_chat(message, chat_history, language, model_tokenizer):
352
  """Process a chat message, calling functions when necessary"""
353
+ model, tokenizer = model_tokenizer
354
 
355
  try:
356
  # Create system prompt based on language preference
 
413
  elif function_name == "check_time_slots" and "doctor_name" in function_args and "date" in function_args:
414
  function_result = check_time_slots(function_args["doctor_name"], function_args["date"])
415
  elif function_name == "book_appointment":
416
+ function_result = book_appointment(function_args)
417
  elif function_name == "cancel_appointment" and "appointment_id" in function_args and "patient_phone" in function_args:
418
+ function_result = cancel_appointment(function_args["appointment_id"], function_args["patient_phone"])
419
  else:
420
  function_result = {"error": "Invalid function call or missing parameters"}
421
 
 
470
 
471
  # ===== GRADIO INTERFACE =====
472
 
473
+ def create_gradio_interface(model, tokenizer):
474
  """Create the Gradio interface for the chatbot"""
475
 
476
  with gr.Blocks(css="""
 
535
  # Set up event handlers
536
  submit.click(
537
  process_chat,
538
+ inputs=[msg, chat_history, language, gr.State((model, tokenizer))],
539
  outputs=[chatbot, chat_history]
540
  ).then(
541
  lambda: "",
 
545
 
546
  msg.submit(
547
  process_chat,
548
+ inputs=[msg, chat_history, language, gr.State((model, tokenizer))],
549
  outputs=[chatbot, chat_history]
550
  ).then(
551
  lambda: "",
 
582
 
583
  # Initial welcome message
584
  demo.load(
585
+ lambda: ([("", "Hello! I'm the online assistant for City Hospital. How can I help you today?")],
586
+ [("", "Hello! I'm the online assistant for City Hospital. How can I help you today?")]),
587
  inputs=None,
588
  outputs=[chatbot, chat_history]
589
  )
 
592
 
593
  # ===== MAIN EXECUTION =====
594
 
595
+ print("===== Malayalam Hospital Booking Chatbot =====")
596
+ print("Using Llama 3.1-8B-Instruct for Hugging Face Spaces")
597
 
598
+ # Load the Llama model and tokenizer
599
+ model, tokenizer = load_llama_model()
600
 
601
+ if model is None or tokenizer is None:
602
+ raise ValueError("Failed to load the model. Please check configuration and try again.")
603
+
604
+ # Create and launch the Gradio interface
605
+ demo = create_gradio_interface(model, tokenizer)
606
+ demo.launch()