Samhugs07 committed on
Commit
199f428
·
1 Parent(s): 9df6909

Updated app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -32
app.py CHANGED
@@ -3,7 +3,7 @@
3
 
4
  # # Occasio - Event Management Assistant
5
 
6
- # In[3]:
7
 
8
 
9
  # imports
@@ -19,7 +19,7 @@ import google.generativeai as genai
19
  import gradio as gr
20
 
21
 
22
- # In[4]:
23
 
24
 
25
  # Load environment variables in a file called .env
@@ -46,7 +46,7 @@ else:
46
  print("Google API Key not set")
47
 
48
 
49
- # In[5]:
50
 
51
 
52
  # Connect to OpenAI, Anthropic and Google
@@ -61,7 +61,7 @@ genai.configure()
61
  GOOGLE_MODEL = "gemini-2.0-flash"
62
 
63
 
64
- # In[173]:
65
 
66
 
67
  system_message = (
@@ -78,7 +78,7 @@ system_message = (
78
  )
79
 
80
 
81
- # In[147]:
82
 
83
 
84
  # system_message = "You are called \"EventAI\", a virtual assistant for an Elementary school called Eagle Elementary School. You will help users by giving \
@@ -90,7 +90,7 @@ system_message = (
90
  # list the questions and respond"
91
 
92
 
93
- # In[148]:
94
 
95
 
96
  # Some imports for handling images
@@ -100,7 +100,7 @@ from io import BytesIO
100
  from PIL import Image
101
 
102
 
103
- # In[149]:
104
 
105
 
106
  def artist(event_text):
@@ -116,7 +116,7 @@ def artist(event_text):
116
  return Image.open(BytesIO(image_data))
117
 
118
 
119
- # In[150]:
120
 
121
 
122
  import base64
@@ -138,7 +138,7 @@ def talker(message):
138
  return output_filename
139
 
140
 
141
- # In[151]:
142
 
143
 
144
  school_events = [
@@ -257,7 +257,7 @@ school_events = [
257
  ]
258
 
259
 
260
- # In[152]:
261
 
262
 
263
  def get_event_details(query):
@@ -279,7 +279,7 @@ def get_event_details(query):
279
  #
280
  # Well, kinda.
281
 
282
- # In[153]:
283
 
284
 
285
  # for claude
@@ -302,7 +302,7 @@ tools_claude = [
302
  ]
303
 
304
 
305
- # In[154]:
306
 
307
 
308
  # For GPT
@@ -324,14 +324,14 @@ events_function_gpt = {
324
  }
325
 
326
 
327
- # In[155]:
328
 
329
 
330
  # And this is included in a list of tools:
331
  tools_gpt = [{"type": "function", "function": events_function_gpt}]
332
 
333
 
334
- # In[156]:
335
 
336
 
337
  #Gemini function declaration structure
@@ -366,11 +366,11 @@ gemini_event_details = [{
366
  ]
367
 
368
 
369
- # In[178]:
370
 
371
 
372
  def chat_claude(history):
373
- print(f"\nhistory is {history}\n")
374
  #Claude doesn't take any other key value pair other than role and content. Hence filtering only those key value pairs
375
  history_claude = list({"role": msg["role"], "content": msg["content"]} for msg in history if "role" in msg and "content" in msg)
376
  #history is [{'role': 'user', 'metadata': None, 'content': 'when is pta', 'options': None}]
@@ -383,7 +383,7 @@ def chat_claude(history):
383
  tools=tools_claude
384
  )
385
  image = None
386
- print(f"Claude's message is \n {pprint.pprint(message)}\n")
387
  try:
388
  if message.stop_reason == "tool_use":
389
  tool_use = next(block for block in message.content if block.type == "tool_use")
@@ -392,7 +392,7 @@ def chat_claude(history):
392
  tool_result = handle_tool_call(event_text)
393
  #tool_result = handle_tool_call(tool_use, "Claude")
394
 
395
- print(f"Tool Result: {tool_result}")
396
 
397
  response = claude.messages.stream(
398
  model=ANTHROPIC_MODEL,
@@ -449,11 +449,11 @@ def chat_claude(history):
449
 
450
 
451
 
452
- # In[177]:
453
 
454
 
455
  def chat_gpt(history):
456
- print(f"\nhistory is {history}\n")
457
  messages = [{"role": "system", "content": system_message}] + history
458
  response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages, tools=tools_gpt)
459
  image = None
@@ -498,16 +498,16 @@ def chat_gpt(history):
498
  yield error_message, None
499
 
500
 
501
- # In[176]:
502
 
503
 
504
  def chat_gemini(history):
505
- print(f"\nhistory is {history}\n")
506
  history_gemini = [{'role': m['role'], 'parts': [{'text': m['content']}]} if 'content' in m #if content exists, change it to parts format
507
  else {'role': m['role'], 'parts': m['parts']} if 'parts' in m #else if parts exists, just copy it as it is
508
  else {'role': m['role']} for m in history] #else neither content nor parts exists, copy only the role ignoring all other keys like metadata, options etc
509
 
510
- print(f"\nhistory_gemini is {history_gemini}\n")
511
  model = genai.GenerativeModel(
512
  model_name=GOOGLE_MODEL,
513
  system_instruction=system_message
@@ -530,7 +530,7 @@ def chat_gemini(history):
530
  image = artist(event_text)
531
  tool_result = handle_tool_call(event_text)
532
 
533
- print(f"\ntool_result is {tool_result}\n")
534
  stream = model.generate_content(
535
  "Based on this information `" + tool_result + "`, extract the details of the event and provide the event details to the user",
536
  stream=True
@@ -561,7 +561,7 @@ def chat_gemini(history):
561
 
562
 
563
 
564
- # In[168]:
565
 
566
 
567
  def call_and_process_model_responses(fn_name, chatbot):#, response, image):
@@ -577,13 +577,13 @@ def call_and_process_model_responses(fn_name, chatbot):#, response, image):
577
 
578
 
579
 
580
- # In[169]:
581
 
582
 
583
  def handle_tool_call(event_text):
584
- print(f"event text is {event_text}")
585
  event_found = get_event_details(event_text)
586
- print(f"event_found is {event_found}")
587
 
588
  if event_found:
589
  response = json.dumps({"name": event_found['name'],"description": event_found['description'], "when": event_found['date_time'], "where": event_found['location']})
@@ -593,7 +593,7 @@ def handle_tool_call(event_text):
593
 
594
 
595
 
596
- # In[170]:
597
 
598
 
599
  def process_chosen_model(chatbot, model):
@@ -610,7 +610,7 @@ def process_chosen_model(chatbot, model):
610
 
611
 
612
 
613
- # In[174]:
614
 
615
 
616
  # More involved Gradio code as we're not using the preset Chat interface!
@@ -653,10 +653,10 @@ with gr.Blocks(css="""
653
  lambda chat: talker(chat[-1]["content"]), inputs=[chatbot], outputs=gr.Audio(autoplay=True, visible=False)
654
  )
655
 
656
- clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)
657
 
658
 
659
- # In[175]:
660
 
661
 
662
  ui.launch(inbrowser=True)
 
3
 
4
  # # Occasio - Event Management Assistant
5
 
6
+ # In[ ]:
7
 
8
 
9
  # imports
 
19
  import gradio as gr
20
 
21
 
22
+ # In[ ]:
23
 
24
 
25
  # Load environment variables in a file called .env
 
46
  print("Google API Key not set")
47
 
48
 
49
+ # In[ ]:
50
 
51
 
52
  # Connect to OpenAI, Anthropic and Google
 
61
  GOOGLE_MODEL = "gemini-2.0-flash"
62
 
63
 
64
+ # In[ ]:
65
 
66
 
67
  system_message = (
 
78
  )
79
 
80
 
81
+ # In[ ]:
82
 
83
 
84
  # system_message = "You are called \"EventAI\", a virtual assistant for an Elementary school called Eagle Elementary School. You will help users by giving \
 
90
  # list the questions and respond"
91
 
92
 
93
+ # In[ ]:
94
 
95
 
96
  # Some imports for handling images
 
100
  from PIL import Image
101
 
102
 
103
+ # In[ ]:
104
 
105
 
106
  def artist(event_text):
 
116
  return Image.open(BytesIO(image_data))
117
 
118
 
119
+ # In[ ]:
120
 
121
 
122
  import base64
 
138
  return output_filename
139
 
140
 
141
+ # In[ ]:
142
 
143
 
144
  school_events = [
 
257
  ]
258
 
259
 
260
+ # In[ ]:
261
 
262
 
263
  def get_event_details(query):
 
279
  #
280
  # Well, kinda.
281
 
282
+ # In[ ]:
283
 
284
 
285
  # for claude
 
302
  ]
303
 
304
 
305
+ # In[ ]:
306
 
307
 
308
  # For GPT
 
324
  }
325
 
326
 
327
+ # In[ ]:
328
 
329
 
330
  # And this is included in a list of tools:
331
  tools_gpt = [{"type": "function", "function": events_function_gpt}]
332
 
333
 
334
+ # In[ ]:
335
 
336
 
337
  #Gemini function declaration structure
 
366
  ]
367
 
368
 
369
+ # In[ ]:
370
 
371
 
372
  def chat_claude(history):
373
+ #print(f"\nhistory is {history}\n")
374
  #Claude doesn't take any other key value pair other than role and content. Hence filtering only those key value pairs
375
  history_claude = list({"role": msg["role"], "content": msg["content"]} for msg in history if "role" in msg and "content" in msg)
376
  #history is [{'role': 'user', 'metadata': None, 'content': 'when is pta', 'options': None}]
 
383
  tools=tools_claude
384
  )
385
  image = None
386
+ #print(f"Claude's message is \n {pprint.pprint(message)}\n")
387
  try:
388
  if message.stop_reason == "tool_use":
389
  tool_use = next(block for block in message.content if block.type == "tool_use")
 
392
  tool_result = handle_tool_call(event_text)
393
  #tool_result = handle_tool_call(tool_use, "Claude")
394
 
395
+ #print(f"Tool Result: {tool_result}")
396
 
397
  response = claude.messages.stream(
398
  model=ANTHROPIC_MODEL,
 
449
 
450
 
451
 
452
+ # In[ ]:
453
 
454
 
455
  def chat_gpt(history):
456
+ #print(f"\nhistory is {history}\n")
457
  messages = [{"role": "system", "content": system_message}] + history
458
  response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages, tools=tools_gpt)
459
  image = None
 
498
  yield error_message, None
499
 
500
 
501
+ # In[ ]:
502
 
503
 
504
  def chat_gemini(history):
505
+ #print(f"\nhistory is {history}\n")
506
  history_gemini = [{'role': m['role'], 'parts': [{'text': m['content']}]} if 'content' in m #if content exists, change it to parts format
507
  else {'role': m['role'], 'parts': m['parts']} if 'parts' in m #else if parts exists, just copy it as it is
508
  else {'role': m['role']} for m in history] #else neither content nor parts exists, copy only the role ignoring all other keys like metadata, options etc
509
 
510
+ #print(f"\nhistory_gemini is {history_gemini}\n")
511
  model = genai.GenerativeModel(
512
  model_name=GOOGLE_MODEL,
513
  system_instruction=system_message
 
530
  image = artist(event_text)
531
  tool_result = handle_tool_call(event_text)
532
 
533
+ #print(f"\ntool_result is {tool_result}\n")
534
  stream = model.generate_content(
535
  "Based on this information `" + tool_result + "`, extract the details of the event and provide the event details to the user",
536
  stream=True
 
561
 
562
 
563
 
564
+ # In[ ]:
565
 
566
 
567
  def call_and_process_model_responses(fn_name, chatbot):#, response, image):
 
577
 
578
 
579
 
580
+ # In[ ]:
581
 
582
 
583
  def handle_tool_call(event_text):
584
+ #print(f"event text is {event_text}")
585
  event_found = get_event_details(event_text)
586
+ #print(f"event_found is {event_found}")
587
 
588
  if event_found:
589
  response = json.dumps({"name": event_found['name'],"description": event_found['description'], "when": event_found['date_time'], "where": event_found['location']})
 
593
 
594
 
595
 
596
+ # In[ ]:
597
 
598
 
599
  def process_chosen_model(chatbot, model):
 
610
 
611
 
612
 
613
+ # In[ ]:
614
 
615
 
616
  # More involved Gradio code as we're not using the preset Chat interface!
 
653
  lambda chat: talker(chat[-1]["content"]), inputs=[chatbot], outputs=gr.Audio(autoplay=True, visible=False)
654
  )
655
 
656
+ clear.click(lambda: (None, None), inputs=None, outputs=[image_output, chatbot], queue=False)
657
 
658
 
659
+ # In[ ]:
660
 
661
 
662
  ui.launch(inbrowser=True)