Samhugs07 commited on
Commit
9a404a1
·
1 Parent(s): c403ff1

Update space

Browse files
Files changed (2) hide show
  1. app.py +583 -55
  2. requirements.txt +5 -1
app.py CHANGED
@@ -1,64 +1,592 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
 
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
- messages.append({"role": "user", "content": message})
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  response = ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
-
62
-
63
- if __name__ == "__main__":
64
- demo.launch()
 
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Occasio - Event Management Assistant
5
+
6
+ # In[ ]:
7
+
8
+
9
+ # imports
10
+
11
+ import os
12
+ import json
13
+ import time
14
+ import pprint
15
+ from dotenv import load_dotenv
16
+ from openai import OpenAI
17
+ import anthropic
18
+ import google.generativeai as genai
19
  import gradio as gr
 
20
 
 
 
 
 
21
 
22
+ # In[ ]:
23
+
24
+
25
+ # Load environment variables in a file called .env
26
+ # Print the key prefixes to help with any debugging
27
+
28
# Load API keys from .env and report which providers are configured.
load_dotenv()
openai_api_key = os.getenv('OPENAI_API_KEY')
anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')
google_api_key = os.getenv('GOOGLE_API_KEY')

# Print only a short key prefix to help with debugging without leaking secrets.
for provider, key, prefix_len in (
    ("OpenAI", openai_api_key, 8),
    ("Anthropic", anthropic_api_key, 7),
    ("Google", google_api_key, 8),
):
    if key:
        print(f"{provider} API Key exists and begins {key[:prefix_len]}")
    else:
        print(f"{provider} API Key not set")
47
+
48
+
49
+ # In[ ]:
50
+
51
+
52
+ # Connect to OpenAI, Anthropic and Google
53
+
54
# Instantiate one client per provider; model names are pinned here so every
# chat function below stays in sync.
openai = OpenAI()
OPENAI_MODEL = "gpt-4o-mini"

claude = anthropic.Anthropic()
ANTHROPIC_MODEL = "claude-3-haiku-20240307"

genai.configure()
GOOGLE_MODEL = "gemini-2.0-flash"
62
+
63
+
64
+ # In[ ]:
65
+
66
+
67
# System prompt shared by all three models. Assembled from short, focused
# sentences; fixes the original "shcool" typo and the missing separators
# between concatenated fragments ("...information" ran into "You might...").
system_message = (
    'You are called "EventAI", a virtual assistant for an Elementary school '
    "called Eagle Elementary School. You can help users by giving them "
    "details of upcoming school events like event name, description, "
    "location etc. "
)
system_message += "Give short, courteous answers, no more than 2 sentences. "
system_message += (
    "Always be accurate. If you don't know the answer, say so. "
    "Do not make up your own event details information. "
)
system_message += (
    "You might be asked to list the questions asked by the user so far. "
    "In that situation, based on the conversation history provided to you, "
    "list the questions and respond."
)
74
+
75
+
76
+ # In[ ]:
77
+
78
+
79
+ # Some imports for handling images
80
+
81
+ import base64
82
+ from io import BytesIO
83
+ from PIL import Image
84
+
85
+
86
+ # In[ ]:
87
+
88
+
89
def artist(event_text):
    """Generate a kid-friendly pop-art image for *event_text* via DALL-E 3.

    Returns a PIL Image decoded from the base64 payload in the API response.
    """
    prompt = (
        f"An image representing an {event_text}, showing typical activities "
        f"that happen for that {event_text}, in a vibrant pop-art style that "
        "elementary school kids will like"
    )
    image_response = openai.images.generate(
        model="dall-e-3",
        prompt=prompt,
        size="1024x1024",
        n=1,
        response_format="b64_json",
    )
    raw_bytes = base64.b64decode(image_response.data[0].b64_json)
    return Image.open(BytesIO(raw_bytes))
100
+
101
+
102
+ # In[ ]:
103
+
104
+
105
+ import base64
106
+ from io import BytesIO
107
+ from PIL import Image
108
+ from IPython.display import Audio, display
109
+
110
def talker(message):
    """Synthesize speech for *message* with OpenAI TTS and play it back.

    Writes the mp3 to output_audio.mp3, then plays it via IPython's display
    helper. NOTE(review): Audio/display only auto-plays inside a notebook
    front-end — confirm this works in the deployed Gradio environment.
    """
    response = openai.audio.speech.create(
        model="tts-1",
        voice="onyx",
        input=message)

    output_filename = "output_audio.mp3"
    with open(output_filename, "wb") as f:
        f.write(response.content)

    # Play the generated audio
    display(Audio(output_filename, autoplay=True))
123
+
124
+
125
+ # In[ ]:
126
+
127
+
128
# Static catalog of upcoming school events. Every record carries the same
# five keys; get_event_details() searches over name + description.
school_events = [
    {
        "event_id": "pta",
        "name": "Parent Teachers Meeting (PTA/PTM)",
        "description": "Parent teachers meeting (PTA/PTM) to discuss students' progress.",
        "date_time": "Apr 1st, 2025 11 AM",
        "location": "Glove Annexure Hall",
    },
    {
        "event_id": "read aloud",
        "name": "Read Aloud to your class/Reading to your class",
        "description": "Kids can bring their favorite book and read it to their class.",
        "date_time": "Apr 15th, 2025 1 PM",
        "location": "Classroom",
    },
    {
        "event_id": "100 days of school",
        "name": "Celebrating 100 days of school. Dress up time for kids",
        "description": "Kids can dress up as old people and celebrate the milestone with their teachers.",
        "date_time": "May 15th, 2025 11 AM",
        "location": "Classroom",
    },
    {
        "event_id": "Book fair",
        "name": "Scholastic book fair",
        "description": "Kids can purchase their favorite scholastic books.",
        "date_time": "Jun 22nd, 2025 10:30 AM",
        "location": "Library",
    },
    {
        "event_id": "Halloween",
        "name": "Halloween",
        "description": "Kids can dress up as their favorite characters",
        "date_time": "Oct 31st, 2025",
        "location": "Classroom",
    },
    {
        "event_id": "Movie Night",
        "name": "Movie Night",
        "description": "A popular and kids centric movie will be played. Kids and families are welcome.",
        "date_time": "May 3rd, 2025",
        "location": "Main auditorium",
    },
    {
        "event_id": "Intruder Drill",
        "name": "Intruder Drill",
        "description": "State mandated monthly intruder drill to prepare staff and students with necessary safety skills in times of a crisis",
        "date_time": "May 3rd, 2025",
        "location": "Main auditorium",
    },
]
179
+
180
+
181
+ # In[ ]:
182
+
183
+
184
def get_event_details(query):
    """Return the first school event matching *query*, or None.

    A match means every whitespace-separated word of the query appears
    (case-insensitively) in the event's name + description text.
    """
    needles = query.lower().split()
    for event in school_events:
        haystack = (event['name'] + ' ' + event['description']).lower()
        if all(word in haystack for word in needles):
            return event
    return None
191
+
192
+
193
+ # ## Tools
194
+ #
195
+ # Tools are an incredibly powerful feature provided by the frontier LLMs.
196
+ #
197
+ # With tools, you can write a function, and have the LLM call that function as part of its response.
198
+ #
199
+ # Sounds almost spooky.. we're giving it the power to run code on our machine?
200
+ #
201
+ # Well, kinda.
202
+
203
+ # In[ ]:
204
 
 
 
 
 
 
 
 
 
 
205
 
206
# Tool definition in Anthropic's format (input_schema instead of parameters).
# Fixes the "getails" typo and the unclosed quote in the example question.
tools_claude = [
    {
        "name": "get_event_details",
        "description": (
            "Get the details of a particular upcoming event in Eagle "
            "Elementary School. Call this whenever you need to know the "
            "event details, for example when a user asks "
            "'When is the pta meeting scheduled?'"
        ),
        "input_schema": {
            "type": "object",
            "properties": {
                "event_text": {
                    "type": "string",
                    "description": "The event keyword that the user wants to get details on"
                }
            },
            "required": ["event_text"]
        }
    }
]
224
 
 
225
 
226
+ # In[ ]:
227
+
228
+
229
# Tool definition in OpenAI's function-calling format.
# Fixes the "getails" typo and the unclosed quote in the example question.
events_function_gpt = {
    "name": "get_event_details",
    "description": (
        "Get the details of a particular upcoming event in Eagle Elementary "
        "School. Call this whenever you need to know the event details, for "
        "example when a user asks 'When is the pta meeting scheduled?'"
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "event_text": {
                "type": "string",
                "description": "The event keyword that the user wants to get details on",
            },
        },
        "required": ["event_text"],
        "additionalProperties": False
    }
}

# And this is included in a list of tools:
tools_gpt = [{"type": "function", "function": events_function_gpt}]
253
+
254
+
255
+ # In[ ]:
256
+
257
+
258
# Gemini function declaration structure. Two declarations: the real lookup
# and a decoy used to validate that Gemini picks the right function.
# Fixes "wants to details on" grammar and the unclosed quote.
gemini_event_details = [
    {
        "name": "get_event_details",
        "description": (
            "Get the details of a particular upcoming event in Eagle "
            "Elementary School. Call this whenever you need to know the "
            "event details, for example when a user asks "
            "'When is the pta meeting scheduled?'"
        ),
        "parameters": {
            "type": "object",
            "properties": {
                "event_text": {
                    "type": "string",
                    "description": "The event keyword that the user wants details on",
                },
            },
            "required": ["event_text"],
        },
    },
    {
        "name": "get_event_test",
        "description": "This is a test function to validate if the function call picks up the right function if there are multiple functions.",
        "parameters": {
            "type": "object",
            "properties": {
                "event_text": {
                    "type": "string",
                    "description": "The event keyword that the user wants details on",
                },
            },
            "required": ["event_text"],
        },
    },
]
288
+
289
+
290
+ # In[ ]:
291
+
292
+
293
def chat_claude(history):
    """Answer the conversation with Claude, streaming (text, image) tuples.

    Yields partial responses so the Gradio UI can stream; the final yield
    carries the generated event image when a tool was used, else None.
    """
    print(f"\nhistory is {history}\n")
    # Claude doesn't take any other key value pair other than role and content
    # (Gradio adds metadata/options), hence filtering only those pairs.
    history_claude = list({"role": msg["role"], "content": msg["content"]} for msg in history if "role" in msg and "content" in msg)
    message = claude.messages.create(
        model=ANTHROPIC_MODEL,
        max_tokens=1000,
        temperature=0.7,
        system=system_message,
        messages=history_claude,
        tools=tools_claude
    )
    image = None
    # BUG FIX: pprint.pprint() prints and returns None, so the original
    # f-string always rendered "None"; pformat() returns the text instead.
    print(f"Claude's message is \n {pprint.pformat(message)}\n")
    try:
        if message.stop_reason == "tool_use":
            tool_use = next(block for block in message.content if block.type == "tool_use")
            event_text = tool_use.input.get('event_text')
            image = artist(event_text)
            tool_result = handle_tool_call(event_text)

            print(f"Tool Result: {tool_result}")

            # Second round-trip: hand Claude the tool result and stream the reply.
            response = claude.messages.stream(
                model=ANTHROPIC_MODEL,
                max_tokens=4096,
                system=system_message,
                messages=[
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": history[-1].get('content')
                            }
                        ]
                    },
                    {
                        "role": "assistant",
                        "content": message.content
                    },
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "tool_result",
                                "tool_use_id": tool_use.id,
                                "content": tool_result,
                            }
                        ],
                    },
                ],
                tools=tools_claude
            )
            result = ""
            with response as stream:
                for text in stream.text_stream:
                    result += text or ""
                    yield result, None
            talker(result)
            yield result, image
        else:
            # No tool call: pick the first text block and simulate streaming.
            response = next((block.text for block in message.content if hasattr(block, "text")), None)
            chunk_size = 30
            for i in range(0, len(response), chunk_size):
                yield response[:i + chunk_size], None
                time.sleep(0.05)  # Simulate streaming delay
            talker(response)
            yield response, None
    except Exception as e:
        error_message = "Apologies, my server is acting weird. Please try again later."
        print(e)
        yield error_message, None
371
+
372
+
373
+
374
+ # In[ ]:
375
+
376
+
377
def chat_gpt(history):
    """Answer the conversation with GPT, streaming (text, image) tuples.

    Yields partial responses so the Gradio UI can stream; the final yield
    carries the generated event image when a tool was used, else None.
    """
    print(f"\nhistory is {history}\n")
    messages = [{"role": "system", "content": system_message}] + history
    first = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages, tools=tools_gpt)
    image = None
    try:
        choice = first.choices[0]
        if choice.finish_reason == "tool_calls":
            assistant_msg = choice.message
            tool_call = assistant_msg.tool_calls[0]
            event_text = json.loads(tool_call.function.arguments).get('event_text')
            image = artist(event_text)
            # Feed the tool result back to the model and stream the final answer.
            messages.append(assistant_msg)
            messages.append({
                "role": "tool",
                "content": handle_tool_call(event_text),
                "tool_call_id": tool_call.id,
            })
            stream = openai.chat.completions.create(
                model=OPENAI_MODEL,
                messages=messages,
                stream=True
            )
            result = ""
            for chunk in stream:
                result += chunk.choices[0].delta.content or ""
                yield result, None
            talker(result)
            yield result, image
        else:
            # No tool call: chunked yields simulate streaming of the plain reply.
            reply = choice.message.content
            step = 30
            for i in range(0, len(reply), step):
                yield reply[:i + step], None
                time.sleep(0.05)
            talker(reply)
            yield reply, None
    except Exception as e:
        error_message = "Apologies, my server is acting weird. Please try again later."
        print(e)
        yield error_message, None
421
+
422
+
423
+ # In[ ]:
424
+
425
+
426
def chat_gemini(history):
    """Answer the conversation with Gemini, streaming (text, image) tuples.

    Yields partial responses so the Gradio UI can stream; the final yield
    carries the generated event image when a tool was used, else None.
    Fixes the "histroy" typos in the debug logs.
    """
    print(f"\nhistory is {history}\n")
    # Gemini expects 'parts' rather than 'content'; normalize each message and
    # drop extra keys such as metadata/options.
    history_gemini = [{'role': m['role'], 'parts': [{'text': m['content']}]} if 'content' in m  # content -> parts format
                      else {'role': m['role'], 'parts': m['parts']} if 'parts' in m  # already in parts format
                      else {'role': m['role']} for m in history]  # neither: keep only the role

    print(f"\nhistory_gemini is {history_gemini}\n")
    model = genai.GenerativeModel(
        model_name=GOOGLE_MODEL,
        system_instruction=system_message
    )
    response = model.generate_content(
        contents=history_gemini,
        tools=[{
            'function_declarations': gemini_event_details,
        }],
    )

    image = None
    try:
        # Check if the model wants to use a tool
        if response.candidates[0].content.parts[0].function_call:
            function_call = response.candidates[0].content.parts[0].function_call
            event_text = function_call.args.get("event_text")
            image = artist(event_text)
            tool_result = handle_tool_call(event_text)

            print(f"\ntool_result is {tool_result}\n")
            # Second round-trip: give Gemini the tool output and stream the reply.
            stream = model.generate_content(
                "Based on this information `" + tool_result + "`, extract the details of the event and provide the event details to the user",
                stream=True
            )
            result = ""
            for chunk in stream:
                result += chunk.candidates[0].content.parts[0].text or ""
                yield result, None
            talker(result)
            yield result, image
        else:
            # No tool call: chunked yields simulate streaming of the plain reply.
            reply = response.text
            chunk_size = 30
            for i in range(0, len(reply), chunk_size):
                yield reply[:i + chunk_size], None
                time.sleep(0.05)
            talker(reply)
            yield reply, None

    except Exception as e:
        error_message = "Apologies, my server is acting weird. Please try again later."
        print(e)
        yield error_message, None
483
+
484
+
485
+
486
+
487
+
488
+
489
+ # In[ ]:
490
+
491
+
492
+ def call_and_process_model_responses(fn_name, chatbot):#, response, image):
493
  response = ""
494
+ image = None
495
+ for response, image in fn_name(chatbot):
496
+ if chatbot and chatbot[-1]["role"] == "assistant":
497
+ chatbot[-1]["content"] = response # Update the last message
498
+ else:
499
+ chatbot.append({"role": "assistant", "content": response}) # First assistant message
500
+ #print(chatbot)
501
+ yield chatbot, image # Stream updated history to UI
502
+
503
+
504
+
505
+ # In[ ]:
506
+
507
+
508
def handle_tool_call(event_text):
    """Look up *event_text* and return a JSON string for the tool result.

    Returns the matched event's details, or an apology payload when no
    event matches.
    """
    print(f"event text is {event_text}")
    event_found = get_event_details(event_text)
    print(f"event_found is {event_found}")

    if not event_found:
        return json.dumps({"event": f"Sorry, there is no schedule currently for {event_text}"})
    return json.dumps({
        "name": event_found['name'],
        "description": event_found['description'],
        "when": event_found['date_time'],
        "where": event_found['location'],
    })
518
+
519
+
520
+
521
+ # In[ ]:
522
+
523
+
524
def process_chosen_model(chatbot, model):
    """Dispatch the conversation to the chat generator for *model*.

    Any value other than 'GPT' or 'Claude' falls through to Gemini
    (whose chat function converts 'content' to 'parts' internally).
    """
    if model == 'GPT':
        chat_fn = chat_gpt
    elif model == 'Claude':
        chat_fn = chat_claude
    else:
        chat_fn = chat_gemini
    yield from call_and_process_model_responses(chat_fn, chatbot)
535
+
536
+
537
+
538
+ # In[ ]:
539
+
540
+
541
# More involved Gradio code as we're not using the preset Chat interface!
# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.

with gr.Blocks(css="""
select.gr-box {
    appearance: auto !important;
    -webkit-appearance: auto !important;
}
""") as ui:
    # Title banner
    with gr.Row():
        gr.HTML("<h1 style='text-align: center; color: #4CAF50;'>Occasio! An Event Management Assistant</h1>")

    # Model picker
    with gr.Row():
        with gr.Column(scale=0):
            model = gr.Dropdown(
                choices=["GPT", "Claude", "Gemini"],
                label="Select model",
                value="GPT",
                interactive=True,
                container=True
            )

    # Chat transcript next to the generated event image
    with gr.Row():
        chatbot = gr.Chatbot(height=500, type="messages")
        image_output = gr.Image(height=500)
    with gr.Row():
        entry = gr.Textbox(label="Ask me \"when is pta meeting\", \"how about book fair\" and more... ")
    with gr.Row():
        clear = gr.Button("Clear", min_width=150)

    def do_entry(message, history):
        # Append the user's turn, then clear the textbox.
        history += [{"role": "user", "content": message}]
        return "", history

    entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(
        process_chosen_model, inputs=[chatbot, model], outputs=[chatbot, image_output]
    )
    clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)

ui.launch(inbrowser=True)
586
+
587
+
588
+ # In[ ]:
589
+
590
+
591
+
592
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1 +1,5 @@
1
- huggingface_hub==0.25.2
 
 
 
 
 
1
+ openai
2
+ google-generativeai
3
+ anthropic
4
+ gradio
5
+ python-dotenv