mhamoody committed on
Commit
3f601c5
·
verified ·
1 Parent(s): 2875655
Files changed (1) hide show
  1. app.py +11 -17
app.py CHANGED
@@ -4,18 +4,16 @@ from typing import List, Tuple, Optional
4
  import google.generativeai as genai
5
  import gradio as gr
6
  from PIL import Image
7
- import tempfile
8
- import os
9
 
10
  GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
11
 
12
- IMAGE_WIDTH = 512
13
  IMAGE_WIDTH = 512
14
 
15
  system_instruction_analysis = "You are an expert of the given topic. Analyze the provided text with a focus on the topic, identifying recent issues, recent insights, or improvements relevant to academic standards and effectiveness. Offer actionable advice for enhancing knowledge and suggest real-life examples."
16
  model_name = "gemini-2.0-flash-exp"
17
- model = genai.GenerativeModel(model_name, system_instruction=system_instruction_analysis)
18
- #genai.configure(api_key=google_key)
 
19
 
20
  # Helper Functions
21
  def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
@@ -58,6 +56,8 @@ def bot(
58
 
59
  # Configure the model
60
  genai.configure(api_key=google_key)
 
 
61
  generation_config = genai.types.GenerationConfig(
62
  temperature=temperature,
63
  max_output_tokens=max_output_tokens,
@@ -71,7 +71,7 @@ def bot(
71
 
72
  # Generate response
73
  try:
74
- response = model.generate_content(inputs, stream=True, generation_config=generation_config)
75
  response.resolve()
76
  except Exception as e:
77
  chatbot[-1][1] = f"Error occurred: {str(e)}"
@@ -140,9 +140,8 @@ top_p_component = gr.Slider(
140
  )
141
  example_scenarios = [
142
  "Describe Multimodal AI",
143
- "What are the difference between muliagent llm and multiagent system",
144
- "Why it's difficult to intgrate multimodality in prompt"]
145
- example_images = [["ex1.png"],["ex2.png"]]
146
 
147
 
148
  # Gradio Interface
@@ -182,13 +181,6 @@ with gr.Blocks(theme="earneleh/paris") as demo:
182
  fn=lambda query: query if query else "No query selected.",
183
  inputs=[example_radio],
184
  outputs=[text_prompt_component])
185
- # Custom examples section with blue styling
186
- with gr.Accordion("🧪Example Image 🩻", open=False):
187
- gr.Examples(
188
- examples=example_images,
189
- inputs=[image_prompt_component],
190
- label="Example Figures",
191
- )
192
  with gr.Accordion("🛠️Customize", open=False):
193
  temperature_component.render()
194
  max_output_tokens_component.render()
@@ -201,4 +193,6 @@ with gr.Blocks(theme="earneleh/paris") as demo:
201
  ).then(
202
  fn=bot, inputs=bot_inputs, outputs=[chatbot_component]
203
  )
204
- demo.launch()
 
 
 
4
  import google.generativeai as genai
5
  import gradio as gr
6
  from PIL import Image
 
 
7
 
8
  GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
9
 
 
10
  IMAGE_WIDTH = 512
11
 
12
  system_instruction_analysis = "You are an expert of the given topic. Analyze the provided text with a focus on the topic, identifying recent issues, recent insights, or improvements relevant to academic standards and effectiveness. Offer actionable advice for enhancing knowledge and suggest real-life examples."
13
  model_name = "gemini-2.0-flash-exp"
14
+
15
+ # Initialize model (will be configured with API key in bot function)
16
+ model = None
17
 
18
  # Helper Functions
19
  def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
 
56
 
57
  # Configure the model
58
  genai.configure(api_key=google_key)
59
+ model_instance = genai.GenerativeModel(model_name, system_instruction=system_instruction_analysis)
60
+
61
  generation_config = genai.types.GenerationConfig(
62
  temperature=temperature,
63
  max_output_tokens=max_output_tokens,
 
71
 
72
  # Generate response
73
  try:
74
+ response = model_instance.generate_content(inputs, stream=True, generation_config=generation_config)
75
  response.resolve()
76
  except Exception as e:
77
  chatbot[-1][1] = f"Error occurred: {str(e)}"
 
140
  )
141
  example_scenarios = [
142
  "Describe Multimodal AI",
143
+ "What are the difference between multiagent llm and multiagent system",
144
+ "Why it's difficult to integrate multimodality in prompt"]
 
145
 
146
 
147
  # Gradio Interface
 
181
  fn=lambda query: query if query else "No query selected.",
182
  inputs=[example_radio],
183
  outputs=[text_prompt_component])
 
 
 
 
 
 
 
184
  with gr.Accordion("🛠️Customize", open=False):
185
  temperature_component.render()
186
  max_output_tokens_component.render()
 
193
  ).then(
194
  fn=bot, inputs=bot_inputs, outputs=[chatbot_component]
195
  )
196
+
197
+ if __name__ == "__main__":
198
+ demo.launch(server_name="0.0.0.0", server_port=7860, share=False)