Runtime error
Update app.py
app.py CHANGED
@@ -8,15 +8,30 @@ def to_markdown(text):
     text = text.replace('•', ' *')
     return textwrap.indent(text, '> ', lambda line: True)
 
-def chat(message, history):
-    """Generates a response to the user's message using
+def chat(message, history, img=None):
+    """Generates a response to the user's message, optionally using an image."""
     genai.configure(api_key='AIzaSyCMBk81YmILNTok8hd6tYtJaevp1qbl6I0')  # Replace with your actual API key
-
+
+    text_model = genai.GenerativeModel('gemini-pro')
+    vision_model = genai.GenerativeModel('gemini-pro-vision')
 
     try:
-
+        if img is not None:
+            # Process image with vision model
+            image_response = vision_model.generate_content(img)
+            image_response.resolve()  # Wait for response completion
+            image_text = to_markdown(image_response.text)
+
+            # Combine image and text for unified response
+            prompt = f"{message}\n{image_text}"
+            response = text_model.generate_content(prompt, stream=True)
+        else:
+            # Use only text model
+            response = text_model.generate_content(message, stream=True)
+
         for chunk in response:
             return to_markdown(chunk.text)  # Format as Markdown
+
     except Exception as e:
         print(f"Error during generation: {e}")
         return "An error occurred while generating the response. Please try again later."
@@ -25,7 +40,8 @@ chat_interface = gr.ChatInterface(
     fn=chat,
     title="Gemini Chat",
     description="Chat with an AI assistant powered by Gemini",
-    theme="soft"
+    theme="soft",
+    inputs=[gr.Textbox(lines=1, label="Enter your message"), gr.Image(optional=True)],
 )
 
 chat_interface.launch()
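In current Gradio releases gr.ChatInterface does not take an inputs keyword, and gr.Image no longer accepts optional=, so the last added line would likely fail at startup, which may be what the Space's runtime error points at. A possible rewiring, a sketch only and assuming Gradio 4.x, passes the image through additional_inputs so it arrives as the img argument of chat():

chat_interface = gr.ChatInterface(
    fn=chat,  # chat(message, history, img=None) from this commit
    title="Gemini Chat",
    description="Chat with an AI assistant powered by Gemini",
    theme="soft",
    additional_inputs=[gr.Image(type="pil", label="Optional image")],  # forwarded to chat() as img
)

chat_interface.launch()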
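Separately, the API key is hardcoded in app.py. On Hugging Face Spaces a key is usually stored as a repository secret and read from the environment; a minimal sketch, assuming a secret named GOOGLE_API_KEY (the name is illustrative, not from this commit):

import os
import google.generativeai as genai

# GOOGLE_API_KEY is a placeholder secret name; Spaces exposes secrets as environment variables.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])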