Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,87 +1,86 @@
|
|
| 1 |
-
import google.generativeai as genai # Import the Google Generative AI model
|
| 2 |
-
import gradio as gr # Import Gradio to create a web-based user interface (UI)
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
#
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
messages
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
prompt
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
messages
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
|
|
|
| 1 |
+
import google.generativeai as genai # Import the Google Generative AI model
|
| 2 |
+
import gradio as gr # Import Gradio to create a web-based user interface (UI)
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
# Configure the Gemini SDK with the key supplied via the environment.
_api_key = os.getenv("GEMINI_API_KEY")
genai.configure(api_key=_api_key)

# Shared model instance used for every chat request.
# NOTE(review): the UI title below says "Gemini 2.0 Flash" but this
# instantiates "gemini-1.5-pro" — confirm which model is intended.
model = genai.GenerativeModel("gemini-1.5-pro")
|
| 9 |
+
|
| 10 |
+
def chat_interface(user_input, image_input, chat_state):
    """Handle one chat turn and return the updated UI values.

    Parameters:
        user_input: text typed by the user; blank/whitespace means "no new turn".
        image_input: optional PIL image to send alongside the prompt, or None.
        chat_state: session dict {"history": [(user, assistant), ...]} or None
            on the very first interaction.

    Returns:
        A 4-tuple: ("", chatbot messages, curated markdown history, chat_state)
        matching the Gradio outputs [user_input, chatbot, raw_history, chat_state].
    """
    # First interaction: start with an empty history.
    if chat_state is None:
        chat_state = {"history": []}

    # Only query the model when there is actual new input; otherwise just
    # re-render the existing conversation.
    if user_input.strip():
        # Flatten the prior turns into a plain-text prompt, then append the
        # new user message for the model to complete.
        prompt = "\n".join(
            f"User: {u}\nAssistant: {a}" for u, a in chat_state["history"]
        )
        prompt += f"\nUser: {user_input}\nAssistant: "

        # Include the image in the request when one was attached.
        # (Explicit None check: PIL images should not rely on truthiness.)
        if image_input is not None:
            response = model.generate_content([image_input, prompt])
        else:
            response = model.generate_content(prompt)

        # Record the completed turn.
        chat_state["history"].append((user_input, response.text))

    # Build the chatbot messages interleaved per turn (user then assistant),
    # as gr.Chatbot(type="messages") expects — not all users followed by all
    # assistants. "content" is a string key (the original used a bare name,
    # which raised NameError).
    messages = []
    for u, a in chat_state["history"]:
        messages.append({"role": "user", "content": u})
        messages.append({"role": "assistant", "content": a})

    # One consistent markdown rendering of the history for the raw panel.
    curated_history = "\n\n".join(
        f"*user*: {u}\n*assistant*: {a}" for u, a in chat_state["history"]
    )

    # Clear the input box, refresh the chatbot and history, keep the state.
    return "", messages, curated_history, chat_state
|
| 49 |
+
|
| 50 |
+
# Create Gradio UI for the chat_interface
|
| 51 |
+
# Build the Gradio web UI wired to chat_interface.
with gr.Blocks() as demo:
    # Page title — markdown headings need a space after "##" to render
    # (the original "##Gemini..." displayed as literal text).
    gr.Markdown("## Gemini 2.0 Flash Chat")

    # Per-session state holding {"history": [(user, assistant), ...]}.
    chat_state = gr.State()

    with gr.Row():  # Two columns side by side.
        with gr.Column():  # Left column: the chat area.
            chatbot = gr.Chatbot(label="Conversation", type="messages")
            user_input = gr.Textbox(label="Your Message")
            submit_btn = gr.Button("Gönder")
            attach_button = gr.Button("Resim Ekle", scale=1)
            # Hidden until the attach button is clicked.
            image_input = gr.Image(type="pil", label="Resim Yükle", visible=False, scale=1)

        with gr.Column():  # Right column: raw formatted history.
            # Balanced emphasis markers so the markdown renders as italics
            # (the original had an unmatched "*").
            raw_history = gr.Markdown("*Conversation History*")

    def toggle_image_upload():
        """Reveal the hidden image-upload component."""
        return gr.update(visible=True)

    # Clicking the attach button shows the image upload area.
    attach_button.click(toggle_image_upload, [], [image_input])

    # Both the button and pressing Enter in the textbox submit a turn.
    submit_btn.click(
        chat_interface,
        [user_input, image_input, chat_state],
        [user_input, chatbot, raw_history, chat_state],
    )
    user_input.submit(
        chat_interface,
        [user_input, image_input, chat_state],
        [user_input, chatbot, raw_history, chat_state],
    )

# Launch the Gradio interface (web app).
demo.launch()
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
|
|
|