# Hugging Face Space (status banner from the hosting page: "Sleeping")
import os
import io

import gradio as gr
import google.generativeai as genai
from PIL import Image

# Configure the Gemini API.
# Retrieve the API key from Hugging Face Spaces secrets; failing fast here
# surfaces a misconfigured deployment at startup instead of at first request.
api_key = os.environ.get("GEMINI_API_KEY")
if not api_key:
    raise ValueError("GEMINI_API_KEY not found in environment variables. Please set it in Hugging Face Spaces secrets.")
genai.configure(api_key=api_key)
def upload_to_gemini(image):
    """Upload a PIL image to the Gemini Files API as a JPEG.

    Args:
        image: A ``PIL.Image.Image`` instance, or ``None``.

    Returns:
        The file handle returned by ``genai.upload_file``, or ``None``
        when no image was provided.
    """
    if image is None:
        return None
    # JPEG cannot encode an alpha channel or palette; normalize the mode so
    # ``save`` does not raise for RGBA/P images coming from the Gradio widget.
    if image.mode not in ("RGB", "L"):
        image = image.convert("RGB")
    buffer = io.BytesIO()
    image.save(buffer, format="JPEG")
    # BUG FIX: upload_file expects a path or a file-like object, not raw
    # bytes. Pass the buffer itself, rewound so the whole encoded image
    # is read from the start.
    buffer.seek(0)
    return genai.upload_file(buffer, mime_type="image/jpeg")
# Create the model.
# Sampling parameters for the chat model: temperature/top_p/top_k trade
# determinism for variety; output is capped at 1024 tokens of plain text.
generation_config = {
    "temperature": 0.9,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 1024,
    "response_mime_type": "text/plain",
}

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config,
)
def chat_with_gemini(history, user_message, image):
    """Send one user turn (optionally with an image) to Gemini and append the reply.

    Args:
        history: List of ``(user_message, model_reply)`` tuples, or ``None``.
        user_message: The new user text to send.
        image: Optional PIL image to attach to this turn.

    Returns:
        ``(history, history)`` — duplicated because the Gradio wiring feeds
        the same value to the chatbot display and back in as state.
    """
    history = history or []
    try:
        uploaded_image = upload_to_gemini(image) if image else None

        # Rebuild the SDK chat history from the (user, model) pairs.
        # BUG FIX: the previous code alternated roles per *pair* index and
        # dropped every model reply; each pair must expand into two turns.
        # It also double-sent the first message (placed in the start history
        # AND sent via send_message); now the new turn is only ever sent once.
        sdk_history = []
        for past_user, past_model in history:
            sdk_history.append({"role": "user", "parts": [past_user]})
            sdk_history.append({"role": "model", "parts": [past_model]})
        chat_session = model.start_chat(history=sdk_history)

        # send_message returns the model response directly; using the return
        # value is more robust than reading chat_session.last afterwards.
        if uploaded_image:
            response = chat_session.send_message([uploaded_image, user_message])
        else:
            response = chat_session.send_message(user_message)
        history.append((user_message, response.text))
    except Exception as e:
        # Surface the failure inside the chat transcript instead of crashing
        # the Gradio callback (deliberate top-level boundary for the UI).
        error_message = f"An error occurred: {str(e)}"
        history.append((user_message, error_message))
    return history, history
def clear_conversation():
    """Return None so Gradio empties the chatbot component on Clear."""
    return None
# Define the Gradio interface.
with gr.Blocks() as demo:
    # The Chatbot component doubles as the conversation state: its current
    # value is passed back into chat_with_gemini as the history argument.
    chatbot = gr.Chatbot(label="Chat with Gemini 1.5 Flash")
    msg = gr.Textbox(label="Type your message here")
    clear = gr.Button("Clear")
    image_upload = gr.Image(type="pil", label="Upload an image (optional)")

    # Enter in the textbox sends (history, message, image); the callback
    # returns (history, history), and both outputs target the same chatbot.
    msg.submit(chat_with_gemini, [chatbot, msg, image_upload], [chatbot, chatbot])
    # Clear resets the chatbot display (and therefore the history state).
    clear.click(clear_conversation, outputs=[chatbot])

# Launch the app only when executed as a script.
if __name__ == "__main__":
    demo.launch()