| from PIL import Image | |
| import io | |
| import streamlit as st | |
| import google.generativeai as genai | |
# Disable every Gemini safety filter category so streamed responses are
# never withheld by the moderation layer.
safety_settings = [
    {"category": category, "threshold": "BLOCK_NONE"}
    for category in (
        "HARM_CATEGORY_HARASSMENT",
        "HARM_CATEGORY_HATE_SPEECH",
        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "HARM_CATEGORY_DANGEROUS_CONTENT",
    )
]
| if "authenticated" not in st.session_state: | |
| st.session_state["authenticated"] = False | |
| if not st.session_state["authenticated"]: | |
| password = st.text_input("пасскод", type="password") | |
| if password == st.secrets["real_password"]: | |
| st.session_state["authenticated"] = True | |
| st.success("успех") | |
| elif password: | |
| st.warning("неудача") | |
| else: | |
| with st.sidebar: | |
| st.title("Gemini Pro") | |
| genai.configure(api_key=st.secrets["api_key"]) | |
| uploaded_image = st.file_uploader( | |
| label="загрузи изображение", | |
| label_visibility="visible", | |
| help="если загружено изображение - можно спрашивать по нему что-то, если нет - будет обычный чат", | |
| accept_multiple_files=False, | |
| type=["png", "jpg"], | |
| ) | |
| if uploaded_image: | |
| image_bytes = uploaded_image.read() | |
| def get_response(messages, model="gemini-pro"): | |
| model = genai.GenerativeModel(model) | |
| res = model.generate_content(messages, stream=True, safety_settings=safety_settings) | |
| return res | |
| if "messages" not in st.session_state: | |
| st.session_state["messages"] = [] | |
| messages = st.session_state["messages"] | |
| if messages: | |
| for item in messages: | |
| role, parts = item.values() | |
| if role == "user": | |
| st.chat_message("user").markdown(parts[0]) | |
| elif role == "model": | |
| st.chat_message("assistant").markdown(parts[0]) | |
| chat_message = st.chat_input("Спроси что-нибудь!") | |
| if chat_message: | |
| st.chat_message("user").markdown(chat_message) | |
| res_area = st.chat_message("assistant").empty() | |
| if "image_bytes" in globals(): | |
| vision_message = [chat_message, Image.open(io.BytesIO(image_bytes))] | |
| res = get_response(vision_message, model="gemini-pro-vision") | |
| else: | |
| vision_message = [{"role": "user", "parts": [chat_message]}] | |
| res = get_response(vision_message) | |
| res_text = "" | |
| for chunk in res: | |
| res_text += chunk.text | |
| res_area.markdown(res_text) | |
| messages.append({"role": "model", "parts": [res_text]}) | |