Update app.py
app.py CHANGED
```diff
@@ -2,12 +2,11 @@ import streamlit as st
 import google.generativeai as genai
 import os
 from dotenv import load_dotenv
+import http.client
+import json
 
 load_dotenv()
 
-
-tools = "google_search_retrieval"
-
 # Configure the API key
 genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
@@ -18,8 +17,57 @@ safety_settings = [
     {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
 ]
 
-model = genai.GenerativeModel('gemini-2.0-flash-exp',tools='code_execution'
-
+model = genai.GenerativeModel('gemini-2.0-flash-exp', tools='code_execution',
+                              safety_settings=safety_settings,
+                              system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")
+
+def perform_web_search(query):
+    conn = http.client.HTTPSConnection("google.serper.dev")
+    payload = json.dumps({"q": query})
+    headers = {
+        'X-API-KEY': '9b90a274d9e704ff5b21c0367f9ae1161779b573',
+        'Content-Type': 'application/json'
+    }
+    try:
+        conn.request("POST", "/search", payload, headers)
+        res = conn.getresponse()
+        data = json.loads(res.read().decode("utf-8"))
+        return data
+    except Exception as e:
+        st.error(f"Erreur lors de la recherche web : {e}")
+        return None
+    finally:
+        conn.close()
+
+def format_search_results(data):
+    if not data:
+        return "Aucun résultat trouvé"
+
+    result = ""
+
+    # Knowledge Graph
+    if 'knowledgeGraph' in data:
+        kg = data['knowledgeGraph']
+        result += f"### {kg.get('title', '')}\n"
+        result += f"*{kg.get('type', '')}*\n\n"
+        result += f"{kg.get('description', '')}\n\n"
+
+    # Organic Results
+    if 'organic' in data:
+        result += "### Résultats principaux:\n"
+        for item in data['organic'][:3]:  # Limit to top 3 results
+            result += f"- **{item['title']}**\n"
+            result += f"  {item['snippet']}\n"
+            result += f"  [Lien]({item['link']})\n\n"
+
+    # People Also Ask
+    if 'peopleAlsoAsk' in data:
+        result += "### Questions fréquentes:\n"
+        for item in data['peopleAlsoAsk'][:2]:  # Limit to top 2 questions
+            result += f"- **{item['question']}**\n"
+            result += f"  {item['snippet']}\n\n"
+
+    return result
 
 def role_to_streamlit(role):
     if role == "model":
@@ -27,17 +75,24 @@ def role_to_streamlit(role):
     else:
         return role
 
-# Add
+# Add chat and settings to session state
 if "chat" not in st.session_state:
     st.session_state.chat = model.start_chat(history=[])
+if "web_search" not in st.session_state:
+    st.session_state.web_search = False
 
 # Display Form Title
 st.title("Mariam AI!")
 
+# Settings section
+with st.sidebar:
+    st.title("Paramètres")
+    st.session_state.web_search = st.toggle("Activer la recherche web", value=st.session_state.web_search)
+
 # File upload section
-uploaded_file = st.file_uploader("Télécharger un fichier (image/document)", type=['jpg','mp4','mp3', 'jpeg', 'png', 'pdf', 'txt'])
+uploaded_file = st.file_uploader("Télécharger un fichier (image/document)", type=['jpg', 'mp4', 'mp3', 'jpeg', 'png', 'pdf', 'txt'])
 
-# Display chat messages
+# Display chat messages
 for message in st.session_state.chat.history:
     with st.chat_message(role_to_streamlit(message.role)):
         st.markdown(message.parts[0].text)
@@ -45,11 +100,8 @@ for message in st.session_state.chat.history:
 # Function to handle file upload with Gemini
 def process_uploaded_file(file):
     if file is not None:
-        # Save the uploaded file temporarily
         with open(os.path.join("temp", file.name), "wb") as f:
             f.write(file.getbuffer())
-
-        # Upload the file to Gemini
         try:
             gemini_file = genai.upload_file(os.path.join("temp", file.name))
             return gemini_file
@@ -57,33 +109,37 @@ def process_uploaded_file(file):
             st.error(f"Erreur lors du téléchargement du fichier : {e}")
             return None
 
-#
+# Chat input and processing
 if prompt := st.chat_input("Hey?"):
-    # Process any uploaded file
     uploaded_gemini_file = None
     if uploaded_file:
         uploaded_gemini_file = process_uploaded_file(uploaded_file)
 
-    # Display user
+    # Display user message
     st.chat_message("user").markdown(prompt)
-
-    # Send user entry to Gemini with optional file
+
     try:
+        # Perform web search if enabled
+        web_results = None
+        if st.session_state.web_search:
+            with st.spinner("Recherche web en cours..."):
+                web_results = perform_web_search(prompt)
+                if web_results:
+                    formatted_results = format_search_results(web_results)
+                    prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
+
+        # Send message to Gemini
         if uploaded_gemini_file:
-            # If a file is uploaded, include it in the context
             response = st.session_state.chat.send_message([uploaded_gemini_file, "\n\n", prompt])
         else:
-            # Normal text-only conversation
-            print(prompt)
-            print("---------")
             response = st.session_state.chat.send_message(prompt)
-
-        # Display
+
+        # Display assistant response
         with st.chat_message("assistant"):
             st.markdown(response.text)
 
     except Exception as e:
         st.error(f"Erreur lors de l'envoi du message : {e}")
 
-# Create temp directory
+# Create temp directory
 os.makedirs("temp", exist_ok=True)
```