Files changed (2) hide show
  1. app.py +305 -162
  2. requirements.txt +16 -14
app.py CHANGED
@@ -1,162 +1,305 @@
1
- #================imports==============
2
-
3
- import uuid
4
- import requests
5
- import os
6
- os.environ["USER_AGENT"] = "RAG-App/1.0"
7
- from typing import Dict, List, Any
8
- from dotenv import load_dotenv
9
-
10
- from bs4 import BeautifulSoup
11
- from langchain_core.globals import set_llm_cache
12
- from langchain_core.caches import InMemoryCache
13
-
14
- from langchain_community.document_loaders import WebBaseLoader
15
- from langchain_text_splitters import RecursiveCharacterTextSplitter
16
-
17
- from langchain_huggingface import HuggingFaceEmbeddings
18
- from langchain_community.vectorstores import Weaviate
19
- from langchain_community.vectorstores import FAISS
20
- from langchain_groq import ChatGroq
21
- from langchain_core.prompts import ChatPromptTemplate,MessagesPlaceholder
22
-
23
- from langchain_classic.chains.combine_documents import create_stuff_documents_chain
24
- from langchain_classic.chains import create_retrieval_chain
25
-
26
- from langchain_core.runnables.history import RunnableWithMessageHistory
27
- from langchain_community.chat_message_histories import ChatMessageHistory
28
- from langchain_core.chat_history import BaseChatMessageHistory
29
-
30
- #================== CONFIG==================
31
-
32
- load_dotenv()
33
-
34
- set_llm_cache(InMemoryCache())
35
- api_key=os.environ["GROQ_API_KEY"]
36
- #os.environ["HF_API_KEY"]
37
- print("api chargée:" if api_key else "y'a probleme!!")
38
-
39
- #========== charger et decouper documents=================
40
-
41
- urls=[
42
- "https://fr.wikipedia.org/wiki/%C3%89levage",
43
- "https://fr.wikipedia.org/wiki/La_P%C3%AAche"
44
- ]
45
-
46
- loader = WebBaseLoader(urls,
47
- requests_kwargs={
48
- "headers":{
49
- "User-Agent":"RAG-App/1.0"
50
- }
51
- }
52
- )
53
- docs =loader.load()
54
-
55
- splitter = RecursiveCharacterTextSplitter(chunk_size= 1000, chunk_overlap=200)
56
- chunks= splitter.split_documents(docs)
57
-
58
- #============embeding et indexation vers faiss_db================
59
-
60
- embeddings= HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
61
-
62
- faiss_db=FAISS.from_documents(
63
- documents=chunks,
64
- embedding=embeddings
65
- )
66
-
67
- retriever=faiss_db.as_retriever(search_type="similarity", search_kwargs={"k":3})
68
-
69
- #=============== LLM et Prompt=================
70
-
71
- llm = ChatGroq(
72
- model="llama-3.3-70b-versatile",
73
- temperature=0.0,
74
- max_tokens=1200
75
- )
76
-
77
- prompt = ChatPromptTemplate.from_messages([
78
- ("system", """Tu es un assistant expert en dans le domaine de l'elevage et la pêche. Réponds clairement.
79
- Si tu ne connais pas, n'invente pas. Garde un ton amical.
80
-
81
- Contexte :
82
- {context}"""),
83
- MessagesPlaceholder(variable_name="chat_history"),
84
- ("human", "{input}"),
85
- ])
86
-
87
- #============= CHAINE DE RECUPERATION=======
88
-
89
- stuff_chain= create_stuff_documents_chain(llm, prompt)
90
- rag_chain=create_retrieval_chain(retriever, stuff_chain)
91
-
92
- import gradio as gr
93
-
94
- store = {}
95
-
96
- def get_session_history(session_id:str)->BaseChatMessageHistory:
97
- if session_id not in store:
98
- store[session_id] = ChatMessageHistory()
99
- return store[session_id]
100
-
101
- # ====== CHAIN AVEC MÉMOIRE ======
102
-
103
- convers_chain = RunnableWithMessageHistory(
104
- rag_chain,
105
- get_session_history,
106
- input_messages_key="input",
107
- history_messages_key="chat_history",
108
- output_messages_key="answer"
109
- )
110
-
111
- # =============FONCTION CHAT ================
112
-
113
- SESSION_ID = str(uuid.uuid4()) # session globale
114
-
115
- def chat_fn(message, history):
116
- result = convers_chain.invoke(
117
- {"input": message},
118
- config={"configurable": {"session_id": SESSION_ID}}
119
- )
120
- return result.get("answer", str(result))
121
-
122
- # def chat_fn(message, history, request: gr.Request):
123
- # session_id = request.session_hash
124
-
125
- # result = convers_chain.invoke(
126
- # {"input": message},
127
- # config={"configurable": {"session_id": session_id}}
128
- # )
129
- # return result.get("answer", str(result))
130
-
131
- #session_id_state = gr.State(value=None)
132
-
133
- # def chat_fn(message, history, session_id_state):
134
- # if session_id_state is None:
135
- # session_id_state = str(uuid.uuid4())
136
-
137
- # result = convers_chain.invoke(
138
- # {"input": message},
139
- # config={"configurable": {"session_id": session_id_state}}
140
- # )
141
-
142
- # response = result.get("answer", str(result))
143
- # return response, session_id_state
144
-
145
-
146
- # ================= GRADIO ====================
147
-
148
- demo = gr.ChatInterface(
149
- fn=chat_fn,
150
- title="🤖 RAG:Specialist en Science Animale 👌",
151
- description="Posez vos questions sur l'élévage et la pêche",
152
- examples=[
153
- "C'est quoi la pêche ?",
154
- "Explique l'élévage",
155
- "Quelle est la différence entre l'élévage et pêche ?"
156
- ]
157
- )
158
-
159
-
160
- # ===================LANCEMENT ================
161
-
162
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #================imports==============
2
+
3
+ import uuid
4
+ import requests
5
+ import os
6
+ os.environ["USER_AGENT"] = "RAG-App/1.0"
7
+ from typing import Dict, List, Any
8
+ from dotenv import load_dotenv
9
+
10
+ from bs4 import BeautifulSoup
11
+ from langchain_core.globals import set_llm_cache
12
+ from langchain_core.caches import InMemoryCache
13
+
14
+ from langchain_community.document_loaders import WebBaseLoader
15
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
16
+
17
+ from langchain_huggingface import HuggingFaceEmbeddings
18
+ from langchain_community.vectorstores import FAISS
19
+ from langchain_groq import ChatGroq
20
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
21
+
22
+ from langchain.chains.combine_documents import create_stuff_documents_chain
23
+ from langchain.chains import create_retrieval_chain
24
+
25
+ from langchain_core.runnables.history import RunnableWithMessageHistory
26
+ from langchain_community.chat_message_histories import ChatMessageHistory
27
+ from langchain_core.chat_history import BaseChatMessageHistory
28
+
29
+ import gradio as gr
30
+
31
+ #================== CONFIG==================
32
+
33
+ load_dotenv()
34
+
35
+ set_llm_cache(InMemoryCache())
36
+ api_key = os.environ.get("GROQ_API_KEY")
37
+ if not api_key:
38
+ raise ValueError("❌ GROQ_API_KEY non trouvée!")
39
+ print("✅ API chargée avec succès")
40
+
41
+ #========== charger et découper documents=================
42
+
43
+ print("📥 Chargement des documents...")
44
+ urls = [
45
+ "https://fr.wikipedia.org/wiki/%C3%89levage",
46
+ "https://fr.wikipedia.org/wiki/La_P%C3%AAche"
47
+ ]
48
+
49
+ try:
50
+ loader = WebBaseLoader(
51
+ urls,
52
+ requests_kwargs={"headers": {"User-Agent": "RAG-App/1.0"}}
53
+ )
54
+ docs = loader.load()
55
+ print(f"✅ {len(docs)} documents chargés")
56
+ except Exception as e:
57
+ print(f"⚠️ Erreur de chargement: {e}")
58
+ from langchain_core.documents import Document
59
+ docs = [
60
+ Document(page_content="L'élevage est l'ensemble des activités qui assurent la multiplication et l'entretien des animaux domestiques pour la production de biens et services."),
61
+ Document(page_content="La pêche est l'activité consistant à capturer des animaux aquatiques dans leur milieu naturel.")
62
+ ]
63
+
64
+ splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
65
+ chunks = splitter.split_documents(docs)
66
+ print(f"✅ {len(chunks)} segments créés")
67
+
68
+ #============embedding et indexation================
69
+
70
+ print("🔧 Création des embeddings...")
71
+ embeddings = HuggingFaceEmbeddings(
72
+ model_name="sentence-transformers/all-MiniLM-L6-v2",
73
+ model_kwargs={'device': 'cpu'}
74
+ )
75
+
76
+ faiss_db = FAISS.from_documents(documents=chunks, embedding=embeddings)
77
+ print("✅ Base FAISS créée")
78
+
79
+ retriever = faiss_db.as_retriever(search_type="similarity", search_kwargs={"k": 3})
80
+
81
+ #=============== LLM et Prompt=================
82
+
83
+ print("🤖 Initialisation du LLM...")
84
+ llm = ChatGroq(
85
+ model="llama-3.3-70b-versatile",
86
+ temperature=0.0,
87
+ max_tokens=1200
88
+ )
89
+
90
+ prompt = ChatPromptTemplate.from_messages([
91
+ ("system", """Tu es un assistant expert en élevage et pêche.
92
+ Réponds de manière claire et concise en français.
93
+ Si tu ne connais pas la réponse, dis-le honnêtement.
94
+ Contexte : {context}"""),
95
+ MessagesPlaceholder(variable_name="chat_history"),
96
+ ("human", "{input}"),
97
+ ])
98
+
99
+ #============= CHAINE DE RÉCUPÉRATION================
100
+
101
+ stuff_chain = create_stuff_documents_chain(llm, prompt)
102
+ rag_chain = create_retrieval_chain(retriever, stuff_chain)
103
+
104
+ # ====== GESTION DE L'HISTORIQUE ======
105
+
106
+ store = {}
107
+
108
def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the chat history bound to *session_id*, creating it on first use.

    Backed by the module-level ``store`` dict so RunnableWithMessageHistory
    sees the same history object across turns of the same session.
    """
    return store.setdefault(session_id, ChatMessageHistory())
112
+
113
+ convers_chain = RunnableWithMessageHistory(
114
+ rag_chain,
115
+ get_session_history,
116
+ input_messages_key="input",
117
+ history_messages_key="chat_history",
118
+ output_messages_key="answer"
119
+ )
120
+
121
+ # ================= CSS POUR LE STYLE ====================
122
+
123
+ custom_css = """
124
+ .sidebar {
125
+ background: #202123 !important;
126
+ min-height: 100vh;
127
+ }
128
+ .main-area {
129
+ background: #343541 !important;
130
+ }
131
+ .chatbot-container {
132
+ height: calc(100vh - 200px) !important;
133
+ }
134
+ """
135
+
136
+ # ================= INTERFACE GRADIO (Gradio 6.0 compatible) ====================
137
+
138
+ with gr.Blocks() as demo:
139
+ with gr.Row(equal_height=True):
140
+ # Colonne gauche : Historique
141
+ with gr.Column(scale=1, min_width=250, elem_classes="sidebar"):
142
+ gr.Markdown("## 📚 Historique")
143
+ new_chat_btn = gr.Button("➕ Nouvelle conversation", variant="secondary")
144
+ history_radio = gr.Radio(
145
+ choices=[],
146
+ label="Conversations",
147
+ interactive=True
148
+ )
149
+ clear_btn = gr.Button("🗑️ Effacer", variant="stop", size="sm")
150
+
151
+ # Colonne droite : Chat
152
+ with gr.Column(scale=3, elem_classes="main-area"):
153
+ gr.Markdown("# 🤖 Assistant Élevage & Pêche")
154
+
155
+ chatbot = gr.Chatbot(
156
+ label="",
157
+ height=500,
158
+ show_label=False,
159
+ avatar_images=(None, "🐟")
160
+ )
161
+
162
+ with gr.Row():
163
+ msg_input = gr.Textbox(
164
+ placeholder="Posez votre question sur l'élevage ou la pêche...",
165
+ show_label=False,
166
+ scale=9,
167
+ container=False
168
+ )
169
+ send_btn = gr.Button("📤", variant="primary", scale=1)
170
+
171
+ gr.Examples(
172
+ examples=["C'est quoi l'élevage ?", "Explique la pêche", "Différence entre élevage et pêche ?"],
173
+ inputs=msg_input
174
+ )
175
+
176
+ # États
177
+ conversations_state = gr.State([])
178
+ current_session_id = gr.State(str(uuid.uuid4()))
179
+
180
+ # ================= FONCTIONS =================
181
+
182
+ def create_new_chat(conversations):
183
+ """Nouvelle conversation"""
184
+ new_id = str(uuid.uuid4())
185
+ conversations.append({"id": new_id, "title": "Nouveau chat", "messages": []})
186
+ choices = [c["title"] for c in conversations]
187
+ return conversations, new_id, [], gr.update(choices=choices, value=None)
188
+
189
+ def load_chat(selected_title, conversations):
190
+ """Charger une conversation"""
191
+ if not selected_title or not conversations:
192
+ return [], ""
193
+
194
+ for conv in conversations:
195
+ if conv["title"] == selected_title:
196
+ history = []
197
+ user_msg = None
198
+ for msg in conv["messages"]:
199
+ if msg["role"] == "user":
200
+ user_msg = msg["content"]
201
+ else:
202
+ if user_msg:
203
+ history.append([user_msg, msg["content"]])
204
+ user_msg = None
205
+ return history, conv["id"]
206
+ return [], ""
207
+
208
+ def send_message(message, chat_history, conversations, session_id):
209
+ """Envoyer un message"""
210
+ if not message or not message.strip():
211
+ return "", chat_history, conversations, session_id, gr.update()
212
+
213
+ # Gérer la session
214
+ if not session_id:
215
+ session_id = str(uuid.uuid4())
216
+
217
+ # Mettre à jour la conversation dans l'historique
218
+ conv_exists = False
219
+ for conv in conversations:
220
+ if conv["id"] == session_id:
221
+ conv_exists = True
222
+ conv["messages"].append({"role": "user", "content": message})
223
+ if conv["title"] == "Nouveau chat":
224
+ conv["title"] = message[:40] + "..."
225
+ break
226
+
227
+ if not conv_exists:
228
+ conversations.append({
229
+ "id": session_id,
230
+ "title": message[:40] + "...",
231
+ "messages": [{"role": "user", "content": message}]
232
+ })
233
+
234
+ # Ajouter le message au chatbot
235
+ chat_history.append([message, None])
236
+
237
+ try:
238
+ result = convers_chain.invoke(
239
+ {"input": message},
240
+ config={"configurable": {"session_id": session_id}}
241
+ )
242
+ response = result.get("answer", "Désolé, je n'ai pas compris.")
243
+ except Exception as e:
244
+ response = f"❌ Erreur: {str(e)}"
245
+
246
+ # Sauvegarder la réponse
247
+ for conv in conversations:
248
+ if conv["id"] == session_id:
249
+ conv["messages"].append({"role": "assistant", "content": response})
250
+ break
251
+
252
+ # Mettre à jour le chatbot
253
+ chat_history[-1] = [message, response]
254
+
255
+ # Mettre à jour la liste
256
+ choices = [c["title"] for c in conversations]
257
+
258
+ return "", chat_history, conversations, session_id, gr.update(choices=choices, value=conversations[-1]["title"] if conversations else None)
259
+
260
+ def clear_all():
261
+ """Tout effacer"""
262
+ store.clear()
263
+ return [], [], [], gr.update(choices=[])
264
+
265
+ # ================= ÉVÉNEMENTS =================
266
+
267
+ msg_input.submit(
268
+ send_message,
269
+ inputs=[msg_input, chatbot, conversations_state, current_session_id],
270
+ outputs=[msg_input, chatbot, conversations_state, current_session_id, history_radio]
271
+ )
272
+
273
+ send_btn.click(
274
+ send_message,
275
+ inputs=[msg_input, chatbot, conversations_state, current_session_id],
276
+ outputs=[msg_input, chatbot, conversations_state, current_session_id, history_radio]
277
+ )
278
+
279
+ new_chat_btn.click(
280
+ create_new_chat,
281
+ inputs=[conversations_state],
282
+ outputs=[conversations_state, current_session_id, chatbot, history_radio]
283
+ )
284
+
285
+ history_radio.change(
286
+ load_chat,
287
+ inputs=[history_radio, conversations_state],
288
+ outputs=[chatbot, current_session_id]
289
+ )
290
+
291
+ clear_btn.click(
292
+ clear_all,
293
+ outputs=[chatbot, conversations_state, current_session_id, history_radio]
294
+ )
295
+
296
+ # ================= LANCEMENT ====================
297
+
298
+ if __name__ == "__main__":
299
+ print("🚀 Lancement de l'application...")
300
+ demo.launch(
301
+ server_name="0.0.0.0",
302
+ server_port=7860,
303
+ css=custom_css,
304
+ theme="soft"
305
+ )
requirements.txt CHANGED
@@ -1,14 +1,16 @@
1
- #====== installer=====
2
-
3
- langchain
4
- langchain-huggingface
5
- langchain-core
6
- langchain-classic
7
- langchain-community
8
- langchain-groq
9
- sentence-transformers
10
- python-dotenv
11
- faiss-cpu
12
- beautifulsoup4
13
- gradio>=5.0.0
14
- torch
 
 
 
1
+ langchain>=0.3.0
2
+ langchain-core>=0.3.0
3
+ langchain-community>=0.3.0
4
+ langchain-groq>=0.2.0
5
+ langchain-huggingface>=0.1.0
6
+ langchain-text-splitters>=0.3.0
7
+ sentence-transformers>=3.0.0
8
+ huggingface-hub>=0.20.0
9
+ torch>=2.0.0
10
+ faiss-cpu>=1.8.0
11
+ beautifulsoup4>=4.12.0
12
+ lxml>=5.0.0
13
+ gradio>=5.0.0
14
+ python-dotenv>=1.0.0
15
+ requests>=2.31.0
16
+ numpy>=1.24.0