Antoni341 committed on
Commit
33e7d04
verified
1 Parent(s): d1fe548

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -9
app.py CHANGED
@@ -2,16 +2,18 @@ import os
2
  import uuid
3
  import gradio as gr
4
  from langchain_openai import ChatOpenAI
 
5
  from langchain_community.vectorstores import Qdrant
6
- # CAMBIO: Usar la librería dedicada langchain-huggingface
7
  from langchain_community.embeddings import HuggingFaceEmbeddings
 
 
8
  from qdrant_client import QdrantClient, models
9
  from qdrant_client.http import models as rest_models
10
- from langchain.chains.history_aware_retriever import create_history_aware_retriever
11
- from langchain.chains.retrieval import create_retrieval_chain
12
  from langchain.chains.combine_documents import create_stuff_documents_chain
 
13
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
14
- from langchain_community.chat_message_histories import ChatMessageHistory
15
  from langchain_core.runnables.history import RunnableWithMessageHistory
16
 
17
  # --- 1. CONFIGURACIÓN Y VARIABLES DE ENTORNO ---
@@ -34,7 +36,7 @@ OPCIONES_CATEGORIAS = [
34
  # Cliente Qdrant
35
  client = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
36
 
37
- # Embeddings (Actualizado para usar langchain_huggingface)
38
  embeddings_model = HuggingFaceEmbeddings(
39
  model_name="intfloat/e5-large-v2",
40
  model_kwargs={'device': 'cpu'},
@@ -48,7 +50,7 @@ llm_openai = ChatOpenAI(
48
  api_key=OPENAI_API_KEY
49
  )
50
 
51
- # Conexión a la VectorDB (Actualizado a QdrantVectorStore)
52
  vectordb = Qdrant(
53
  client=client,
54
  collection_name=COLLECTION_NAME,
@@ -76,7 +78,6 @@ Utiliza los siguientes fragmentos de contexto recuperado para responder a la pre
76
  Si no sabes la respuesta, di que no lo sabes. \
77
  Menciona siempre de qué documentos has extraído la información (usando el metadato 'source'). \
78
  Profundiza en la respuesta.
79
-
80
  Contexto:
81
  {context}"""
82
 
@@ -143,7 +144,6 @@ def chat_logic(message, history, selected_category, session_id):
143
 
144
  # 4. Generar respuesta streaming usando el ID único del usuario
145
  full_response = ""
146
- # En versiones nuevas, a veces history devuelve objetos, aseguramos manejo de errores básico
147
  try:
148
  for chunk in conversational_rag_chain.stream(
149
  {"input": message},
@@ -181,7 +181,6 @@ with gr.Blocks(theme=tema_musical, css=custom_css, title="Chatbot SUMA") as demo
181
 
182
  chat_interface = gr.ChatInterface(
183
  fn=chat_logic,
184
- # Pasamos el session_state (el ID oculto) a la función lógica
185
  additional_inputs=[filtro_dropdown, session_state],
186
  examples=[
187
  ["¿Cuáles son los requisitos para ser socio?"],
 
2
  import uuid
3
  import gradio as gr
4
  from langchain_openai import ChatOpenAI
5
+ # Mantenemos langchain_community como pediste (versiones legacy)
6
  from langchain_community.vectorstores import Qdrant
 
7
  from langchain_community.embeddings import HuggingFaceEmbeddings
8
+ from langchain_community.chat_message_histories import ChatMessageHistory
9
+ # Clientes y Modelos
10
  from qdrant_client import QdrantClient, models
11
  from qdrant_client.http import models as rest_models
12
+ # Cadenas (Importación consolidada para evitar errores de ruta)
13
+ from langchain.chains import create_history_aware_retriever, create_retrieval_chain
14
  from langchain.chains.combine_documents import create_stuff_documents_chain
15
+ # Core
16
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 
17
  from langchain_core.runnables.history import RunnableWithMessageHistory
18
 
19
  # --- 1. CONFIGURACIÓN Y VARIABLES DE ENTORNO ---
 
36
  # Cliente Qdrant
37
  client = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
38
 
39
+ # Embeddings (Usando langchain_community antiguo)
40
  embeddings_model = HuggingFaceEmbeddings(
41
  model_name="intfloat/e5-large-v2",
42
  model_kwargs={'device': 'cpu'},
 
50
  api_key=OPENAI_API_KEY
51
  )
52
 
53
+ # Conexión a la VectorDB (Wrapper antiguo Qdrant)
54
  vectordb = Qdrant(
55
  client=client,
56
  collection_name=COLLECTION_NAME,
 
78
  Si no sabes la respuesta, di que no lo sabes. \
79
  Menciona siempre de qué documentos has extraído la información (usando el metadato 'source'). \
80
  Profundiza en la respuesta.
 
81
  Contexto:
82
  {context}"""
83
 
 
144
 
145
  # 4. Generar respuesta streaming usando el ID único del usuario
146
  full_response = ""
 
147
  try:
148
  for chunk in conversational_rag_chain.stream(
149
  {"input": message},
 
181
 
182
  chat_interface = gr.ChatInterface(
183
  fn=chat_logic,
 
184
  additional_inputs=[filtro_dropdown, session_state],
185
  examples=[
186
  ["驴Cu谩les son los requisitos para ser socio?"],