Spaces:
Sleeping
Sleeping
| from flask import Flask, request, jsonify | |
| import chromadb | |
| from langchain_community.vectorstores import Chroma | |
| from langchain_openai import OpenAIEmbeddings | |
| import os | |
| from openai import OpenAI | |
app = Flask(__name__)

# OpenAI API key read from the environment (None if unset; the OpenAI
# client will then fail at request time, not at import time).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# OpenAI client used for chat completions in the /chat handler.
client = OpenAI(api_key=OPENAI_API_KEY)

# Persistent ChromaDB client inside the Hugging Face Space container.
chroma_client = chromadb.PersistentClient(path="/app/chroma_db")  # Uses the path inside the container

# Wrap the existing "docs" Chroma collection as a LangChain vector store,
# embedding queries with the same model the collection was built with
# (presumably text-embedding-3-small — confirm against the ingestion job).
vectorstore = Chroma(
    client=chroma_client,
    collection_name="docs",
    embedding_function=OpenAIEmbeddings(model="text-embedding-3-small", openai_api_key=OPENAI_API_KEY)
)

# Retriever used to fetch documents relevant to each incoming question.
retriever = vectorstore.as_retriever()
def obtener_extractos(pregunta):
    """Fetch document excerpts relevant to *pregunta* from ChromaDB.

    Args:
        pregunta: User question used as the retrieval query.

    Returns:
        A list of ``(page_content, url)`` tuples; the url falls back to
        ``"URL no disponible"`` when the document metadata has no "url" key.
    """
    extractos = []
    for documento in retriever.invoke(pregunta):
        enlace = documento.metadata.get("url", "URL no disponible")
        extractos.append((documento.page_content, enlace))
    return extractos
# NOTE(review): route path assumed to be /chat — confirm against API clients.
@app.route("/chat", methods=["POST"])
def chat():
    """POST endpoint that answers a question using OpenAI, grounded on ChromaDB context.

    Expected JSON body:
        message (str, required): the user's question.
        system_message (str, optional): system prompt prefix.
        max_tokens (int, optional, default 512).
        temperature (float, optional, default 0.7).
        top_p (float, optional, default 0.95).

    Returns:
        200 with {"response": ..., "context": ...} on success,
        400 when 'message' is missing, 500 on OpenAI/retrieval errors.
    """
    # silent=True avoids Flask raising 415/400 on a missing or non-JSON body;
    # we fall back to {} so the explicit validation below produces the 400.
    data = request.get_json(silent=True) or {}
    message = data.get("message", "")
    system_message = data.get("system_message", "Eres un asistente virtual.")
    max_tokens = data.get("max_tokens", 512)
    temperature = data.get("temperature", 0.7)
    top_p = data.get("top_p", 0.95)

    if not message:
        return jsonify({"error": "El campo 'message' es obligatorio."}), 400

    # Retrieve relevant document excerpts to ground the answer.
    contexto = obtener_extractos(message)

    # Build the final system prompt with the retrieved context appended.
    system_message_final = f"""{system_message}
Información relevante extraída de los documentos:
{contexto}
"""
    messages = [
        {"role": "system", "content": system_message_final},
        {"role": "user", "content": message}
    ]

    try:
        # Call the OpenAI chat completions API.
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p
        )
        completion = response.choices[0].message.content
        return jsonify({"response": completion, "context": contexto})
    except Exception as e:
        # Boundary handler: surface the error to the client as a 500.
        return jsonify({"error": str(e)}), 500
if __name__ == '__main__':
    # Bind on all interfaces; 7860 is the port Hugging Face Spaces expects.
    app.run(host="0.0.0.0", port=7860)