import threading
import queue
import time
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langsmith import traceable
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.prompts import ChatPromptTemplate
from langchain.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain.callbacks.base import BaseCallbackHandler
import gradio as gr
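# NOTE: These imports follow the legacy (pre-0.2) `langchain` package layout;
# on newer releases the same classes live in `langchain_openai` and
# `langchain_community`. An OPENAI_API_KEY environment variable is assumed
# to be set before launch.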
# --------------------------
# Custom streaming callback handler that uses a queue.
# --------------------------
class CustomStreamingCallbackHandler(BaseCallbackHandler):
    def __init__(self):
        self.token_queue = queue.Queue()

    def on_llm_new_token(self, token: str, **kwargs):
        # Simply put the new token in the queue.
        self.token_queue.put(token)


# Instantiate the custom streaming callback (do not pass the chatbot here).
stream_handler = CustomStreamingCallbackHandler()
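# The handler above decouples token production (LangChain's streaming
# callback, fired on the worker thread) from consumption (the Gradio
# generator below); queue.Queue is thread-safe, so no extra locking is needed.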
# --------------------------
# Set up the vector database and embeddings
# --------------------------
embeddings = OpenAIEmbeddings()
vectordb = Chroma(
    persist_directory="./ai_referentes_f",
    embedding_function=embeddings
)
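# Assumes a Chroma collection was already built and persisted under
# ./ai_referentes_f by a separate ingestion step; nothing in this script
# creates or populates the store.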
# --------------------------
# Define prompt templates
# --------------------------
general_system_template = r"""
Toma la historia que te voy a compartir y transfórmala según las siguientes instrucciones. CUENTA EN UN PÁRRAFO SU HISTORIA. INCLUYE EN EL PÁRRAFO QUIÉN FUE QUIEN LO HIZO. Cómo su carrera le dio capacidades para hacerlo. Cómo lo que hizo conecta con ODS, tendencia a futuro y cómo transformó una realidad. Enfócate en el impacto. HAZ EL PÁRRAFO COMO SI FUERA UNA CHARLA TED. Inspira, no uses siglas, haz mención a la ODS en relación al impacto que genera, no a la ODS; si mencionas organizaciones o personas, describe brevemente qué son y/o quiénes son. Sé claro. Te estás dirigiendo a jóvenes de colegio sin conocimientos previos de lo que les contarás, las ODS o tendencias a futuro. Utiliza un estilo que conecte con los estudiantes. No puedes contar la historia en primera persona. Inspira.
Toma las siguientes historias de contexto {context} y responde únicamente basado en este contexto.
"""
general_user_template = "Pregunta:```{question}```"
messages = [
    SystemMessagePromptTemplate.from_template(general_system_template),
    HumanMessagePromptTemplate.from_template(general_user_template)
]
qa_prompt = ChatPromptTemplate.from_messages(messages)
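# The {context} placeholder receives the retrieved documents and {question}
# the (possibly condensed) user query once this prompt is handed to the chain
# via combine_docs_chain_kwargs below.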
# --------------------------
# Create conversation memory
# --------------------------
def create_memory():
    return ConversationBufferMemory(memory_key='chat_history', return_messages=True)
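# The factory above is stored in gr.State below; Gradio keeps state values
# per user session, so each visitor is intended to get an independent
# conversation memory rather than a shared one.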
# --------------------------
# Define the chain function that uses the LLM to answer queries
# --------------------------
def pdf_qa(query, memory, llm):
    chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectordb.as_retriever(search_kwargs={'k': 28}),
        combine_docs_chain_kwargs={'prompt': qa_prompt},
        memory=memory
    )
    return chain({"question": query})
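# Note: pdf_qa rebuilds the ConversationalRetrievalChain on every call; this
# is inexpensive (no model or index reload) and keeps the per-session
# `memory` object as the only state carried between turns.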
# --------------------------
# Build the Gradio interface with custom CSS for the "Enviar" button.
# --------------------------
with gr.Blocks() as demo:
    # Inject custom CSS via HTML.
    gr.HTML(
        """
        <style>
        /* Target the button inside the container with id "enviar_button" */
        #enviar_button button {
            background-color: #E50A17 !important;
            color: white !important;
        }
        </style>
        """
    )
    # Chatbot component with an initial greeting.
    chatbot = gr.Chatbot(
        label="Referentes",
        value=[[None,
'''
¡Hola! Te ayudo a imaginar el impacto que puedes lograr. En la UPC vas a aprender resolviendo retos reales que nuestra sociedad necesita solucionar.
Dime qué carreras te emocionan y te podré decir el impacto que tu carrera puede generar. Inspírate y marca la diferencia tú también.
'''
        ]]
    )
    msg = gr.Textbox(placeholder="Escribe aquí", label='')
    submit = gr.Button("Enviar", elem_id="enviar_button")
    memory_state = gr.State(create_memory)

    # Create the ChatOpenAI model with streaming enabled and our custom callback.
    llm = ChatOpenAI(
        temperature=0,
        model_name='gpt-4o',
        streaming=True,
        callbacks=[stream_handler]
    )
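    # Caveat: `llm` and `stream_handler` are module-level singletons, so tokens
    # from concurrent sessions would interleave in the same queue; this setup
    # assumes one active conversation at a time.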
    # --------------------------
    # Generator function that runs the chain in a separate thread and polls the token queue.
    # --------------------------
    def user(query, chat_history, memory):
        # Append the user's message with an empty bot response.
        chat_history.append((query, ""))
        # Immediately yield an update so the user's message appears.
        yield "", chat_history, memory

        # Container for the final chain result.
        final_result = [None]

        # Helper that runs the chain, then signals end-of-stream with a sentinel.
        def run_chain():
            result = pdf_qa(query, memory, llm)
            final_result[0] = result
            stream_handler.token_queue.put(None)

        # Run the chain in a separate thread.
        thread = threading.Thread(target=run_chain)
        thread.start()

        # Poll the token queue for new tokens and yield updated chat history.
        current_response = ""
        while True:
            try:
                token = stream_handler.token_queue.get(timeout=0.1)
            except queue.Empty:
                token = None
            # None means either the sentinel or a poll timeout: stop once the
            # worker thread has finished, otherwise keep polling.
            if token is None:
                if not thread.is_alive():
                    break
                else:
                    continue
            current_response += token
            chat_history[-1] = (query, current_response)
            yield "", chat_history, memory
        thread.join()

        # Optionally replace the streamed text with the chain's final answer.
        if final_result[0] and "answer" in final_result[0]:
            chat_history[-1] = (query, final_result[0]["answer"])
            yield "", chat_history, memory

    # Wire up the generator function to Gradio components with queue enabled.
    submit.click(user, [msg, chatbot, memory_state], [msg, chatbot, memory_state], queue=True)
    msg.submit(user, [msg, chatbot, memory_state], [msg, chatbot, memory_state], queue=True)
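    # queue=True lets Gradio stream each intermediate yield of `user` to the
    # browser; combined with demo.queue() below, this produces the incremental
    # typing effect in the chatbot.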
if __name__ == "__main__":
    demo.queue().launch()