# NOTE: the Hugging Face Spaces log for this app reported "Runtime error";
# the script below is the reconstructed source of that Space.
# gradio_chatbot.py
from pathlib import Path

import gradio as gr
from langchain.chains import LLMChain, RetrievalQA
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Chroma
from langchain_community.llms import Ollama
# -------------------------------
# 1. Load PDF with demo conversations
# -------------------------------
pdf_path = "demo_conversation.pdf"  # replace with your PDF path
# Fail fast with an actionable message instead of an opaque loader traceback —
# a missing PDF is the most common cause of a startup "Runtime error" here.
if not Path(pdf_path).exists():
    raise FileNotFoundError(
        f"PDF not found: {pdf_path!r} — place the demo conversation PDF "
        "next to this script or update pdf_path."
    )
loader = PyPDFLoader(pdf_path)
documents = loader.load()
# -------------------------------
# 2. Create embeddings and vector store (Chroma)
# -------------------------------
# Sentence-transformer embeddings feed an in-memory Chroma index; the
# retriever then returns the 3 most similar chunks per query.
embedding_model = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vectorstore = Chroma.from_documents(documents, embedding_model)
retriever = vectorstore.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 3},
)
# -------------------------------
# 3. Load Ollama LLM
# -------------------------------
llm = Ollama(model="llama3:8b")  # Make sure llama3:8b is installed
# -------------------------------
# 4. Create RetrievalQA for PDF content
# -------------------------------
# Retrieval-augmented QA: fetches the top-k chunks from `retriever` and has
# `llm` answer from them; source documents are returned alongside the answer.
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    return_source_documents=True
)
# -------------------------------
# 5. Create conversation memory chain
# -------------------------------
# Persona template: the model answers in-character as "Jordan", with the
# running transcript injected through the {history} slot.
persona_prompt = """
You are Jordan Reyes, a warm and empathetic fitness coach.
Use the knowledge from the uploaded PDF demo conversation to answer user questions.
Keep track of conversation history and respond like Jordan.
Chat History:
{history}
User: {user_input}
Jordan:
"""
prompt = PromptTemplate(
    template=persona_prompt,
    input_variables=["history", "user_input"],
)
# Buffer memory accumulates the full transcript under the "history" key.
memory = ConversationBufferMemory(memory_key="history")
conversation_chain = LLMChain(llm=llm, prompt=prompt, memory=memory)
# -------------------------------
# 6. Gradio interface function
# -------------------------------
def chat_with_jordan(user_input, chat_history):
    """Answer one user turn as Jordan.

    Retrieves PDF context via the RetrievalQA chain, then generates an
    in-persona reply with the memory-backed conversation chain.

    Args:
        user_input: The text the user just submitted.
        chat_history: Gradio "messages"-style history — a list of
            {"role", "content"} dicts — or None on the very first call.

    Returns:
        Tuple of (updated_history, "") — the empty string clears the input box.
    """
    if chat_history is None:
        chat_history = []
    # First turn: seed the greeting instead of answering. Reuse init_chat()
    # so the greeting text lives in exactly one place.
    # NOTE(review): the user's first message is dropped on this branch —
    # confirm that is the intended UX.
    if not chat_history:
        chat_history.extend(init_chat())
        return chat_history, ""
    # Step 1: Retrieve grounded context from the PDF index.
    response_dict = qa_chain.invoke({"query": user_input})
    pdf_context = response_dict["result"]
    # Step 2: Generate the persona reply, appending the retrieved context.
    response = conversation_chain.invoke(
        {"user_input": f"{user_input}\nContext: {pdf_context}"}
    )
    answer = response["text"]
    # Step 3: Record both turns in messages format.
    chat_history.append({"role": "user", "content": user_input})
    chat_history.append({"role": "assistant", "content": answer})
    return chat_history, ""  # second "" clears the input box
# -------------------------------
# 7. Function to initialize chat with welcome message
# -------------------------------
def init_chat():
    """Return a fresh history holding only Jordan's opening greeting."""
    greeting = """Hi, I'm Jordan Reyes, your Fitness Connection Wellness Coach. My role is simple — help you hit your goals with the right tools.
Before I suggest anything, what's the #1 outcome you want most right now?
1. Weight loss
2. More energy
3. Faster recovery
4. Sharper focus
5. Vitality"""
    return [{"role": "assistant", "content": greeting}]
# -------------------------------
# 8. Launch Gradio Web App
# -------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## Jordan Chatbot - Fitness Coach")
    # The history is built from {"role", "content"} dicts, so the Chatbot
    # must run in "messages" mode — the default tuple mode rejects dict
    # entries and errors at render time (a likely cause of the logged
    # runtime error).
    chatbot = gr.Chatbot(value=init_chat, type="messages")
    msg = gr.Textbox(placeholder="Type your message here...")
    # Clear button to reset conversation
    clear_btn = gr.Button("Clear Chat")

    def clear_chat():
        """Reset the LLM conversation memory and restore the greeting."""
        memory.clear()
        return init_chat()

    # Handle message submission
    msg.submit(chat_with_jordan, [msg, chatbot], [chatbot, msg])
    # Handle clear button
    clear_btn.click(clear_chat, None, [chatbot])

demo.launch()