| import os |
| import streamlit as st |
| import time |
| from langchain.chat_models import ChatOpenAI |
| from langchain.chains import ConversationalRetrievalChain |
| from langchain.prompts import PromptTemplate |
| from langchain.memory import ConversationSummaryBufferMemory |
| from langchain.vectorstores import FAISS |
| from langchain.embeddings import OpenAIEmbeddings |
|
|
| |
# Fail fast with an actionable message if the API key is absent. The previous
# self-assignment (os.environ[k] = os.environ.get(k)) was a no-op when the key
# was set and raised a confusing TypeError (env values must be str, not None)
# when it was not.
if not os.environ.get("OPENAI_API_KEY"):
    raise EnvironmentError(
        "OPENAI_API_KEY environment variable is not set. "
        "Export it before launching the app."
    )
|
|
| |
# Embedding model used to encode user queries into the same vector space
# as the pre-built course index.
embeddings = OpenAIEmbeddings()

# Load the FAISS index from local disk. Deserialization is explicitly allowed
# here because the index file is produced by a trusted local build step —
# never point this at an untrusted file.
vectorstore = FAISS.load_local(
    "faiss_index",
    embeddings,
    allow_dangerous_deserialization=True,
)

# Retriever returning the 5 most similar course documents per query.
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
|
|
| |
# System prompt for the recommendation chain. The three placeholders are
# filled in by ConversationalRetrievalChain at runtime:
#   {chat_history} - summarized prior turns (from the memory object)
#   {question}     - the user's current message
#   {context}      - course documents returned by the retriever
prompt_template = """
You are an AI-powered course recommendation expert with extensive knowledge of educational programs across various disciplines. Your primary goal is to provide personalized, high-quality course suggestions tailored to each user's unique interests, goals, and background.
Do not retrieve course recommendations if the user hasn't specifically asked for them or is simply greeting the chatbot.
In such general cases, focus on engaging the user by asking about their learning interests or what they are looking to explore.

Conversation History:
{chat_history}

Current User Query:
{question}

Relevant Courses from Database:
{context}

Instructions for Crafting Your Response:

1. Engagement and Tone:
- Begin with a warm, friendly greeting if this is a new interaction.
- Maintain a professional yet approachable tone throughout the conversation.
- If the user initiates casual chat, engage briefly before steering the conversation towards educational interests.

2. Analysis and Recommendation:
- Carefully analyze the user's query and conversation history to understand their educational needs, interests, and any constraints.
- Select the most relevant courses from the provided context, prioritizing those with learning outcomes and syllabus content that closely match the user's requirements.

3. Detailed Course Recommendations:
For each recommended course, provide:
- Course title and offering institution
- A concise overview of the course content
- Specific skills and knowledge to be gained (from "What You Will Learn")
- Key topics covered in the syllabus
- Course level, duration, and language of instruction
- Course ratings and reviews, if available
- Direct URL to the course page

4. Personalized Explanation:
- Clearly articulate how each recommended course aligns with the user's expressed interests and goals.
- Highlight specific aspects of the course that address the user's needs or previous queries.


Remember to prioritize accuracy, relevance, and user-centricity in your recommendations. Your goal is to empower the user to make informed decisions about their educational path.

Recommendation:
"""




# Template object wiring the placeholder names above to the chain's inputs;
# the variable names must match memory_key ("chat_history") and the chain's
# standard "question"/"context" keys.
PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["chat_history", "question", "context"]
)
|
|
| |
def _build_chain():
    """Construct the LLM, its summarizing memory, and the retrieval chain."""
    llm = ChatOpenAI(temperature=0.7, model_name="gpt-4o")
    # Summarizing buffer memory: keeps recent turns verbatim and compresses
    # older ones once the 1000-token budget is exceeded.
    memory = ConversationSummaryBufferMemory(
        llm=llm,
        max_token_limit=1000,
        memory_key="chat_history",
        return_messages=True,
    )
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory,
        combine_docs_chain_kwargs={"prompt": PROMPT},
    )


# Streamlit re-executes this whole script on every user interaction. Building
# the chain at module level therefore recreated the memory object each rerun,
# wiping the conversation history every turn ({chat_history} was always
# empty). Persist the chain — and with it the memory — in session_state so
# history survives reruns.
if "qa_chain" not in st.session_state:
    st.session_state.qa_chain = _build_chain()
qa_chain = st.session_state.qa_chain
|
|
| |
# Browser-tab title and icon. (Fixed typo: was "HCourse Recommendation Chatbot".)
st.set_page_config(page_title="Course Recommendation Chatbot", page_icon=":book:")
# NOTE(review): the trailing "π" looks like a mojibake'd emoji (possibly a bee,
# to match "HONEY BEE") — confirm the intended glyph before changing it.
st.title("HONEY BEE: Course Recommendation Chatbot π")
|
|
| |
# Seed the chat transcript with a greeting on the first run of a session;
# subsequent reruns reuse the existing list.
if "messages" not in st.session_state:
    greeting = (
        "Hello! I'm HONEY BEE, your friendly Course Recommendation Chatbot! π "
        "I'm here to help you find the best courses based on your interests and goals. "
        "Feel free to ask me anything about learning or courses!"
    )
    st.session_state.messages = [{"role": "assistant", "content": greeting}]
|
|
| |
# Replay the stored transcript so the full conversation is visible after
# each script rerun.
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])
|
|
| |
# Handle a newly submitted user message for this rerun.
if prompt := st.chat_input("What are you looking to learn?"):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        # Run the retrieval-augmented chain; "answer" holds the final text.
        response = qa_chain({"question": prompt})
        response_text = response["answer"]

        # Simulated streaming, emitted word-by-word. The previous
        # character-by-character loop slept once per character, so a
        # 2,000-character answer took 20+ seconds just in sleep() calls.
        placeholder = st.empty()
        shown = ""
        for token in response_text.split(" "):
            shown = token if not shown else f"{shown} {token}"
            # NOTE(review): unsafe_allow_html renders raw HTML coming from the
            # LLM's output — confirm this is intended; it permits markup
            # injection into the page.
            placeholder.markdown(shown, unsafe_allow_html=True)
            time.sleep(0.01)
        # Final render of the complete answer, byte-identical to the chain output.
        placeholder.markdown(response_text, unsafe_allow_html=True)

    st.session_state.messages.append({"role": "assistant", "content": response_text})