"""Streamlit chat app: a conversational Data Science tutor backed by Gemini.

Reads the Gemini API key from the GEMINI_API_KEY environment variable
(e.g. a Hugging Face Space secret). Both the visible chat transcript and
the LangChain conversation memory are kept in ``st.session_state`` so
they survive Streamlit's per-interaction script reruns.
"""
import os

import streamlit as st
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_google_genai import ChatGoogleGenerativeAI

GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")

st.set_page_config(page_title="Conversational AI Data Science Tutor", page_icon="🤖")
st.title("🤖 Conversational AI Data Science Tutor")
st.write("Ask me any **Data Science** related question!")

if not GEMINI_API_KEY:
    st.error("❌ GEMINI_API_KEY not found. Please add it in Hugging Face → Settings → Variables and secrets.")
else:
    # Build the LLM + memory + chain ONCE and cache them in session_state.
    # Streamlit reruns this whole script on every interaction; recreating
    # ConversationBufferMemory on each run wiped the model's conversational
    # context even though the on-screen transcript persisted.
    if "conversation" not in st.session_state:
        llm = ChatGoogleGenerativeAI(
            model="gemini-1.5-pro",
            google_api_key=GEMINI_API_KEY,
        )
        st.session_state.conversation = ConversationChain(
            llm=llm,
            memory=ConversationBufferMemory(),
            verbose=False,
        )

    # Visible transcript, persisted across reruns.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Replay prior turns so the chat history stays on screen after a rerun.
    for msg in st.session_state.messages:
        st.chat_message(msg["role"]).markdown(msg["content"])

    if prompt := st.chat_input("Ask a data science question..."):
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.chat_message("user").markdown(prompt)

        # The cached chain carries the full prior conversation to the model.
        response = st.session_state.conversation.predict(input=prompt)

        st.session_state.messages.append({"role": "assistant", "content": response})
        st.chat_message("assistant").markdown(response)