import streamlit as st
from langchain_google_genai import ChatGoogleGenerativeAI
from dotenv import load_dotenv
import os

# Load environment variables (expects GOOGLE_API_KEY in .env or the environment)
load_dotenv()

# Initialize the language model once at module import time
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0.6,
    google_api_key=os.getenv("GOOGLE_API_KEY"),
)

# Set Streamlit page configuration
st.set_page_config(
    page_title="Q&A Chatbot",
    page_icon="🤖",
    layout="centered",
    initial_sidebar_state="auto",
)

# Main application interface
st.header("Quest Bot")


def chat(input_question):
    """Send the user's question to the model and return its text reply.

    Shows a spinner while waiting for the model. On failure the error is
    surfaced in the UI via ``st.error`` and ``None`` is returned.

    Args:
        input_question: The question string to send to the model.

    Returns:
        The model's reply text, or ``None`` if the call raised.
    """
    try:
        # Get response from the model
        with st.spinner("getting response..."):
            response = llm.invoke(input_question)
            return response.content
    except ValueError as ve:
        st.error(f"ValueError: {str(ve)}")
    except Exception as e:
        st.error(f"Error: {str(e)}")
    return None


# Keep the conversation history in session state so it survives Streamlit
# reruns. (Streamlit re-executes this script on every interaction, so a
# plain module-level list would be reset to [] on each button click and
# the history would never accumulate.)
if "conversation" not in st.session_state:
    st.session_state.conversation = []
conversation = st.session_state.conversation

# User input for the question
input_question = st.text_input("Input your question here: ")

# Button to submit the question
if st.button("Ask"):
    if input_question:
        # NOTE: the previous bare `st.spinner(...)` call here was a no-op
        # (the context manager was never entered); the real spinner lives
        # inside chat(), so it has been removed.

        # Add user input to conversation history
        conversation.append(f"You: {input_question}")

        # Get response from the model
        response = chat(input_question)
        if response:
            # Add model response to conversation history
            conversation.append(f"Quest Bot: {response}")

        # Display conversation history
        st.subheader("Conversation")
        for message in conversation:
            st.write(message)
    else:
        # Show a warning if no question is input
        st.warning("Please input a question.")