Spaces:
No application file
No application file
File size: 1,525 Bytes
00325a1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 |
from langchain_groq import ChatGroq
from dotenv import load_dotenv
import streamlit as st
import os

from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.schema import HumanMessage

# Load environment variables from a local .env file (API keys live there).
load_dotenv()

# Set up API keys.
# BUG FIX: the variable name was misspelled "GOQ_API_KEY", so this was
# always None; the conventional name is "GROQ_API_KEY".
groq_api_key = os.getenv("GROQ_API_KEY")

# Export the Google key only when it is actually set: assigning None into
# os.environ raises TypeError (environ values must be strings).
_google_api_key = os.getenv("GOOGLE_API_KEY")
if _google_api_key:
    os.environ['GOOGLE_API_KEY'] = _google_api_key
def get_response(question):
    """Send *question* to the Gemini model and return its reply message.

    Args:
        question: The user's question as a plain string.

    Returns:
        The model's response message object; the generated text is
        available on its ``.content`` attribute.
    """
    chatllm = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro",
        temperature=0.5,  # Adjusted temperature for variety
        max_tokens=150,   # Set max tokens to avoid None issues
        timeout=None,     # No request timeout; rely on max_retries below
        max_retries=2,
    )
    # BUG FIX: calling the chat model instance directly (``chatllm([...])``)
    # is the deprecated LangChain __call__ API, removed in newer releases;
    # ``.invoke`` is the supported Runnable interface.
    response = chatllm.invoke([HumanMessage(content=question)])
    return response
# --- Streamlit UI ---
st.set_page_config(page_title="QA BOT")

# Question input plus the submit control that triggers generation.
input_text = st.text_input("Input: ", key="input")
submit = st.button("Ask Question")

if submit:
    if not input_text:
        # Nothing typed — prompt the user instead of calling the model.
        st.write("Please enter a question.")
    else:
        answer = get_response(input_text)  # Ask the model
        st.subheader("The response is:")
        # Show the generated text, or a fallback when the model returned
        # an empty message.
        if answer and answer.content:
            st.write(answer.content)
        else:
            st.write("No content received. Please try a different question.")
|