import streamlit as st
from langchain_community.vectorstores import FAISS
from langchain_google_genai import GoogleGenerativeAIEmbeddings

from llm import GeminiModel, api_key as SECRET_KEY

####################################
# 1) RAG-enabled model definition  #
####################################
class RAGEnabledModel:
    def __init__(self):
        self.prompt = """
        You are a helpful chat assistant who provides information about the public statements
        and policy positions of different political parties in Germany for the upcoming 2025 elections.
        When the user asks a question, you should respond in the same language they used
        (e.g., if they ask in German, respond in German; if in English, respond in English).
        Focus on factual information regarding each party's stance, referencing relevant
        policy areas such as the economy, immigration, healthcare, the environment, and so on.
        Stay neutral and objective, providing factual information without bias or personal
        political opinions. Search online to find the most up-to-date information.
        """
        # Load the prebuilt FAISS vector store from disk. The
        # allow_dangerous_deserialization flag is required because FAISS
        # indexes are pickled; only enable it for index files you built yourself.
        self.vector_db = FAISS.load_local(
            "./faiss_index",
            GoogleGenerativeAIEmbeddings(
                model="models/embedding-001",
                google_api_key=SECRET_KEY,
            ),
            allow_dangerous_deserialization=True,
        )
        # Instantiate the Gemini wrapper imported from llm.py
        self.model = GeminiModel()
    def retrieve_documents(self, query):
        """Retrieve the most relevant documents from the FAISS vector store."""
        results = self.vector_db.similarity_search(query, k=5)
        return results
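
    # Hedged sketch (not in the original code): the same store also exposes
    # similarity_search_with_score, which returns (Document, L2 distance)
    # pairs, so weak matches can be filtered out before they reach the prompt.
    # The 0.5 cutoff below is an illustrative assumption, not a tuned value.
    def retrieve_documents_filtered(self, query, max_distance=0.5):
        """Like retrieve_documents, but drops hits with a large FAISS distance."""
        scored = self.vector_db.similarity_search_with_score(query, k=5)
        # For FAISS, lower scores mean closer vectors, so keep small distances.
        return [doc for doc, score in scored if score <= max_distance]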
    def predict(self, text, history):
        """Perform a RAG-enabled prediction."""
        # Step 1: Retrieve relevant documents
        documents = self.retrieve_documents(text)
        # Step 2: Incorporate the retrieved documents into the prompt
        context = "\n\n".join([doc.page_content for doc in documents])
        augmented_prompt = f"{self.prompt}\n\nRelevant Context:\n{context}\n\nUser Query: {text}"
        # Step 3: Use the model for prediction (the pricing info is discarded here)
        outp, pricing = self.model.predict(
            augmented_prompt,
            history=history,
            grounding_threshold=0.15,
        )
        return outp
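
# Hedged sketch (assumption, not part of the original upload): a one-off
# helper along these lines could have produced the ./faiss_index directory
# that __init__ loads. The source paths and chunking parameters are
# illustrative placeholders.
def build_index(paths, out_dir="./faiss_index"):
    """Embed local text files and save them as a FAISS index."""
    from langchain_community.document_loaders import TextLoader
    from langchain_text_splitters import RecursiveCharacterTextSplitter

    docs = []
    for path in paths:
        docs.extend(TextLoader(path, encoding="utf-8").load())
    chunks = RecursiveCharacterTextSplitter(
        chunk_size=1000, chunk_overlap=100
    ).split_documents(docs)
    embeddings = GoogleGenerativeAIEmbeddings(
        model="models/embedding-001", google_api_key=SECRET_KEY
    )
    FAISS.from_documents(chunks, embeddings).save_local(out_dir)
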
####################################
# 2) Streamlit application layout #
####################################
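@st.cache_resource
def load_model():
    # Cache the heavy objects (FAISS index + Gemini client) so they are built
    # once per process instead of on every Streamlit rerun. st.cache_resource
    # is the standard Streamlit pattern for this (requires a recent Streamlit
    # version); this caching wrapper is an addition, not in the original upload.
    return RAGEnabledModel()
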
def main():
    st.set_page_config(page_title="German 2025 Elections - Political Parties", layout="centered")
    st.title("German Political Parties' Statements for the 2025 Elections")
    # Initialize the conversation history
    if "history" not in st.session_state:
        # We'll store (speaker, message) tuples in this list
        st.session_state.history = []
    # Get the (cached) RAGEnabledModel instance
    model = load_model()
    #############################
    # 3) Chat-style input form #
    #############################
    with st.form(key="user_form"):
        user_input = st.text_input(
            "You:",
            placeholder="Ask about a political party's stance on any policy in Germany (2025 elections)..."
        )
        submitted = st.form_submit_button("Send")
    ##########################
    # 4) Handle user submit #
    ##########################
    if submitted and user_input:
        # Save the user message
        st.session_state.history.append(("user", user_input))
        # The system prompt already tells the model to answer in the user's
        # language. To be explicit, you could detect the language and prepend
        # an instruction, e.g. with langdetect:
        #     from langdetect import detect
        #     user_lang = detect(user_input)
        #     instruction = f"Please reply in {user_lang}."
        #     combined_input = f"{instruction}\n\nUser asked: {user_input}"
        # Since the model detects and matches the language on its own, the raw
        # user_input is passed through here.
        response = model.predict(user_input, st.session_state.history)
        # Save the model's response
        st.session_state.history.append(("bot", response))
    ##################################
    # 5) Display the chat messages #
    ##################################
    for speaker, message in st.session_state.history:
        if speaker == "user":
            st.markdown(f"**You**: {message}")
        else:
            st.markdown(f"**Assistant**: {message}")

###################################
# 6) Entry point for the app     #
###################################
if __name__ == "__main__":
    main()