import os
import time
import textwrap

import streamlit as st
from dotenv import load_dotenv
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain_astradb import AstraDBVectorStore
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings

load_dotenv()
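
# Expected .env entries (assumption, based on the clients configured below):
#   GOOGLE_API_KEY              - read by the langchain_google_genai models
#   ASTRA_DB_APPLICATION_TOKEN  - passed to AstraDBVectorStore
#   ASTRA_DB_API_ENDPOINT       - passed to AstraDBVectorStore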

# Embedding model used to vectorize queries against the stored verses
embeddings = GoogleGenerativeAIEmbeddings(
    model="models/embedding-001",
    task_type="retrieval_document",
)

# Chat model that generates the counseling responses
llm = ChatGoogleGenerativeAI(
    model="gemini-2.0-flash",
    temperature=0.7,
)
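
# Note: retrieval only works well if this embedding model matches the one that
# was used when the Bhagavad_gita_data collection was originally populated.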

# Connect to the Astra DB collection that stores the Bhagavad Gita embeddings
vstore = AstraDBVectorStore(
    collection_name="Bhagavad_gita_data",
    embedding=embeddings,
    token=os.getenv("ASTRA_DB_APPLICATION_TOKEN"),
    api_endpoint=os.getenv("ASTRA_DB_API_ENDPOINT"),
)

# Retriever that returns the 5 most similar chunks for each query
retriever = vstore.as_retriever(search_kwargs={"k": 5})

prompt_template = """
You are a wise counselor drawing from ancient Indian wisdom to offer psychological guidance. Your role is to provide practical, concise advice for modern challenges.
You are used as a psychiatric assistant that gives advice grounded in the context of the Bhagavad Gita.
Follow these guidelines:

1. Begin with a brief, relatable insight from timeless teachings.

2. Offer 4 to 6 specific, actionable points of advice.

3. Each point should start on a new line and be clear and concise.

4. Connect each piece of advice to universal principles of success and well-being.

5. Use metaphors or examples from ancient texts without explicitly naming them.

6. Conclude with an encouraging statement that motivates the user to apply the advice.

7. Avoid religious terminology. Use phrases like "ancient wisdom" or "timeless teachings" instead.

8. Ensure your response is practical, universally applicable, and inspirational.

9. Be strict: if the input is not relevant to a psychological issue or dilemma, ask the user to enter a proper question.

10. If possible, end with a single related verse from the Bhagavad Gita only (never from any other book), giving just the verse number and its translation, since many users cannot read Sanskrit.

11. If you do not know the relevant Bhagavad Gita verse, search through the context and then give the answer in Hindi.

Context: {context}
Question: {question}
Human: {human_input}
Chat History: {chat_history}
"""

PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["context", "question", "human_input", "chat_history"],
)

# Conversational RAG chain: condenses the chat history and question, retrieves
# relevant verses, and answers using the prompt above
qa_chain = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=retriever,
    combine_docs_chain_kwargs={"prompt": PROMPT},
    return_source_documents=False,
)
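
# Quick sanity check outside Streamlit (hypothetical invocation, using the same
# input keys the app passes below):
#   result = qa_chain({"question": "How do I deal with anxiety?",
#                      "human_input": "How do I deal with anxiety?",
#                      "chat_history": []})
#   print(result["answer"])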

# Pretty-print a response that uses '**' for headings and '* ' for bullet points
# (console helper; not called in the Streamlit flow below)
def format_and_wrap_text(text, wrap_length=100):
    # Split the text into main points on the '**' markers
    main_points = text.split('**')

    formatted_text = ""
    for i in range(1, len(main_points), 2):
        # Add the main point title
        formatted_text += f"{main_points[i]}\n"

        # Split the subpoints by '* '
        subpoints = main_points[i + 1].strip().split('* ')
        for subpoint in subpoints:
            if subpoint.strip():
                # Wrap each subpoint to the requested width
                wrapped_subpoint = textwrap.fill(subpoint, wrap_length)
                formatted_text += f"{wrapped_subpoint}\n"

        formatted_text += "\n"

    print(formatted_text)
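
# Example (hypothetical input, matching the '**'/'* ' markers the helper expects):
#   format_and_wrap_text("**Stay steady** * Focus on effort, not outcomes * Breathe before reacting")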

# Streamlit App Design
st.set_page_config(page_title="Arjun AI")

# App header
st.title("Arjun AI")
st.write("Get guidance from Krishna's teachings in the Bhagavad Gita")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# React to user input
if prompt := st.chat_input("What is your question?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant"):
        message_placeholder = st.empty()

        # Get response from the QA chain; chat_history is passed as
        # (user, assistant) content pairs from previous turns, excluding
        # the prompt that was just appended above
        past = st.session_state.messages[:-1]
        result = qa_chain({
            "question": prompt,
            "human_input": prompt,
            "chat_history": [
                (past[i]["content"], past[i + 1]["content"])
                for i in range(0, len(past) - 1, 2)
            ],
        })
        full_response = result["answer"]

        # Simulate a stream of the response with a small delay per word
        streamed = ""
        for chunk in full_response.split():
            streamed += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            message_placeholder.markdown(streamed + "▌")

        # Final render with the answer's original formatting
        message_placeholder.markdown(full_response)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})