File size: 5,150 Bytes
f6eb5cc
 
323f2cc
f6eb5cc
323f2cc
 
8fb164b
323f2cc
 
 
 
 
f6eb5cc
323f2cc
 
 
 
 
f6eb5cc
323f2cc
 
 
0a52bcb
323f2cc
 
f6eb5cc
323f2cc
 
 
 
 
 
 
8fb164b
323f2cc
 
f6eb5cc
 
323f2cc
 
f6eb5cc
323f2cc
f6eb5cc
323f2cc
f6eb5cc
323f2cc
f6eb5cc
323f2cc
f6eb5cc
323f2cc
f6eb5cc
323f2cc
f6eb5cc
323f2cc
f6eb5cc
323f2cc
f6eb5cc
 
323f2cc
 
 
f6eb5cc
323f2cc
8168cff
323f2cc
 
 
 
f6eb5cc
 
323f2cc
f6eb5cc
323f2cc
 
f6eb5cc
 
323f2cc
 
 
 
 
8fb164b
f6eb5cc
323f2cc
 
 
 
f6eb5cc
323f2cc
 
 
 
 
 
 
 
 
 
 
 
 
 
f6eb5cc
323f2cc
f6eb5cc
323f2cc
 
f6eb5cc
323f2cc
 
 
 
 
f6eb5cc
 
 
323f2cc
 
 
 
f6eb5cc
323f2cc
 
 
 
 
f6eb5cc
 
 
323f2cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ff61157
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
import os
from dotenv import load_dotenv
load_dotenv()

from langchain_astradb import AstraDBVectorStore
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationalRetrievalChain
from langchain_google_genai import ChatGoogleGenerativeAI
import streamlit as st
import time
import textwrap

# embeddings 
# Embedding model used to vectorize queries for Astra DB similarity search;
# must match the model the collection was populated with.
embeddings = GoogleGenerativeAIEmbeddings(
    model = "models/embedding-001",
    task_type = "retrieval_document"
)

# llm 
# Chat model that generates the counseling answers.
llm = ChatGoogleGenerativeAI(
    # model = "gemini-1.5-pro",
    model = "gemini-2.0-flash",  # fix: string literal was unterminated (missing closing quote)
    temperature = 0.7,
)

# Get Info about the Database 
# Vector store backed by DataStax Astra DB. Credentials are read from the
# environment (populated by load_dotenv() at the top of the file); if either
# variable is unset, os.getenv returns None and construction will fail.
vstore = AstraDBVectorStore(
    collection_name = "Bhagavad_gita_data",
    embedding = embeddings,
    token = os.getenv("ASTRA_DB_APPLICATION_TOKEN"),
    api_endpoint = os.getenv("ASTRA_DB_API_ENDPOINT"),
)

# Now Retrieve the Documents from Server
# Retriever that returns the top-5 most similar chunks per query.
retriever = vstore.as_retriever(search_kwargs = {"k" : 5})

# System/answer prompt for the combine-docs step of the retrieval chain.
# The template text itself is runtime data and is left untouched.
prompt_template = """
You are a wise counselor drawing from ancient Indian wisdom to offer psychological guidance. Your role is to provide practical, concise advice for modern challenges. 
You are going to be used for a psychiatrist assitance who gives advices on the context of bhagvad gita.
Follow these guidelines:

1. Begin with a brief, relatable insight from timeless teachings.

2. Offer 4 to 6 specific, actionable points of advice.

3. Each point should start on a new line and be clear and concise.

4. Connect each piece of advice to universal principles of success and well-being.

5. Use metaphors or examples from ancient texts without explicitly naming them.

6. Conclude with an encouraging statement that motivates the user to apply the advice.

7. Avoid religious terminology. Use phrases like "ancient wisdom" or "timeless teachings" instead.

8. Ensure your response is practical, universally applicable, and inspirational.

9. Be strict that if some gives some wrong or useless input which is not relevant to physcological issue or dilema then reply them to enter the proper question

10. If possible try to give only Bhagavad Gita Verse related to it at end don't get any other verse from any other book and give verse Translation and number only as many don't know to read sanskrit. 

11. If you don't know the verse from Bhagavad Gita just search through the context and then give Answer in Hindi.

Context: {context}
Question: {question}
Human: {human_input}
Chat History: {chat_history}
"""


# NOTE(review): ConversationalRetrievalChain's combine-docs step normally
# supplies only {context} and {question}; {human_input} and {chat_history}
# may raise a missing-input error at runtime — verify against the chain's
# input mapping.
PROMPT = PromptTemplate(
    template = prompt_template,
    input_variables = ["context", "question", "human_input", "chat_history"]
)

# Conversational RAG chain: condenses chat history + new question into a
# standalone query, retrieves top-k chunks via `retriever`, and answers
# using the custom PROMPT above. Source documents are not returned.
qa_chain = ConversationalRetrievalChain.from_llm(
    llm,
    retriever = retriever,
    combine_docs_chain_kwargs = {"prompt": PROMPT},
    return_source_documents = False,
)

# format the output in good format
def format_and_wrap_text(text, wrap_length=100):
    """Reformat '**'-delimited LLM output into plain wrapped text.

    The input is expected to alternate ``**Heading**`` markers with body
    segments whose bullets are introduced by ``'* '``. Each heading is
    emitted on its own line, followed by its bullets wrapped to
    ``wrap_length`` columns, then a blank line.

    Args:
        text: Raw model output containing '**'-delimited headings.
        wrap_length: Maximum line width for wrapped bullet text.

    Returns:
        The formatted string. (Previously the function printed the result
        and returned None, discarding the computed value; it also raised
        IndexError on a dangling '**' with no closing marker.)
    """
    # Odd indices of this split are heading texts, even indices are bodies.
    main_points = text.split('**')

    formatted_text = ""
    # Stop at len - 1 so a trailing unmatched heading cannot index past
    # the end of the list (the original range raised IndexError there).
    for i in range(1, len(main_points) - 1, 2):
        # Heading line.
        formatted_text += f"{main_points[i]}\n"

        # Bullets in the following body segment, separated by '* '.
        for subpoint in main_points[i + 1].strip().split('* '):
            if subpoint.strip():
                # Wrap each bullet to the requested width.
                formatted_text += f"{textwrap.fill(subpoint, wrap_length)}\n"

        formatted_text += "\n"

    return formatted_text

# Streamlit App Design 
st.set_page_config(page_title="Arjun AI")

# app 
st.title("Arjun AI")
st.write("Get Yourself Help from Krishna's Teaching of Bhagavad Gita")

# Initialize chat history
# st.session_state persists across Streamlit reruns; `messages` holds the
# full conversation as {"role": ..., "content": ...} dicts.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# React to user input
if prompt := st.chat_input("What is your question?"):
    # Echo the user's message and record it in the conversation history.
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant"):
        message_placeholder = st.empty()

        # Get response from QA chain.
        # NOTE(review): ConversationalRetrievalChain expects chat_history as
        # (human, ai) message pairs; (role, content) tuples are passed here —
        # verify the chain accepts this shape.
        result = qa_chain({
            "question": prompt,
            "human_input": prompt,
            "chat_history": [(msg["role"], msg["content"]) for msg in st.session_state.messages]
        })
        answer = result['answer']

        # Simulate a streamed reply by revealing the answer word by word.
        # Fix: the original loop assigned full_response to itself
        # (`full_response = f"{full_response}"`), so nothing accumulated and
        # the typing effect never happened — it only slept.
        full_response = ""
        for chunk in answer.split():
            full_response += chunk + " "
            time.sleep(0.05)
            # Trailing block character simulates a blinking typing cursor.
            message_placeholder.markdown(full_response + "▌")

        # Render (and store) the exact answer, without the trailing space
        # introduced by the word-joining loop above.
        full_response = answer
        message_placeholder.markdown(full_response)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})