Tejas1020 commited on
Commit
f6eb5cc
·
verified ·
1 Parent(s): 3fe578d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +149 -153
app.py CHANGED
@@ -1,154 +1,150 @@
1
- import os
2
- from dotenv import load_dotenv
3
- load_dotenv()
4
-
5
- from langchain_astradb import AstraDBVectorStore
6
- from langchain_google_genai import GoogleGenerativeAIEmbeddings
7
- from langchain.prompts import PromptTemplate
8
- from langchain.chains import ConversationalRetrievalChain
9
- from langchain_google_genai import ChatGoogleGenerativeAI
10
- import streamlit as st
11
- import time
12
- import textwrap
13
-
14
- # tokens and all
15
- os.environ["GOOGLE_API_KEY"] = "[REDACTED — a live Google API key was committed here; it remains in git history and must be revoked/rotated]"
16
- os.environ["ASTRA_DB_API_ENDPOINT"] = "https://85494e7a-90c5-40ad-aa7d-3fa4fa3f6c11-us-east-2.apps.astra.datastax.com"
17
- os.environ["ASTRA_DB_APPLICATION_TOKEN"] = "[REDACTED — a live Astra DB application token was committed here; it remains in git history and must be revoked/rotated]"
18
-
19
- # embeddings
20
- embeddings = GoogleGenerativeAIEmbeddings(
21
- model = "models/embedding-001",
22
- task_type = "retrieval_document"
23
- )
24
-
25
- # llm
26
- llm = ChatGoogleGenerativeAI(
27
- model = "gemini-1.5-pro-latest",
28
- temperature = 0.7,
29
- )
30
-
31
- # Get Info about the Database
32
- vstore = AstraDBVectorStore(
33
- collection_name = "Bhagavad_gita_data",
34
- embedding = embeddings,
35
- token = os.getenv("ASTRA_DB_APPLICATION_TOKEN"),
36
- api_endpoint = os.getenv("ASTRA_DB_API_ENDPOINT"),
37
- )
38
-
39
- # Now Retrieve the Documents from Server
40
- retriever = vstore.as_retriever(search_kwargs = {"k" : 5})
41
-
42
- prompt_template = """
43
- You are a wise counselor drawing from ancient Indian wisdom to offer psychological guidance. Your role is to provide practical, concise advice for modern challenges.
44
- You are going to be used for a psychiatrist assitance who gives advices on the context of bhagvad gita.
45
- Follow these guidelines:
46
-
47
- 1. Begin with a brief, relatable insight from timeless teachings.
48
-
49
- 2. Offer 4 to 6 specific, actionable points of advice.
50
-
51
- 3. Each point should start on a new line and be clear and concise.
52
-
53
- 4. Connect each piece of advice to universal principles of success and well-being.
54
-
55
- 5. Use metaphors or examples from ancient texts without explicitly naming them.
56
-
57
- 6. Conclude with an encouraging statement that motivates the user to apply the advice.
58
-
59
- 7. Avoid religious terminology. Use phrases like "ancient wisdom" or "timeless teachings" instead.
60
-
61
- 8. Ensure your response is practical, universally applicable, and inspirational.
62
-
63
- 9. Be strict that if some gives some wrong or useless input which is not relevant to physcological issue or dilema then reply them to enter the proper question
64
-
65
- 10. If possible try to give only Bhagavad Gita Verse related to it at end don't get any other verse from any other book.
66
-
67
- Context: {context}
68
- Question: {question}
69
- Human: {human_input}
70
- Chat History: {chat_history}
71
- """
72
-
73
-
74
- PROMPT = PromptTemplate(
75
- template = prompt_template,
76
- input_variables = ["context", "question", "human_input", "chat_history"]
77
- )
78
-
79
- qa_chain = ConversationalRetrievalChain.from_llm(
80
- llm,
81
- retriever = retriever,
82
- combine_docs_chain_kwargs = {"prompt": PROMPT},
83
- return_source_documents = False,
84
- )
85
-
86
- # format the output in good format
87
- def format_and_wrap_text(text, wrap_length=100):
88
- # Split the text into main points
89
- main_points = text.split('**')
90
-
91
- formatted_text = ""
92
- for i in range(1, len(main_points), 2):
93
- # Add the main point title
94
- formatted_text += f"{main_points[i]}\n"
95
-
96
- # Split the subpoints by '* '
97
- subpoints = main_points[i+1].strip().split('* ')
98
- for subpoint in subpoints:
99
- if subpoint.strip():
100
- # Wrap each subpoint and add a bullet
101
- wrapped_subpoint = textwrap.fill(subpoint, wrap_length)
102
- formatted_text += f"{wrapped_subpoint}\n"
103
-
104
- formatted_text += "\n"
105
-
106
- print(formatted_text)
107
-
108
- # Streamlit App Design
109
- st.set_page_config(page_title="Arjun AI")
110
-
111
-
112
- # app
113
- st.title("Arjun AI")
114
- st.write("Get Yourself Help from Krishna's Teaching of Bhagavad Gita")
115
-
116
- # Initialize chat history
117
- if "messages" not in st.session_state:
118
- st.session_state.messages = []
119
-
120
- # Display chat messages from history on app rerun
121
- for message in st.session_state.messages:
122
- with st.chat_message(message["role"]):
123
- st.markdown(message["content"])
124
-
125
- # React to user input
126
- if prompt := st.chat_input("What is your question?"):
127
- # Display user message in chat message container
128
- st.chat_message("user").markdown(prompt)
129
- # Add user message to chat history
130
- st.session_state.messages.append({"role": "user", "content": prompt})
131
-
132
- with st.chat_message("assistant"):
133
- message_placeholder = st.empty()
134
- full_response = ""
135
-
136
- # Get response from QA chain
137
- result = qa_chain({
138
- "question": prompt,
139
- "human_input": prompt,
140
- "chat_history": [(msg["role"], msg["content"]) for msg in st.session_state.messages]
141
- })
142
- full_response = result['answer']
143
-
144
- # Simulate stream of response with milliseconds delay
145
- for chunk in full_response.split():
146
- full_response = f"{full_response}"
147
- time.sleep(0.05)
148
- # Add a blinking cursor to simulate typing
149
- message_placeholder.markdown(full_response)
150
-
151
- message_placeholder.markdown(full_response)
152
-
153
- # Add assistant response to chat history
154
  st.session_state.messages.append({"role": "assistant", "content": full_response})
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ load_dotenv()
4
+
5
+ from langchain_astradb import AstraDBVectorStore
6
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
7
+ from langchain.prompts import PromptTemplate
8
+ from langchain.chains import ConversationalRetrievalChain
9
+ from langchain_google_genai import ChatGoogleGenerativeAI
10
+ import streamlit as st
11
+ import time
12
+ import textwrap
13
+
14
# Embedding model: Google Generative AI embeddings, configured for the
# document-retrieval task type used when querying the vector store.
embeddings = GoogleGenerativeAIEmbeddings(
    model="models/embedding-001",
    task_type="retrieval_document",
)

# Chat LLM that generates the counselling answers.
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro-latest",
    temperature=0.7,
)

# Astra DB vector store holding the pre-ingested Bhagavad Gita passages.
# Credentials come from the environment (.env loaded above) — never hard-coded.
vstore = AstraDBVectorStore(
    collection_name="Bhagavad_gita_data",
    embedding=embeddings,
    token=os.getenv("ASTRA_DB_APPLICATION_TOKEN"),
    api_endpoint=os.getenv("ASTRA_DB_API_ENDPOINT"),
)

# Retriever that returns the 5 most similar passages for each query.
retriever = vstore.as_retriever(search_kwargs={"k": 5})
36
+
37
+ prompt_template = """
38
+ You are a wise counselor drawing from ancient Indian wisdom to offer psychological guidance. Your role is to provide practical, concise advice for modern challenges.
39
+ You are going to be used for a psychiatrist assitance who gives advices on the context of bhagvad gita.
40
+ Follow these guidelines:
41
+
42
+ 1. Begin with a brief, relatable insight from timeless teachings.
43
+
44
+ 2. Offer 4 to 6 specific, actionable points of advice.
45
+
46
+ 3. Each point should start on a new line and be clear and concise.
47
+
48
+ 4. Connect each piece of advice to universal principles of success and well-being.
49
+
50
+ 5. Use metaphors or examples from ancient texts without explicitly naming them.
51
+
52
+ 6. Conclude with an encouraging statement that motivates the user to apply the advice.
53
+
54
+ 7. Avoid religious terminology. Use phrases like "ancient wisdom" or "timeless teachings" instead.
55
+
56
+ 8. Ensure your response is practical, universally applicable, and inspirational.
57
+
58
+ 9. Be strict that if some gives some wrong or useless input which is not relevant to physcological issue or dilema then reply them to enter the proper question
59
+
60
+
61
+ 10. If possible try to give only Bhagavad Gita Verse related to it at end don't get any other verse from any other book and give verse Translation and number only as many don't know to read sanskrit.
62
+
63
+ Context: {context}
64
+ Question: {question}
65
+ Human: {human_input}
66
+ Chat History: {chat_history}
67
+ """
68
+
69
+
70
+ PROMPT = PromptTemplate(
71
+ template = prompt_template,
72
+ input_variables = ["context", "question", "human_input", "chat_history"]
73
+ )
74
+
75
+ qa_chain = ConversationalRetrievalChain.from_llm(
76
+ llm,
77
+ retriever = retriever,
78
+ combine_docs_chain_kwargs = {"prompt": PROMPT},
79
+ return_source_documents = False,
80
+ )
81
+
82
# format the output in good format
def format_and_wrap_text(text, wrap_length=100):
    """Reflow a markdown-ish LLM answer into plain wrapped text.

    The input is expected to use ``**Title**`` for section headings and
    ``* `` for bullet points. Each heading is kept on its own line and each
    bullet is word-wrapped to ``wrap_length`` columns.

    Args:
        text: Raw model output.
        wrap_length: Maximum line width for wrapped bullet text.

    Returns:
        str: The formatted text. (Previously this function only printed the
        result and returned None, discarding its work; it also raised
        IndexError when the ``**`` markers were unbalanced.)
    """
    # '**' delimits headings: odd indices hold heading titles, and the even
    # index following each one holds that section's body text.
    main_points = text.split('**')

    formatted_text = ""
    for i in range(1, len(main_points), 2):
        # Heading line.
        formatted_text += f"{main_points[i]}\n"

        # Body: split into individual '* ' bullets and wrap each one.
        # Guard against a heading with no following body (unbalanced '**').
        if i + 1 < len(main_points):
            for subpoint in main_points[i + 1].strip().split('* '):
                if subpoint.strip():
                    formatted_text += f"{textwrap.fill(subpoint, wrap_length)}\n"

        formatted_text += "\n"

    return formatted_text
103
+
104
# ---- Streamlit App ----
st.set_page_config(page_title="Arjun AI")

st.title("Arjun AI")
st.write("Get Yourself Help from Krishna's Teaching of Bhagavad Gita")

# Initialize chat history once per browser session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the chat history on every Streamlit rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# React to user input.
if prompt := st.chat_input("What is your question?"):
    # Echo the user message and record it in the history.
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant"):
        message_placeholder = st.empty()

        # Query the retrieval chain.
        # NOTE(review): ConversationalRetrievalChain expects chat_history as
        # (human, ai) message pairs; (role, content) tuples are passed here —
        # confirm the chain tolerates this shape.
        result = qa_chain({
            "question": prompt,
            "human_input": prompt,
            "chat_history": [(msg["role"], msg["content"]) for msg in st.session_state.messages],
        })
        full_response = result["answer"]

        # Simulate token streaming. The previous loop re-assigned
        # full_response to itself (a no-op), so the placeholder never grew;
        # now the displayed text actually accumulates word by word.
        streamed = ""
        for chunk in full_response.split():
            streamed += chunk + " "
            time.sleep(0.05)
            # Trailing block character acts as a blinking "typing" cursor.
            message_placeholder.markdown(streamed + "▌")

        # Final render without the cursor, preserving original whitespace.
        message_placeholder.markdown(full_response)

    # Add assistant response to chat history.
    st.session_state.messages.append({"role": "assistant", "content": full_response})