3v324v23 commited on
Commit
aa2aac7
·
1 Parent(s): 699796f
Files changed (3) hide show
  1. .env +1 -0
  2. app.py +284 -0
  3. requirements.txt +2 -0
.env ADDED
@@ -0,0 +1 @@
 
 
1
+ GOOGLE_API_KEY=<your-google-api-key>  # SECURITY: a real, live API key was committed here — revoke it in the Google Cloud console immediately, add .env to .gitignore, and never commit secrets.
app.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import google.generativeai as genai
import streamlit as st
from dotenv import load_dotenv
import os

# Pull GOOGLE_API_KEY from the local .env file into the process environment.
load_dotenv()

# NOTE(review): no failure check here — if the key is missing, errors only
# surface later when the model is first used.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=GOOGLE_API_KEY)

# Gemini model used for every generation in this app.
MODEL_NAME = "gemini-1.5-pro-latest"

# Block medium-and-above content in all four harm categories.
SAFETY_SETTINGS = [
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
    },
    {
        "category": "HARM_CATEGORY_HATE_SPEECH",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
    },
    {
        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
    },
    {
        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
    },
]
30
+
31
def initialize_gemini():
    """Construct the Gemini model from MODEL_NAME and SAFETY_SETTINGS.

    Returns:
        The configured ``genai.GenerativeModel``, or ``None`` if construction
        failed (the error is surfaced in the Streamlit UI).
    """
    try:
        return genai.GenerativeModel(MODEL_NAME, safety_settings=SAFETY_SETTINGS)
    except Exception as exc:
        st.error(f"Error initializing Gemini model: {exc}")
        return None
38
+
39
def _build_prompt(prompt, output_format, tone, length, creative_mode, fact_checking):
    """Append the optional format/tone/length/mode instructions to *prompt*."""
    constructed_prompt = prompt
    if output_format:
        constructed_prompt += f" Generate this in the format of a {output_format}."
    if tone:
        constructed_prompt += f" Use a {tone} tone."
    if length:
        constructed_prompt += f" The generated text should be approximately {length} words long."
    if creative_mode:
        constructed_prompt += " Be creative."
    if fact_checking:
        constructed_prompt += " Please ensure factual accuracy."
    return constructed_prompt

def get_gemini_response(model, prompt,
    output_format=None, tone=None, length=None, num_responses=1, creative_mode=False, fact_checking=False, chat=None):
    """Send *prompt* (plus optional instructions) to Gemini and return the text.

    Args:
        model: a ``genai.GenerativeModel`` (used for one-shot generation).
        prompt: the user's topic/prompt text.
        output_format, tone, length, creative_mode, fact_checking: optional
            instructions appended to the prompt (see ``_build_prompt``).
        num_responses: number of candidates for one-shot generation; ignored
            when *chat* is given (chat sessions return a single reply).
        chat: optional chat session; when set, the message is sent through it
            so prior turns provide context.

    Returns:
        The generated text (all candidates joined when num_responses > 1),
        or ``None`` on failure (the error is shown in the Streamlit UI).
    """
    constructed_prompt = _build_prompt(prompt, output_format, tone, length, creative_mode, fact_checking)

    try:
        if chat:
            response = chat.send_message(constructed_prompt)
            return response.text if response else None
        response = model.generate_content(
            constructed_prompt,
            generation_config=genai.types.GenerationConfig(candidate_count=num_responses),
        )
        if response is None:
            return None
        if num_responses > 1:
            # BUG FIX: the original returned only response.text (the first
            # candidate), so the num_responses option had no visible effect.
            # Join every candidate's text so all requested responses surface.
            texts = [
                "".join(part.text for part in candidate.content.parts)
                for candidate in response.candidates
            ]
            return "\n\n---\n\n".join(texts)
        return response.text
    except Exception as e:
        st.error(f"Error generating response: {e}")
        return None
63
+
64
# Stylesheet injected once via st.markdown(..., unsafe_allow_html=True):
# chat-bubble styling for user/gemini messages, an animated "..." loader,
# and light-gray theming for the Streamlit input widgets.
custom_css = """
<style>
.user-message {
    background-color: #b49577;
    border-radius: 10px;
    padding: 25px 30px;
    margin-bottom: 3px;
    text-align: right;
    width: fit-content;
    margin-left: auto;
    word-break: break-word;
}

.gemini-message {
    background-color: #FFFFFF;
    border-radius: 10px;
    padding: 25px 30px;
    margin-bottom: 3px;
    text-align: left;
    width: fit-content;
    margin-right: auto;
    word-break: break-word;
    font-size: 16px;
    font-family: sans-serif;
    line-height: 1.6;
}

.message-container {
    display: flex;
    flex-direction: column;
    margin-bottom: 10px;
}

.stChatMessage {
    padding: 0px;
    border: none;
    background-color: transparent;
}

/* CSS for the loading dots */
.loader {
    font-size: 2em; /* Adjust size as needed */
    color: #3498db; /* Loading dots color */
}

.loader:after {
    content: ' .';
    animation: dots 1s steps(5, end) infinite;
}

@keyframes dots {
    0%, 20% {
        content: ' .';
    }
    40% {
        content: ' ..';
    }
    60%, 80% {
        content: ' ...';
    }
    100% {
        content: ' .';
    }
}

/* Styles for input boxes */
div.stTextArea > div > div > textarea {
    background-color: #f0f2f6 !important; /* Light gray background */
    border-radius: 5px;
    padding: 8px;
    color: black; /* Ensuring text is readable */
}

div.stSelectbox > label {
    color: black;
}

div.stSelectbox > div > button {
    background-color: #f0f2f6 !important;
    color: black;
    border-radius: 5px;
}

div.stNumberInput > label {
    color: black;
}

div.stNumberInput > div > div input {
    background-color: #f0f2f6 !important;
    color: black;
    border-radius: 5px;
}

div.stSlider > label {
    color: black;
}

div.stSlider > div > div > div[data-baseweb="slider"] {
    background-color: #f0f2f6 !important;
    border-radius: 5px;
}

div.stCheckbox > label {
    color: black;
}

</style>
"""
172
+
173
def main():
    """Streamlit entry point: renders the generator form, sidebar history,
    and the generated-text view, toggled by st.session_state["generated"]."""
    st.set_page_config(
        page_title="AI Text Generator",
        page_icon=":speech_balloon:",
        layout="wide",
        initial_sidebar_state="expanded",
    )

    st.markdown(custom_css, unsafe_allow_html=True)

    st.title("AI Text Generator :speech_balloon:")

    # chat_history holds {"role": "user"|"gemini", "content": str} dicts;
    # it persists across reruns and feeds both the sidebar and the chat session.
    if "chat_history" not in st.session_state:
        st.session_state["chat_history"] = []

    with st.sidebar:
        st.title("Chat History")

        clear_button_key = "clear_sidebar"
        if st.button("Clear History", key=clear_button_key):
            st.session_state["chat_history"] = []
            # Drop the live chat session too, so a fresh one is started.
            if "chat" in st.session_state:
                del st.session_state["chat"]
            st.rerun()

        st.subheader("Recent:")

        # Only user turns are listed; i//2 + 1 numbers each user/gemini pair.
        for i, message in enumerate(st.session_state["chat_history"]):
            if message["role"] == "user":
                st.markdown(f"**Input {i//2 + 1}:** {message['content']}")

    if "generated" not in st.session_state:
        st.session_state["generated"] = False

    if not st.session_state["generated"]:
        topic = st.text_area("Enter Topic:")

        with st.container():
            col1, col2, col3 = st.columns(3)
            with col1:
                output_format = st.selectbox("Output Format:", ["Story", "Poem", "Article", "Code", None])
            with col2:
                tone = st.selectbox("Tone:", ["Formal", "Informal", "Humorous", "Technical", None])
            with col3:
                num_responses = st.number_input("Responses:", min_value=1, max_value=5, value=1, step=1)

        with st.container():
            length = st.slider("Length (words):", 0, 300, 100)
            creative_mode = st.checkbox("Creative Mode")
            fact_checking = st.checkbox("Fact-Check")

        model = initialize_gemini()
        if model is None:
            st.error("Chatbot initialization failed. Check your API key.")
            return

        if "chat" not in st.session_state:
            try:
                # BUG FIX: start_chat() expects history entries shaped like
                # {"role": "user"|"model", "parts": [...]}, but this app stores
                # {"role": "user"|"gemini", "content": ...}. Translate before
                # handing the history to the SDK, otherwise restoring a chat
                # after a rerun fails.
                sdk_history = [
                    {
                        "role": "model" if m["role"] == "gemini" else m["role"],
                        "parts": [m["content"]],
                    }
                    for m in st.session_state["chat_history"]
                ]
                st.session_state["chat"] = model.start_chat(history=sdk_history)
            except Exception as e:
                st.error(f"Error starting chat session: {e}")
                return

        chat = st.session_state["chat"]

        if st.button("Generate Text"):
            if not topic:
                st.warning("Please enter a topic.")
            else:
                # f-prefix removed: the string has no placeholders.
                with st.spinner("Generating"):
                    response = get_gemini_response(model, topic, output_format, tone, length, num_responses, creative_mode, fact_checking, chat)

                if response:
                    st.session_state["generated_text"] = response
                    st.session_state["topic"] = topic
                    st.session_state["chat_history"].append({"role": "user", "content": topic})
                    st.session_state["chat_history"].append({"role": "gemini", "content": response})

                    st.session_state["generated"] = True
                    st.rerun()
                else:
                    st.error("Failed to get a response from the chatbot.")

    else:
        st.subheader("Generated Text:")
        st.markdown(f'<div class="gemini-message">{st.session_state["generated_text"]}</div>', unsafe_allow_html=True)

        col1, col2 = st.columns(2)
        with col1:
            like = st.button("Like")
        with col2:
            dislike = st.button("Dislike")

        if like:
            st.success("Thanks for your feedback!")
        if dislike:
            st.error("We appreciate your feedback.")

        if st.button("Generate Again"):
            st.session_state["generated"] = False
            del st.session_state["generated_text"]
            st.rerun()


if __name__ == "__main__":
    main()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ streamlit
+ google-generativeai
+ python-dotenv