hari7261 committed on
Commit
85d3665
·
verified ·
1 Parent(s): 56df3cf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +807 -128
app.py CHANGED
@@ -1,142 +1,821 @@
1
- import torch
 
2
  import gradio as gr
3
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 
4
 
5
- # ---------- CONFIG ----------
6
- # Using an open-access model instead of gated Mistral
7
- MODEL_NAME = "tiiuae/falcon-7b-instruct"
 
 
 
8
 
9
  try:
10
- # Preload model and tokenizer
11
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
12
-
13
- # Load model with appropriate settings
14
- model = AutoModelForCausalLM.from_pretrained(
15
- MODEL_NAME,
16
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
17
- device_map="auto" if torch.cuda.is_available() else None,
18
- low_cpu_mem_usage=True
19
- )
20
-
21
- generator = pipeline(
22
- "text-generation",
23
- model=model,
24
- tokenizer=tokenizer,
25
- max_new_tokens=512,
26
- temperature=0.5,
27
- do_sample=True
28
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
- except Exception as e:
31
- print(f"Error loading model: {str(e)}")
32
- # Fallback to CPU if GPU fails
33
- model = AutoModelForCausalLM.from_pretrained(
34
- MODEL_NAME,
35
- torch_dtype=torch.float32,
36
- device_map=None
37
- )
38
- generator = pipeline(
39
- "text-generation",
40
- model=model,
41
- tokenizer=tokenizer,
42
- max_new_tokens=512,
43
- temperature=0.5,
44
- do_sample=True
45
- )
46
-
47
- # ---------- TECH FILTER ----------
48
- def is_tech_query(message: str) -> bool:
49
- tech_keywords = [
50
- "python", "java", "javascript", "html", "css", "react", "angular",
51
- "node", "machine learning", "deep learning", "ai", "api", "code",
52
- "debug", "error", "technology", "computer", "programming", "software",
53
- "hardware", "cybersecurity", "database", "sql", "devops", "cloud",
54
- "algorithm", "backend", "frontend", "server", "linux", "windows",
55
- "docker", "kubernetes", "git", "github", "vscode", "pycharm",
56
- "tensorflow", "pytorch", "neural network", "blockchain", "web3",
57
- "smart contract", "ethereum", "bitcoin", "cryptography", "encryption"
58
- ]
59
- return any(k in message.lower() for k in tech_keywords)
60
-
61
- # ---------- CHAT FUNCTION ----------
62
- def chat_with_model(message, history):
63
- if not is_tech_query(message):
64
- return history + [[message, "⚠️ I can only answer technology-related queries. Please ask about programming, AI, cybersecurity, or other tech topics."]]
65
-
66
- conversation = ""
67
- for user_msg, bot_msg in history:
68
- conversation += f"User: {user_msg}\nAssistant: {bot_msg}\n"
69
- conversation += f"User: {message}\nAssistant:"
70
-
71
- try:
72
- output = generator(conversation, pad_token_id=tokenizer.eos_token_id)[0]["generated_text"]
73
- if "Assistant:" in output:
74
- answer = output.split("Assistant:")[-1].strip()
75
  else:
76
- answer = output.strip()
77
-
78
- # Clean up response
79
- answer = answer.split("User:")[0].strip() # Remove any following user prompts
80
- return history + [[message, answer]]
81
 
82
- except Exception as e:
83
- error_msg = f"❌ Error generating response: {str(e)}"
84
- return history + [[message, error_msg]]
85
-
86
- # ---------- LOGIN + UI ----------
87
- session_state = {"authenticated": False}
88
-
89
- def login(username, password):
90
- valid_credentials = {
91
- "admin": "admin123",
92
- "techuser": "techpass",
93
- "guest": "guest123"
94
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
- if username in valid_credentials and password == valid_credentials[username]:
97
- session_state["authenticated"] = True
98
- return gr.update(visible=False), gr.update(visible=True), ""
99
- else:
100
- return gr.update(), gr.update(visible=False), "❌ Invalid credentials. Try admin/admin123 or techuser/techpass"
101
-
102
- def logout():
103
- session_state["authenticated"] = False
104
- return gr.update(visible=True), gr.update(visible=False), "Logged out successfully"
105
-
106
- with gr.Blocks(css="""
107
- .gradio-container {max-width: 750px; margin: auto;}
108
- .chatbot {min-height: 500px;}
109
- """) as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
111
- # Login Page
112
- with gr.Group(visible=True) as login_group:
113
- gr.Markdown("# 🔐 Tech Chatbot Login")
114
- with gr.Row():
115
- username = gr.Textbox(label="Username", placeholder="Enter your username")
116
- password = gr.Textbox(label="Password", type="password", placeholder="Enter your password")
117
- with gr.Row():
118
- login_btn = gr.Button("Login", variant="primary")
119
- login_status = gr.Markdown("")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
 
121
- # Chatbot Page
122
- with gr.Group(visible=False) as chat_group:
123
- gr.Markdown("# 💻 Tech Assistant")
124
- chatbot = gr.Chatbot(height=500)
125
- with gr.Row():
126
- msg = gr.Textbox(placeholder="Ask about programming, AI, cybersecurity...",
127
- label="Your Tech Question", scale=4)
128
- submit_btn = gr.Button("Send", variant="primary", scale=1)
129
- with gr.Row():
130
- clear = gr.Button("Clear Chat")
131
- logout_btn = gr.Button("Logout")
 
 
 
132
 
133
- msg.submit(chat_with_model, [msg, chatbot], [chatbot])
134
- submit_btn.click(chat_with_model, [msg, chatbot], [chatbot])
135
- clear.click(lambda: None, None, chatbot, queue=False)
136
- logout_btn.click(logout, None, [login_group, chat_group, login_status])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
 
138
- # Button Logic
139
- login_btn.click(login, [username, password], [login_group, chat_group, login_status])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
 
 
141
  if __name__ == "__main__":
142
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
 
 
1
+ import re
2
+ import random
3
  import gradio as gr
4
+ import json
5
+ import os
6
+ from typing import Dict, List, Any
7
 
8
+ # Try to import AI libraries
9
+ try:
10
+ import openai
11
+ OPENAI_AVAILABLE = True
12
+ except ImportError:
13
+ OPENAI_AVAILABLE = False
14
 
15
  try:
16
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
17
+ import torch
18
+ TRANSFORMERS_AVAILABLE = True
19
+ except ImportError:
20
+ TRANSFORMERS_AVAILABLE = False
21
+
22
+ # Try to import sentence transformers for semantic search
23
+ try:
24
+ from sentence_transformers import SentenceTransformer
25
+ import numpy as np
26
+ SENTENCE_TRANSFORMERS_AVAILABLE = True
27
+ except ImportError:
28
+ SENTENCE_TRANSFORMERS_AVAILABLE = False
29
+
30
+ class ChatAssistant:
31
+ def __init__(self):
32
+ self.name = "AI Assistant"
33
+ self.user_name = ""
34
+ self.conversation_history = []
35
+ self.model_loaded = False
36
+ self.generator = None
37
+ self.tokenizer = None
38
+ self.model = None
39
+ self.embedding_model = None
40
+
41
+ # Initialize embedding model for semantic search
42
+ self.init_embedding_model()
43
+
44
+ def init_embedding_model(self):
45
+ """Initialize embedding model for semantic search"""
46
+ if SENTENCE_TRANSFORMERS_AVAILABLE:
47
+ try:
48
+ self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
49
+ except Exception as e:
50
+ print(f"Failed to load embedding model: {e}")
51
+ self.embedding_model = None
52
+ else:
53
+ self.embedding_model = None
54
 
55
+ def load_model(self):
56
+ """Load AI model for advanced queries"""
57
+ if self.model_loaded:
58
+ return True
59
+
60
+ if TRANSFORMERS_AVAILABLE:
61
+ try:
62
+ # Use a smaller, more reliable model
63
+ model_name = "microsoft/DialoGPT-medium"
64
+
65
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name)
66
+ self.model = AutoModelForCausalLM.from_pretrained(
67
+ model_name,
68
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
69
+ device_map="auto" if torch.cuda.is_available() else None,
70
+ low_cpu_mem_usage=True
71
+ )
72
+
73
+ # Add pad token if not present
74
+ if self.tokenizer.pad_token is None:
75
+ self.tokenizer.pad_token = self.tokenizer.eos_token
76
+
77
+ self.generator = pipeline(
78
+ "text-generation",
79
+ model=self.model,
80
+ tokenizer=self.tokenizer,
81
+ device=0 if torch.cuda.is_available() else -1,
82
+ return_full_text=False
83
+ )
84
+
85
+ self.model_loaded = True
86
+ print("✅ AI model loaded successfully!")
87
+ return True
88
+
89
+ except Exception as e:
90
+ print(f"⚠️ Could not load AI model: {str(e)}")
91
+ return False
 
 
 
 
 
 
 
 
92
  else:
93
+ print("🔧 Install transformers and torch for AI features")
94
+ return False
 
 
 
95
 
96
+ def generate_response(self, query: str, context: str = "") -> str:
97
+ """Generate conversational AI response"""
98
+ if not self.model_loaded:
99
+ if not self.load_model():
100
+ return self.generate_fallback_response(query, context)
101
+
102
+ try:
103
+ # Create a conversational prompt with different response styles
104
+ tone = random.choice([
105
+ "friendly and conversational",
106
+ "professional and informative",
107
+ "detailed and analytical",
108
+ "concise and to-the-point",
109
+ "enthusiastic and encouraging"
110
+ ])
111
+
112
+ system_prompt = f"""You are an advanced AI assistant designed to help with a wide range of topics.
113
+ Respond in a {tone} tone, adapting to the user's needs.
114
+
115
+ Guidelines:
116
+ 1. For greetings: Be warm and welcoming
117
+ 2. For questions: Provide thoughtful, well-reasoned answers
118
+ 3. For complex topics: Break down into simpler parts
119
+ 4. For creative requests: Be imaginative and engaging
120
+ 5. For technical queries: Be precise and accurate
121
+ 6. Vary your responses - don't be repetitive"""
122
+
123
+ user_prompt = f"""User's question: {query}
124
+
125
+ Context from previous conversation: {context}
126
+
127
+ Please provide a helpful response that addresses the user's needs while following the guidelines above."""
128
+
129
+ # Generate response
130
+ full_prompt = f"{system_prompt}\n\nUser: {user_prompt}\nAssistant:"
131
+
132
+ response = self.generator(
133
+ full_prompt,
134
+ max_new_tokens=250,
135
+ do_sample=True,
136
+ temperature=0.7 + random.random() * 0.3, # Vary temperature
137
+ top_p=0.9,
138
+ pad_token_id=self.tokenizer.eos_token_id,
139
+ repetition_penalty=1.2,
140
+ no_repeat_ngram_size=3
141
+ )
142
+
143
+ if response and len(response) > 0:
144
+ generated_text = response[0]["generated_text"]
145
+ # Extract only the assistant's response
146
+ if "Assistant:" in generated_text:
147
+ ai_response = generated_text.split("Assistant:")[-1].strip()
148
+ if len(ai_response) > 20:
149
+ return ai_response
150
+
151
+ except Exception as e:
152
+ print(f"AI generation error: {e}")
153
+
154
+ # Fallback response
155
+ return self.generate_fallback_response(query, context)
156
 
157
+ def generate_fallback_response(self, query: str, context: str) -> str:
158
+ """Generate fallback response when model fails"""
159
+ query_lower = query.lower()
160
+
161
+ greetings = ["Hi there!", "Hello!", "Greetings!", "Nice to see you!"]
162
+ farewells = ["Goodbye!", "Farewell!", "See you later!", "Have a great day!"]
163
+
164
+ if any(word in query_lower for word in ['hi', 'hello', 'hey', 'greetings']):
165
+ return random.choice(greetings) + " How can I assist you today?"
166
+
167
+ elif any(word in query_lower for word in ['bye', 'goodbye', 'see you', 'farewell']):
168
+ return random.choice(farewells)
169
+
170
+ elif '?' in query or any(word in query_lower for word in ['what', 'how', 'why', 'when']):
171
+ return self.answer_question(query)
172
+
173
+ elif any(word in query_lower for word in ['thank', 'thanks', 'appreciate']):
174
+ return random.choice([
175
+ "You're very welcome!",
176
+ "Happy to help!",
177
+ "My pleasure!",
178
+ "Glad I could assist you!"
179
+ ])
180
+
181
+ else:
182
+ return random.choice([
183
+ "I'd be happy to help with that. Could you tell me more about what you're looking for?",
184
+ "That's an interesting topic. Here's what I can share about that...",
185
+ "Let me provide some information about that for you.",
186
+ "I can certainly help with that. Here's what you should know..."
187
+ ])
188
 
189
+ def answer_question(self, query: str) -> str:
190
+ """Generate answer for different types of questions"""
191
+ responses = {
192
+ 'science': [
193
+ "In scientific terms, this can be explained as...",
194
+ "The scientific perspective on this is...",
195
+ "From a scientific standpoint..."
196
+ ],
197
+ 'technology': [
198
+ "Technologically speaking, the current state is...",
199
+ "In the tech world, this is handled by...",
200
+ "The technical approach would be..."
201
+ ],
202
+ 'history': [
203
+ "Historically, this dates back to...",
204
+ "The historical context for this is...",
205
+ "Looking back through history..."
206
+ ],
207
+ 'general': [
208
+ "That's a great question. Here's what I know...",
209
+ "I can provide some insights on that...",
210
+ "Let me explain that for you..."
211
+ ]
212
+ }
213
+
214
+ category = 'general'
215
+ if any(word in query.lower() for word in ['science', 'physics', 'biology', 'chemistry']):
216
+ category = 'science'
217
+ elif any(word in query.lower() for word in ['tech', 'computer', 'software', 'hardware']):
218
+ category = 'technology'
219
+ elif any(word in query.lower() for word in ['history', 'historical', 'past']):
220
+ category = 'history'
221
+
222
+ return random.choice(responses[category])
223
 
224
+ def get_user_name(self, message):
225
+ name_patterns = [
226
+ r"my name is (\w+)",
227
+ r"i'm (\w+)",
228
+ r"i am (\w+)",
229
+ r"call me (\w+)"
230
+ ]
231
+
232
+ for pattern in name_patterns:
233
+ match = re.search(pattern, message.lower())
234
+ if match:
235
+ self.user_name = match.group(1).capitalize()
236
+ return f"Nice to meet you, {self.user_name}! How can I assist you today?"
237
+ return None
238
 
239
+ def process_message(self, message: str) -> str:
240
+ """Main method to process user messages and generate AI responses"""
241
+ # Check if the user is introducing themselves
242
+ name_response = self.get_user_name(message)
243
+ if name_response:
244
+ return name_response
245
+
246
+ # Generate AI response
247
+ context = "\n".join([f"User: {msg[0]}\nAssistant: {msg[1]}" for msg in self.conversation_history[-3:]])
248
+ ai_response = self.generate_response(message, context)
249
+
250
+ if ai_response and len(ai_response.strip()) > 10:
251
+ return ai_response
252
+ else:
253
+ # Final fallback
254
+ return self.generate_fallback_response(message, "")
255
+
256
+ # Initialize the assistant
257
+ assistant = ChatAssistant()
258
+
259
+ def chat_response(message, history):
260
+ """Generate response for Gradio chat interface"""
261
+ if not message.strip():
262
+ return "Please enter a message!"
263
+
264
+ response = assistant.process_message(message)
265
+ return response
266
 
267
+ def greet():
268
+ greetings = [
269
+ "Hello! I'm your AI assistant. How can I help you today?",
270
+ "Hi there! What would you like to know or discuss?",
271
+ "Greetings! I'm here to assist with any questions you might have.",
272
+ "Welcome! I'm ready to help with information, ideas, or just a friendly chat."
273
+ ]
274
+ return random.choice(greetings)
275
+
276
+ # Create Gradio interface
277
+ def create_interface():
278
+ with gr.Blocks(
279
+ title="💬 AI Assistant - Your Intelligent Helper",
280
+ theme=gr.themes.Base(
281
+ primary_hue="blue",
282
+ secondary_hue="indigo",
283
+ neutral_hue="gray",
284
+ font=[gr.themes.GoogleFont("Inter"), "Arial", "sans-serif"]
285
+ ),
286
+ css="""
287
+ /* Main Container Styling */
288
+ .gradio-container {
289
+ max-width: 1400px !important;
290
+ margin: 0 auto !important;
291
+ padding: 20px !important;
292
+ background: linear-gradient(135deg, #f0f5ff 0%, #e8f0fe 100%) !important;
293
+ }
294
+
295
+ /* Header Styling */
296
+ .main-header {
297
+ text-align: center;
298
+ padding: 30px 20px;
299
+ background: linear-gradient(135deg, #1a237e 0%, #303f9f 50%, #3949ab 100%);
300
+ border-radius: 15px;
301
+ margin-bottom: 25px;
302
+ color: white !important;
303
+ box-shadow: 0 8px 32px rgba(26, 35, 126, 0.3);
304
+ border: 1px solid rgba(255, 255, 255, 0.2);
305
+ }
306
+
307
+ .main-header h1 {
308
+ color: white !important;
309
+ font-size: 2.5rem !important;
310
+ font-weight: 700 !important;
311
+ margin-bottom: 10px !important;
312
+ text-shadow: 2px 2px 4px rgba(0,0,0,0.3) !important;
313
+ }
314
+
315
+ .main-header p {
316
+ color: #e8eaf6 !important;
317
+ font-size: 1.2rem !important;
318
+ margin: 0 !important;
319
+ font-weight: 400 !important;
320
+ }
321
+
322
+ /* Feature Cards */
323
+ .feature-grid {
324
+ display: grid;
325
+ grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
326
+ gap: 20px;
327
+ margin-bottom: 25px;
328
+ }
329
+
330
+ .feature-card {
331
+ background: white !important;
332
+ padding: 25px !important;
333
+ border-radius: 12px !important;
334
+ border-left: 5px solid #3f51b5 !important;
335
+ box-shadow: 0 4px 15px rgba(0,0,0,0.1) !important;
336
+ transition: transform 0.3s ease, box-shadow 0.3s ease !important;
337
+ color: #1a237e !important;
338
+ }
339
+
340
+ .feature-card:hover {
341
+ transform: translateY(-5px) !important;
342
+ box-shadow: 0 8px 25px rgba(0,0,0,0.15) !important;
343
+ }
344
+
345
+ .feature-card h3 {
346
+ color: #1a237e !important;
347
+ font-size: 1.3rem !important;
348
+ font-weight: 600 !important;
349
+ margin-bottom: 10px !important;
350
+ }
351
+
352
+ .feature-card p {
353
+ color: #3949ab !important;
354
+ font-size: 1rem !important;
355
+ line-height: 1.5 !important;
356
+ margin: 0 !important;
357
+ }
358
+
359
+ /* Chat Container */
360
+ .chat-container {
361
+ background: white !important;
362
+ border-radius: 15px !important;
363
+ padding: 25px !important;
364
+ box-shadow: 0 8px 32px rgba(0,0,0,0.1) !important;
365
+ margin-bottom: 20px !important;
366
+ }
367
+
368
+ /* Chatbot Styling */
369
+ .chatbot-container {
370
+ border: 2px solid #e8eaf6 !important;
371
+ border-radius: 12px !important;
372
+ background: #f8fbff !important;
373
+ }
374
+
375
+ /* Override Gradio's default message styling */
376
+ .chatbot .message-wrap {
377
+ background: transparent !important;
378
+ }
379
+
380
+ .chatbot .message.user {
381
+ background: linear-gradient(135deg, #e3f2fd 0%, #bbdefb 100%) !important;
382
+ color: #0d47a1 !important;
383
+ border: 1px solid #90caf9 !important;
384
+ margin-left: 15% !important;
385
+ padding: 15px 20px !important;
386
+ border-radius: 15px 15px 5px 15px !important;
387
+ box-shadow: 0 2px 8px rgba(13, 71, 161, 0.2) !important;
388
+ font-weight: 500 !important;
389
+ }
390
+
391
+ .chatbot .message.bot {
392
+ background: linear-gradient(135deg, #e8eaf6 0%, #c5cae9 100%) !important;
393
+ color: #1a237e !important;
394
+ border: 1px solid #9fa8da !important;
395
+ margin-right: 15% !important;
396
+ padding: 15px 20px !important;
397
+ border-radius: 15px 15px 15px 5px !important;
398
+ box-shadow: 0 2px 8px rgba(26, 35, 126, 0.2) !important;
399
+ font-weight: 500 !important;
400
+ }
401
+
402
+ /* Force text color in chat messages */
403
+ .chatbot .message.user * {
404
+ color: #0d47a1 !important;
405
+ }
406
+
407
+ .chatbot .message.bot * {
408
+ color: #1a237e !important;
409
+ }
410
+
411
+ /* Ensure chat text is always visible */
412
+ .gradio-chatbot .chatbot .message {
413
+ color: inherit !important;
414
+ }
415
+
416
+ .gradio-chatbot .chatbot .message p {
417
+ color: inherit !important;
418
+ margin: 5px 0 !important;
419
+ line-height: 1.5 !important;
420
+ }
421
+
422
+ /* User message styling */
423
+ .gradio-chatbot .user {
424
+ background: linear-gradient(135deg, #e3f2fd 0%, #bbdefb 100%) !important;
425
+ color: #0d47a1 !important;
426
+ border: 1px solid #90caf9 !important;
427
+ border-radius: 15px 15px 5px 15px !important;
428
+ margin-left: 15% !important;
429
+ margin-right: 5% !important;
430
+ }
431
+
432
+ /* Bot message styling */
433
+ .gradio-chatbot .bot {
434
+ background: linear-gradient(135deg, #e8eaf6 0%, #c5cae9 100%) !important;
435
+ color: #1a237e !important;
436
+ border: 1px solid #9fa8da !important;
437
+ border-radius: 15px 15px 15px 5px !important;
438
+ margin-right: 15% !important;
439
+ margin-left: 5% !important;
440
+ }
441
+
442
+ /* Input Styling */
443
+ .input-container {
444
+ background: white !important;
445
+ border-radius: 12px !important;
446
+ border: 2px solid #e8eaf6 !important;
447
+ padding: 5px !important;
448
+ margin-top: 15px !important;
449
+ }
450
+
451
+ .input-container:focus-within {
452
+ border-color: #3f51b5 !important;
453
+ box-shadow: 0 0 10px rgba(63, 81, 181, 0.2) !important;
454
+ }
455
+
456
+ /* Input text styling */
457
+ .input-container textarea,
458
+ .input-container input {
459
+ color: #1a237e !important;
460
+ background: white !important;
461
+ border: none !important;
462
+ font-size: 1rem !important;
463
+ font-weight: 500 !important;
464
+ }
465
+
466
+ .input-container textarea::placeholder,
467
+ .input-container input::placeholder {
468
+ color: #6b7280 !important;
469
+ opacity: 0.8 !important;
470
+ }
471
+
472
+ /* Override any Gradio input styling */
473
+ .gradio-textbox {
474
+ background: white !important;
475
+ }
476
+
477
+ .gradio-textbox textarea {
478
+ color: #1a237e !important;
479
+ background: white !important;
480
+ border: 2px solid #e8eaf6 !important;
481
+ border-radius: 8px !important;
482
+ padding: 12px !important;
483
+ font-size: 1rem !important;
484
+ }
485
+
486
+ .gradio-textbox textarea:focus {
487
+ border-color: #3f51b5 !important;
488
+ box-shadow: 0 0 10px rgba(63, 81, 181, 0.2) !important;
489
+ outline: none !important;
490
+ }
491
+
492
+ /* Button Styling */
493
+ .btn-primary {
494
+ background: linear-gradient(135deg, #3f51b5 0%, #3949ab 100%) !important;
495
+ color: white !important;
496
+ border: none !important;
497
+ border-radius: 8px !important;
498
+ padding: 12px 24px !important;
499
+ font-weight: 600 !important;
500
+ font-size: 1rem !important;
501
+ transition: all 0.3s ease !important;
502
+ box-shadow: 0 4px 15px rgba(63, 81, 181, 0.3) !important;
503
+ }
504
+
505
+ .btn-primary:hover {
506
+ background: linear-gradient(135deg, #3949ab 0%, #303f9f 100%) !important;
507
+ transform: translateY(-2px) !important;
508
+ box-shadow: 0 6px 20px rgba(63, 81, 181, 0.4) !important;
509
+ }
510
+
511
+ .btn-secondary {
512
+ background: linear-gradient(135deg, #6c757d 0%, #5a6268 100%) !important;
513
+ color: white !important;
514
+ border: none !important;
515
+ border-radius: 8px !important;
516
+ padding: 12px 24px !important;
517
+ font-weight: 600 !important;
518
+ transition: all 0.3s ease !important;
519
+ }
520
+
521
+ .btn-secondary:hover {
522
+ background: linear-gradient(135deg, #5a6268 0%, #495057 100%) !important;
523
+ transform: translateY(-2px) !important;
524
+ }
525
+
526
+ /* Sidebar Styling */
527
+ .sidebar {
528
+ background: white !important;
529
+ border-radius: 15px !important;
530
+ padding: 25px !important;
531
+ box-shadow: 0 8px 32px rgba(0,0,0,0.1) !important;
532
+ height: fit-content !important;
533
+ }
534
+
535
+ .sidebar h3 {
536
+ color: #1a237e !important;
537
+ font-size: 1.4rem !important;
538
+ font-weight: 600 !important;
539
+ margin-bottom: 15px !important;
540
+ padding-bottom: 10px !important;
541
+ border-bottom: 2px solid #e8eaf6 !important;
542
+ }
543
+
544
+ .sidebar ul {
545
+ list-style: none !important;
546
+ padding: 0 !important;
547
+ margin: 0 !important;
548
+ }
549
+
550
+ .sidebar li {
551
+ color: #3949ab !important;
552
+ padding: 8px 0 !important;
553
+ border-bottom: 1px solid #f0f5ff !important;
554
+ font-size: 0.95rem !important;
555
+ line-height: 1.4 !important;
556
+ }
557
+
558
+ .sidebar strong {
559
+ color: #1a237e !important;
560
+ font-weight: 600 !important;
561
+ }
562
+
563
+ /* Examples Section */
564
+ .examples-section {
565
+ background: #f8fbff !important;
566
+ padding: 20px !important;
567
+ border-radius: 10px !important;
568
+ margin-top: 20px !important;
569
+ border: 1px solid #e8eaf6 !important;
570
+ }
571
+
572
+ /* Footer Styling */
573
+ .footer {
574
+ text-align: center;
575
+ padding: 25px;
576
+ background: linear-gradient(135deg, #1a237e 0%, #303f9f 100%);
577
+ border-radius: 15px;
578
+ margin-top: 30px;
579
+ color: white !important;
580
+ box-shadow: 0 8px 32px rgba(26, 35, 126, 0.3);
581
+ }
582
+
583
+ .footer p {
584
+ color: white !important;
585
+ margin: 5px 0 !important;
586
+ font-size: 1rem !important;
587
+ }
588
+
589
+ .footer strong {
590
+ color: #e8eaf6 !important;
591
+ font-size: 1.2rem !important;
592
+ }
593
+
594
+ /* Responsive Design */
595
+ @media (max-width: 768px) {
596
+ .gradio-container {
597
+ padding: 10px !important;
598
+ }
599
+
600
+ .main-header {
601
+ padding: 20px 15px !important;
602
+ }
603
+
604
+ .main-header h1 {
605
+ font-size: 2rem !important;
606
+ }
607
+
608
+ .feature-grid {
609
+ grid-template-columns: 1fr !important;
610
+ gap: 15px !important;
611
+ }
612
+
613
+ .chat-container, .sidebar {
614
+ padding: 15px !important;
615
+ }
616
+
617
+ .feature-card {
618
+ padding: 20px !important;
619
+ }
620
+ }
621
+
622
+ /* Text Visibility Fixes */
623
+ .gr-textbox textarea {
624
+ color: #1a237e !important;
625
+ background: white !important;
626
+ }
627
+
628
+ .gr-textbox label {
629
+ color: #1a237e !important;
630
+ font-weight: 600 !important;
631
+ }
632
+
633
+ /* Loading Animation */
634
+ .loading {
635
+ display: inline-block;
636
+ width: 20px;
637
+ height: 20px;
638
+ border: 3px solid #e8eaf6;
639
+ border-radius: 50%;
640
+ border-top-color: #3f51b5;
641
+ animation: spin 1s ease-in-out infinite;
642
+ }
643
+
644
+ @keyframes spin {
645
+ to { transform: rotate(360deg); }
646
+ }
647
+ """
648
+ ) as iface:
649
+
650
+ # Header
651
+ gr.HTML("""
652
+ <div class="main-header">
653
+ <h1>💬 AI Assistant - Your Intelligent Helper</h1>
654
+ <p>Conversational AI powered by advanced language models ready to assist with any topic</p>
655
+ </div>
656
+ """)
657
+
658
+ # Feature highlights
659
+ gr.HTML("""
660
+ <div class="feature-grid">
661
+ <div class="feature-card">
662
+ <h3>🧠 Advanced AI</h3>
663
+ <p>State-of-the-art language model capable of understanding and responding to a wide range of topics with human-like conversation.</p>
664
+ </div>
665
+ <div class="feature-card">
666
+ <h3>🌐 Comprehensive Knowledge</h3>
667
+ <p>Access to vast information across science, technology, history, arts, and general knowledge topics.</p>
668
+ </div>
669
+ <div class="feature-card">
670
+ <h3>💡 Adaptive Responses</h3>
671
+ <p>Dynamic responses that adjust to your needs - from simple explanations to in-depth technical discussions.</p>
672
+ </div>
673
+ </div>
674
+ """)
675
+
676
+ # Main chat area
677
+ with gr.Row(equal_height=True):
678
+ with gr.Column(scale=7):
679
+ gr.HTML('<div class="chat-container">')
680
+
681
+ chatbot = gr.Chatbot(
682
+ value=[(None, greet())],
683
+ height=520,
684
+ label="💬 Chat with AI Assistant",
685
+ show_label=True,
686
+ container=True,
687
+ bubble_full_width=False,
688
+ avatar_images=(
689
+ "https://cdn-icons-png.flaticon.com/512/1077/1077012.png", # User
690
+ "https://cdn-icons-png.flaticon.com/512/1998/1998667.png" # Bot
691
+ ),
692
+ elem_classes=["chatbot-container"]
693
+ )
694
+
695
+ with gr.Row():
696
+ msg = gr.Textbox(
697
+ label="💬 Ask me anything...",
698
+ placeholder="Example: 'Explain quantum computing' or 'Help me write a poem' or 'What's the history of Rome?'",
699
+ lines=2,
700
+ max_lines=4,
701
+ scale=4,
702
+ elem_classes=["input-container"]
703
+ )
704
+
705
+ with gr.Row():
706
+ with gr.Column(scale=2):
707
+ submit_btn = gr.Button("🚀 Send Message", variant="primary", elem_classes=["btn-primary"])
708
+ with gr.Column(scale=1):
709
+ clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary", elem_classes=["btn-secondary"])
710
+
711
+ gr.HTML('</div>')
712
+
713
+ with gr.Column(scale=3):
714
+ gr.HTML("""
715
+ <div class="sidebar">
716
+ <h3>📚 Knowledge Areas</h3>
717
+ <ul>
718
+ <li><strong>🔬 Science:</strong> Physics, Chemistry, Biology</li>
719
+ <li><strong>💻 Technology:</strong> AI, Programming, Gadgets</li>
720
+ <li><strong>📜 History:</strong> Ancient, Medieval, Modern</li>
721
+ <li><strong>🎨 Arts:</strong> Literature, Music, Visual Arts</li>
722
+ <li><strong>🌍 World:</strong> Cultures, Geography, Politics</li>
723
+ <li><strong>🧠 Psychology:</strong> Mind, Behavior, Relationships</li>
724
+ <li><strong>💼 Business:</strong> Economics, Finance, Marketing</li>
725
+ </ul>
726
+
727
+ <div class="examples-section">
728
+ <h3>💡 Try These Questions</h3>
729
+ <ul>
730
+ <li>"Explain photosynthesis simply"</li>
731
+ <li>"How does blockchain work?"</li>
732
+ <li>"Summarize World War 2"</li>
733
+ <li>"Write a haiku about nature"</li>
734
+ <li>"Python vs JavaScript comparison"</li>
735
+ <li>"Tips for public speaking"</li>
736
+ </ul>
737
+ </div>
738
+
739
+ <div style="margin-top: 20px; padding: 15px; background: #f0f5ff; border-radius: 8px; text-align: center;">
740
+ <p style="margin: 0; color: #1a237e; font-weight: 600;">🤖 AI Status</p>
741
+ <p style="margin: 5px 0 0 0; color: #3949ab; font-size: 0.9rem;">Ready to help!</p>
742
+ </div>
743
+ </div>
744
+ """)
745
+
746
+ # Quick action buttons
747
+ gr.HTML("""
748
+ <div style="margin: 20px 0; text-align: center;">
749
+ <h3 style="color: #1a237e; margin-bottom: 15px;">🚀 Quick Start Topics</h3>
750
+ </div>
751
+ """)
752
+
753
+ with gr.Row():
754
+ tech_btn = gr.Button("💻 Technology", elem_classes=["btn-primary"])
755
+ science_btn = gr.Button("🔬 Science", elem_classes=["btn-primary"])
756
+ history_btn = gr.Button("📜 History", elem_classes=["btn-primary"])
757
+ creative_btn = gr.Button("🎨 Creative", elem_classes=["btn-primary"])
758
+
759
+ # Function to handle chat
760
+ def respond(message, chat_history):
761
+ if not message.strip():
762
+ return chat_history, ""
763
+
764
+ response = assistant.process_message(message)
765
+ chat_history.append((message, response))
766
+ return chat_history, ""
767
+
768
+ # Function to set quick questions
769
+ def set_quick_question(question):
770
+ return question
771
+
772
+ # Event handlers for main chat
773
+ submit_btn.click(respond, [msg, chatbot], [chatbot, msg])
774
+ msg.submit(respond, [msg, chatbot], [chatbot, msg])
775
+ clear_btn.click(lambda: ([(None, greet())], ""), outputs=[chatbot, msg])
776
+
777
+ # Quick action button handlers
778
+ tech_btn.click(
779
+ set_quick_question,
780
+ inputs=gr.State("Explain artificial intelligence in simple terms"),
781
+ outputs=msg
782
+ )
783
+
784
+ science_btn.click(
785
+ set_quick_question,
786
+ inputs=gr.State("What's the difference between classical and quantum physics?"),
787
+ outputs=msg
788
+ )
789
+
790
+ history_btn.click(
791
+ set_quick_question,
792
+ inputs=gr.State("Summarize the key events of the Renaissance period"),
793
+ outputs=msg
794
+ )
795
+
796
+ creative_btn.click(
797
+ set_quick_question,
798
+ inputs=gr.State("Write a short poem about the ocean"),
799
+ outputs=msg
800
+ )
801
+
802
+ # Footer
803
+ gr.HTML("""
804
+ <div class="footer">
805
+ <p><strong>💬 AI Assistant - Your Intelligent Helper</strong></p>
806
+ <p>Powered by advanced language models β€’ Comprehensive knowledge base β€’ Adaptive conversation</p>
807
+ <p>Β© 2025 AI Assistant | Providing intelligent conversation through cutting-edge AI</p>
808
+ </div>
809
+ """)
810
+
811
+ return iface
812
 
813
+ # Create and launch the interface
814
  if __name__ == "__main__":
815
+ interface = create_interface()
816
+ interface.launch(
817
+ share=True,
818
+ server_name="0.0.0.0",
819
+ server_port=7860,
820
+ show_api=False
821
+ )