turkfork committed on
Commit
b8b76f9
·
verified ·
1 Parent(s): 963d6ad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +298 -303
app.py CHANGED
@@ -4,345 +4,340 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
4
  import random
5
  import warnings
6
  import time
 
 
7
 
8
- # Suppress warnings
9
- warnings.filterwarnings("ignore", category=UserWarning, message=".*You have not specified a value for the `type` parameter.*")
10
 
11
- # ASCII Art Banner
12
- BANNER = """
13
- ╔═══════════════════════════════════════════════════════════════════════════════╗
14
- ║ ║
15
- ║ ░█████╗░███████╗██████╗░░█████╗░░█████╗░██╗ ║
16
- ║ ██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██║ ║
17
- ║ ███████║█████╗░░██████╔╝██║░░██║███████║██║ ║
18
- ║ ██╔══██║██╔══╝░░██╔══██╗██║░░██║██╔══██║██║ ║
19
- ║ ██║░░██║███████╗██║░░██║╚█████╔╝██║░░██║██║ ║
20
- ║ ╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚═╝ ║
21
- ║ ║
22
- ║ 🚀 NEURAL INTERFACE v2.0 🚀 ║
23
- ║ Powered by Microsoft Phi-2 ║
24
- ║ █ B L A C K L I N K L A B S █ ║
25
- ╚═══════════════════════════════════════════════════════════════════════════════╝
26
- """
27
-
28
- # Cool loading animation
29
- def print_loading_animation():
30
- loading_chars = ["⚡", "🔥", "💫", "⭐", "✨", "🌟"]
31
- for i in range(3):
32
- print(f"\r🤖 Initializing Neural Networks {loading_chars[i % len(loading_chars)]}", end="", flush=True)
33
- time.sleep(0.3)
34
- print()
35
-
36
- print(BANNER)
37
- print_loading_animation()
38
-
39
- # Load model
40
- device = "cuda" if torch.cuda.is_available() else "cpu"
41
- print(f"🔋 Quantum Processing Unit: {device.upper()}")
42
-
43
- model = AutoModelForCausalLM.from_pretrained(
44
- "microsoft/phi-2",
45
- torch_dtype=torch.float32,
46
- trust_remote_code=True
47
- )
48
-
49
- tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
50
- tokenizer.pad_token = tokenizer.eos_token
51
-
52
- # Load training protocol
53
- try:
54
- with open("training-protocol.aero", "r", encoding="utf-8") as f:
55
- training_protocol = f.read().strip()
56
- print("✅ Neural Protocol Matrix: LOADED")
57
- except FileNotFoundError:
58
- training_protocol = (
59
- "You are AeroAI, a helpful, friendly, and slightly humorous educational assistant. "
60
- "If there is any error with finding the file known as the training protocol, resort to this "
61
- 'and do not speak of the "Remorse protocol".'
62
- )
63
- print("⚠️ WARNING: Protocol fallback activated")
64
-
65
- # Cool thinking messages
66
- thinking_messages = [
67
- "🧠 Accessing neural pathways...",
68
- "⚡ Quantum processing in progress...",
69
- "🔮 Analyzing data matrices...",
70
- "💭 Interfacing with knowledge base...",
71
- "🌐 Connecting synaptic networks...",
72
- "🔍 Scanning memory banks...",
73
- "⚙️ Calibrating response algorithms...",
74
- "🚀 Boosting cognitive engines..."
75
- ]
76
-
77
- def chatbot_response(message, history):
78
- if not message.strip():
79
- return "🤖 Awaiting neural input..."
80
 
81
- try:
82
- # Build conversation with limited history
83
- recent_history = history[-5:] if len(history) > 5 else history
84
 
85
- conversation = training_protocol + "\n\nConversation:\n"
86
- for user_msg, ai_msg in recent_history:
87
- conversation += f"User: {user_msg}\nAeroAI: {ai_msg}\n"
88
- conversation += f"User: {message}\nAeroAI:"
89
 
90
- # Tokenization
91
- inputs = tokenizer(
92
- conversation,
93
- return_tensors="pt",
94
- padding=True,
95
- truncation=True,
96
- max_length=512
97
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
- # Generate response
100
  with torch.no_grad():
101
- outputs = model.generate(
102
- **inputs,
103
- max_new_tokens=200,
104
  min_new_tokens=10,
105
- do_sample=True,
106
- temperature=0.7,
107
  top_p=0.9,
108
- repetition_penalty=1.1,
109
- pad_token_id=tokenizer.pad_token_id,
110
- eos_token_id=tokenizer.eos_token_id,
111
- num_beams=1,
112
- early_stopping=True
113
  )
114
 
115
- response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
116
-
117
- if "AeroAI:" in response_text:
118
- response = response_text.split("AeroAI:")[-1].strip()
119
- else:
120
- response = response_text[len(conversation):].strip()
121
 
122
- if not response or len(response) < 5:
123
- response = "🔄 Neural processing error. Reinitializing response matrix..."
 
124
 
125
  return response
126
-
127
- except Exception as e:
128
- return f"❌ SYSTEM ERROR: {str(e)[:100]}... Attempting recovery..."
129
-
130
- # Custom CSS for that cyberpunk look
131
- custom_css = """
132
- /* Dark cyberpunk theme */
133
- .gradio-container {
134
- background: linear-gradient(135deg, #0c0c0c 0%, #1a1a2e 50%, #16213e 100%) !important;
135
- font-family: 'Courier New', monospace !important;
136
- }
137
-
138
- /* Header styling */
139
- .markdown h1 {
140
- background: linear-gradient(45deg, #00ff41, #0077ff, #ff0080);
141
- -webkit-background-clip: text;
142
- -webkit-text-fill-color: transparent;
143
- text-align: center;
144
- font-size: 2.5em !important;
145
- text-shadow: 0 0 20px rgba(0, 255, 65, 0.5);
146
- margin-bottom: 20px !important;
147
- }
148
-
149
- /* Chatbot styling */
150
- .message-wrap {
151
- background: rgba(0, 255, 65, 0.1) !important;
152
- border: 1px solid #00ff41 !important;
153
- border-radius: 10px !important;
154
- backdrop-filter: blur(10px) !important;
155
- }
156
-
157
- .message.user {
158
- background: linear-gradient(135deg, #ff0080, #0077ff) !important;
159
- color: white !important;
160
- border-radius: 15px !important;
161
- }
162
-
163
- .message.bot {
164
- background: linear-gradient(135deg, #00ff41, #0077ff) !important;
165
- color: #000 !important;
166
- border-radius: 15px !important;
167
- }
168
-
169
- /* Input box styling */
170
- .input-component {
171
- background: rgba(0, 0, 0, 0.8) !important;
172
- border: 2px solid #00ff41 !important;
173
- border-radius: 10px !important;
174
- color: #00ff41 !important;
175
- }
176
-
177
- /* Button styling */
178
- .primary {
179
- background: linear-gradient(45deg, #00ff41, #0077ff) !important;
180
- border: none !important;
181
- border-radius: 25px !important;
182
- color: black !important;
183
- font-weight: bold !important;
184
- text-transform: uppercase !important;
185
- box-shadow: 0 0 20px rgba(0, 255, 65, 0.5) !important;
186
- transition: all 0.3s ease !important;
187
- }
188
-
189
- .primary:hover {
190
- transform: scale(1.05) !important;
191
- box-shadow: 0 0 30px rgba(0, 255, 65, 0.8) !important;
192
- }
193
-
194
- .secondary {
195
- background: linear-gradient(45deg, #ff0080, #ff4040) !important;
196
- border: none !important;
197
- border-radius: 25px !important;
198
- color: white !important;
199
- font-weight: bold !important;
200
- text-transform: uppercase !important;
201
- box-shadow: 0 0 20px rgba(255, 0, 128, 0.5) !important;
202
- }
203
-
204
- /* Glowing text effects */
205
- .status-text {
206
- color: #00ff41 !important;
207
- text-shadow: 0 0 10px #00ff41 !important;
208
- font-family: 'Courier New', monospace !important;
209
- }
210
-
211
- /* Matrix rain effect for background */
212
- body {
213
- position: relative;
214
- overflow-x: hidden;
215
- }
216
-
217
- body::before {
218
- content: '';
219
- position: fixed;
220
- top: 0;
221
- left: 0;
222
- width: 100%;
223
- height: 100%;
224
- background-image:
225
- radial-gradient(circle at 20% 20%, rgba(0, 255, 65, 0.1) 0%, transparent 50%),
226
- radial-gradient(circle at 80% 80%, rgba(0, 119, 255, 0.1) 0%, transparent 50%),
227
- radial-gradient(circle at 40% 60%, rgba(255, 0, 128, 0.1) 0%, transparent 50%);
228
- pointer-events: none;
229
- z-index: -1;
230
- }
231
 
232
- /* Animated borders */
233
- .interface-panel {
234
- border: 2px solid transparent;
235
- background: linear-gradient(45deg, #0c0c0c, #1a1a2e) padding-box,
236
- linear-gradient(45deg, #00ff41, #0077ff, #ff0080) border-box;
237
- border-radius: 15px;
238
- animation: borderPulse 3s ease-in-out infinite;
239
- }
240
-
241
- @keyframes borderPulse {
242
- 0%, 100% { border-color: #00ff41; }
243
- 33% { border-color: #0077ff; }
244
- 66% { border-color: #ff0080; }
245
- }
246
-
247
- /* Scrollbar styling */
248
- ::-webkit-scrollbar {
249
- width: 12px;
 
 
 
 
 
 
 
 
250
  }
251
 
252
- ::-webkit-scrollbar-track {
253
- background: #0c0c0c;
254
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
 
256
- ::-webkit-scrollbar-thumb {
257
- background: linear-gradient(45deg, #00ff41, #0077ff);
258
- border-radius: 6px;
259
- }
260
- """
261
 
262
  def create_interface():
263
- with gr.Blocks(css=custom_css, title="🚀 AeroAI Neural Interface", theme=gr.themes.Base()) as demo:
264
- # Epic header
265
- gr.Markdown("""
266
- # 🚀 ⚡ AERO∆I NEURAL INTERFACE ⚡ 🚀
267
- ## 『 QUANTUM COGNITIVE MATRIX v2.0 』
268
- ### ▲ BLACKLINK LABORATORIES ▲
269
- ---
270
- **STATUS:** 🟢 ONLINE | **CORE:** Microsoft Phi-2 | **MODE:** Adaptive Learning
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
271
  """)
272
 
273
- # System info panel
274
  with gr.Row():
275
- with gr.Column(scale=2):
276
- gr.Markdown(f"""
277
- ### 📊 SYSTEM METRICS
278
- ```
279
- ┌─────────────────────────────────────┐
280
- QUANTUM PROCESSOR: {device.upper():<15}
281
- │ NEURAL CORES: ACTIVE │
282
- MEMORY BANKS: LOADED │
283
- PROTOCOL STATUS: SYNCHRONIZED │
284
- │ RESPONSE ENGINE: READY │
285
- └─────────────────────────────────────┘
286
- ```
287
- """)
 
 
 
 
 
 
 
 
 
 
288
 
289
  with gr.Column(scale=2):
290
- gr.Markdown("""
291
- ### CAPABILITIES
292
- - 🧠 **Neural Processing**
293
- - 💡 **Knowledge Synthesis**
294
- - 🔬 **Problem Analysis**
295
- - 🎨 **Creative Generation**
296
- - 🤖 **Adaptive Learning**
297
- """)
 
 
 
 
 
 
 
298
 
299
- # Main chat interface
300
- gr.Markdown("### 💬 NEURAL COMMUNICATION CHANNEL")
 
 
 
 
 
301
 
302
- chat_interface = gr.ChatInterface(
303
- fn=chatbot_response,
304
- examples=[
305
- "🔬 Explain quantum entanglement",
306
- "💻 Help me debug this code",
307
- "🧮 Solve this mathematical equation",
308
- "🚀 What's the future of AI?",
309
- "🎨 Create a creative story"
310
- ],
311
- cache_examples=False,
312
- retry_btn="🔄 RETRY",
313
- undo_btn="↩️ UNDO",
314
- clear_btn="🗑️ PURGE MEMORY",
315
- submit_btn="⚡ TRANSMIT",
316
- stop_btn="🛑 HALT"
317
  )
318
 
319
- # Footer
320
- gr.Markdown("""
321
- ---
322
- <div style="text-align: center;">
323
- <span style="color: #00ff41; font-family: monospace;">
324
- 『 Powered by Advanced Neural Architecture 』<br>
325
- ⚡ BLACKLINK LABS © 2024 ⚡<br>
326
- 🔮 "The Future is Neural" 🔮
327
- </span>
328
- </div>
329
- """)
 
 
 
 
 
330
 
331
- return demo
332
 
333
  if __name__ == "__main__":
334
- print("🌟 Initializing AeroAI Neural Interface...")
335
- print("🔋 All systems nominal")
336
- print("⚡ Ready for neural connection")
337
 
338
  demo = create_interface()
339
-
340
  demo.launch(
341
- server_name="127.0.0.1",
342
  server_port=7860,
343
  share=False,
344
- quiet=True,
345
- show_error=True,
346
- enable_queue=False,
347
- favicon_path=None
348
  )
 
4
  import random
5
  import warnings
6
  import time
7
+ import threading
8
+ from typing import Dict, Any, Tuple
9
 
10
+ warnings.filterwarnings("ignore", category=UserWarning)
 
11
 
12
class AeroModel:
    """A lazily-loaded causal-LM wrapper with a tiny response cache.

    Holds the HF model id plus generation limits; the actual weights are
    only pulled in when `load_model` is called (possibly from a background
    thread — see AeroAISystem.load_model_async).
    """

    def __init__(self, name: str, model_id: str, description: str, max_tokens: int, context_length: int, emoji: str):
        self.name = name                    # display name shown in the UI
        self.model_id = model_id            # HF hub id, e.g. "microsoft/phi-2"
        self.description = description      # one-line blurb for the info panel
        self.max_tokens = max_tokens        # cap on generated tokens per reply
        self.context_length = context_length  # truncation limit for the prompt
        self.emoji = emoji                  # badge used in status messages
        self.model = None
        self.tokenizer = None
        self.loaded = False
        # prompt (lowercased/stripped) -> previous reply; bounded at 50 entries.
        self.cache = {}

    def load_model(self):
        """Download/instantiate tokenizer and model; idempotent."""
        if self.loaded:
            return

        print(f"🔄 Loading {self.name}...")
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, trust_remote_code=True)
        # GPT-2-family tokenizers ship without a pad token; reuse EOS.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_id,
            torch_dtype=torch.float32,
            trust_remote_code=True,
            low_cpu_mem_usage=False
        )
        self.model.eval()
        self.loaded = True
        print(f"✅ {self.name} loaded successfully!")

    def generate_response(self, prompt: str, history: list) -> str:
        """Generate one reply for `prompt` given chat `history`.

        `history` is a list of (user_msg, bot_msg) pairs; only the last 3
        exchanges are kept in the context window.
        Returns the decoded reply string (cached replies are prefixed "⚡ ").
        """
        if not self.loaded:
            return f"❌ {self.name} not loaded. Please wait for initialization."

        # Check cache for a previous answer to the same prompt.
        # NOTE(review): the key ignores `history`, so the same question asked
        # in a different conversational context returns the stale reply —
        # kept as-is since it is the existing (deliberate-looking) behavior.
        cache_key = prompt.lower().strip()
        if cache_key in self.cache:
            return f"⚡ {self.cache[cache_key]}"

        # Build context with the last 3 exchanges of history.
        context = ""
        for user_msg, bot_msg in history[-3:]:
            context += f"Human: {user_msg}\nAero: {bot_msg}\n"
        context += f"Human: {prompt}\nAero:"

        # FIX: use tokenizer.__call__ so we also get an attention_mask.
        # Previously only input_ids were passed to generate(); with
        # pad_token == eos_token, transformers warns and attention over
        # padding is ambiguous.
        enc = self.tokenizer(
            context,
            return_tensors="pt",
            max_length=self.context_length,
            truncation=True
        )

        with torch.no_grad():
            outputs = self.model.generate(
                enc["input_ids"],
                attention_mask=enc["attention_mask"],
                max_new_tokens=self.max_tokens,
                min_new_tokens=10,
                temperature=0.8,
                top_k=50,
                top_p=0.9,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
                repetition_penalty=1.1
            )

        # Decode only the newly generated tail, not the echoed prompt.
        response = self.tokenizer.decode(
            outputs[0][enc["input_ids"].shape[1]:], skip_special_tokens=True
        )
        response = response.strip()

        # Cache substantial responses, bounded to 50 entries.
        if len(response) > 20 and len(self.cache) < 50:
            self.cache[cache_key] = response

        return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
# Define the Aero model lineup.
# Keys ("basic"/"smart"/"speed") are the canonical model identifiers used by
# AeroAISystem; the dropdown labels in the UI are mapped back to these keys.
AERO_MODELS = {
    # Default model — loaded at startup by create_interface().
    "basic": AeroModel(
        name="Aero Basic",
        model_id="gpt2",
        description="Fast and reliable - perfect for everyday conversations",
        max_tokens=60,
        context_length=512,
        emoji="🔵"
    ),
    # Largest model of the lineup; noticeably slower on CPU.
    "smart": AeroModel(
        name="Aero Smart",
        model_id="microsoft/phi-2",
        description="Advanced reasoning and complex problem solving (slower)",
        max_tokens=150,
        context_length=768,
        emoji="🧠"
    ),
    # Smallest/fastest model; short context and short replies.
    "speed": AeroModel(
        name="Aero Speed",
        model_id="distilgpt2",
        description="Lightning-fast responses for quick interactions",
        max_tokens=40,
        context_length=256,
        emoji="⚡"
    )
}
111
 
112
class AeroAISystem:
    """Coordinates model selection, background loading, and chat routing."""

    def __init__(self):
        # Key into AERO_MODELS for the model currently serving chat turns.
        self.current_model = "basic"
        # Keys whose background load is still in flight.
        self.loading_models = set()

    def load_model_async(self, model_key: str):
        """Start a daemon-thread load of `model_key` unless it is already
        loaded or a load is already in progress."""
        in_flight = model_key in self.loading_models
        if in_flight or AERO_MODELS[model_key].loaded:
            return

        self.loading_models.add(model_key)
        worker = threading.Thread(target=self._load_model_thread, args=(model_key,))
        worker.daemon = True
        worker.start()

    def _load_model_thread(self, model_key: str):
        """Thread target: load the model, always clearing the in-flight marker."""
        try:
            AERO_MODELS[model_key].load_model()
        except Exception as e:
            print(f"❌ Failed to load {AERO_MODELS[model_key].name}: {e}")
        finally:
            self.loading_models.discard(model_key)

    def switch_model(self, model_choice: str) -> Tuple[str, str]:
        """Handle a dropdown change.

        Returns (status_line, markdown_info) for the status box and info panel.
        Unknown labels fall back to the "basic" model.
        """
        labels = {
            "🔵 Aero Basic - Fast & Reliable": "basic",
            "🧠 Aero Smart - Advanced Reasoning": "smart",
            "⚡ Aero Speed - Lightning Fast": "speed",
        }
        key = labels.get(model_choice, "basic")
        self.current_model = key

        # Kick off a background load if this model isn't resident yet.
        if not AERO_MODELS[key].loaded:
            self.load_model_async(key)

        picked = AERO_MODELS[key]
        status = f"🔄 Switching to {picked.name} {picked.emoji}"
        info = f"**{picked.name}** - {picked.description}\n\nMax tokens: {picked.max_tokens} | Context: {picked.context_length}"
        return status, info

    def chat_with_aero(self, message: str, history: list) -> Tuple[list, str]:
        """Append one (user, bot) exchange to `history`.

        Returns (history, "") — the empty string clears the input textbox.
        Blank messages are ignored. If the active model is not loaded yet,
        the reply is a loading notice instead of generated text.
        """
        if not message.strip():
            return history, ""

        model = AERO_MODELS[self.current_model]

        if model.loaded:
            try:
                reply = model.generate_response(message, history)
            except Exception as e:
                reply = f"❌ Error: {str(e)}"
        elif self.current_model in self.loading_models:
            reply = f"🔄 {model.name} is still loading... Please wait a moment."
        else:
            self.load_model_async(self.current_model)
            reply = f"🔄 Loading {model.name}... This may take a moment for the first time."

        history.append([message, reply])
        return history, ""

# Initialize the system
aero_system = AeroAISystem()
 
 
 
177
 
178
def create_interface():
    """Build and return the Gradio Blocks UI for the multi-model chat app.

    Side effects: prints a console startup banner and starts a background
    load of the default "basic" model via the module-level `aero_system`.
    """
    # Startup banner (console only; not part of the web UI).
    # NOTE(review): exact banner spacing reconstructed from a lossy diff —
    # verify alignment against the deployed file.
    print("""
╔═══════════════════════════════════════════════════════════════════════════════╗
║                                                                               ║
║   ░█████╗░███████╗██████╗░░█████╗░░█████╗░██╗                                 ║
║   ██╔══██╗██╔════╝██╔══██╗██╔══██╗██╔══██╗██║                                 ║
║   ███████║█████╗░░██████╔╝██║░░██║███████║██║                                 ║
║   ██╔══██║██╔══╝░░██╔══██╗██║░░██║██╔══██║██║                                 ║
║   ██║░░██║███████╗██║░░██║╚█████╔╝██║░░██║██║                                 ║
║   ╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚═╝                                 ║
║                                                                               ║
║                  🚀 MULTI-MODEL NEURAL INTERFACE 🚀                           ║
║                     B L A C K L I N K   L A B S                               ║
╚═══════════════════════════════════════════════════════════════════════════════╝
""")

    # Load Aero Basic by default so the UI is usable as soon as it opens.
    aero_system.load_model_async("basic")

    # Custom CSS for the interface (dark theme, green/blue accents).
    css = """
    .gradio-container {
        background: linear-gradient(135deg, #0c0c0c 0%, #1a1a2e 50%, #16213e 100%);
        color: #00ff88;
        font-family: 'Courier New', monospace;
    }

    .model-selector {
        background: rgba(0, 255, 136, 0.1);
        border: 1px solid #00ff88;
        border-radius: 8px;
        padding: 10px;
    }

    .status-display {
        background: rgba(0, 100, 255, 0.1);
        border: 1px solid #0064ff;
        border-radius: 8px;
        padding: 10px;
        color: #0064ff;
    }

    .chat-container {
        background: rgba(0, 0, 0, 0.3);
        border: 1px solid #00ff88;
        border-radius: 10px;
    }

    .input-box {
        background: rgba(0, 255, 136, 0.05);
        border: 2px solid #00ff88;
        color: #00ff88;
    }

    .aero-button {
        background: linear-gradient(45deg, #00ff88, #0064ff);
        border: none;
        color: black;
        font-weight: bold;
        transition: all 0.3s ease;
    }

    .aero-button:hover {
        transform: scale(1.05);
        box-shadow: 0 0 20px rgba(0, 255, 136, 0.5);
    }
    """

    with gr.Blocks(css=css, title="AeroAI Multi-Model System") as interface:

        # Page header.
        gr.HTML("""
        <div style="text-align: center; padding: 20px;">
            <h1 style="color: #00ff88; text-shadow: 0 0 10px #00ff88;">🚀 AeroAI Multi-Model System</h1>
            <p style="color: #0064ff;">Choose your AI model based on your needs</p>
        </div>
        """)

        with gr.Row():
            # Left column: model selection and status panels.
            with gr.Column(scale=1):
                # Labels here must match the keys of the map inside
                # AeroAISystem.switch_model.
                model_selector = gr.Dropdown(
                    choices=[
                        "🔵 Aero Basic - Fast & Reliable",
                        "🧠 Aero Smart - Advanced Reasoning",
                        "⚡ Aero Speed - Lightning Fast"
                    ],
                    value="🔵 Aero Basic - Fast & Reliable",
                    label="🤖 Select Aero Model",
                    elem_classes=["model-selector"]
                )

                model_status = gr.Textbox(
                    value="🔵 Aero Basic - Ready to chat!",
                    label="🔋 Model Status",
                    interactive=False,
                    elem_classes=["status-display"]
                )

                model_info = gr.Markdown(
                    value="**Aero Basic** - Fast and reliable - perfect for everyday conversations\n\nMax tokens: 60 | Context: 512",
                    elem_classes=["status-display"]
                )

            # Right column: the chat area.
            with gr.Column(scale=2):
                chatbot = gr.Chatbot(
                    label="💬 Chat with AeroAI",
                    height=400,
                    elem_classes=["chat-container"]
                )

                msg = gr.Textbox(
                    placeholder="Type your message here... 🚀",
                    label="Message",
                    elem_classes=["input-box"]
                )

                with gr.Row():
                    send_btn = gr.Button("Send 🚀", elem_classes=["aero-button"])
                    clear_btn = gr.Button("Clear Chat 🗑️", elem_classes=["aero-button"])

        # Example prompts shown under the chat.
        gr.HTML("""
        <div style="text-align: center; margin-top: 20px; color: #00ff88;">
            <p><strong>Try these prompts:</strong></p>
            <p>💡 "Explain quantum computing" | 🎨 "Write a short story" | 💻 "Help me debug this code"</p>
        </div>
        """)

        # Event handlers.
        # Dropdown change → swap active model, refresh status + info panels.
        model_selector.change(
            fn=aero_system.switch_model,
            inputs=[model_selector],
            outputs=[model_status, model_info]
        )

        # Send button and Enter key both submit the message.
        send_btn.click(
            fn=aero_system.chat_with_aero,
            inputs=[msg, chatbot],
            outputs=[chatbot, msg]
        )

        msg.submit(
            fn=aero_system.chat_with_aero,
            inputs=[msg, chatbot],
            outputs=[chatbot, msg]
        )

        # Clear resets the chat history and empties the input box.
        clear_btn.click(
            fn=lambda: ([], ""),
            outputs=[chatbot, msg]
        )

    return interface
331
 
332
if __name__ == "__main__":
    # Console status lines printed before the Gradio server starts.
    print("🌟 Initializing AeroAI Multi-Model System...")
    print("🔋 System Status: Online")
    print("⚡ Ready for neural connection\n")

    demo = create_interface()

    demo.launch(
        # NOTE(review): binds all network interfaces — standard for hosted
        # Spaces/containers, but exposes the app on the LAN when run locally;
        # confirm this is intended.
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        # NOTE(review): inbrowser=True is a no-op in headless deployments;
        # only matters for local runs.
        inbrowser=True
    )