AIencoder committed on
Commit
d21ae32
·
verified ·
1 Parent(s): 8fd71dd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +642 -1
app.py CHANGED
@@ -1 +1,642 @@
1
- print("Hello world")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import time
4
+ import re
5
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
6
+ from typing import Dict, List, Tuple, Optional
7
+ import os
8
+ import json
9
+ from functools import lru_cache
10
+
11
# ---------------------------------------------------------------------------
# CPU-optimized configuration: a small chat model loaded in full precision.
# ---------------------------------------------------------------------------
MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"  # 1.1B-parameter chat model small enough for CPU
DEVICE = "cpu"  # no GPU assumed in this deployment
DTYPE = torch.float32 # Using float32 for CPU stability (float16 not well supported on CPU)
15
+
16
# File system emulation
class FileSystem:
    """In-memory pseudo file system backing the IDE's editor.

    Stores file contents in a dict keyed by filename, tracks the currently
    open file, and keeps a bounded log of save/create actions.
    """

    def __init__(self):
        # filename -> file contents (seeded with a small demo workspace)
        self.files = {
            "main.js": "// Start coding here\nconsole.log('Hello Axon Pro');",
            "utils.js": "// Utility functions\nfunction add(a, b) {\n return a + b;\n}",
            "style.css": "/* Add your styles here */\nbody {\n font-family: monospace;\n}"
        }
        self.current_file = "main.js"
        self.history = []       # list of (action, filename, preview) tuples
        self.max_history = 50   # cap so the history cannot grow unboundedly

    def save_file(self, content: str) -> None:
        """Persist *content* into the currently selected file and log the save."""
        if self.current_file:
            self.files[self.current_file] = content
            self.history.append(("save", self.current_file, content[:100] + "..."))
            # Drop the oldest entry once the bounded history overflows.
            if len(self.history) > self.max_history:
                self.history.pop(0)

    def get_file(self, filename: str) -> str:
        """Return the contents of *filename*, or "" if it does not exist."""
        return self.files.get(filename, "")

    def get_current_file_content(self) -> str:
        """Return the contents of the currently selected file."""
        return self.files.get(self.current_file, "")

    def set_current_file(self, filename: str) -> None:
        """Switch the active file; unknown filenames are ignored."""
        if filename in self.files:
            self.current_file = filename

    def create_file(self, filename: str, content: str = "") -> None:
        """Create *filename* (no-op if it already exists) and make it active."""
        if filename not in self.files:
            self.files[filename] = content
            self.current_file = filename
            self.history.append(("create", filename, content[:50] + "..."))

    def get_all_files(self) -> List[str]:
        """Return the list of known file names."""
        return list(self.files.keys())

    def get_context(self) -> str:
        """Concatenate every file, each preceded by a header naming it,
        as context for the AI model.

        BUG FIX: the header previously never interpolated the file name
        (it emitted a literal placeholder for every file). Also uses
        str.join instead of repeated string concatenation.
        """
        return "".join(
            f"// File: {filename}\n{content}\n\n"
            for filename, content in self.files.items()
        )
60
+
61
# Initialize file system: a module-level singleton shared by all UI handlers.
fs = FileSystem()
63
+
64
# Cache the model to avoid reloading
@lru_cache(maxsize=1)
def load_model():
    """Load TinyLlama and wrap it in a text-generation pipeline.

    The lru_cache(maxsize=1) ensures the expensive load happens exactly
    once per process. Returns the pipeline on success, or None on failure.
    """
    print("Loading TinyLlama model for CPU... This may take a moment.")
    t0 = time.time()

    try:
        # Plain fp32 weights on CPU — no bitsandbytes-style quantization.
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=DTYPE,
            device_map=DEVICE,
            low_cpu_mem_usage=True,
        )

        # Conservative sampling defaults suited to code generation.
        generation_defaults = dict(
            device=-1,  # Force CPU
            max_new_tokens=256,
            temperature=0.2,
            top_p=0.95,
            do_sample=True,
        )
        text_gen = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            **generation_defaults,
        )

        load_time = time.time() - t0
        print(f"Model loaded successfully in {load_time:.2f} seconds!")
        return text_gen
    except Exception as e:
        # Callers treat a None return as "model unavailable".
        print(f"Error loading model: {str(e)}")
        return None
98
+
99
def run_code(code: str) -> Tuple[str, str]:
    """Pretend to execute JavaScript and return (output, error) strings.

    NOTE: this is a stub — no real JS sandbox is wired in, so it always
    reports success unless an unexpected Python error occurs.
    """
    started = time.time()
    result, failure = "", ""

    try:
        # Placeholder: a real implementation would hand `code` to a proper
        # JavaScript sandbox and capture its stdout/stderr.
        result = f"✓ Code executed successfully\nExecution time: {time.time() - started:.4f}s"
    except Exception as e:
        failure = f"✗ Error: {str(e)}"

    return result, failure
113
+
114
def generate_completion(code: str, cursor_pos: int = 0) -> str:
    """Ask the model to continue `code`, using all workspace files as context.

    `cursor_pos` is accepted for API compatibility but currently unused.
    Returns the completion text, or an error string on failure.
    """
    model = load_model()
    if model is None:
        return "Error: Failed to load AI model. Please try again later."

    # Give the model visibility into every file in the workspace.
    context = fs.get_context()

    # TinyLlama chat-format prompt for code completion.
    prompt = f"""<|system|>
You are an AI programming assistant, an expert at completing code in JavaScript.
Complete the code below. Only return the completion, no explanations.
Keep the same indentation style as the code above.
</s>
<|user|>
Current code context:
{context}

Complete this code:
{code}
</s>
<|assistant|>
"""

    try:
        generated = model(
            prompt,
            pad_token_id=model.tokenizer.eos_token_id,
            num_return_sequences=1,
            return_full_text=False,
        )
        completion = generated[0]['generated_text'].strip()

        # Drop any verbatim echo of the user's code from the completion.
        snippet = code.strip()
        if snippet in completion:
            completion = completion.replace(snippet, "")

        return completion
    except Exception as e:
        return f"Error generating completion: {str(e)}"
158
+
159
def explain_code(code: str) -> str:
    """Produce a natural-language, line-by-line explanation of `code`.

    Returns the model's explanation, or an error string if the model is
    unavailable or generation fails.
    """
    pipe = load_model()
    if pipe is None:
        return "Error: Failed to load AI model. Please try again later."

    prompt = f"""<|system|>
You are an AI programming assistant that explains code clearly and concisely.
Provide a line-by-line explanation of what the code does.
</s>
<|user|>
Explain this JavaScript code:
{code}
</s>
<|assistant|>
"""

    try:
        outputs = pipe(
            prompt,
            pad_token_id=pipe.tokenizer.eos_token_id,
            max_new_tokens=512,
            num_return_sequences=1,
            return_full_text=False,
        )
        return outputs[0]['generated_text'].strip()
    except Exception as e:
        return f"Error explaining code: {str(e)}"
188
+
189
def refactor_code(code: str) -> str:
    """Ask the model for an improved version of `code`.

    Returns the refactored code (with a leading explanatory comment, per
    the prompt), or an error string on failure.
    """
    pipe = load_model()
    if pipe is None:
        return "Error: Failed to load AI model. Please try again later."

    prompt = f"""<|system|>
You are an AI programming assistant that refactors code for better readability, performance, and maintainability.
Provide a refactored version of the code with improvements.
Explain the key changes in a comment at the top.
</s>
<|user|>
Refactor this JavaScript code:
{code}
</s>
<|assistant|>
"""

    try:
        outputs = pipe(
            prompt,
            pad_token_id=pipe.tokenizer.eos_token_id,
            max_new_tokens=512,
            num_return_sequences=1,
            return_full_text=False,
        )
        return outputs[0]['generated_text'].strip()
    except Exception as e:
        return f"Error refactoring code: {str(e)}"
219
+
220
def generate_code(prompt: str) -> str:
    """Generate JavaScript code from a natural-language description.

    `prompt` is the user's description; it is embedded into a chat-format
    template (stored in a separate local so the parameter is not shadowed).
    Returns the generated code, or an error string on failure.
    """
    pipe = load_model()
    if pipe is None:
        return "Error: Failed to load AI model. Please try again later."

    full_prompt = f"""<|system|>
You are an AI programming assistant that writes clean, efficient JavaScript code.
Generate code based on the user's description. Include comments for complex parts.
</s>
<|user|>
Write JavaScript code that: {prompt}
</s>
<|assistant|>
"""

    try:
        outputs = pipe(
            full_prompt,
            pad_token_id=pipe.tokenizer.eos_token_id,
            max_new_tokens=512,
            num_return_sequences=1,
            return_full_text=False,
        )
        return outputs[0]['generated_text'].strip()
    except Exception as e:
        return f"Error generating code: {str(e)}"
248
+
249
def process_voice_input(transcript: str) -> str:
    """Pass a speech transcript through unchanged.

    Placeholder: actual speech capture would happen client-side via the
    Web Speech API; the server currently treats the transcript as final.
    """
    return transcript
254
+
255
def create_diff_view(original: str, modified: str) -> str:
    """Build a naive line-by-line diff of two code strings.

    Matching lines are emitted with a leading "  ", removals with "- ",
    additions with "+ ". This is a simple pairwise walk, not a true LCS
    diff: any mismatched pair yields one removal plus one addition.
    """
    old_lines = original.split('\n')
    new_lines = modified.split('\n')

    out = []
    i = j = 0
    while i < len(old_lines) and j < len(new_lines):
        a, b = old_lines[i], new_lines[j]
        if a == b:
            out.append(f"  {a}")
        else:
            # Mismatch: show the old line as removed and the new as added.
            out.append(f"- {a}")
            out.append(f"+ {b}")
        i += 1
        j += 1

    # Whatever remains in either version is a pure removal or addition.
    out.extend(f"- {line}" for line in old_lines[i:])
    out.extend(f"+ {line}" for line in new_lines[j:])

    return "\n".join(out)
287
+
288
# Gradio UI Components (updated for Gradio 6.5.1)
# NOTE(review): several arguments below look like older-Gradio API —
# `radius=0` on themes.Default (current API uses `radius_size`),
# `bubble_full_width` on Chatbot (removed in Gradio 5), and list-of-lists
# chat history. Confirm against the installed Gradio version.
with gr.Blocks(
    title="Axon Pro - Free AI IDE",
    # Monospace-first dark theme tuned for an IDE look.
    theme=gr.themes.Default(
        font=[gr.themes.GoogleFont('JetBrains Mono'), 'monospace'],
        font_mono=[gr.themes.GoogleFont('JetBrains Mono'), 'monospace'],
        primary_hue="blue",
        neutral_hue="gray",
        radius=0
    ).set(
        button_primary_background_fill="*primary_500",
        button_primary_background_fill_hover="*primary_600",
        body_background_fill="*neutral_900",
        body_text_color="*neutral_50"
    ),
    # Fixed-height panels so the layout behaves like a tiled IDE.
    css="""
    .editor-container { height: 50vh; }
    .terminal { height: 15vh; background-color: #1e1e1e; color: #d4d4d4; overflow: auto; }
    .diff-view { height: 40vh; overflow: auto; }
    .file-explorer { height: 50vh; background-color: #252526; overflow: auto; }
    .ai-chat { height: 40vh; overflow: auto; }
    .status-bar { background-color: #007acc; color: white; padding: 5px; text-align: center; }
    .btn { min-width: 100px; }
    .monaco-editor { height: 100% !important; }
    .theme-dark { --body-bg: #1e1e1e; --editor-bg: #1e1e1e; }
    .button-large { min-height: 40px !important; }
    .tab-nav { background-color: #252526 !important; }
    .tabitem { background-color: #1e1e1e !important; }
    .code-wrap { white-space: pre-wrap !important; word-break: break-word !important; }
    """
) as demo:

    gr.Markdown("# ⚡ Axon Pro — Free AI-Powered Code IDE")

    with gr.Tabs():
        with gr.Tab("Editor", id="editor-tab"):
            with gr.Row(equal_height=True):
                # File Explorer (left sidebar): file picker + new/save controls.
                with gr.Column(scale=1, min_width=200):
                    gr.Markdown("### 📁 File Explorer")
                    file_list = gr.Dropdown(
                        choices=fs.get_all_files(),
                        value=fs.current_file,
                        label="Files",
                        interactive=True,
                        container=False
                    )
                    with gr.Row():
                        new_file_btn = gr.Button("➕ New File", variant="secondary", elem_classes="button-large")
                        save_btn = gr.Button("💾 Save", variant="secondary", elem_classes="button-large")
                    new_file_name = gr.Textbox(
                        placeholder="filename.js",
                        label="New File Name",
                        container=False
                    )

                # Main Editor Area: code buffer + AI action toolbar.
                with gr.Column(scale=4):
                    editor = gr.Code(
                        value=fs.get_current_file_content(),
                        label="Code Editor",
                        language="javascript",
                        lines=20,
                        interactive=True,
                        elem_classes="code-wrap"
                    )

                    # Action toolbar — handlers are bound further below.
                    with gr.Row():
                        run_btn = gr.Button("▶ Run (Ctrl+R)", variant="primary", elem_classes="button-large")
                        complete_btn = gr.Button("✨ Complete (Ctrl+Enter)", variant="secondary", elem_classes="button-large")
                        explain_btn = gr.Button("📝 Explain (Ctrl+Shift+E)", variant="secondary", elem_classes="button-large")
                        refactor_btn = gr.Button("🔧 Refactor", variant="secondary", elem_classes="button-large")
                        generate_btn = gr.Button("⚡ Generate", variant="secondary", elem_classes="button-large")
                        voice_btn = gr.Button("🎤 Voice", variant="secondary", elem_classes="button-large")

                    # Bottom panel: terminal / AI chat / diff review tabs.
                    with gr.Tabs():
                        with gr.Tab("Terminal", id="terminal-tab"):
                            terminal = gr.Textbox(
                                value="$ _ (Ctrl+R to run)",
                                lines=5,
                                interactive=False,
                                elem_classes="terminal"
                            )
                            clear_btn = gr.Button("CLEAR", variant="secondary", elem_classes="button-large")

                        with gr.Tab("AI Chat", id="chat-tab"):
                            chat_history = gr.Chatbot(
                                label="Axon AI",
                                elem_classes="ai-chat",
                                height="100%",
                                bubble_full_width=False,
                                avatar_images=(None, "https://api.iconify.design/teenyicons:ai-solid.svg")
                            )
                            with gr.Row():
                                chat_input = gr.Textbox(
                                    placeholder="Ask about your code...",
                                    label="Message Axon AI",
                                    container=False,
                                    scale=7
                                )
                                send_btn = gr.Button("Send", variant="primary", elem_classes="button-large", scale=1)

                        with gr.Tab("Diff View", id="diff-tab"):
                            diff_view = gr.Code(
                                value="",
                                label="AI Changes",
                                language="diff",
                                elem_classes="diff-view code-wrap",
                                interactive=False
                            )
                            with gr.Row():
                                apply_btn = gr.Button("Apply Changes", variant="primary", elem_classes="button-large")
                                discard_btn = gr.Button("Discard Changes", variant="secondary", elem_classes="button-large")

    # NOTE(review): `content.split('\\n')` splits on a literal backslash+n
    # (and a backslash inside an f-string expression is a SyntaxError before
    # Python 3.12), so the line count shown is presumably wrong — verify.
    status_bar = gr.Markdown(
        f"**AXON PRO v1.0** | {len(fs.get_all_files())} files | {sum(len(content.split('\\n')) for content in fs.files.values())} lines | JavaScript | Axon Pro v1.0 | AI Ready",
        elem_classes="status-bar"
    )

    # Hidden state for tracking the active file and any pending AI diff.
    current_file_state = gr.State(fs.current_file)
    diff_original_state = gr.State("")   # editor contents before the AI change
    diff_modified_state = gr.State("")   # AI-proposed contents
    diff_mode_state = gr.State(False)    # True while a diff awaits apply/discard
412
+
413
# Event handlers

def update_file_content(content, current_file):
    """Editor-change handler: autosave the buffer into the active file.

    `current_file` is received from state but unused — the FileSystem
    tracks the active file itself. Returns the re-read saved content.
    """
    fs.save_file(content)
    return fs.get_current_file_content()
417
+
418
def load_file(filename):
    """Dropdown handler: switch the active file and load its contents.

    Returns (file contents, filename) to refresh the editor and the
    current-file state together.
    """
    fs.set_current_file(filename)
    return fs.get_current_file_content(), filename
421
+
422
def create_new_file(name):
    """Create a file when the name looks valid (non-empty, has a dot).

    Returns a dropdown update (selecting the new file when one was
    created) plus the active file's contents for the editor.
    """
    valid = bool(name) and "." in name
    if valid:
        fs.create_file(name)

    dropdown_update = (
        gr.update(choices=fs.get_all_files(), value=name)
        if valid
        else gr.update(choices=fs.get_all_files())
    )
    return dropdown_update, fs.get_current_file_content()
427
+
428
def save_file(content, current_file):
    """Save-button handler: persist the buffer and echo it back unchanged.

    `current_file` is received from state but unused — the FileSystem
    tracks the active file itself.
    """
    fs.save_file(content)
    return content
431
+
432
def run_code_wrapper(content):
    """Execute the buffer; show the error if there is one, else the output."""
    output, error = run_code(content)
    return error or output
435
+
436
def complete_code_wrapper(content):
    """Append the AI's suggested continuation to the current buffer."""
    return content + generate_completion(content)
439
+
440
def explain_code_wrapper(content):
    """Explain the buffer and stage the result as a pending diff.

    The explanation is prepended to the code as `//` comment lines so the
    user can review it in the Diff View before applying.

    BUG FIX: the commented explanation was previously built with a
    backslash inside an f-string expression — a SyntaxError before
    Python 3.12, and the doubled backslash targeted literal "\\n" text
    instead of newlines. The replacement is now computed outside the
    f-string.

    Returns (diff text, original, modified, diff-mode flag).
    """
    explanation = explain_code(content)
    fs.save_file(content)
    diff_original = content

    # Turn every explanation line into a `//` comment line.
    commented = explanation.replace("\n", "\n// ")
    diff_modified = f"// EXPLANATION:\n// {commented}\n\n{content}"

    # Switch to diff view
    diff = create_diff_view(diff_original, diff_modified)
    return diff, diff_original, diff_modified, True
449
+
450
def refactor_code_wrapper(content):
    """Refactor the buffer and stage the proposal as a pending diff.

    Returns (diff text, original, proposed, diff-mode flag).
    """
    proposed = refactor_code(content)
    fs.save_file(content)

    # Stage the proposal for review in the Diff View tab.
    staged_diff = create_diff_view(content, proposed)
    return staged_diff, content, proposed, True
459
+
460
def generate_code_wrapper(prompt, history):
    """Generate code from a chat prompt and stage it in the Diff View.

    A blank prompt leaves every output at its current value. Otherwise
    the generated code is diffed against an empty original (so each line
    shows as an addition) and a confirmation is appended to the chat.
    """
    if not prompt.strip():
        return (
            diff_view.value,
            diff_original_state.value,
            diff_modified_state.value,
            diff_mode_state.value,
            history,
        )

    produced = generate_code(prompt)

    # Diff against an empty original: everything reads as newly added.
    staged_diff = create_diff_view("", produced)
    chat_log = history + [[prompt, "Generated code is ready for review in Diff View"]]
    return staged_diff, "", produced, True, chat_log
472
+
473
def apply_diff(diff_original, diff_modified, in_diff_mode):
    """Commit the AI-proposed version when a diff is pending.

    Outside diff mode this is a no-op update; otherwise the modified text
    is saved and the diff-mode flag is cleared.
    """
    if not in_diff_mode:
        return gr.update(), in_diff_mode

    fs.save_file(diff_modified)
    return fs.get_current_file_content(), False
478
+
479
def discard_diff():
    """Drop the pending AI diff: restore the saved buffer, exit diff mode."""
    return fs.get_current_file_content(), False
481
+
482
def clear_terminal():
    """Reset the terminal panel to its idle prompt string."""
    return "$ _ (Ctrl+R to run)"
484
+
485
def handle_chat(message, history):
    """Append a canned assistant reply to the chat and clear the input box.

    Whitespace-only messages are ignored. Returns ("", updated history) —
    the empty string resets the input textbox.
    """
    if not message.strip():
        return "", history

    # Demo-only canned reply; a full implementation would consult the model.
    response = "AI: This is a demo response. In a full implementation, I'd analyze your code context and provide helpful suggestions."
    return "", history + [[message, response]]
493
+
494
def switch_to_diff_tab():
    """Return a Tabs update selecting the Diff View tab by its id."""
    return gr.Tabs(selected="diff-tab")

def switch_to_editor_tab():
    """Return a Tabs update selecting the Editor tab by its id."""
    return gr.Tabs(selected="editor-tab")
499
+
500
# Bind events
# Autosave on every editor change; explicit save/new-file from the sidebar.
editor.change(update_file_content, [editor, current_file_state], editor)
file_list.change(load_file, file_list, [editor, current_file_state])
new_file_btn.click(create_new_file, new_file_name, [file_list, editor])
save_btn.click(save_file, [editor, current_file_state], editor)

run_btn.click(run_code_wrapper, editor, terminal)
complete_btn.click(complete_code_wrapper, editor, editor)

# Explain/refactor stage their result as a diff, then jump to the Diff tab.
# NOTE(review): `demo.tabs` is not an obvious public attribute of gr.Blocks —
# confirm it resolves to the outer gr.Tabs container in this Gradio version.
explain_btn.click(
    explain_code_wrapper,
    editor,
    [diff_view, diff_original_state, diff_modified_state, diff_mode_state]
).then(
    switch_to_diff_tab, None, demo.tabs
)

refactor_btn.click(
    refactor_code_wrapper,
    editor,
    [diff_view, diff_original_state, diff_modified_state, diff_mode_state]
).then(
    switch_to_diff_tab, None, demo.tabs
)

# The Generate button only reveals the chat input; generation itself runs
# through the submit/send chains below.
generate_btn.click(
    lambda: gr.update(visible=True),
    outputs=chat_input
)

# NOTE(review): handle_chat clears chat_input before the chained
# generate_code_wrapper reads it, so the generator always receives an empty
# prompt and returns early — the generate-from-chat path likely never fires.
chat_input.submit(
    handle_chat,
    [chat_input, chat_history],
    [chat_input, chat_history]
).then(
    generate_code_wrapper,
    [chat_input, chat_history],
    [diff_view, diff_original_state, diff_modified_state, diff_mode_state, chat_history]
).then(
    switch_to_diff_tab, None, demo.tabs
)

send_btn.click(
    handle_chat,
    [chat_input, chat_history],
    [chat_input, chat_history]
).then(
    generate_code_wrapper,
    [chat_input, chat_history],
    [diff_view, diff_original_state, diff_modified_state, diff_mode_state, chat_history]
).then(
    switch_to_diff_tab, None, demo.tabs
)

# Applying/discarding a diff returns the user to the editor tab.
apply_btn.click(
    apply_diff,
    [diff_original_state, diff_modified_state, diff_mode_state],
    [editor, diff_mode_state]
).then(
    switch_to_editor_tab, None, demo.tabs
)

discard_btn.click(
    discard_diff,
    outputs=[editor, diff_mode_state]
).then(
    switch_to_editor_tab, None, demo.tabs
)

clear_btn.click(
    clear_terminal,
    outputs=terminal
)

# Add keyboard shortcuts using Gradio's new event system
# NOTE(review): `_js` was renamed to `js` in Gradio 4+ — with the
# "Gradio 6.5.1" noted above this keyword would be rejected. Also
# `querySelector('button:contains(...)')` is jQuery syntax, not valid CSS,
# and will throw in the browser — verify the shortcut wiring end to end.
demo.load(
    None,
    None,
    None,
    _js="""
    () => {
        document.addEventListener('keydown', function(e) {
            // Ctrl+R: Run code
            if (e.ctrlKey && e.key === 'r') {
                e.preventDefault();
                const runBtn = document.querySelector('button:contains("Run")');
                if (runBtn) runBtn.click();
            }

            // Ctrl+Enter: Complete code
            if (e.ctrlKey && e.key === 'Enter') {
                e.preventDefault();
                const completeBtn = document.querySelector('button:contains("Complete")');
                if (completeBtn) completeBtn.click();
            }

            // Ctrl+Shift+E: Explain code
            if (e.ctrlKey && e.shiftKey && e.key === 'E') {
                e.preventDefault();
                const explainBtn = document.querySelector('button:contains("Explain")');
                if (explainBtn) explainBtn.click();
            }

            // Ctrl+B: Toggle file explorer
            if (e.ctrlKey && e.key === 'b') {
                e.preventDefault();
                // Implementation would toggle sidebar
            }

            // Ctrl+J: Toggle terminal
            if (e.ctrlKey && e.key === 'j') {
                e.preventDefault();
                // Implementation would toggle terminal
            }

            // Ctrl+L: Toggle AI chat
            if (e.ctrlKey && e.key === 'l') {
                e.preventDefault();
                // Implementation would toggle chat panel
            }

            // Ctrl+N: New file
            if (e.ctrlKey && e.key === 'n') {
                e.preventDefault();
                document.querySelector('input[placeholder="filename.js"]').focus();
            }
        });
    }
    """
)
630
+
631
if __name__ == "__main__":
    # Entry point: report runtime details, then serve the Gradio app.
    print("Starting Axon Pro IDE...")
    print(f"Running on CPU with {torch.__version__}")
    print(f"Model: {MODEL_NAME}")

    launch_options = dict(
        server_name="0.0.0.0",
        server_port=int(os.getenv("PORT", 7860)),
        debug=True,
        show_api=False,
        favicon_path="https://api.iconify.design/teenyicons:ai-solid.svg",
        root_path=os.getenv("ROOT_PATH", ""),
    )
    demo.launch(**launch_options)