khirodsahoo93 commited on
Commit
e2adbf3
·
verified ·
1 Parent(s): 736bdb9

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +73 -217
  2. requirements.txt +0 -1
app.py CHANGED
@@ -1,13 +1,10 @@
1
  """
2
- Python to C++ Code Optimizer - Modern UI with Password Protection
3
- AI-powered code conversion using GPT-4o, Claude-3.5-Sonnet, and Open Source models
4
 
5
  Supported Models:
6
  - GPT-4o (OpenAI) - Premium, fastest, most accurate
7
  - Claude-3.5-Sonnet (Anthropic) - Premium, excellent for code
8
- - CodeLlama-34B (Meta) - Open source, free/cheap
9
- - DeepSeek-Coder-33B - Open source, excellent for code
10
- - Mistral-7B - Open source, fast, general purpose
11
 
12
  ⚠️ SECURITY WARNING:
13
  This app executes arbitrary code. Only run code from trusted sources.
@@ -19,7 +16,6 @@ import io
19
  import sys
20
  import subprocess
21
  import socket
22
- import requests
23
  import httpx
24
  from openai import OpenAI
25
  import anthropic
@@ -67,16 +63,6 @@ def get_claude_client():
67
  OPENAI_MODEL = "gpt-4o"
68
  CLAUDE_MODEL = "claude-3-5-sonnet-20240620"
69
 
70
- # Hugging Face models (open source)
71
- HF_MODELS = {
72
- "CodeLlama-34B": "codellama/CodeLlama-34b-Instruct-hf",
73
- "DeepSeek-Coder-33B": "deepseek-ai/deepseek-coder-33b-instruct",
74
- "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2"
75
- }
76
-
77
- # Hugging Face API endpoint
78
- HF_API_URL = "https://api-inference.huggingface.co/models/"
79
-
80
  # System and user prompts
81
  system_message = (
82
  "You are an assistant that reimplements Python code in high performance C++. "
@@ -144,129 +130,12 @@ def stream_claude(python):
144
  except Exception as e:
145
  yield f"❌ Error: {str(e)}"
146
 
147
- def stream_huggingface(python, model_name):
148
- """Stream Hugging Face model response"""
149
- try:
150
- # Get HF token (optional - works without it but with rate limits)
151
- hf_token = os.environ.get("HF_TOKEN", "")
152
-
153
- # Debug info
154
- if hf_token:
155
- yield f"πŸ”‘ Using HF token (first 10 chars): {hf_token[:10]}...\n\n"
156
- else:
157
- yield f"⚠️ No HF_TOKEN found - using public API (limited)\n\n"
158
-
159
- # Get the model ID
160
- model_id = HF_MODELS.get(model_name)
161
- if not model_id:
162
- yield f"❌ Unknown model: {model_name}"
163
- return
164
-
165
- yield f"πŸ“‘ Calling model: {model_id}\n\n"
166
-
167
- headers = {"Authorization": f"Bearer {hf_token}"} if hf_token else {}
168
-
169
- # Prepare the prompt
170
- prompt = f"{system_message}\n\n{user_prompt_for(python)}"
171
-
172
- payload = {
173
- "inputs": prompt,
174
- "parameters": {
175
- "max_new_tokens": 2000,
176
- "temperature": 0.7,
177
- "return_full_text": False
178
- }
179
- }
180
-
181
- # Call HF Inference API
182
- response = requests.post(
183
- HF_API_URL + model_id,
184
- headers=headers,
185
- json=payload,
186
- timeout=60
187
- )
188
-
189
- # Check if response body is empty
190
- if not response.text or len(response.text.strip()) == 0:
191
- yield f"⏳ Model is loading or initializing...\n\n"
192
- yield f"This happens on first use. Please try again in 30-60 seconds.\n\n"
193
- yield f"πŸ’‘ Quick alternative: Use GPT-4o or Claude-3.5-Sonnet (instant results!)"
194
- return
195
-
196
- if response.status_code == 200:
197
- try:
198
- result = response.json()
199
- if isinstance(result, list) and len(result) > 0:
200
- generated_text = result[0].get("generated_text", "")
201
- else:
202
- generated_text = result.get("generated_text", "")
203
-
204
- if not generated_text or len(generated_text.strip()) == 0:
205
- yield f"⚠️ Model returned empty response.\n\n"
206
- yield f"Try again or use GPT-4o/Claude-3.5-Sonnet instead."
207
- return
208
-
209
- # Clean up the response
210
- reply = generated_text.replace('```cpp\n','').replace('```','')
211
- yield reply
212
- except ValueError as json_err:
213
- # JSON parsing failed
214
- yield f"⚠️ Model response format error.\n\n"
215
- yield f"The model might still be warming up. Try again in 30 seconds.\n\n"
216
- yield f"πŸ’‘ Or use GPT-4o/Claude-3.5-Sonnet for instant results!"
217
-
218
- elif response.status_code == 401 or response.status_code == 403:
219
- # Authentication error - need HF token
220
- yield f"πŸ”‘ Authentication Required!\n\n"
221
- yield f"To use open-source models, you need a FREE Hugging Face token:\n\n"
222
- yield f"1. Get token: https://huggingface.co/settings/tokens\n"
223
- yield f"2. Add HF_TOKEN secret in Space Settings\n"
224
- yield f"3. Factory reboot\n\n"
225
- yield f"OR use GPT-4o/Claude-3.5-Sonnet instead (they work now!)"
226
- elif response.status_code == 503:
227
- # Service unavailable - model loading
228
- yield f"⏳ Model is currently loading (cold start)...\n\n"
229
- yield f"This can take 30-60 seconds on first use.\n"
230
- yield f"Please wait a minute and try again.\n\n"
231
- yield f"πŸ’‘ Quick solution: Use GPT-4o or Claude-3.5-Sonnet (no waiting!)"
232
- else:
233
- try:
234
- error_msg = response.json().get("error", "Unknown error")
235
- except:
236
- error_msg = response.text[:200] if response.text else "Empty response"
237
-
238
- if "loading" in str(error_msg).lower():
239
- yield f"⏳ Model is loading... This may take 20-30 seconds. Please try again."
240
- else:
241
- yield f"❌ Error from Hugging Face (HTTP {response.status_code}):\n{error_msg}\n\n"
242
- yield f"πŸ’‘ Tip: Use GPT-4o or Claude-3.5-Sonnet for now (they're working!)"
243
-
244
- except requests.exceptions.Timeout as timeout_err:
245
- yield f"⏱️ Request timed out: {str(timeout_err)}\n\n"
246
- yield f"Model might be loading (cold start). Try again in 30-60 seconds.\n\n"
247
- yield f"πŸ’‘ Or use GPT-4o/Claude-3.5-Sonnet for instant results!"
248
- except requests.exceptions.ConnectionError as conn_err:
249
- yield f"🌐 Connection error: {str(conn_err)}\n\n"
250
- yield f"Cannot reach Hugging Face API. Check your internet connection.\n\n"
251
- yield f"πŸ’‘ Please use GPT-4o or Claude-3.5-Sonnet instead."
252
- except requests.exceptions.RequestException as req_err:
253
- yield f"🌐 Network error: {str(req_err)}\n\n"
254
- yield f"Type: {type(req_err).__name__}\n\n"
255
- yield f"πŸ’‘ Please use GPT-4o or Claude-3.5-Sonnet instead."
256
- except Exception as e:
257
- yield f"❌ Unexpected error: {str(e)}\n\n"
258
- yield f"Error type: {type(e).__name__}\n"
259
- yield f"Full details: {repr(e)}\n\n"
260
- yield f"πŸ’‘ Tip: Use GPT-4o or Claude-3.5-Sonnet for reliable results!"
261
-
262
  def optimize(python, model):
263
  """Convert Python to C++ using selected AI model"""
264
  if model in ["GPT-4o", "GPT"]:
265
  result = stream_gpt(python)
266
  elif model in ["Claude-3.5-Sonnet", "Claude"]:
267
  result = stream_claude(python)
268
- elif model in HF_MODELS.keys():
269
- result = stream_huggingface(python, model)
270
  else:
271
  raise ValueError(f"Unknown model: {model}")
272
 
@@ -534,95 +403,82 @@ def create_interface():
534
  label="Python Code:",
535
  value=default_python,
536
  lines=15,
537
- placeholder="Enter your Python code here...",
538
  elem_classes=["python-input"],
539
- show_copy_button=True
540
  )
541
 
542
- gr.Markdown("### πŸ€– AI Model Selection")
543
- model_selector = gr.Dropdown(
544
- ["GPT-4o", "Claude-3.5-Sonnet", "CodeLlama-34B", "DeepSeek-Coder-33B", "Mistral-7B"],
545
- label="Select AI Model",
546
- value="GPT-4o",
547
- elem_classes=["model-selector"]
548
- )
 
 
549
 
550
- convert_btn = gr.Button("πŸ”„ Convert to C++", elem_classes=["modern-button"])
551
-
552
  with gr.Column(scale=1):
553
- gr.Markdown("### ⚑ Generated C++ Code")
554
  cpp_output = gr.Textbox(
555
- label="C++ Code:",
556
  lines=15,
557
- placeholder="Generated C++ code will appear here...",
558
  elem_classes=["cpp-output"],
559
- show_copy_button=True
560
  )
561
 
562
  # Execution Section
 
 
 
563
  with gr.Row():
564
- with gr.Column(scale=1):
565
- gr.Markdown("### 🐍 Python Execution")
566
- python_run_btn = gr.Button("▢️ Run Python", elem_classes=["run-button"])
567
- python_result = gr.TextArea(
568
- label="Python Output:",
569
- lines=8,
570
- elem_classes=["output-section", "python-output"],
571
- placeholder="Python execution results will appear here..."
572
  )
573
 
574
- with gr.Column(scale=1):
575
- gr.Markdown("### πŸš€ C++ Execution")
576
- cpp_run_btn = gr.Button("▢️ Run C++", elem_classes=["run-button"])
577
- cpp_result = gr.TextArea(
578
- label="C++ Output:",
579
- lines=8,
580
- elem_classes=["output-section", "cpp-output-result"],
581
- placeholder="C++ execution results will appear here..."
582
  )
583
 
584
- # Performance Metrics Section
585
- gr.HTML("""
586
- <div class="performance-card">
587
- <h3>πŸ“Š Performance Comparison</h3>
588
- <div>
589
- <span class="performance-metric">Python: ~6.3s</span>
590
- <span class="performance-metric">C++: ~0.6s</span>
591
- <span class="performance-metric">Speedup: 10x</span>
592
- </div>
593
- <p style="margin: 12px 0 0 0; color: #64748b; font-size: 14px;">
594
- Compare execution times and performance metrics between Python and C++ implementations.<br>
595
- Typical speedup: 10-100x depending on the algorithm.
596
- </p>
597
- </div>
598
- """)
599
-
600
- # Event Handlers
601
- convert_btn.click(
602
- optimize,
603
- inputs=[python_input, model_selector],
604
- outputs=[cpp_output],
605
- show_progress=True
606
  )
607
 
608
- python_run_btn.click(
609
- execute_python,
610
- inputs=[python_input],
611
- outputs=[python_result],
612
- show_progress=True
613
  )
614
 
615
- cpp_run_btn.click(
616
- execute_cpp,
617
- inputs=[cpp_output],
618
- outputs=[cpp_result],
619
- show_progress=True
620
  )
621
-
622
  return app
623
 
624
- # Launch with password protection
625
  if __name__ == "__main__":
 
 
 
 
 
626
  app = create_interface()
627
 
628
  # Check if running on Hugging Face Spaces
@@ -630,34 +486,34 @@ if __name__ == "__main__":
630
 
631
  if is_huggingface:
632
  # Hugging Face Spaces configuration
633
- print(f"πŸš€ Launching Python to C++ Code Optimizer on Hugging Face Spaces")
634
- print(f"πŸ” Password protection enabled")
635
-
636
  app.launch(
637
  auth=("user", APP_PASSWORD),
638
- auth_message="πŸ” Enter credentials to access the Python to C++ Code Optimizer"
639
  )
640
  else:
641
  # Local development configuration
642
- def find_free_port():
643
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
644
- s.bind(('', 0))
645
- s.listen(1)
646
- port = s.getsockname()[1]
647
- return port
 
 
 
 
 
648
 
649
- free_port = find_free_port()
650
- print(f"πŸš€ Launching Python to C++ Code Optimizer on port: {free_port}")
651
- print(f"πŸ” Password protection enabled")
652
 
653
- # Launch with authentication
654
  app.launch(
655
- inbrowser=True,
656
- share=False,
657
  server_name="127.0.0.1",
658
- server_port=free_port,
659
- show_error=True,
660
  auth=("user", APP_PASSWORD),
661
- auth_message="πŸ” Enter credentials to access the Python to C++ Code Optimizer"
662
  )
663
 
 
1
  """
2
+ Python to C++ Code Optimizer - AI-Powered Code Conversion
3
+ Modern Gradio app with password protection for secure deployments
4
 
5
  Supported Models:
6
  - GPT-4o (OpenAI) - Premium, fastest, most accurate
7
  - Claude-3.5-Sonnet (Anthropic) - Premium, excellent for code
 
 
 
8
 
9
  ⚠️ SECURITY WARNING:
10
  This app executes arbitrary code. Only run code from trusted sources.
 
16
  import sys
17
  import subprocess
18
  import socket
 
19
  import httpx
20
  from openai import OpenAI
21
  import anthropic
 
63
  OPENAI_MODEL = "gpt-4o"
64
  CLAUDE_MODEL = "claude-3-5-sonnet-20240620"
65
 
 
 
 
 
 
 
 
 
 
 
66
  # System and user prompts
67
  system_message = (
68
  "You are an assistant that reimplements Python code in high performance C++. "
 
130
  except Exception as e:
131
  yield f"❌ Error: {str(e)}"
132
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
  def optimize(python, model):
134
  """Convert Python to C++ using selected AI model"""
135
  if model in ["GPT-4o", "GPT"]:
136
  result = stream_gpt(python)
137
  elif model in ["Claude-3.5-Sonnet", "Claude"]:
138
  result = stream_claude(python)
 
 
139
  else:
140
  raise ValueError(f"Unknown model: {model}")
141
 
 
403
  label="Python Code:",
404
  value=default_python,
405
  lines=15,
 
406
  elem_classes=["python-input"],
407
+ placeholder="Enter your Python code here..."
408
  )
409
 
410
+ with gr.Row():
411
+ model_selector = gr.Dropdown(
412
+ ["GPT-4o", "Claude-3.5-Sonnet"],
413
+ label="Select AI Model",
414
+ value="GPT-4o",
415
+ elem_classes=["model-selector"]
416
+ )
417
+
418
+ convert_button = gr.Button("✨ Convert to C++", elem_classes=["modern-button"])
419
 
 
 
420
  with gr.Column(scale=1):
421
+ gr.Markdown("### ⚑ Optimized C++ Code")
422
  cpp_output = gr.Textbox(
423
+ label="Generated C++ Code:",
424
  lines=15,
 
425
  elem_classes=["cpp-output"],
426
+ interactive=False
427
  )
428
 
429
  # Execution Section
430
+ gr.Markdown("---")
431
+ gr.Markdown("## πŸƒ Code Execution & Performance Comparison")
432
+
433
  with gr.Row():
434
+ with gr.Column():
435
+ gr.Markdown("### 🐍 Python Output")
436
+ run_python_button = gr.Button("▢️ Run Python", elem_classes=["run-button"])
437
+ python_output = gr.Textbox(
438
+ label="Python Execution Output:",
439
+ lines=5,
440
+ elem_classes=["python-output"],
441
+ interactive=False
442
  )
443
 
444
+ with gr.Column():
445
+ gr.Markdown("### ⚑ C++ Output")
446
+ run_cpp_button = gr.Button("▢️ Run C++", elem_classes=["run-button"])
447
+ cpp_execution_output = gr.Textbox(
448
+ label="C++ Execution Output:",
449
+ lines=5,
450
+ elem_classes=["cpp-output-result"],
451
+ interactive=False
452
  )
453
 
454
+ # Event handlers
455
+ convert_button.click(
456
+ fn=optimize,
457
+ inputs=[python_input, model_selector],
458
+ outputs=cpp_output
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
459
  )
460
 
461
+ run_python_button.click(
462
+ fn=execute_python,
463
+ inputs=python_input,
464
+ outputs=python_output
 
465
  )
466
 
467
+ run_cpp_button.click(
468
+ fn=execute_cpp,
469
+ inputs=cpp_output,
470
+ outputs=cpp_execution_output
 
471
  )
472
+
473
  return app
474
 
475
+ # Launch the app
476
  if __name__ == "__main__":
477
+ print("\n" + "="*50)
478
+ print(f"===== Application Startup at {__import__('datetime').datetime.now().strftime('%Y-%m-%d %H:%M:%S')} =====")
479
+ print("="*50)
480
+
481
+ # Create the app
482
  app = create_interface()
483
 
484
  # Check if running on Hugging Face Spaces
 
486
 
487
  if is_huggingface:
488
  # Hugging Face Spaces configuration
489
+ print("πŸš€ Launching Python to C++ Code Optimizer on Hugging Face Spaces")
490
+ print("πŸ” Password protection enabled")
 
491
  app.launch(
492
  auth=("user", APP_PASSWORD),
493
+ show_error=True
494
  )
495
  else:
496
  # Local development configuration
497
+ def get_available_port(start_port=7860):
498
+ """Find an available port starting from start_port"""
499
+ port = start_port
500
+ while port < start_port + 100:
501
+ try:
502
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
503
+ s.bind(('', port))
504
+ return port
505
+ except OSError:
506
+ port += 1
507
+ return start_port
508
 
509
+ port = get_available_port()
510
+ print(f"πŸš€ Launching Python to C++ Code Optimizer on port: {port}")
511
+ print(f"πŸ” Password protection enabled. Password: {APP_PASSWORD}")
512
 
 
513
  app.launch(
 
 
514
  server_name="127.0.0.1",
515
+ server_port=port,
 
516
  auth=("user", APP_PASSWORD),
517
+ show_error=True
518
  )
519
 
requirements.txt CHANGED
@@ -2,6 +2,5 @@ gradio==4.44.1
2
  openai==1.55.0
3
  anthropic==0.40.0
4
  python-dotenv==1.0.0
5
- requests>=2.31.0
6
  httpx>=0.27.0
7
 
 
2
  openai==1.55.0
3
  anthropic==0.40.0
4
  python-dotenv==1.0.0
 
5
  httpx>=0.27.0
6