khirodsahoo93 commited on
Commit
b731b47
·
verified ·
1 Parent(s): ee41461

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +94 -5
  2. requirements.txt +4 -3
app.py CHANGED
@@ -1,6 +1,13 @@
1
  """
2
  Python to C++ Code Optimizer - Modern UI with Password Protection
3
- AI-powered code conversion using GPT-4o and Claude-3.5-Sonnet
 
 
 
 
 
 
 
4
 
5
  ⚠️ SECURITY WARNING:
6
  This app executes arbitrary code. Only run code from trusted sources.
@@ -12,6 +19,8 @@ import io
12
  import sys
13
  import subprocess
14
  import socket
 
 
15
  from openai import OpenAI
16
  import anthropic
17
  import gradio as gr
@@ -27,23 +36,47 @@ except ImportError:
27
  # Set this as a Hugging Face Secret: APP_PASSWORD
28
  APP_PASSWORD = os.environ.get("APP_PASSWORD", "demo123") # Change default!
29
 
30
- # Lazy initialization of AI clients
31
  def get_openai_client():
32
  api_key = os.environ.get("OPENAI_API_KEY")
33
  if not api_key:
34
  raise ValueError("OPENAI_API_KEY not found. Please set it in your environment or .env file.")
35
- return OpenAI(api_key=api_key)
 
 
 
 
 
 
 
36
 
37
  def get_claude_client():
38
  api_key = os.environ.get("ANTHROPIC_API_KEY")
39
  if not api_key:
40
  raise ValueError("ANTHROPIC_API_KEY not found. Please set it in your environment or .env file.")
41
- return anthropic.Anthropic(api_key=api_key)
 
 
 
 
 
 
 
42
 
43
  # Model configurations
44
  OPENAI_MODEL = "gpt-4o"
45
  CLAUDE_MODEL = "claude-3-5-sonnet-20240620"
46
 
 
 
 
 
 
 
 
 
 
 
47
  # System and user prompts
48
  system_message = (
49
  "You are an assistant that reimplements Python code in high performance C++. "
@@ -111,12 +144,68 @@ def stream_claude(python):
111
  except Exception as e:
112
  yield f"❌ Error: {str(e)}"
113
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  def optimize(python, model):
115
  """Convert Python to C++ using selected AI model"""
116
  if model in ["GPT-4o", "GPT"]:
117
  result = stream_gpt(python)
118
  elif model in ["Claude-3.5-Sonnet", "Claude"]:
119
  result = stream_claude(python)
 
 
120
  else:
121
  raise ValueError(f"Unknown model: {model}")
122
 
@@ -391,7 +480,7 @@ def create_interface():
391
 
392
  gr.Markdown("### 🤖 AI Model Selection")
393
  model_selector = gr.Dropdown(
394
- ["GPT-4o", "Claude-3.5-Sonnet"],
395
  label="Select AI Model",
396
  value="GPT-4o",
397
  elem_classes=["model-selector"]
 
1
  """
2
  Python to C++ Code Optimizer - Modern UI with Password Protection
3
+ AI-powered code conversion using GPT-4o, Claude-3.5-Sonnet, and Open Source models
4
+
5
+ Supported Models:
6
+ - GPT-4o (OpenAI) - Premium, fastest, most accurate
7
+ - Claude-3.5-Sonnet (Anthropic) - Premium, excellent for code
8
+ - CodeLlama-34B (Meta) - Open source, free/cheap
9
+ - DeepSeek-Coder-33B - Open source, excellent for code
10
+ - Mistral-7B - Open source, fast, general purpose
11
 
12
  ⚠️ SECURITY WARNING:
13
  This app executes arbitrary code. Only run code from trusted sources.
 
19
  import sys
20
  import subprocess
21
  import socket
22
+ import requests
23
+ import httpx
24
  from openai import OpenAI
25
  import anthropic
26
  import gradio as gr
 
36
  # Set this as a Hugging Face Secret: APP_PASSWORD
37
  APP_PASSWORD = os.environ.get("APP_PASSWORD", "demo123") # Change default!
38
 
39
+ # Lazy initialization of AI clients with explicit HTTP client to avoid Gradio conflicts
40
def get_openai_client():
    """Return a lazily-created, process-wide cached OpenAI client.

    The API key is re-read from the environment on every call so a missing
    key is always reported, but the underlying ``httpx.Client`` is built
    only once: the previous version created a fresh connection pool per
    call and never closed it, leaking connections over time.

    Returns:
        OpenAI: a configured client instance (same object on repeat calls).

    Raises:
        ValueError: if OPENAI_API_KEY is not set.
    """
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("OPENAI_API_KEY not found. Please set it in your environment or .env file.")

    # Reuse one client per process. NOTE(review): if OPENAI_API_KEY changes
    # at runtime the cached client keeps the old key — acceptable here.
    client = getattr(get_openai_client, "_cached_client", None)
    if client is None:
        # Explicit HTTP client with conservative pool limits, passed in to
        # avoid the openai/httpx kwargs conflict seen when running under Gradio.
        http_client = httpx.Client(
            timeout=60.0,
            limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
        )
        client = OpenAI(api_key=api_key, http_client=http_client)
        get_openai_client._cached_client = client
    return client
52
 
53
def get_claude_client():
    """Return a lazily-created, process-wide cached Anthropic client.

    Mirrors ``get_openai_client``: the key check runs on every call, but
    the ``httpx.Client`` (and its connection pool) is created only once
    instead of being leaked on each invocation.

    Returns:
        anthropic.Anthropic: a configured client (same object on repeat calls).

    Raises:
        ValueError: if ANTHROPIC_API_KEY is not set.
    """
    api_key = os.environ.get("ANTHROPIC_API_KEY")
    if not api_key:
        raise ValueError("ANTHROPIC_API_KEY not found. Please set it in your environment or .env file.")

    # Reuse one client per process. NOTE(review): a key rotated at runtime
    # is not picked up by the cached client — acceptable for this app.
    client = getattr(get_claude_client, "_cached_client", None)
    if client is None:
        # Explicit HTTP client with conservative pool limits, passed in to
        # avoid the anthropic/httpx kwargs conflict seen when running under Gradio.
        http_client = httpx.Client(
            timeout=60.0,
            limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
        )
        client = anthropic.Anthropic(api_key=api_key, http_client=http_client)
        get_claude_client._cached_client = client
    return client
65
 
66
# Model configurations
# Premium API model identifiers (Claude pinned to a dated snapshot).
OPENAI_MODEL = "gpt-4o"
CLAUDE_MODEL = "claude-3-5-sonnet-20240620"

# Hugging Face models (open source)
# Keys are the labels offered in the Gradio model dropdown (and matched by
# optimize()); values are the Hub repository ids appended to HF_API_URL.
HF_MODELS = {
    "CodeLlama-34B": "codellama/CodeLlama-34b-Instruct-hf",
    "DeepSeek-Coder-33B": "deepseek-ai/deepseek-coder-33b-instruct",
    "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2"
}

# Hugging Face API endpoint
# Base URL of the serverless Inference API; stream_huggingface concatenates
# the model id onto this prefix.
HF_API_URL = "https://api-inference.huggingface.co/models/"
79
+
80
  # System and user prompts
81
  system_message = (
82
  "You are an assistant that reimplements Python code in high performance C++. "
 
144
  except Exception as e:
145
  yield f"❌ Error: {str(e)}"
146
 
147
def stream_huggingface(python, model_name):
    """Yield the Hugging Face Inference API response for *python* code.

    Generator that sends the module-level prompt (system_message +
    user_prompt_for(python)) to the serverless Inference API for the model
    registered under *model_name* in HF_MODELS, then yields either the
    generated text (markdown code fences stripped) or a human-readable
    error string. Never raises: all failures are yielded as messages.

    Args:
        python: Python source code to convert.
        model_name: a key of HF_MODELS (dropdown label, not a repo id).
    """
    try:
        # HF token is optional — anonymous calls work but are rate limited.
        hf_token = os.environ.get("HF_TOKEN", "")

        model_id = HF_MODELS.get(model_name)
        if not model_id:
            yield f"❌ Unknown model: {model_name}"
            return

        headers = {"Authorization": f"Bearer {hf_token}"} if hf_token else {}

        # Single prompt string: system and user text are concatenated since
        # the raw Inference API takes one "inputs" field, not chat messages.
        prompt = f"{system_message}\n\n{user_prompt_for(python)}"

        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 2000,
                "temperature": 0.7,
                "return_full_text": False
            }
        }

        # Call HF Inference API (non-streaming; single response chunk).
        response = requests.post(
            HF_API_URL + model_id,
            headers=headers,
            json=payload,
            timeout=60
        )

        if response.status_code == 200:
            result = response.json()
            # The API returns either [{"generated_text": ...}] or a bare
            # dict; an empty list (no generations) must not be indexed —
            # the old code called .get() on it and raised AttributeError.
            if isinstance(result, list):
                generated_text = result[0].get("generated_text", "") if result else ""
            else:
                generated_text = result.get("generated_text", "")

            # Strip markdown code fences so the output is plain C++.
            reply = generated_text.replace('```cpp\n','').replace('```','')
            yield reply
        else:
            # Error bodies are usually JSON but can be non-JSON (e.g. HTML
            # gateway errors); fall back to the HTTP status in that case.
            try:
                error_msg = response.json().get("error", "Unknown error")
            except ValueError:
                error_msg = f"HTTP {response.status_code}"
            # str() guard: the API occasionally returns non-string errors.
            if "loading" in str(error_msg).lower():
                yield f"⏳ Model is loading... This may take 20-30 seconds. Please try again."
            else:
                yield f"❌ Error from Hugging Face: {error_msg}"

    except Exception as e:
        yield f"❌ Error calling Hugging Face: {str(e)}"
200
+
201
  def optimize(python, model):
202
  """Convert Python to C++ using selected AI model"""
203
  if model in ["GPT-4o", "GPT"]:
204
  result = stream_gpt(python)
205
  elif model in ["Claude-3.5-Sonnet", "Claude"]:
206
  result = stream_claude(python)
207
+ elif model in HF_MODELS.keys():
208
+ result = stream_huggingface(python, model)
209
  else:
210
  raise ValueError(f"Unknown model: {model}")
211
 
 
480
 
481
  gr.Markdown("### 🤖 AI Model Selection")
482
  model_selector = gr.Dropdown(
483
+ ["GPT-4o", "Claude-3.5-Sonnet", "CodeLlama-34B", "DeepSeek-Coder-33B", "Mistral-7B"],
484
  label="Select AI Model",
485
  value="GPT-4o",
486
  elem_classes=["model-selector"]
requirements.txt CHANGED
@@ -1,6 +1,7 @@
1
  gradio==4.44.1
2
- openai==1.54.4
3
- anthropic==0.39.0
4
  python-dotenv==1.0.0
5
- httpx>=0.24.0
 
6
 
 
1
  gradio==4.44.1
2
+ openai==1.55.0
3
+ anthropic==0.40.0
4
  python-dotenv==1.0.0
5
+ requests>=2.31.0
6
+ httpx>=0.27.0,<0.28  # openai 1.55.0 still passes 'proxies' to httpx; that kwarg was removed in httpx 0.28
7