sakuragolden committed on
Commit
ae3a99c
·
verified ·
1 Parent(s): 28ab5c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +127 -92
app.py CHANGED
@@ -1,139 +1,174 @@
1
- # --- Guard to prevent Streamlit from running this file ---
2
- import sys
3
- if any("streamlit" in arg.lower() for arg in sys.argv):
4
- print("❌ This is a Gradio app. Please run using: python app.py")
5
- sys.exit(0)
6
 
7
- # ---------------------------------------------------------
8
- # Universal Code Generator (HF Inference API Only, NO torch)
9
- # ---------------------------------------------------------
 
10
 
11
- import gradio as gr
12
- import requests
13
- import json
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
- # HF Inference API endpoint template
16
- def hf_inference(model, prompt, hf_token, max_new_tokens=512, temperature=0.7):
17
- if not hf_token:
18
- return "❌ ERROR: Missing Hugging Face Token."
19
 
20
- url = f"https://api-inference.huggingface.co/models/{model}"
21
- headers = {"Authorization": f"Bearer {hf_token}"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
 
 
23
  payload = {
24
  "inputs": prompt,
25
  "parameters": {
26
- "temperature": temperature,
27
- "max_new_tokens": max_new_tokens,
28
  "return_full_text": False
29
  }
30
  }
31
 
32
- resp = requests.post(url, headers=headers, json=payload)
33
-
 
 
 
34
  try:
35
  data = resp.json()
36
  except Exception:
37
- return "❌ API returned non-JSON response."
38
 
39
  # Common HF inference formats
40
- if isinstance(data, list) and len(data) > 0 and "generated_text" in data[0]:
41
  return data[0]["generated_text"]
42
 
43
- return json.dumps(data, indent=2)
 
44
 
 
 
45
 
46
- # -------------- Many Languages -----------------
47
 
48
- LANGUAGES = [
49
- "Python", "JavaScript", "TypeScript", "C", "C++", "C#", "Java", "Go",
50
- "Rust", "Ruby", "PHP", "Swift", "Kotlin", "Scala", "Haskell", "Lua",
51
- "R", "Julia", "MATLAB", "Dart", "Solidity", "Vyper", "Move", "Wasm",
52
- "Bash", "PowerShell", "SQL", "GraphQL", "NoSQL", "HTML", "CSS",
53
- "Perl", "OCaml", "F#", "Lisp", "Scheme", "Prolog", "Fortran", "COBOL",
54
- "Assembly", "Verilog", "VHDL", "LaTeX", "Markdown", "JSON", "YAML",
55
- "XML", "Dockerfile", "Makefile", "Nim", "Crystal", "Zig", "Q#", "Chapel"
56
- ]
57
 
58
- # ----------- Recommended HF Code Models -----------
59
 
60
- RECOMMENDED_MODELS = {
61
- "bigcode/starcoder2-3b": "StarCoder2-3B (Good general code generator)",
62
- "codellama/CodeLlama-7b-hf": "CodeLlama 7B (Strong coding model)",
63
- "microsoft/Phi-3-mini-4k-instruct": "Phi-3 Mini (Small + strong)",
64
- "Qwen/Qwen2.5-Coder-7B-Instruct": "Qwen2.5 Coder 7B (Excellent accuracy)",
65
- "deepseek-ai/deepseek-coder-6.7b-instruct": "DeepSeek Coder 6.7B (Great for algorithmic code)"
66
- }
67
-
68
- # ----------- Code Generation Logic -----------
69
-
70
- def build_prompt(task, language, content):
71
  if task == "Generate Code":
72
- return f"Write a high-quality {language} program:\n{content}\n\nOnly output code."
73
  if task == "Explain Code":
74
- return f"Explain this {language} code step-by-step:\n{content}"
75
  if task == "Fix Code":
76
- return f"Fix the following {language} code and explain corrections:\n{content}"
77
  if task == "Convert Code":
78
- return f"Convert the following code to {language}:\n{content}"
79
-
80
  return content
81
 
82
 
83
- def do_generate(task, language, content, model, token, temp, max_tokens):
84
- prompt = build_prompt(task, language, content)
85
- return hf_inference(model, prompt, token, max_new_tokens=max_tokens, temperature=temp)
 
 
86
 
 
 
87
 
88
- # -------------------------------------------
89
- # Gradio Interface
90
- # -------------------------------------------
91
 
92
  def build_ui():
93
- with gr.Blocks(title="Universal Code Generator (HF API, No Torch)") as ui:
94
-
95
- gr.Markdown("# πŸ”§ Universal Code Generator\n### Powered by Hugging Face Inference API")
96
-
97
- hf_token = gr.Textbox(label="Hugging Face Token", type="password", placeholder="Paste your HF token here")
98
-
99
- model = gr.Dropdown(
100
- list(RECOMMENDED_MODELS.keys()),
101
- label="Hugging Face Model",
102
- value="Qwen/Qwen2.5-Coder-7B-Instruct",
103
- info="Recommended high-quality code models"
104
  )
105
 
106
- gr.Markdown("### Task Selection")
107
-
108
- task = gr.Dropdown(
109
- ["Generate Code", "Explain Code", "Fix Code", "Convert Code"],
110
- label="Task Type"
111
- )
112
 
113
- language = gr.Dropdown(LANGUAGES, label="Target Language")
 
 
114
 
115
- content = gr.Textbox(
116
- label="Instruction / Code Input",
117
- lines=10,
118
- placeholder="Describe code OR paste code here..."
119
- )
120
-
121
- temp = gr.Slider(0.0, 1.5, 0.7, label="Temperature")
122
- max_tokens = gr.Slider(32, 2048, 512, label="Max Tokens")
123
 
124
  btn = gr.Button("Generate")
125
- output = gr.Code(label="Output")
126
 
127
- btn.click(
128
- do_generate,
129
- inputs=[task, language, content, model, hf_token, temp, max_tokens],
130
- outputs=output
131
- )
 
 
 
 
 
 
132
 
133
- return ui
134
 
135
 
136
- demo = build_ui()
137
 
138
  if __name__ == "__main__":
139
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
1
+ import os
2
+ import json
3
+ import requests
4
+ import gradio as gr
5
+ from typing import Optional
6
 
7
+ # ---------------------------------------
8
+ # Universal Code Generator (HF Inference API)
9
+ # Hugging Face Spaces friendly: reads HF token from env if not pasted in UI.
10
+ # ---------------------------------------
11
 
12
+ # Recommended HF code models (suggested defaults)
13
+ RECOMMENDED_MODELS = {
14
+ "bigcode/starcoder2-3b": "StarCoder2-3B (Good general code generator)",
15
+ "codellama/CodeLlama-7b-hf": "CodeLlama 7B (Strong coding model)",
16
+ "microsoft/Phi-3-mini-4k-instruct": "Phi-3 Mini (Small + strong)",
17
+ "Qwen/Qwen2.5-Coder-7B-Instruct": "Qwen2.5 Coder 7B (Excellent accuracy)",
18
+ "deepseek-ai/deepseek-coder-6.7b-instruct": "DeepSeek Coder 6.7B (Great for algorithmic code)"
19
+ }
20
+
21
+ # Many supported languages
22
+ LANGUAGES = [
23
+ "Python", "JavaScript", "TypeScript", "C", "C++", "C#", "Java", "Go",
24
+ "Rust", "Ruby", "PHP", "Swift", "Kotlin", "Scala", "Haskell", "Lua",
25
+ "R", "Julia", "MATLAB", "Dart", "Solidity", "Vyper", "Move", "Wasm",
26
+ "Bash", "PowerShell", "SQL", "GraphQL", "NoSQL", "HTML", "CSS",
27
+ "Perl", "OCaml", "F#", "Lisp", "Scheme", "Prolog", "Fortran", "COBOL",
28
+ "Assembly", "Verilog", "VHDL", "LaTeX", "Markdown", "JSON", "YAML",
29
+ "XML", "Dockerfile", "Makefile", "Nim", "Crystal", "Zig", "Q#", "Chapel"
30
+ ]
31
+
32
+ # Simple dangerous keywords blacklist
33
+ DANGEROUS_KEYWORDS = [
34
+ "rm -rf", "mkfs", "dd if=", "fork bomb", "shutdown", "reboot", "poweroff",
35
+ "create user", "adduser", "useradd", "passwd", "ssh -i", "cryptominer", "virus",
36
+ "malware", "ransomware", "keylogger", "inject", "exploit", "reverse shell",
37
+ "nc -e", "wget http", "curl http", "chmod 777 /", "sudo rm -rf /", ">: /dev/sda"
38
+ ]
39
 
 
 
 
 
40
 
41
+ def get_env_token() -> Optional[str]:
42
+ """
43
+ Check environment/secrets for a HF token. Hugging Face Spaces secrets are exposed as env vars.
44
+ Check common names in order.
45
+ """
46
+ for name in ("HF_TOKEN", "HUGGINGFACEHUB_API_TOKEN", "HUGGINGFACE_TOKEN", "HUGGINGFACEHUB_TOKEN"):
47
+ val = os.getenv(name)
48
+ if val and val.strip():
49
+ return val.strip()
50
+ return None
51
+
52
+
53
+ def hf_inference(model: str, prompt: str, hf_token: Optional[str], max_new_tokens: int = 512, temperature: float = 0.7):
54
+ """
55
+ Call Hugging Face Inference API. hf_token may be None -> fallback to env token.
56
+ Returns output string (or error message).
57
+ """
58
+ token = hf_token if hf_token and hf_token.strip() else get_env_token()
59
+ if not token:
60
+ return "❌ ERROR: Missing Hugging Face token. Paste a token in the box or set a Spaces secret named HF_TOKEN."
61
 
62
+ url = f"https://api-inference.huggingface.co/models/{model}"
63
+ headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
64
  payload = {
65
  "inputs": prompt,
66
  "parameters": {
67
+ "temperature": float(temperature),
68
+ "max_new_tokens": int(max_new_tokens),
69
  "return_full_text": False
70
  }
71
  }
72
 
73
+ try:
74
+ resp = requests.post(url, headers=headers, json=payload, timeout=120)
75
+ except Exception as e:
76
+ return f"❌ HTTP request failed: {e}"
77
+
78
  try:
79
  data = resp.json()
80
  except Exception:
81
+ return f"❌ API returned non-JSON response (status {resp.status_code}). Response text:\n{resp.text}"
82
 
83
  # Common HF inference formats
84
+ if isinstance(data, list) and len(data) > 0 and isinstance(data[0], dict) and "generated_text" in data[0]:
85
  return data[0]["generated_text"]
86
 
87
+ if isinstance(data, dict) and "generated_text" in data:
88
+ return data["generated_text"]
89
 
90
+ # Return pretty JSON for unexpected shapes
91
+ return json.dumps(data, indent=2)
92
 
 
93
 
94
+ def detect_dangerous(text: str) -> Optional[str]:
95
+ if not text:
96
+ return None
97
+ lower = text.lower()
98
+ for k in DANGEROUS_KEYWORDS:
99
+ if k in lower:
100
+ return k
101
+ return None
 
102
 
 
103
 
104
+ def build_prompt(task: str, language: str, content: str) -> str:
105
+ """
106
+ Basic prompt templates; you can expand/replace them later.
107
+ """
 
 
 
 
 
 
 
108
  if task == "Generate Code":
109
+ return f"Write a high-quality {language} program for the following requirement. Output only the code (no surrounding explanation):\n\n{content}\n"
110
  if task == "Explain Code":
111
+ return f"Explain the following {language} code step-by-step and mention edge cases:\n\n{content}\n"
112
  if task == "Fix Code":
113
+ return f"Fix the following {language} code and explain what you changed:\n\n{content}\n"
114
  if task == "Convert Code":
115
+ return f"Convert the following code to {language}. Keep behavior identical. Provide only the converted code:\n\n{content}\n"
 
116
  return content
117
 
118
 
119
+ def generate(task, language, content, model, hf_token, temperature, max_tokens):
120
+ # Security check
121
+ danger = detect_dangerous(content)
122
+ if danger:
123
+ return f"❌ Refused: content contains potentially dangerous keyword: '{danger}'"
124
 
125
+ prompt = build_prompt(task, language, content)
126
+ return hf_inference(model=model, prompt=prompt, hf_token=hf_token, max_new_tokens=max_tokens, temperature=temperature)
127
 
 
 
 
128
 
129
  def build_ui():
130
+ with gr.Blocks(title="Universal Code Generator (Hugging Face Inference)") as demo:
131
+ gr.Markdown("# πŸš€ Universal Code Generator\nUse a Hugging Face code-capable model to generate, explain, fix or convert code.")
132
+
133
+ gr.Markdown(
134
+ "### Quick setup\n"
135
+ "- For private use: paste your Hugging Face token in the field below.\n"
136
+ "- For Spaces public deployment (recommended): go to *Settings β†’ Secrets* in your Space and add a secret named **HF_TOKEN** "
137
+ "or **HUGGINGFACEHUB_API_TOKEN**. Then you can leave the token box empty and the app will use the secret."
 
 
 
138
  )
139
 
140
+ with gr.Row():
141
+ hf_token = gr.Textbox(label="Hugging Face Token (optional)", type="password", placeholder="Paste token or leave empty to use Space secret")
142
+ model = gr.Dropdown(list(RECOMMENDED_MODELS.keys()), value="Qwen/Qwen2.5-Coder-7B-Instruct", label="HF Model (recommended)")
 
 
 
143
 
144
+ with gr.Row():
145
+ task = gr.Dropdown(["Generate Code", "Explain Code", "Fix Code", "Convert Code"], value="Generate Code", label="Task")
146
+ language = gr.Dropdown(LANGUAGES, value="Python", label="Target Language")
147
 
148
+ content = gr.Textbox(placeholder="Describe what you want or paste code here...", label="Instruction / Code", lines=12)
149
+ temperature = gr.Slider(0.0, 1.5, value=0.7, label="Temperature")
150
+ max_tokens = gr.Slider(32, 2048, value=512, label="Max tokens (inference)")
 
 
 
 
 
151
 
152
  btn = gr.Button("Generate")
153
+ output = gr.Code(label="Model output")
154
 
155
+ btn.click(fn=generate, inputs=[task, language, content, model, hf_token, temperature, max_tokens], outputs=output)
156
+
157
+ # small model info panel
158
+ model_info = gr.Markdown(value="**Model info:** select a model to see a short note here.")
159
+ # update model info when selection changes
160
+ def model_info_text(model_name):
161
+ info = RECOMMENDED_MODELS.get(model_name)
162
+ if info:
163
+ return f"**Model info:** {info}"
164
+ return "**Model info:** Unknown. Compatibility depends on the model."
165
+ model.change(fn=model_info_text, inputs=model, outputs=model_info)
166
 
167
+ return demo
168
 
169
 
170
+ app = build_ui() # expose 'app' for Spaces (also works with 'demo' variable)
171
 
172
  if __name__ == "__main__":
173
+ # Do not pass server_name or port β€” Spaces handles serving.
174
+ app.launch()