import os
import json
import requests
import gradio as gr
from typing import Optional
# ---------------------------------------
# Universal Code Generator (HF Inference API)
# Hugging Face Spaces friendly: reads HF token from env if not pasted in UI.
# ---------------------------------------
# Recommended HF code models (suggested defaults)
RECOMMENDED_MODELS = {
    "bigcode/starcoder2-3b": "StarCoder2-3B (Good general code generator)",
    "codellama/CodeLlama-7b-hf": "CodeLlama 7B (Strong coding model)",
    "microsoft/Phi-3-mini-4k-instruct": "Phi-3 Mini (Small + strong)",
    "Qwen/Qwen2.5-Coder-7B-Instruct": "Qwen2.5 Coder 7B (Excellent accuracy)",
    "deepseek-ai/deepseek-coder-6.7b-instruct": "DeepSeek Coder 6.7B (Great for algorithmic code)"
}
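# Note: availability of specific models on the serverless Inference API can change over time;
# if a model returns an error, try another entry from this list.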
# Many supported languages
LANGUAGES = [
    "Python", "JavaScript", "TypeScript", "C", "C++", "C#", "Java", "Go",
    "Rust", "Ruby", "PHP", "Swift", "Kotlin", "Scala", "Haskell", "Lua",
    "R", "Julia", "MATLAB", "Dart", "Solidity", "Vyper", "Move", "Wasm",
    "Bash", "PowerShell", "SQL", "GraphQL", "NoSQL", "HTML", "CSS",
    "Perl", "OCaml", "F#", "Lisp", "Scheme", "Prolog", "Fortran", "COBOL",
    "Assembly", "Verilog", "VHDL", "LaTeX", "Markdown", "JSON", "YAML",
    "XML", "Dockerfile", "Makefile", "Nim", "Crystal", "Zig", "Q#", "Chapel"
]
# Simple dangerous keywords blacklist
DANGEROUS_KEYWORDS = [
    "rm -rf", "mkfs", "dd if=", "fork bomb", "shutdown", "reboot", "poweroff",
    "create user", "adduser", "useradd", "passwd", "ssh -i", "cryptominer", "virus",
    "malware", "ransomware", "keylogger", "inject", "exploit", "reverse shell",
    "nc -e", "wget http", "curl http", "chmod 777 /", "sudo rm -rf /", "> /dev/sda"
]
def get_env_token() -> Optional[str]:
    """
    Check environment/secrets for a HF token. Hugging Face Spaces secrets are exposed as env vars.
    Check common names in order.
    """
    for name in ("HF_TOKEN", "HUGGINGFACEHUB_API_TOKEN", "HUGGINGFACE_TOKEN", "HUGGINGFACEHUB_TOKEN"):
        val = os.getenv(name)
        if val and val.strip():
            return val.strip()
    return None
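# A token pasted in the UI takes precedence over any environment secret (see hf_inference below).
# For text-generation models the Inference API typically returns a list like
# [{"generated_text": "..."}]; hf_inference handles that shape plus a dict variant and
# falls back to dumping the raw JSON for anything unexpected.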
def hf_inference(model: str, prompt: str, hf_token: Optional[str], max_new_tokens: int = 512, temperature: float = 0.7):
    """
    Call Hugging Face Inference API. hf_token may be None -> fallback to env token.
    Returns output string (or error message).
    """
    token = hf_token if hf_token and hf_token.strip() else get_env_token()
    if not token:
        return "❌ ERROR: Missing Hugging Face token. Paste a token in the box or set a Spaces secret named HF_TOKEN."
    url = f"https://api-inference.huggingface.co/models/{model}"
    headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "temperature": float(temperature),
            "max_new_tokens": int(max_new_tokens),
            "return_full_text": False
        }
    }
    try:
        resp = requests.post(url, headers=headers, json=payload, timeout=120)
    except Exception as e:
        return f"❌ HTTP request failed: {e}"
    try:
        data = resp.json()
    except Exception:
        return f"❌ API returned non-JSON response (status {resp.status_code}). Response text:\n{resp.text}"
    # Common HF inference formats
    if isinstance(data, list) and len(data) > 0 and isinstance(data[0], dict) and "generated_text" in data[0]:
        return data[0]["generated_text"]
    if isinstance(data, dict) and "generated_text" in data:
        return data["generated_text"]
    # Return pretty JSON for unexpected shapes
    return json.dumps(data, indent=2)
def detect_dangerous(text: str) -> Optional[str]:
    if not text:
        return None
    lower = text.lower()
    for k in DANGEROUS_KEYWORDS:
        if k in lower:
            return k
    return None
def build_prompt(task: str, language: str, content: str) -> str:
    """
    Basic prompt templates; you can expand/replace them later.
    """
    if task == "Generate Code":
        return f"Write a high-quality {language} program for the following requirement. Output only the code (no surrounding explanation):\n\n{content}\n"
    if task == "Explain Code":
        return f"Explain the following {language} code step-by-step and mention edge cases:\n\n{content}\n"
    if task == "Fix Code":
        return f"Fix the following {language} code and explain what you changed:\n\n{content}\n"
    if task == "Convert Code":
        return f"Convert the following code to {language}. Keep behavior identical. Provide only the converted code:\n\n{content}\n"
    return content
def generate(task, language, content, model, hf_token, temperature, max_tokens):
    # Security check
    danger = detect_dangerous(content)
    if danger:
        return f"❌ Refused: content contains potentially dangerous keyword: '{danger}'"
    prompt = build_prompt(task, language, content)
    return hf_inference(model=model, prompt=prompt, hf_token=hf_token, max_new_tokens=max_tokens, temperature=temperature)
def build_ui():
    with gr.Blocks(title="Universal Code Generator (Hugging Face Inference)") as demo:
        gr.Markdown("# 🚀 Universal Code Generator\nUse a Hugging Face code-capable model to generate, explain, fix or convert code.")
        gr.Markdown(
            "### Quick setup\n"
            "- For private use: paste your Hugging Face token in the field below.\n"
            "- For Spaces public deployment (recommended): go to *Settings → Secrets* in your Space and add a secret named **HF_TOKEN** "
            "or **HUGGINGFACEHUB_API_TOKEN**. Then you can leave the token box empty and the app will use the secret."
        )
        with gr.Row():
            hf_token = gr.Textbox(label="Hugging Face Token (optional)", type="password", placeholder="Paste token or leave empty to use Space secret")
            model = gr.Dropdown(list(RECOMMENDED_MODELS.keys()), value="Qwen/Qwen2.5-Coder-7B-Instruct", label="HF Model (recommended)")
        with gr.Row():
            task = gr.Dropdown(["Generate Code", "Explain Code", "Fix Code", "Convert Code"], value="Generate Code", label="Task")
            language = gr.Dropdown(LANGUAGES, value="Python", label="Target Language")
        content = gr.Textbox(placeholder="Describe what you want or paste code here...", label="Instruction / Code", lines=12)
        temperature = gr.Slider(0.0, 1.5, value=0.7, label="Temperature")
        max_tokens = gr.Slider(32, 2048, value=512, label="Max tokens (inference)")
        btn = gr.Button("Generate")
        output = gr.Code(label="Model output")
        btn.click(fn=generate, inputs=[task, language, content, model, hf_token, temperature, max_tokens], outputs=output)
        # small model info panel
        model_info = gr.Markdown(value="**Model info:** select a model to see a short note here.")

        # update model info when selection changes
        def model_info_text(model_name):
            info = RECOMMENDED_MODELS.get(model_name)
            if info:
                return f"**Model info:** {info}"
            return "**Model info:** Unknown. Compatibility depends on the model."

        model.change(fn=model_info_text, inputs=model, outputs=model_info)
    return demo
app = build_ui()  # expose 'app' for Spaces (also works with a 'demo' variable)

if __name__ == "__main__":
    # Do not pass server_name or port; Spaces handles serving.
    app.launch()
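# Local usage (sketch): set an HF_TOKEN environment variable (or paste a token in the UI),
# then run `python app.py` and open the local URL that Gradio prints.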