# Universal Code Generator — Hugging Face Space app
# (removed HF Spaces page-status artifacts that were captured with the source)
import json
import os
from typing import Optional

import gradio as gr
import requests
# ---------------------------------------
# Universal Code Generator (HF Inference API)
# Hugging Face Spaces friendly: reads HF token from env if not pasted in UI.
# ---------------------------------------

# Recommended HF code models (suggested defaults).
# Keys are Hugging Face model repo ids (passed to the Inference API URL);
# values are short human-readable notes shown in the UI's model-info panel.
RECOMMENDED_MODELS = {
    "bigcode/starcoder2-3b": "StarCoder2-3B (Good general code generator)",
    "codellama/CodeLlama-7b-hf": "CodeLlama 7B (Strong coding model)",
    "microsoft/Phi-3-mini-4k-instruct": "Phi-3 Mini (Small + strong)",
    "Qwen/Qwen2.5-Coder-7B-Instruct": "Qwen2.5 Coder 7B (Excellent accuracy)",
    "deepseek-ai/deepseek-coder-6.7b-instruct": "DeepSeek Coder 6.7B (Great for algorithmic code)"
}
# Many supported languages.
# Populates the "Target Language" dropdown; the selected value is only
# interpolated into the prompt text, so new entries can be added freely.
LANGUAGES = [
    "Python", "JavaScript", "TypeScript", "C", "C++", "C#", "Java", "Go",
    "Rust", "Ruby", "PHP", "Swift", "Kotlin", "Scala", "Haskell", "Lua",
    "R", "Julia", "MATLAB", "Dart", "Solidity", "Vyper", "Move", "Wasm",
    "Bash", "PowerShell", "SQL", "GraphQL", "NoSQL", "HTML", "CSS",
    "Perl", "OCaml", "F#", "Lisp", "Scheme", "Prolog", "Fortran", "COBOL",
    "Assembly", "Verilog", "VHDL", "LaTeX", "Markdown", "JSON", "YAML",
    "XML", "Dockerfile", "Makefile", "Nim", "Crystal", "Zig", "Q#", "Chapel"
]
# Simple dangerous keywords blacklist.
# Matched case-insensitively as substrings by detect_dangerous(); a hit makes
# generate() refuse the request. NOTE(review): substring matching is a
# best-effort filter only — it can false-positive (e.g. "curl http" in prose)
# and is trivially bypassed; not a real security boundary.
DANGEROUS_KEYWORDS = [
    "rm -rf", "mkfs", "dd if=", "fork bomb", "shutdown", "reboot", "poweroff",
    "create user", "adduser", "useradd", "passwd", "ssh -i", "cryptominer", "virus",
    "malware", "ransomware", "keylogger", "inject", "exploit", "reverse shell",
    "nc -e", "wget http", "curl http", "chmod 777 /", "sudo rm -rf /", ">: /dev/sda"
]
def get_env_token() -> Optional[str]:
    """
    Look up a Hugging Face API token in the environment.

    Hugging Face Spaces exposes configured secrets as environment variables,
    so the common variable names are probed in priority order.

    Returns:
        The stripped token string, or None when no variable holds a
        non-whitespace value.
    """
    candidate_names = (
        "HF_TOKEN",
        "HUGGINGFACEHUB_API_TOKEN",
        "HUGGINGFACE_TOKEN",
        "HUGGINGFACEHUB_TOKEN",
    )
    for candidate in candidate_names:
        raw = os.getenv(candidate)
        if raw is not None and raw.strip():
            return raw.strip()
    return None
def hf_inference(model: str, prompt: str, hf_token: Optional[str], max_new_tokens: int = 512, temperature: float = 0.7):
    """
    Call the Hugging Face Inference API for text generation.

    Args:
        model: HF model repo id, e.g. "bigcode/starcoder2-3b".
        prompt: Full prompt to send as the "inputs" field.
        hf_token: Token pasted in the UI; falls back to the env token
            (see get_env_token) when empty/whitespace.
        max_new_tokens: Generation length cap forwarded to the API.
        temperature: Sampling temperature forwarded to the API.

    Returns:
        The generated text on success, otherwise a human-readable error string
        (this function never raises for network/API failures).
    """
    # Strip a pasted token so trailing whitespace from copy/paste can't break auth.
    token = hf_token.strip() if hf_token and hf_token.strip() else get_env_token()
    if not token:
        return "β ERROR: Missing Hugging Face token. Paste a token in the box or set a Spaces secret named HF_TOKEN."

    url = f"https://api-inference.huggingface.co/models/{model}"
    headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "temperature": float(temperature),
            "max_new_tokens": int(max_new_tokens),
            "return_full_text": False
        }
    }

    try:
        resp = requests.post(url, headers=headers, json=payload, timeout=120)
    except requests.RequestException as e:
        # Connection/timeout/etc. — report instead of raising into Gradio.
        return f"β HTTP request failed: {e}"

    try:
        data = resp.json()
    except ValueError:
        return f"β API returned non-JSON response (status {resp.status_code}). Response text:\n{resp.text}"

    # Surface explicit API errors (bad token, rate limit, model still loading)
    # as a readable message instead of dumping raw JSON on the user.
    if isinstance(data, dict) and "error" in data:
        eta = data.get("estimated_time")
        hint = f" (model loading, ~{eta:.0f}s — retry shortly)" if isinstance(eta, (int, float)) else ""
        return f"β API error (status {resp.status_code}): {data['error']}{hint}"

    # Common HF inference response shapes.
    if isinstance(data, list) and data and isinstance(data[0], dict) and "generated_text" in data[0]:
        return data[0]["generated_text"]
    if isinstance(data, dict) and "generated_text" in data:
        return data["generated_text"]

    # Fall back to pretty-printed JSON for unexpected shapes.
    return json.dumps(data, indent=2)
def detect_dangerous(text: str) -> Optional[str]:
    """
    Scan *text* for blacklisted substrings, case-insensitively.

    Returns:
        The first entry of DANGEROUS_KEYWORDS found in the lowercased text,
        or None when the text is empty or contains no blacklisted keyword.
    """
    if not text:
        return None
    haystack = text.lower()
    return next((keyword for keyword in DANGEROUS_KEYWORDS if keyword in haystack), None)
def build_prompt(task: str, language: str, content: str) -> str:
    """
    Render the prompt text for the selected task.

    Args:
        task: One of the UI task names ("Generate Code", "Explain Code",
            "Fix Code", "Convert Code").
        language: Target language name interpolated into the template.
        content: User-supplied instruction or source code.

    Returns:
        The filled-in template; unknown tasks pass *content* through unchanged.
    """
    templates = {
        "Generate Code": (
            "Write a high-quality {lang} program for the following requirement. "
            "Output only the code (no surrounding explanation):\n\n{body}\n"
        ),
        "Explain Code": (
            "Explain the following {lang} code step-by-step and mention edge cases:\n\n{body}\n"
        ),
        "Fix Code": (
            "Fix the following {lang} code and explain what you changed:\n\n{body}\n"
        ),
        "Convert Code": (
            "Convert the following code to {lang}. Keep behavior identical. "
            "Provide only the converted code:\n\n{body}\n"
        ),
    }
    template = templates.get(task)
    if template is None:
        return content
    return template.format(lang=language, body=content)
def generate(task, language, content, model, hf_token, temperature, max_tokens):
    """
    Gradio click handler: safety-check the input, build the prompt, run inference.

    Returns either a refusal message (blacklisted keyword found), an error
    string from hf_inference, or the model's generated text.
    """
    flagged = detect_dangerous(content)
    if flagged is not None:
        return f"β Refused: content contains potentially dangerous keyword: '{flagged}'"
    return hf_inference(
        model=model,
        prompt=build_prompt(task, language, content),
        hf_token=hf_token,
        max_new_tokens=max_tokens,
        temperature=temperature,
    )
def build_ui():
    """
    Assemble the Gradio Blocks interface.

    Layout (top to bottom): intro markdown, a token/model row, a task/language
    row, the instruction textbox, two sliders, a generate button, the output
    code panel, and a model-info panel that refreshes whenever the model
    dropdown changes.

    Returns:
        The constructed gr.Blocks app (unlaunched).
    """
    with gr.Blocks(title="Universal Code Generator (Hugging Face Inference)") as ui:
        gr.Markdown("# π Universal Code Generator\nUse a Hugging Face code-capable model to generate, explain, fix or convert code.")
        gr.Markdown(
            "### Quick setup\n"
            "- For private use: paste your Hugging Face token in the field below.\n"
            "- For Spaces public deployment (recommended): go to *Settings β Secrets* in your Space and add a secret named **HF_TOKEN** "
            "or **HUGGINGFACEHUB_API_TOKEN**. Then you can leave the token box empty and the app will use the secret."
        )
        with gr.Row():
            token_box = gr.Textbox(label="Hugging Face Token (optional)", type="password", placeholder="Paste token or leave empty to use Space secret")
            model_dd = gr.Dropdown(list(RECOMMENDED_MODELS.keys()), value="Qwen/Qwen2.5-Coder-7B-Instruct", label="HF Model (recommended)")
        with gr.Row():
            task_dd = gr.Dropdown(["Generate Code", "Explain Code", "Fix Code", "Convert Code"], value="Generate Code", label="Task")
            language_dd = gr.Dropdown(LANGUAGES, value="Python", label="Target Language")
        instruction_box = gr.Textbox(placeholder="Describe what you want or paste code here...", label="Instruction / Code", lines=12)
        temperature_slider = gr.Slider(0.0, 1.5, value=0.7, label="Temperature")
        max_tokens_slider = gr.Slider(32, 2048, value=512, label="Max tokens (inference)")
        run_btn = gr.Button("Generate")
        result_panel = gr.Code(label="Model output")

        run_btn.click(
            fn=generate,
            inputs=[task_dd, language_dd, instruction_box, model_dd, token_box, temperature_slider, max_tokens_slider],
            outputs=result_panel,
        )

        # Small panel showing a short note about the selected model.
        model_info_panel = gr.Markdown(value="**Model info:** select a model to see a short note here.")

        def describe_model(model_name):
            # Look up the curated note; fall back to a generic message.
            note = RECOMMENDED_MODELS.get(model_name)
            return f"**Model info:** {note}" if note else "**Model info:** Unknown. Compatibility depends on the model."

        model_dd.change(fn=describe_model, inputs=model_dd, outputs=model_info_panel)
    return ui
# Build the interface at import time so Hugging Face Spaces can discover it.
app = build_ui()  # expose 'app' for Spaces (also works with 'demo' variable)

if __name__ == "__main__":
    # Do not pass server_name or port β Spaces handles serving.
    app.launch()