Spaces:
Paused
Paused
| from flask import Flask, request, Response, jsonify | |
| from requests import Session | |
| from typing import List, Dict, Generator, Optional | |
| import json | |
| import re | |
class Deepinfra:
    """Chat-completion client for the apinow.in OpenAI-compatible backend.

    Translates public model names to the backend's internal model IDs and
    supports both buffered JSON responses and SSE streaming responses.
    """

    def __init__(
        self,
        api_key: str = "sk-apinow-tbfgenratedpro",
        base_url: str = "https://back.apinow.in"
    ):
        """Create a session pre-configured with auth and JSON headers.

        :param api_key: bearer token sent on every request
        :param base_url: backend root URL (any trailing slash is stripped)
        """
        self.api_key = api_key
        self.base_url = base_url.rstrip("/")
        self.session = Session()
        self.session.headers.update({
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        })
        # Public -> internal model IDs
        self.model_aliases = {
            "Kimi-K2-Instruct": "kimi-k2",
            "Kimi-K2-Instruct-0905": "kimi-k2-0905",
            "Qwen3-14B": "qwen14",
            "Qwen3-30B-A3B": "qwen30",
            "Qwen3-235B-A22B": "qwen235",
            "Qwen3-235B-A22B-Instruct-2507": "qwen235-inst",
            "Qwen3-Coder-30B-A3B-Instruct": "qwen-coder-30",
            "Qwen3-Coder-480B-A35B-Instruct": "qwen-coder-480",
            "Qwen3-Coder-480B-A35B-Instruct-Turbo": "qwen-coder-480-turbo",
            "DeepSeek-R1": "deepseek-r1",
            "DeepSeek-R1-Turbo": "deepseek-r1-turbo",
            "DeepSeek-R1-0528": "deepseek-r1-0528",
            "DeepSeek-R1-0528-Turbo": "deepseek-r1-0528-turbo",
            "DeepSeek-R1-Distill-Qwen-32B": "deepseek-r1-qwen",
            "DeepSeek-R1-Distill-Llama-70B": "deepseek-r1-llama",
            "DeepSeek-V3": "deepseek-v3",
            "DeepSeek-V3.1": "deepseek-v3.1",
            "DeepSeek-V3.2-Exp": "deepseek-v3.1-exp",
            "DeepSeek-V3-0324": "deepseek-v3-0324",
            "DeepSeek-V3-0324-Turbo": "deepseek-v3-0324-turbo",
            "DeepSeek-V3.1-Terminus": "deepseek-terminus",
            "DeepSeek-Prover-V2-671B": "deepseek-prover",
            "Llama-3.2-90B-Vision-Instruct": "llama90b-vis",
            "Llama-3.3-70B-Instruct": "llama3.3",
            "Llama-4-Scout-17B-16E-Instruct": "llama4-scout",
            "Llama-4-Maverick-17B-128E-Instruct-Turbo": "llama4-maverick",
            "Llama-4-Maverick-17B-128E-Instruct-FP8": "llama4-maverick-fp8",
            "Mistral-7B-Instruct-v0.3": "mistral7b",
            "Mistral-Small-3.1-24B-Instruct-2503": "mistral-small-3.1",
            "Mistral-Small-3.2-24B-Instruct-2506": "mistral-small-3.2",
            "Devstral-Small-2505": "devstral-2505",
            "Devstral-Small-2507": "devstral-2507",
            "phi-4": "phi4",
            "phi-4-reasoning-plus": "phi4-reason",
            "Phi-4-multimodal-instruct": "phi4-mm",
            "gemma-3-4b-it": "gemma4b",
            "gemma-3-12b-it": "gemma12b",
            "gemma-3-27b-it": "gemma27b",
            "Sky-T1-32B-Preview": "skyt1",
            "olmOCR-7B-0725-FP8": "olmocr",
        }

    # =======================
    # CHAT COMPLETION
    # =======================
    def create(
        self,
        model: str,
        messages: List[Dict[str, str]],
        stream: bool = False,
        max_tokens: int = 2048,
        timeout: Optional[int] = None,
    ):
        """Run a chat completion.

        :param model: public model name (a key of ``model_aliases``)
        :param messages: OpenAI-style message dicts (``role``/``content``)
        :param stream: when True, return a generator of content deltas;
            when False, return the parsed JSON response dict
        :param max_tokens: completion token cap forwarded to the backend
        :param timeout: per-request timeout in seconds (None = no timeout)
        :raises ValueError: if ``model`` is not a known public alias
        :raises RuntimeError: if the backend answers with a non-2xx status
        """
        if model not in self.model_aliases:
            raise ValueError(f"Unknown model: {model}")
        url = f"{self.base_url}/v1/chat/completions"
        payload = {
            "model": self.model_aliases[model],
            "messages": messages,
            "max_tokens": max_tokens,
            "stream": stream,
        }
        if stream:
            return self._stream_request(url, payload, timeout)
        return self._normal_request(url, payload, timeout)

    # =======================
    # NON-STREAM REQUEST
    # =======================
    def _normal_request(self, url: str, payload: dict, timeout: Optional[int]):
        """POST ``payload`` and return the decoded JSON response body."""
        response = self.session.post(
            url,
            json=payload,
            timeout=timeout,
            headers={"Accept": "application/json"},
        )
        if not response.ok:
            raise RuntimeError(
                f"API error {response.status_code}: {response.text}"
            )
        return response.json()

    # =======================
    # STREAM REQUEST (SSE)
    # =======================
    def _stream_request(
        self,
        url: str,
        payload: dict,
        timeout: Optional[int],
    ) -> Generator[str, None, None]:
        """POST ``payload`` and yield content deltas from the SSE stream.

        Lines look like ``data: {json}``; the stream ends at ``data: [DONE]``.
        The HTTP response is always closed, even if the consumer abandons
        the generator early.
        """
        response = self.session.post(
            url,
            json=payload,
            stream=True,
            timeout=timeout,
            headers={"Accept": "text/event-stream"},
        )
        if not response.ok:
            raise RuntimeError(
                f"API error {response.status_code}: {response.text}"
            )
        try:
            for line in response.iter_lines(decode_unicode=True):
                if not line or not line.startswith("data:"):
                    continue
                data = line[5:].strip()  # drop the "data:" prefix
                if data == "[DONE]":
                    break
                try:
                    chunk = json.loads(data)
                except json.JSONDecodeError:
                    # Skip malformed / partial lines instead of aborting.
                    continue
                # FIX: the original indexed chunk["choices"][0] unguarded.
                # Chunks with a missing or empty "choices" list (e.g. a
                # final usage-only chunk) raised KeyError/IndexError — which
                # the JSONDecodeError handler did NOT catch — and killed the
                # stream mid-response. Skip such chunks instead.
                choices = chunk.get("choices") or []
                if not choices:
                    continue
                content = choices[0].get("delta", {}).get("content")
                if content:
                    yield content
        finally:
            response.close()
# Backwards-compatible alias so callers can use the familiar OpenAI name.
OpenAI = Deepinfra
# Flask application serving the generation endpoints defined below.
app = Flask(__name__)
class system_prompts:
    """Container for the four LLM system prompts used by the pipeline.

    Attribute order matters: full_files_real_time indexes them as
    [name, structure, explain, file_genrater].
    NOTE(review): several prompt strings contain mojibake characters
    ("β" runs where box-drawing / arrow glyphs were) — the downstream
    parser matches these exact bytes, so they must not be "fixed"
    independently; confirm against the model's actual output.
    """

    def __init__(self):
        # Prompt 0: emit a single project name wrapped in the
        # PROJECT_NAME_START / PROJECT_NAME_END markers.
        self.name ="""
You are an AI specialized in naming projects.
Your task is to generate a clear, creative, and relevant project name based on the provided idea details.
The project name should be:
- Short and memorable
- Relevant to the core idea and purpose
- Professional and modern
- Easy to pronounce and spell
- Unique (avoid generic names)
IMPORTANT OUTPUT RULES:
- Output ONLY the project name
- Do NOT include explanations, emojis, quotes, or extra text
- The project name MUST be wrapped exactly between the following markers:
PROJECT_NAME_START = "<<<<<<< PROJECT_NAME_START"
PROJECT_NAME_END = ">>>>>>> PROJECT_NAME_END"
REQUIRED OUTPUT FORMAT:
<<<<<<< PROJECT_NAME_START
<Project_Name_Here>
>>>>>>> PROJECT_NAME_END
If multiple good names are possible, choose the single best option.
"""
        # Prompt 1: emit a ```bash``` fenced file/folder tree. The example
        # tree's "β" runs are mojibake of box-drawing characters; the
        # parser in full_files_real_time strips/counts these same bytes.
        self.structure = """
You are an expert frontend web developer specializing in vanilla HTML, CSS, and JavaScript.
When the user provides website requirements inside ```detail ******* ```, your task is to generate a clean, organized, and production-ready file & folder structure using only HTML, CSS, and JavaScript.
Rules & Guidelines:
Use pure HTML, CSS, and JavaScript only (no React, no frameworks, no bundlers).
Structure files for clarity, scalability, and maintainability.
Separate concerns clearly (HTML, CSS, JS, assets).
Use semantic HTML practices.
Organize JavaScript by functionality when needed.
Use descriptive and meaningful file names.
Assume a static website unless otherwise specified.
Add brief comments explaining the purpose of major folders/files.
Output format:
```bash
website/
βββ index.html # main landing page
βββ pages/ # additional HTML pages
β βββ about.html
βββ css/ # stylesheets
β βββ main.css
β βββ responsive.css
βββ js/ # JavaScript files
β βββ main.js
β βββ utils.js
βββ assets/ # static assets
β βββ images/
β βββ icons/
β βββ fonts/
βββ README.md # project overview
```
Important:
Only output the file & folder structure with short comments.
Do not include explanations, code samples, or extra text outside the structure.
"""
        # Prompt 2: produce a short (10-50 word) explanation of one file,
        # formatted inside a ```bash``` block.
        self.explain = """
"You are an expert web developer and teacher who explains HTML, CSS, and JavaScript clearly and simply.
When the user asks about a webpage, code, or concept, you must:
Explain how HTML structures the page
Explain how CSS styles the page
Explain how JavaScript adds interactivity
Use simple language and step-by-step explanations
Provide examples when needed
Use analogies to help beginners understand
Avoid complex jargon unless the user asks for advanced explanation
Keep explanations short and clear
If the user provides code, explain what each part does, line by line.
If the user asks for how things work together, explain the flow (HTML β CSS β JS).
You are an expert at explaining HTML, CSS, and JavaScript.
Explain how the given code works clearly and simply.
Your explanation must be between 10 and 50 words.
Always format the explanation inside a ```bash``` code block.
Be concise, accurate, and beginner-friendly.
"""
        # Prompt 3: generate one complete file wrapped in the
        # NEW_FILE_START / NEW_FILE_END markers.
        # NOTE(review): the literal "{{(unknown)}}" placeholders below look
        # like a mangled "{{filename}}" template (the caller sends
        # "filename: {path}") — confirm against the original prompt source.
        self.file_genrater = '''
You are a deterministic, high-capacity autonomous code generation engine.
You NEVER explain, summarize, apologize, or add commentary.
You ONLY output final, complete, executable code.
The user will provide input strictly in this format:
- filename: {{(unknown)}}
- details: {{{details}}}
Global constants (must be used verbatim):
- NEW_FILE_START = "<<<<<<< NEW_FILE_START "
- NEW_FILE_END = " >>>>>>> NEW_FILE_END"
MANDATORY RULES (NO EXCEPTIONS):
1. Create exactly ONE file named EXACTLY as `{{(unknown)}}`.
2. Generate code that FULLY and PRECISELY satisfies `{{{details}}}`.
3. Infer the programming language strictly from the file extension:
- .html β HTML
- .css β CSS
- .json β JSON
- and not other file format like .py and .rs
4. Output MUST be a SINGLE fenced code block using the correct language.
5. The ENTIRE file content MUST be wrapped EXACTLY like this:
<<<<<<< NEW_FILE_START {{(unknown)}}
[FULL FILE CONTENT β NO PLACEHOLDERS, NO OMITTED SECTIONS]
>>>>>>> NEW_FILE_END
6. The generated file MUST be:
- COMPLETE (no TODOs, no stubs, no pseudo-code)
- SELF-CONTAINED (no missing imports, configs, or dependencies unless explicitly stated)
- PRODUCTION-READY
- LOGICALLY CONSISTENT
- PROPERLY FORMATTED
7. If the task requires LARGE output:
- You MUST generate the FULL file in ONE response
- NEVER say "truncated", "continued", or similar
- NEVER shorten or summarize code
8. If assumptions are required:
- Use SAFE, SENSIBLE DEFAULTS
- DO NOT change the intent of `{{{details}}}`
9. NEVER:
- Add explanations
- Add markdown outside the code block
- Add comments outside the file
- Ask questions
- Output multiple files
- Output partial code
10. OUTPUT = CODE ONLY. NOTHING ELSE.
FAILURE TO FOLLOW ANY RULE IS A CRITICAL ERROR.
'''
| # ---------------- CORE GENERATOR ---------------- # | |
class full_files_real_time:
    """Streaming pipeline: name -> file tree -> per-file explanation + code.

    Each stage's raw LLM tokens are yielded to the caller as they arrive,
    so the whole pipeline can back a streaming HTTP response.
    """

    def __init__(self):
        # Prompt order is significant: 0=name, 1=structure, 2=explain,
        # 3=file generator (see system_prompts).
        prompts = system_prompts()
        self.system_prompts_list = [
            prompts.name,
            prompts.structure,
            prompts.explain,
            prompts.file_genrater,
        ]
        self.client = OpenAI()
        # Default model alias used when the caller does not pass one.
        self.model = "DeepSeek-V3"

    def genrate(self, prompt: str, model: Optional[str] = None) -> Generator[str, None, None]:
        """Yield the full generation stream for ``prompt``.

        Stages: (1) project name, (2) file-tree structure, (3) for every
        parsed file path, an explanation followed by the generated code.
        Any exception is reported as a final yielded error line rather
        than raised, so the HTTP stream terminates cleanly.
        """
        try:
            active_model = model or self.model
            # -------- PROJECT NAME --------
            name_stream = self.client.create(
                model=active_model,
                messages=[
                    {"role": "system", "content": self.system_prompts_list[0]},
                    {"role": "user", "content": prompt},
                ],
                stream=True,
            )
            # Accumulate while forwarding each delta to the caller.
            project_name = ""
            for chunk in name_stream:
                project_name += chunk
                yield chunk
            # -------- FILE STRUCTURE --------
            structure_stream = self.client.create(
                model=active_model,
                messages=[
                    {"role": "system", "content": self.system_prompts_list[1]},
                    {"role": "user", "content": f"make file structure ```detail {prompt}```"},
                ],
                stream=True,
            )
            structure_text = ""
            for chunk in structure_stream:
                structure_text += chunk
                yield chunk
            # -------- EXTRACT STRUCTURE --------
            # The structure prompt requires a ```bash ...``` fenced tree.
            match = re.search(r"```bash\s*(.*?)\s*```", structure_text, re.DOTALL)
            if not match:
                yield "\nβ No bash structure found\n"
                return
            tree = match.group(1)
            # -------- PARSE FILE PATHS --------
            # Walk the tree line by line, tracking the folder stack by
            # drawing-character depth.
            # NOTE(review): the "β" characters here are mojibake of the
            # box-drawing glyphs in the structure prompt's example
            # (branches and the vertical bar). The strip-regex and the
            # depth count below must match whatever bytes the model
            # actually emits — verify at runtime before changing either.
            file_paths = []
            folder_stack = []
            for line in tree.splitlines():
                # Drop the trailing "# comment" portion of each tree line.
                raw = line.split("#")[0].rstrip()
                clean = re.sub(r"[ββββ]+", "", raw).strip()
                if not clean or clean == "website/":
                    continue
                # Nesting depth inferred from drawing-character count.
                depth = raw.count("β")
                if clean.endswith("/"):
                    # Directory entry: truncate the stack to this depth
                    # and descend into the new folder.
                    folder_stack = folder_stack[:depth]
                    folder_stack.append(clean.rstrip("/"))
                    continue
                # File entry ("." heuristic); README.md is deliberately
                # excluded from code generation.
                if "." in clean and clean.lower() != "readme.md":
                    folder_stack = folder_stack[:depth]
                    file_paths.append("/".join(folder_stack + [clean]))
            # -------- GENERATE FILES --------
            for path in file_paths:
                # ---- EXPLAIN FILE ----
                # The prior structure output is replayed as assistant
                # context so the model explains the right file.
                explain_stream = self.client.create(
                    model=active_model,
                    messages=[
                        {"role": "system", "content": self.system_prompts_list[2]},
                        {"role":"user",'content': prompt},{"role":"assistant",'content':structure_text},
                        {"role": "user", "content": f"Explain file {path}"},
                    ],
                    stream=True,
                )
                explanation = ""
                for chunk in explain_stream:
                    explanation += chunk
                    yield chunk
                # Strip the ```bash fence so the bare explanation text can
                # be embedded in the next prompt.
                explanation = explanation.replace("```bash", "").replace("```", "").strip()
                # ---- GENERATE CODE ----
                code_stream = self.client.create(
                    model=active_model,
                    messages=[
                        {"role": "system", "content": self.system_prompts_list[3]},
                        {"role":"user",'content': prompt},{"role":"assistant",'content':structure_text},
                        {
                            "role": "user",
                            # Triple braces render as "filename: {<path>}",
                            # matching the file_genrater prompt's format.
                            "content": f"filename: {{{path}}}\ndetails: {{{explanation}}}",
                        },
                    ],
                    stream=True,
                )
                for chunk in code_stream:
                    yield chunk
        except Exception as e:
            # Surface failures in-band on the stream instead of raising.
            yield f"\nβ ERROR: {str(e)}\n"
| # ---------------- FLASK ROUTES ---------------- # | |
# Single shared pipeline instance used by all incoming requests.
generator = full_files_real_time()
@app.route("/generate", methods=["POST"])  # FIX: view was never registered with Flask
def generate():
    """Stream the AI generation pipeline output as plain text.

    Expects a JSON body: {"prompt": str, "model": optional str}.
    Returns 400 when "prompt" is missing; otherwise a chunked text/plain
    response fed by generator.genrate().
    """
    data = request.get_json(force=True)
    prompt = data.get("prompt")
    model = data.get("model")
    if not prompt:
        return jsonify({"error": "prompt is required"}), 400
    return Response(
        generator.genrate(prompt, model),
        mimetype="text/plain",
        headers={
            "Cache-Control": "no-cache",
            "X-Accel-Buffering": "no",  # important for streaming
        },
    )
@app.route("/health")  # FIX: view was never registered with Flask
def health():
    """Liveness probe: report that the service is up."""
    return jsonify({"status": "ok", "service": "AI File Generator"})
# ---------------- RUN SERVER ---------------- #
if __name__ == "__main__":
    # Bind all interfaces; port 7860 is the Hugging Face Spaces default
    # — confirm against the actual deployment target.
    app.run(host="0.0.0.0", port=7860, debug=False)