MegaTronX committed · verified
Commit 8361a08 (parent: 75ce9b5)

Update app.py

Files changed (1): app.py (+92, -18)
app.py CHANGED
@@ -1,28 +1,102 @@
-import spaces
-from model_runner import ModelRunner
-from parser import parse_to_files
-from zip_util import make_zip_from_files
 import gradio as gr
-
-# Same model you referenced
-MODEL_REPO = "tHottie/NeuralDaredevil-8B-abliterated-Q4_K_M-GGUF"
-MODEL_FILE = "neuraldaredevil-8b-abliterated-q4_k_m-imat.gguf"
-
-runner = ModelRunner(repo_id=MODEL_REPO, filename=MODEL_FILE)
-
-@spaces.GPU(duration=120)
-def process_and_zip(ai_response: str):
-    model_output = runner.interpret_code_description(ai_response)
-    file_list = parse_to_files(model_output)
-    zip_path = make_zip_from_files(file_list)
+import spaces
+
+# We delay all CUDA-related imports to inside the GPU-decorated function
+
+@spaces.GPU(duration=120)
+def process_code_generation(ai_response: str):
+    import json
+    import re
+    from huggingface_hub import hf_hub_download
+    from llama_cpp import Llama
+    from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
+    from llama_cpp_agent.providers import LlamaCppPythonProvider
+
+    # Download the model (cached if already exists)
+    model_path = hf_hub_download(
+        repo_id="tHottie/NeuralDaredevil-8B-abliterated-Q4_K_M-GGUF",
+        filename="neuraldaredevil-8b-abliterated-q4_k_m-imat.gguf",
+        local_dir="./models"
+    )
+
+    # Initialize LLaMA model (CPU-safe)
+    llm = Llama(
+        model_path=model_path,
+        n_gpu_layers=0,  # IMPORTANT: ZeroGPU uses CPU under the hood
+        n_batch=512,
+        n_ctx=4096,
+        verbose=True
+    )
+    provider = LlamaCppPythonProvider(llm)
+    agent = LlamaCppAgent(
+        provider,
+        system_prompt="You are an AI code parser. Given a pasted AI response with multiple code files, extract the individual files and output a JSON object like {\"file.py\": \"...\", \"utils.py\": \"...\"}.",
+        predefined_messages_formatter_type=MessagesFormatterType.GEMMA_2,
+        debug_output=False
+    )
+
+    # Prompt to interpret the response
+    user_prompt = f"""
+Given the following AI output that includes multiple code files, extract and convert it into a JSON object that maps filenames to code contents.
+
+Example output:
+{{
+  "app.py": "print('Hello World')",
+  "utils/helper.py": "def greet(): return 'hi'"
+}}
+
+AI Output:
+{ai_response}
+"""
+
+    full_response = ""
+    for chunk in agent.get_chat_response(user_prompt, returns_streaming_generator=True):
+        full_response += chunk
+
+    # Try to parse as JSON
+    try:
+        parsed = json.loads(full_response)
+    except Exception:
+        # Fallback regex parsing if model output wasn't pure JSON
+        pattern = r"Filename:\s*(?P<fname>[\w\.\-/]+)\s*```(?:[a-zA-Z0-9]*)\n(?P<code>[\s\S]*?)```"
+        parsed = {}
+        for m in re.finditer(pattern, full_response):
+            parsed[m.group("fname").strip()] = m.group("code")
+
+    if not parsed:
+        parsed = {"output.txt": full_response}
+
+    # Write files and zip them
+    import os
+    import zipfile
+    import uuid
+
+    temp_dir = f"/tmp/code_files_{uuid.uuid4().hex}"
+    os.makedirs(temp_dir, exist_ok=True)
+
+    for filename, content in parsed.items():
+        file_path = os.path.join(temp_dir, filename)
+        os.makedirs(os.path.dirname(file_path), exist_ok=True)
+        with open(file_path, "w", encoding="utf-8") as f:
+            f.write(content)
+
+    zip_path = f"/tmp/code_bundle_{uuid.uuid4().hex}.zip"
+    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
+        for root, _, files in os.walk(temp_dir):
+            for file in files:
+                full_path = os.path.join(root, file)
+                arcname = os.path.relpath(full_path, start=temp_dir)
+                zipf.write(full_path, arcname=arcname)
+
     return zip_path
 
+
 with gr.Blocks() as demo:
-    gr.Markdown("### Paste your AI output with multiple files below:")
-    input_box = gr.Textbox(lines=15, placeholder="Paste codegen or AI output here...")
-    submit = gr.Button("Generate Zip")
-    download = gr.File(label="Download zip")
+    gr.Markdown("### Paste your AI response below (with multiple files), and get a downloadable zip.")
+    input_box = gr.Textbox(lines=15, label="AI Response Input")
+    output_file = gr.File(label="Download Zip")
+    run_btn = gr.Button("Generate Zip")
 
-    submit.click(fn=process_and_zip, inputs=input_box, outputs=output_file)
+    run_btn.click(fn=process_code_generation, inputs=input_box, outputs=output_file)
 
 demo.launch()
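
A few notes on the new implementation. First, `json.loads(full_response)` will raise whenever the model wraps its JSON in a markdown fence, which chat models frequently do, so such responses fall through to the regex fallback even though valid JSON was present. A minimal sketch of a pre-parse fence strip that would let the JSON path succeed more often; the helper name and the sample reply are hypothetical:

```python
import json
import re

def extract_json(text: str):
    # Prefer the body of a fenced ```json ... ``` block if one is present;
    # otherwise try the raw text. Returns None if neither parses.
    m = re.search(r"```(?:json)?\s*\n([\s\S]*?)```", text)
    candidate = m.group(1) if m else text
    try:
        return json.loads(candidate)
    except json.JSONDecodeError:
        return None

print(extract_json('```json\n{"app.py": "print(1)"}\n```'))
# {'app.py': 'print(1)'}
```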
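Second, the regex fallback only matches output shaped as a `Filename: <name>` line followed by a fenced code block. A self-contained check of the exact pattern from the commit, run against a hypothetical sample in that shape:

```python
import re

# Same pattern as the fallback branch in process_code_generation
pattern = r"Filename:\s*(?P<fname>[\w\.\-/]+)\s*```(?:[a-zA-Z0-9]*)\n(?P<code>[\s\S]*?)```"

sample = (
    "Filename: app.py\n"
    "```python\n"
    "print('Hello World')\n"
    "```\n"
    "Filename: utils/helper.py\n"
    "```python\n"
    "def greet():\n"
    "    return 'hi'\n"
    "```\n"
)

parsed = {m.group("fname").strip(): m.group("code")
          for m in re.finditer(pattern, sample)}
print(parsed)
# {'app.py': "print('Hello World')\n",
#  'utils/helper.py': "def greet():\n    return 'hi'\n"}
```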
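Finally, the manual `os.walk` + `ZipFile` loop is correct; for reference, the stdlib can produce the same archive in one call. A sketch assuming the temp directory already holds the written files (the directory name here is a stand-in):

```python
import os
import shutil
import uuid

temp_dir = "/tmp/code_files_example"           # hypothetical stand-in for temp_dir above
os.makedirs(temp_dir, exist_ok=True)           # ensure it exists so the sketch runs
base = f"/tmp/code_bundle_{uuid.uuid4().hex}"  # make_archive appends ".zip" itself
zip_path = shutil.make_archive(base, "zip", root_dir=temp_dir)
print(zip_path)  # /tmp/code_bundle_<hex>.zip
```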