# AI Website Builder — instant HTML generation with CodeLlama, served as a Gradio app.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import warnings
# Suppress warnings (transformers/torch emit noisy deprecation warnings at load time)
warnings.filterwarnings("ignore")
# Global variables for model — populated lazily by load_model() on first request
tokenizer = None  # transformers tokenizer, or None until load_model() succeeds
model = None  # causal LM instance, or None until load_model() succeeds
model_loaded = False  # guard so the heavy model load happens at most once
def load_model():
    """Lazily load the CodeLlama tokenizer/model pair into the module globals.

    Safe to call repeatedly: after the first successful load this is a
    cheap boolean check. Returns True when the model is ready, False when
    loading failed (the failure is printed, not raised).
    """
    global tokenizer, model, model_loaded

    # Fast path: already loaded on a previous call.
    if model_loaded:
        return True

    try:
        # Try CodeLlama 7B
        print("Loading CodeLlama 7B model...")
        model_name = "codellama/CodeLlama-7b-hf"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        # fp16 + device_map="auto" lets accelerate place weights on GPU when
        # available — presumably this Space has a GPU; on CPU fp16 may be slow.
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
        )
        model_loaded = True
        print("Model loaded successfully")
        return True
    except Exception as e:
        # Best-effort: report and let callers fall back to static HTML.
        print(f"Model loading failed: {e}")
        return False
def generate_code_fast(prompt):
    """Generate a standalone HTML document for a free-text site description.

    Args:
        prompt: User description of the desired website (may be empty).

    Returns:
        str: An HTML document. A static fallback page is returned when the
        model cannot be loaded, generation fails, or the model output does
        not contain a document.
    """
    # Load model if not loaded
    if not load_model():
        return "<!DOCTYPE html>\n<html>\n<head>\n <title>Error</title>\n</head>\n<body>\n <h1>Model Loading Failed</h1>\n</body>\n</html>"
    try:
        # Minimal prompt for instant generation
        full_prompt = f"HTML for {prompt}:"
        # Send inputs to wherever device_map="auto" actually placed the model,
        # instead of assuming CUDA availability implies the model is on CUDA.
        device = getattr(model, "device", "cuda" if torch.cuda.is_available() else "cpu")
        inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=128).to(device)
        # Inference only — disable autograd to cut memory use and latency.
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=500,
                temperature=0.7,
                do_sample=True
            )
        # Decode result
        decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Strip the echoed prompt only when the decode actually starts with it;
        # tokenization round-trips can alter leading whitespace, and a blind
        # length slice would then chop off generated content.
        result = decoded[len(full_prompt):] if decoded.startswith(full_prompt) else decoded
        # Ensure valid HTML — fall back to a minimal page otherwise.
        if '<!DOCTYPE html>' not in result:
            result = f"<!DOCTYPE html>\n<html>\n<head>\n <title>{prompt or 'Generated Site'}</title>\n</head>\n<body>\n <h1>{prompt or 'Content'}</h1>\n</body>\n</html>"
        return result
    except Exception as e:
        # Never propagate model errors into the UI; show a static error page.
        return "<!DOCTYPE html>\n<html>\n<head>\n <title>Error</title>\n</head>\n<body>\n <h1>Generation Failed</h1>\n</body>\n</html>"
def run_code(html_code):
    """Pass the editor contents through unchanged so Gradio renders them
    in the HTML preview component."""
    return html_code
def improve_code(description, current_code):
    """Ask the model to improve existing HTML per a user description.

    Args:
        description: What the user wants changed/improved.
        current_code: The HTML currently in the editor.

    Returns:
        str: Improved HTML when generation succeeds; otherwise the
        original ``current_code`` unchanged (fail-safe — never lose the
        user's work).
    """
    # Load model if not loaded
    if not load_model():
        return current_code
    try:
        # Include the current code in the prompt — the model cannot improve
        # code it never sees. (Previously only the description was sent.)
        prompt = f"Improve: {description}\n{current_code}"
        device = getattr(model, "device", "cuda" if torch.cuda.is_available() else "cpu")
        # max_length raised so a realistic page plus the instruction fits.
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
        with torch.no_grad():
            # do_sample=True so temperature actually takes effect —
            # without it, transformers ignores sampling params and decodes
            # greedily (matches generate_code_fast's settings).
            outputs = model.generate(**inputs, max_new_tokens=400, temperature=0.7, do_sample=True)
        decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Strip the echoed prompt only when it is actually echoed verbatim.
        result = decoded[len(prompt):] if decoded.startswith(prompt) else decoded
        # Ensure valid HTML: return from the document root when present.
        if '<!DOCTYPE html>' in result:
            start = result.find('<!DOCTYPE html>')
            return result[start:]
        return result
    except Exception as e:
        # Fail safe: on any error, keep the user's current code intact.
        return current_code
# UI layout and event wiring. (Removed a stray trailing "|" artifact that
# made the file syntactically invalid.)
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("# AI Website Builder")
    gr.Markdown("Instant Code Generation")
    with gr.Tab("Builder"):
        with gr.Row():
            # Left column: description input and action buttons.
            with gr.Column(scale=1):
                desc_input = gr.Textbox(
                    label="Describe your website",
                    placeholder="e.g., Portfolio site",
                    lines=2
                )
                gen_btn = gr.Button("Generate Instantly", variant="primary")
                imp_btn = gr.Button("Quick Improve")
                run_btn = gr.Button("Run Website")
            # Right column: editable generated HTML.
            with gr.Column(scale=2):
                code_editor = gr.Code(
                    label="HTML Code Editor",
                    language="html",
                    lines=20
                )
        # Preview area (initially hidden; revealed by "Run Website").
        with gr.Row(visible=False) as preview_row:
            gr.Markdown("### Live Preview")
        with gr.Row(visible=False) as preview_content:
            preview = gr.HTML()
        # Back to editor button (initially hidden)
        with gr.Row(visible=False) as back_row:
            back_btn = gr.Button("Back to Editor")
        # Event handling — generate replaces the editor contents.
        gen_btn.click(
            fn=generate_code_fast,
            inputs=desc_input,
            outputs=code_editor
        )
        imp_btn.click(
            fn=improve_code,
            inputs=[desc_input, code_editor],
            outputs=code_editor
        )
        # "Run": push the HTML into the preview and unhide the preview rows.
        # NOTE(review): the chained .then() re-sends the same HTML to the
        # preview the lambda already set — harmless but redundant.
        run_btn.click(
            fn=lambda html: (html, gr.Row(visible=True), gr.Row(visible=True), gr.Row(visible=True)),
            inputs=code_editor,
            outputs=[preview, preview_row, preview_content, back_row]
        ).then(
            fn=run_code,
            inputs=code_editor,
            outputs=preview
        )
        # "Back": hide the preview rows again.
        back_btn.click(
            fn=lambda: (gr.Row(visible=False), gr.Row(visible=False), gr.Row(visible=False)),
            outputs=[preview_row, preview_content, back_row]
        )
        # Keep the preview in sync with manual edits to the code editor.
        code_editor.change(
            fn=lambda x: x,
            inputs=code_editor,
            outputs=preview
        )
app.launch()