# NOTE: "Spaces: Sleeping" banner text from the scraped Hugging Face page
# removed — it is page chrome, not part of the app. Runnable source begins below.
# app.py - Jekyll Master AI Demo (FIXED)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import warnings

# Silence library warnings so the Space log stays readable.
warnings.filterwarnings("ignore")

# ================= CONFIGURATION =================
MODEL_ID = "daffaaditya/jekyll-master-ai"  # the uploaded fine-tuned model repo on the Hub

# Startup banner.
print("=" * 60)
print("π Jekyll Master AI - Live Demo")
print(f"π¦ Using model: {MODEL_ID}")
print("=" * 60)
# ================= LOAD MODEL =================
def load_model():
    """Load the tokenizer and causal-LM from the Hub.

    Returns:
        (tokenizer, model) on success, or (None, None) when loading fails,
        which puts the demo into fallback mode.
    """
    print("π₯ Loading model...")
    use_gpu = torch.cuda.is_available()
    try:
        tok = AutoTokenizer.from_pretrained(
            MODEL_ID,
            trust_remote_code=True,
        )
        # Half precision + device_map="auto" on GPU to keep memory low;
        # full precision on CPU.
        net = AutoModelForCausalLM.from_pretrained(
            MODEL_ID,
            torch_dtype=torch.float16 if use_gpu else torch.float32,
            device_map="auto" if use_gpu else None,
            trust_remote_code=True,
            low_cpu_mem_usage=True,
        )
        print("β Model loaded successfully!")
        if use_gpu:
            print(f"π± Device: GPU ({torch.cuda.get_device_name(0)})")
        else:
            print("π± Device: CPU")
        return tok, net
    except Exception as e:
        # Best-effort fallback: keep the UI usable even without the model.
        print(f"β Error loading model: {e}")
        print("π Using fallback mode...")
        return None, None

# Load once at startup; generate_jekyll_code reads these module globals.
tokenizer, model = load_model()
# ================= GENERATION FUNCTION =================
def generate_jekyll_code(instruction, max_tokens=500, temperature=0.7):
    """Generate Jekyll code for a natural-language instruction.

    Args:
        instruction: User request describing the Jekyll artifact to create.
        max_tokens: Maximum number of new tokens to sample.
        temperature: Sampling temperature (higher = more random).

    Returns:
        The generated code as a string; a canned example when the model is
        unavailable or the completion is too short; or an error message
        string if generation raises (exceptions never propagate).
    """
    try:
        print(f"\nπ₯ Instruction: {instruction[:50]}...")

        # Fallback mode: model failed to load, show a static example.
        # NOTE(review): YAML indentation inside these literals was
        # reconstructed (lost in the source formatting) — verify.
        if model is None or tokenizer is None:
            example_output = """# Jekyll Master AI - Example Output
# Model is loading or in fallback mode
# Here's an example _config.yml for a tech blog:
title: "Tech Blog"
description: "A blog about technology and programming"
baseurl: ""
url: "https://yourblog.com"
theme: minima
markdown: kramdown
permalink: pretty
author:
  name: "Your Name"
  email: "you@example.com"
plugins:
  - jekyll-feed
  - jekyll-seo-tag
# Try the live demo when model is fully loaded!"""
            return example_output

        # Simple prompt format; the stripping logic below depends on it.
        prompt = f"Generate Jekyll code for: {instruction}\n\nCode:"

        inputs = tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=512,
        )
        if torch.cuda.is_available():
            inputs = inputs.to("cuda")

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                temperature=temperature,
                do_sample=True,
                top_p=0.9,
                repetition_penalty=1.1,
                # BUG FIX: pad_token_id can legitimately be 0 (falsy); the
                # original truthiness test wrongly substituted eos_token_id
                # in that case. Compare against None instead.
                pad_token_id=(
                    tokenizer.pad_token_id
                    if tokenizer.pad_token_id is not None
                    else tokenizer.eos_token_id
                ),
                eos_token_id=tokenizer.eos_token_id,
            )

        generated = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Keep only the completion after the prompt. (The original's extra
        # `generated.replace(prompt, "")` was a no-op after this split and
        # has been dropped.)
        if prompt in generated:
            generated = generated.split(prompt)[-1].strip()

        # Guard against empty or trivially short completions.
        if not generated or len(generated) < 20:
            generated = """# Generated Jekyll Code
# Example _config.yml structure:
title: "Your Site Title"
description: "Your site description"
baseurl: ""
url: "https://yoursite.com"
theme: minima
# For better results, be more specific in your request!"""

        print(f"π€ Generated {len(generated)} characters")
        return generated

    except Exception as e:
        error_msg = f"Error: {str(e)}"
        print(f"β {error_msg}")
        return f"# Error\n{error_msg}\n\nPlease try again or simplify your request."
# ================= GRADIO INTERFACE =================
def create_interface():
    """Create simple Gradio interface.

    Builds the Blocks layout (input column, parameter sliders, output code
    panel, example buttons) and wires the event handlers to
    generate_jekyll_code. Returns the gr.Blocks app, not yet launched.

    NOTE(review): the source this was recovered from had lost all
    indentation; the widget nesting below is a reconstruction — verify the
    layout against the running app.
    """
    # Canned example instructions (UI text is Indonesian, kept verbatim).
    examples = [
        ["Buat file _config.yml untuk blog teknologi"],
        ["Buat layout post dengan featured image"],
        ["Buat include untuk navigation bar"],
        ["Buat plugin untuk reading time"],
        ["Buat file Sass untuk buttons"],
    ]
    with gr.Blocks(title="Jekyll Master AI") as demo:
        # Custom CSS injected via an HTML element.
        gr.HTML("""
        <style>
        .gradio-container {
            max-width: 1200px;
            margin: 0 auto;
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, sans-serif;
        }
        .header {
            text-align: center;
            padding: 20px;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            border-radius: 10px;
            margin-bottom: 20px;
        }
        .example-btn {
            margin: 5px;
        }
        .footer {
            text-align: center;
            margin-top: 30px;
            padding-top: 20px;
            border-top: 1px solid #eee;
            color: #666;
        }
        .gradio-button {
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            border: none;
        }
        .gradio-button:hover {
            opacity: 0.9;
        }
        textarea, input {
            border-radius: 8px !important;
        }
        </style>
        """)
        # Header banner.
        gr.HTML("""
        <div class="header">
            <h1 style="margin: 0">π― Jekyll Master AI</h1>
            <p style="font-size: 1.2em; opacity: 0.9">Fine-tuned AI untuk Jekyll Static Site Generator</p>
            <p>Generate clean, production-ready code untuk website Jekyll Anda.</p>
        </div>
        """)
        with gr.Row():
            with gr.Column(scale=1):
                # Instruction input box.
                instruction = gr.Textbox(
                    label="Instruksi",
                    placeholder="Contoh: Buat file _config.yml untuk blog teknologi...",
                    lines=4
                )
                # Generation parameters, collapsed by default.
                with gr.Accordion("βοΈ Parameters", open=False):
                    max_tokens = gr.Slider(
                        minimum=100,
                        maximum=1000,
                        value=500,
                        step=50,
                        label="Max Tokens"
                    )
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=1.5,
                        value=0.7,
                        step=0.1,
                        label="Temperature"
                    )
                # Action buttons.
                with gr.Row():
                    generate_btn = gr.Button("π Generate Code", variant="primary")
                    clear_btn = gr.Button("ποΈ Clear", variant="secondary")
            with gr.Column(scale=2):
                # Generated-code output panel (YAML highlighting).
                output = gr.Code(
                    label="Generated Jekyll Code",
                    language="yaml",
                    lines=20
                )
        # Quick-example buttons: clicking one fills the instruction box.
        # The default-arg lambda (`ex=example`) binds each value eagerly,
        # avoiding the late-binding-closure pitfall.
        gr.Markdown("### π Contoh Cepat")
        with gr.Row():
            for text, example in [
                ("π Config", "Buat _config.yml untuk blog"),
                ("π¨ Layout", "Buat layout post"),
                ("π§ Plugin", "Buat plugin Jekyll"),
            ]:
                btn = gr.Button(text, size="sm")
                btn.click(lambda ex=example: ex, outputs=instruction)
        # Full examples panel (results are not cached).
        gr.Examples(
            examples=examples,
            inputs=instruction,
            outputs=output,
            fn=generate_jekyll_code,
            cache_examples=False,
            label="Klik contoh untuk mencoba:"
        )
        # Footer with links to the model and dataset pages.
        gr.HTML(f"""
        <div class="footer">
            <p>
                <strong>Model:</strong> <a href="https://huggingface.co/{MODEL_ID}" target="_blank">{MODEL_ID}</a> |
                <strong>Dataset:</strong> <a href="https://huggingface.co/datasets/daffaaditya/jekyll-master-dataset" target="_blank">Jekyll Master Dataset</a> |
                <strong>Built with:</strong> Transformers + Gradio
            </p>
        </div>
        """)
        # ============ EVENT HANDLERS ============
        # Generate button -> run the model.
        generate_btn.click(
            fn=generate_jekyll_code,
            inputs=[instruction, max_tokens, temperature],
            outputs=output
        )
        # Clear button -> empty both the input and the output.
        clear_btn.click(
            fn=lambda: ("", ""),
            inputs=[],
            outputs=[instruction, output]
        )
        # Pressing Enter in the instruction box also triggers generation.
        instruction.submit(
            fn=generate_jekyll_code,
            inputs=[instruction, max_tokens, temperature],
            outputs=output
        )
    return demo
# ================= LAUNCH =================
if __name__ == "__main__":
    print("π Launching Jekyll Master AI Demo...")
    app = create_interface()
    # Bind to all interfaces on the standard Spaces port; no public share link.
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
    )