daffaaditya's picture
Create app.py
1ccae3b verified
raw
history blame
2.26 kB
# app.py - Jekyll Master AI Demo
# stdlib
import functools
import warnings

# third-party
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Silence all library warnings to keep the demo logs readable.
# NOTE(review): a blanket "ignore" also hides deprecation warnings — consider narrowing.
warnings.filterwarnings("ignore")
# ================= CONFIGURATION =================
# Hugging Face repo ID of the uploaded model.
MODEL_ID = "daffaaditya/jekyll-master-ai"

# Startup banner (same lines/bytes as before, emitted via a loop).
_RULE = "=" * 60
for _banner_line in (
    _RULE,
    "πŸš€ Jekyll Master AI - Live Demo",
    f"πŸ“¦ Using model: {MODEL_ID}",
    _RULE,
):
    print(_banner_line)
# ================= LOAD MODEL =================
# BUG FIX: the original used @gr.cache_resource, but Gradio has no
# `cache_resource` attribute (that is Streamlit's st.cache_resource API),
# so importing this module raised AttributeError. functools.lru_cache
# gives the intended load-once caching for a zero-argument loader.
@functools.lru_cache(maxsize=1)
def load_model():
    """Load the tokenizer and model once, caching the result.

    Returns:
        tuple: ``(tokenizer, model)``. Both elements are ``None`` when
        loading fails, so the app can still start in fallback mode.
    """
    print("πŸ“₯ Loading model...")
    try:
        # Tokenizer and model come straight from the uploaded HF repo.
        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_ID,
            trust_remote_code=True,
        )
        # fp16 + device_map="auto" + low_cpu_mem_usage keep the memory
        # footprint small on the demo hardware.
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_ID,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True,
            low_cpu_mem_usage=True,
        )
        print("βœ… Model loaded successfully!")
        print(f"πŸ“± Device: {model.device}")
    except Exception as e:
        # Deliberate best-effort: any load failure (no GPU, no network,
        # missing weights) drops into a demo-friendly fallback mode.
        print(f"❌ Error loading model: {e}")
        print("πŸ”„ Using fallback mode...")
        tokenizer = None
        model = None
    return tokenizer, model
# Load tokenizer/model once at module import; generate_jekyll_code reads
# these module-level globals (both are None in fallback mode).
tokenizer, model = load_model()
# ================= GENERATION FUNCTION =================
def generate_jekyll_code(instruction, max_tokens=500, temperature=0.7):
"""Generate Jekyll code"""
try:
print(f"\nπŸ“₯ Instruction: {instruction[:50]}...")
# Jika model tidak loaded, beri contoh
if model is None or tokenizer is None:
return f"""# Jekyll Master AI - Example Output
# Model is loading or in fallback mode
# Here's an example _config.yml for a tech blog:
title: "Tech Blog"
description: "A blog about technology and programming"
baseurl: ""
url: "https://yourblog.com"
theme: minima
markdown: kramdown
permalink: pretty
author:
name: "Your Name"
email: "you@example.com"
plugins:
- jekyll-feed
- jekyll-seo-tag
# Try the live demo when model is fully loaded