daffaaditya committed on
Commit
49ab6e1
Β·
verified Β·
1 Parent(s): b48d2f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +208 -6
app.py CHANGED
@@ -1,4 +1,4 @@
1
- # app.py - Jekyll Master AI Demo
2
  import gradio as gr
3
  import torch
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -29,14 +29,17 @@ def load_model():
29
  # Load model dengan quantization untuk hemat memory
30
  model = AutoModelForCausalLM.from_pretrained(
31
  MODEL_ID,
32
- torch_dtype=torch.float16,
33
- device_map="auto",
34
  trust_remote_code=True,
35
  low_cpu_mem_usage=True
36
  )
37
 
38
  print("βœ… Model loaded successfully!")
39
- print(f"πŸ“± Device: {model.device}")
 
 
 
40
 
41
  except Exception as e:
42
  print(f"❌ Error loading model: {e}")
@@ -59,7 +62,7 @@ def generate_jekyll_code(instruction, max_tokens=500, temperature=0.7):
59
 
60
  # Jika model tidak loaded, beri contoh
61
  if model is None or tokenizer is None:
62
- return f"""# Jekyll Master AI - Example Output
63
 
64
  # Model is loading or in fallback mode
65
  # Here's an example _config.yml for a tech blog:
@@ -81,4 +84,203 @@ plugins:
81
  - jekyll-feed
82
  - jekyll-seo-tag
83
 
84
- # Try the live demo when model is fully loaded
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py - Jekyll Master AI Demo (FIXED)
2
  import gradio as gr
3
  import torch
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
29
  # Load model dengan quantization untuk hemat memory
30
  model = AutoModelForCausalLM.from_pretrained(
31
  MODEL_ID,
32
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
33
+ device_map="auto" if torch.cuda.is_available() else None,
34
  trust_remote_code=True,
35
  low_cpu_mem_usage=True
36
  )
37
 
38
  print("βœ… Model loaded successfully!")
39
+ if torch.cuda.is_available():
40
+ print(f"πŸ“± Device: GPU ({torch.cuda.get_device_name(0)})")
41
+ else:
42
+ print(f"πŸ“± Device: CPU")
43
 
44
  except Exception as e:
45
  print(f"❌ Error loading model: {e}")
 
62
 
63
  # Jika model tidak loaded, beri contoh
64
  if model is None or tokenizer is None:
65
+ example_output = """# Jekyll Master AI - Example Output
66
 
67
  # Model is loading or in fallback mode
68
  # Here's an example _config.yml for a tech blog:
 
84
  - jekyll-feed
85
  - jekyll-seo-tag
86
 
87
+ # Try the live demo when model is fully loaded!"""
88
+ return example_output
89
+
90
+ # Format prompt sederhana
91
+ prompt = f"Generate Jekyll code for: {instruction}\n\nCode:"
92
+
93
+ # Tokenize
94
+ inputs = tokenizer(
95
+ prompt,
96
+ return_tensors="pt",
97
+ truncation=True,
98
+ max_length=512
99
+ )
100
+
101
+ if torch.cuda.is_available():
102
+ inputs = inputs.to("cuda")
103
+
104
+ # Generate
105
+ with torch.no_grad():
106
+ outputs = model.generate(
107
+ **inputs,
108
+ max_new_tokens=max_tokens,
109
+ temperature=temperature,
110
+ do_sample=True,
111
+ top_p=0.9,
112
+ repetition_penalty=1.1,
113
+ pad_token_id=tokenizer.pad_token_id if tokenizer.pad_token_id else tokenizer.eos_token_id,
114
+ eos_token_id=tokenizer.eos_token_id
115
+ )
116
+
117
+ # Decode
118
+ generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
119
+
120
+ # Extract code after prompt
121
+ if prompt in generated:
122
+ generated = generated.split(prompt)[-1].strip()
123
+
124
+ # Clean up
125
+ generated = generated.replace(prompt, "").strip()
126
+
127
+ # Jika output kosong
128
+ if not generated or len(generated) < 20:
129
+ generated = """# Generated Jekyll Code
130
+
131
+ # Example _config.yml structure:
132
+
133
+ title: "Your Site Title"
134
+ description: "Your site description"
135
+ baseurl: ""
136
+ url: "https://yoursite.com"
137
+ theme: minima
138
+
139
+ # For better results, be more specific in your request!"""
140
+
141
+ print(f"πŸ“€ Generated {len(generated)} characters")
142
+ return generated
143
+
144
+ except Exception as e:
145
+ error_msg = f"Error: {str(e)}"
146
+ print(f"❌ {error_msg}")
147
+ return f"# Error\n{error_msg}\n\nPlease try again or simplify your request."
148
+
149
+ # ================= GRADIO INTERFACE =================
150
def create_interface():
    """Build and return the Gradio Blocks demo for Jekyll Master AI.

    Wires three entry points to ``generate_jekyll_code``: the Generate
    button, the Enter-key submit on the instruction box, and the clickable
    examples table. Quick-example buttons only prefill the instruction box.
    """

    # Canned example instructions (single-input rows for gr.Examples).
    sample_rows = [
        ["Buat file _config.yml untuk blog teknologi"],
        ["Buat layout post dengan featured image"],
        ["Buat include untuk navigation bar"],
        ["Buat plugin untuk reading time"],
        ["Buat file Sass untuk buttons"],
    ]

    # Minimal page styling.
    page_css = """
    .gradio-container { max-width: 1200px; margin: 0 auto; }
    .header { text-align: center; padding: 20px; }
    .example-btn { margin: 5px; }
    """

    with gr.Blocks(css=page_css, title="Jekyll Master AI") as demo:
        # Page header.
        gr.Markdown("""
        # 🎯 Jekyll Master AI
        **Fine-tuned AI untuk Jekyll Static Site Generator**

        Generate clean, production-ready code untuk website Jekyll Anda.
        """)

        with gr.Row():
            with gr.Column(scale=1):
                # Free-form instruction input.
                instruction = gr.Textbox(
                    label="Instruksi",
                    placeholder="Contoh: Buat file _config.yml untuk blog teknologi...",
                    lines=4,
                )

                # Generation knobs, collapsed by default.
                with gr.Accordion("βš™οΈ Parameters", open=False):
                    max_tokens = gr.Slider(
                        minimum=100,
                        maximum=1000,
                        value=500,
                        step=50,
                        label="Max Tokens",
                    )
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=1.5,
                        value=0.7,
                        step=0.1,
                        label="Temperature",
                    )

                # Action buttons.
                with gr.Row():
                    generate_btn = gr.Button("πŸš€ Generate Code", variant="primary")
                    clear_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")

            with gr.Column(scale=2):
                # Generated code output pane.
                output = gr.Code(
                    label="Generated Jekyll Code",
                    language="yaml",
                    lines=20,
                )

        # One-click buttons that only prefill the instruction box.
        gr.Markdown("### πŸš€ Contoh Cepat")
        with gr.Row():
            for btn_label, btn_prompt in [
                ("πŸ“ Config", "Buat _config.yml untuk blog"),
                ("🎨 Layout", "Buat layout post"),
                ("πŸ”§ Plugin", "Buat plugin Jekyll"),
            ]:
                quick_btn = gr.Button(btn_label, size="sm", variant="secondary")
                # Default-arg binding avoids the late-binding closure pitfall.
                quick_btn.click(lambda ex=btn_prompt: ex, outputs=instruction)

        # Full clickable examples that run generation directly.
        gr.Examples(
            examples=sample_rows,
            inputs=instruction,
            outputs=output,
            fn=generate_jekyll_code,
            cache_examples=False,
            label="Klik contoh untuk mencoba:",
        )

        # Footer with model/dataset links.
        gr.Markdown("---")
        gr.Markdown(f"""
        **Model:** [{MODEL_ID}](https://huggingface.co/{MODEL_ID}) |
        **Dataset:** [Jekyll Master Dataset](https://huggingface.co/datasets/daffaaditya/jekyll-master-dataset) |
        **Built with:** Transformers + Gradio
        """)

        # ============ EVENT HANDLERS ============

        gen_inputs = [instruction, max_tokens, temperature]

        # Generate button and Enter-key submit share the same handler.
        generate_btn.click(fn=generate_jekyll_code, inputs=gen_inputs, outputs=output)
        instruction.submit(fn=generate_jekyll_code, inputs=gen_inputs, outputs=output)

        # Clear resets both the instruction box and the output pane.
        clear_btn.click(fn=lambda: ("", ""), inputs=[], outputs=[instruction, output])

    return demo
277
+
278
# ================= LAUNCH =================
if __name__ == "__main__":
    # Script entry point: build the UI and serve it on all interfaces.
    print("πŸš€ Launching Jekyll Master AI Demo...")
    app = create_interface()
    # Bind to 0.0.0.0:7860 (standard HF Spaces port); no public share link.
    app.launch(server_name="0.0.0.0", server_port=7860, share=False)