import os
import gradio as gr
import torch
from datasets import load_dataset
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
TrainingArguments,
Trainer,
    DataCollatorForLanguageModeling,
    TrainerCallback,
)
from peft import LoraConfig, get_peft_model, TaskType
import threading
# ── Globals ──────────────────────────────────────────────────────────────────
training_log = []
training_thread = None
stop_flag = threading.Event()
def log(msg: str):
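    """Append a message to the shared in-memory log and echo it to stdout."""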
training_log.append(msg)
print(msg)
# ── Core training function ────────────────────────────────────────────────────
def run_finetuning(
model_name: str,
dataset_name: str,
dataset_config: str,
text_column: str,
num_train_epochs: int,
per_device_batch_size: int,
learning_rate: float,
max_seq_length: int,
use_lora: bool,
lora_r: int,
output_dir: str,
):
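    """Fine-tune a causal LM on a Hub dataset, optionally with LoRA adapters.

    Designed to run in a background thread; progress is reported through the
    module-level training_log, and stop_flag requests a graceful stop.
    """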
global training_log, stop_flag
training_log = []
stop_flag.clear()
try:
        log(f"🔧 Loading tokenizer: {model_name}")
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
        log(f"📦 Loading model: {model_name}")
model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float32, # CPU-safe
low_cpu_mem_usage=True,
)
if use_lora:
            log(f"⚡ Applying LoRA (r={lora_r}) ...")
            lora_config = LoraConfig(
                task_type=TaskType.CAUSAL_LM,
                r=int(lora_r),
                lora_alpha=int(lora_r) * 2,
                lora_dropout=0.05,
                bias="none",
                # Covers GPT-2-style (c_attn/c_proj), OPT/LLaMA/Phi-style
                # (q_proj/k_proj/v_proj/o_proj) and GPT-NeoX/Pythia-style
                # (query_key_value) attention layers; PEFT only wraps the
                # names that actually exist in the loaded model.
                target_modules=["c_attn", "c_proj", "q_proj", "v_proj", "k_proj", "o_proj", "query_key_value"],
            )
model = get_peft_model(model, lora_config)
trainable, total = model.get_nb_trainable_parameters()
log(f" Trainable params: {trainable:,} / {total:,} ({100*trainable/total:.2f}%)")
        log(f"📂 Loading dataset: {dataset_name}" + (f" ({dataset_config})" if dataset_config else ""))
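        # trust_remote_code is only needed for datasets that ship a custom loading script.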
ds_kwargs = {"split": "train", "trust_remote_code": True}
if dataset_config.strip():
dataset = load_dataset(dataset_name, dataset_config, **ds_kwargs)
else:
dataset = load_dataset(dataset_name, **ds_kwargs)
# Take a small sample for demo / CPU friendliness
dataset = dataset.select(range(min(500, len(dataset))))
log(f" Using {len(dataset)} training samples")
        def tokenize(batch):
            texts = [str(t) for t in batch[text_column]]
            return tokenizer(
                texts,
                truncation=True,
                max_length=int(max_seq_length),
                padding="max_length",
            )
        log("🔀 Tokenizing dataset ...")
tokenized = dataset.map(tokenize, batched=True, remove_columns=dataset.column_names)
tokenized.set_format("torch")
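        # mlm=False gives the plain causal-LM objective: the collator copies
        # input_ids into labels and masks padding tokens out of the loss.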
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
training_args = TrainingArguments(
output_dir=output_dir,
            num_train_epochs=int(num_train_epochs),
            per_device_train_batch_size=int(per_device_batch_size),
learning_rate=learning_rate,
logging_steps=5,
save_strategy="epoch",
fp16=False,
bf16=False,
            no_cuda=True,  # force CPU-only training
report_to="none",
disable_tqdm=False,
)
        # Stream Trainer logs (loss, lr, epoch markers) into training_log for the UI.
class StreamLogger(TrainerCallback):
def on_log(self, args, state, control, logs=None, **kwargs):
if logs:
step = state.global_step
                    loss = logs.get("loss", "-")
                    lr = logs.get("learning_rate", "-")
log(f" step {step:>4} | loss: {loss} | lr: {lr}")
def on_epoch_end(self, args, state, control, **kwargs):
                log(f"✅ Epoch {int(state.epoch)} complete")
if stop_flag.is_set():
control.should_training_stop = True
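            def on_step_end(self, args, state, control, **kwargs):
                # Also honor the Stop button between optimizer steps,
                # not only at epoch boundaries.
                if stop_flag.is_set():
                    control.should_training_stop = True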
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized,
data_collator=data_collator,
callbacks=[StreamLogger()],
)
        log("🚀 Starting training ...")
trainer.train()
        log(f"💾 Saving model to: {output_dir}")
trainer.save_model(output_dir)
tokenizer.save_pretrained(output_dir)
        log("🎉 Fine-tuning complete!")
except Exception as e:
log(f"❌ Error: {e}")
# ── Gradio helpers ────────────────────────────────────────────────────────────
def start_training(
model_name, dataset_name, dataset_config, text_column,
num_epochs, batch_size, learning_rate, max_seq_len,
use_lora, lora_r, output_dir,
):
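    """Launch run_finetuning in a daemon thread so the Gradio UI stays
    responsive; progress is read back via get_logs() and is_running()."""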
global training_thread
if training_thread and training_thread.is_alive():
return "⚠️ Training already running!"
training_thread = threading.Thread(
target=run_finetuning,
args=(
model_name, dataset_name, dataset_config, text_column,
num_epochs, batch_size, learning_rate, max_seq_len,
use_lora, lora_r, output_dir,
),
daemon=True,
)
training_thread.start()
    return "Training started! Click 'Refresh Status' to update the log below."
def stop_training():
stop_flag.set()
    return "🛑 Stop signal sent."
def get_logs():
return "\n".join(training_log) if training_log else "No logs yet..."
def is_running():
return "🟒 Running" if (training_thread and training_thread.is_alive()) else "⚫ Idle"
# ── Gradio UI ─────────────────────────────────────────────────────────────────
with gr.Blocks(
title="LLM Fine-Tuner",
theme=gr.themes.Base(
primary_hue="emerald",
neutral_hue="zinc",
font=gr.themes.GoogleFont("JetBrains Mono"),
),
css="""
.container { max-width: 900px; margin: auto; }
.gr-button-primary { background: #10b981 !important; }
footer { display: none !important; }
""",
) as demo:
gr.Markdown(
"""
# 🤖 LLM Fine-Tuner
Fine-tune small language models on Hugging Face datasets – CPU-friendly with LoRA support.
"""
)
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### 🧠 Model")
model_name = gr.Dropdown(
choices=[
"distilgpt2",
"gpt2",
"facebook/opt-125m",
"EleutherAI/pythia-70m",
"EleutherAI/pythia-160m",
"microsoft/phi-1_5",
],
value="distilgpt2",
label="Base Model",
allow_custom_value=True,
)
            gr.Markdown("### 📦 Dataset")
dataset_name = gr.Textbox(value="wikitext", label="Dataset Name (HF Hub)")
dataset_config = gr.Textbox(value="wikitext-2-raw-v1", label="Dataset Config (optional)")
text_column = gr.Textbox(value="text", label="Text Column")
with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Training")
num_epochs = gr.Slider(1, 10, value=1, step=1, label="Epochs")
batch_size = gr.Slider(1, 16, value=2, step=1, label="Batch Size")
learning_rate = gr.Number(value=2e-4, label="Learning Rate")
max_seq_len = gr.Slider(32, 512, value=128, step=32, label="Max Sequence Length")
output_dir = gr.Textbox(value="./finetuned-model", label="Output Directory")
            gr.Markdown("### ⚡ LoRA (recommended for CPU)")
use_lora = gr.Checkbox(value=True, label="Use LoRA")
lora_r = gr.Slider(4, 64, value=8, step=4, label="LoRA Rank (r)")
with gr.Row():
        start_btn = gr.Button("🚀 Start Fine-Tuning", variant="primary")
        stop_btn = gr.Button("🛑 Stop", variant="secondary")
        status_btn = gr.Button("🔄 Refresh Status")
status_box = gr.Textbox(label="Status", value="⚫ Idle", interactive=False)
log_box = gr.Textbox(
label="Training Log",
lines=20,
max_lines=30,
interactive=False,
placeholder="Logs will appear here once training starts...",
)
start_btn.click(
fn=start_training,
inputs=[
model_name, dataset_name, dataset_config, text_column,
num_epochs, batch_size, learning_rate, max_seq_len,
use_lora, lora_r, output_dir,
],
outputs=status_box,
)
stop_btn.click(fn=stop_training, outputs=status_box)
status_btn.click(fn=lambda: (is_running(), get_logs()), outputs=[status_box, log_box])
gr.Markdown(
"""
---
**Tips:**
- `distilgpt2` (82M) is the best starting point on CPU.
- Enable **LoRA** to drastically reduce memory and training time.
- Keep **Max Sequence Length ≤ 128** and **Batch Size = 1–2** on the free CPU tier.
- The dataset is capped at **500 samples** for CPU-friendly runs – edit the code to increase.
"""
)
if __name__ == "__main__":
demo.launch(share=False)