"""
PromptWizard Qwen Training - Configurable Dataset & Repo
Fine-tunes Qwen using a user-selected dataset and uploads the trained model
to a user-specified Hugging Face Hub repo asynchronously with detailed logs.
"""

import gradio as gr
import spaces
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)
from datasets import load_dataset
from peft import LoraConfig, get_peft_model, TaskType
from huggingface_hub import HfFolder, upload_folder
import asyncio, threading
from datetime import datetime
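
# NOTE: uploading requires a Hugging Face write token. HfFolder.get_token()
# picks up the cached login (recent huggingface_hub versions also read the
# HF_TOKEN environment variable), so the Space/host must have one configured.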

# ==== Async upload wrapper ====
def start_async_upload(local_dir, hf_repo, output_log):
    """Starts async model upload in a background thread."""
    def runner():
        output_log.append(f"[INFO] ๐Ÿš€ Async upload thread started for repo: {hf_repo}")
        asyncio.run(async_upload_model(local_dir, hf_repo, output_log))
        output_log.append(f"[INFO] ๐Ÿ›‘ Async upload thread finished for repo: {hf_repo}")

    threading.Thread(target=runner, daemon=True).start()
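
# Design note: the upload runs on a daemon thread so the Gradio handler can
# return as soon as training finishes. train_model joins output_log at return
# time, so lines appended by the upload thread afterwards never reach the UI
# textbox; a streaming/generator handler would be needed to surface them.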


async def async_upload_model(local_dir, hf_repo, output_log, max_retries=3):
    """
    Uploads a local model directory to HF Hub asynchronously using HTTP API.
    """
    try:
        token = HfFolder.get_token()
        output_log.append(f"[INFO] โ˜๏ธ Preparing to upload to repo: {hf_repo}")

        attempt = 0
        while attempt < max_retries:
            try:
                output_log.append(f"[INFO] ๐Ÿ”„ Attempt {attempt+1} to upload folder via HTTP API...")
                upload_folder(
                    folder_path=local_dir,
                    repo_id=hf_repo,
                    repo_type="model",
                    token=token,
                    ignore_patterns=["*.lock", "*.tmp"],  # ignore temp files
                    create_pr=False,
                )
                output_log.append("[SUCCESS] โœ… Model successfully uploaded to HF Hub!")
                break
            except Exception as e:
                attempt += 1
                output_log.append(f"[ERROR] Upload attempt {attempt} failed: {e}")
                if attempt >= max_retries:
                    output_log.append("[ERROR] โŒ Max retries reached. Upload failed.")
                else:
                    output_log.append("[INFO] Retrying upload in 5 seconds...")
                    await asyncio.sleep(5)
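                    # NOTE: a fixed 5 s retry delay; exponential backoff
                    # (e.g. 5 * 2 ** attempt seconds) is a common refinement
                    # for flaky network conditions.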

    except Exception as e:
        output_log.append(f"[ERROR] โŒ Unexpected error during upload: {e}")

# ==== GPU check ====
def check_gpu_status():
    return "๐Ÿš€ Zero GPU Ready - GPU will be allocated when training starts"

# ==== Logging helper ====
def log_message(output_log, msg):
    line = f"[{datetime.now().strftime('%H:%M:%S')}] {msg}"
    print(line)
    output_log.append(line)


# =====================================================
# 🧠 Train model to expand short prompts into long ones
# =====================================================
@spaces.GPU(duration=300)
def train_model(
    base_model, dataset_name, num_epochs, batch_size, learning_rate, hf_repo
):
    output_log = []

    try:
        log_message(output_log, "๐Ÿš€ Starting FAST test training...")

        # ===== Device =====
        device = "cuda" if torch.cuda.is_available() else "cpu"
        dtype = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else torch.float16
        log_message(output_log, f"๐ŸŽฎ Device: {device}, dtype: {dtype}")
        if device == "cuda":
            log_message(output_log, f"โœ… GPU: {torch.cuda.get_device_name(0)}")

        # ===== Load dataset =====
        log_message(output_log, f"\n๐Ÿ“š Loading dataset: {dataset_name}")
        dataset = load_dataset(dataset_name)
        dataset = dataset["train"].train_test_split(test_size=0.2, seed=42)
        train_dataset, test_dataset = dataset["train"], dataset["test"]

        # ===== ⚡ FAST mode: use small subset =====
        train_dataset = train_dataset.select(range(min(1000, len(train_dataset))))
        test_dataset = test_dataset.select(range(min(200, len(test_dataset))))
        log_message(output_log, f"⚡ Using {len(train_dataset)} train / {len(test_dataset)} test samples")

        # ===== Format samples =====
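        # Assumes the dataset exposes "short" and "long" columns (the
        # prompt-expansion schema this trainer targets); other datasets
        # would need a different mapping here.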
        def format_example(example):
            short_prompt = example.get("short", "").strip()
            long_response = example.get("long", "").strip()
            return {
                "text": (
                    f"<|system|>\nYou are an AI that expands short prompts into detailed, descriptive ones.\n"
                    f"<|user|>\nShort: {short_prompt}\n"
                    f"<|assistant|>\n{long_response}"
                )
            }

        train_dataset = train_dataset.map(format_example)
        test_dataset = test_dataset.map(format_example)

        log_message(output_log, f"โœ… Formatted {len(train_dataset)} train + {len(test_dataset)} test examples")

        # ===== Load model & tokenizer =====
        log_message(output_log, f"\n๐Ÿค– Loading model: {base_model}")
        tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            trust_remote_code=True,
            torch_dtype=dtype if device == "cuda" else torch.float32,
            low_cpu_mem_usage=True,
        )
        if device == "cuda":
            model = model.to(device)
        log_message(output_log, "โœ… Model and tokenizer loaded successfully")
        log_message(output_log, f"Tokenizer vocab size: {tokenizer.vocab_size}")

        # ===== LoRA configuration =====
        log_message(output_log, "\nโš™๏ธ Configuring LoRA for efficient fine-tuning...")
        lora_config = LoraConfig(
            task_type=TaskType.CAUSAL_LM,
            r=8,
            lora_alpha=16,
            lora_dropout=0.1,
            target_modules=["q_proj", "v_proj"],
            bias="none",
        )
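        # Targeting only q_proj/v_proj keeps the adapter small; adding
        # k_proj/o_proj or the MLP projections is a common way to trade
        # memory for adapter capacity.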
        model = get_peft_model(model, lora_config)
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        log_message(output_log, f"Trainable params after LoRA: {trainable_params:,}")

        # ===== Tokenization + labels =====
        def tokenize_fn(examples):
            tokenized = tokenizer(
                examples["text"],
                padding="max_length",
                truncation=True,
                max_length=256,
            )
            tokenized["labels"] = tokenized["input_ids"].copy()
            return tokenized
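
        # Labels are a straight copy of input_ids, so the loss covers the
        # full template (system + user + assistant). Masking the prompt
        # tokens with -100 would restrict the loss to the assistant response.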

        train_dataset = train_dataset.map(tokenize_fn, batched=True)
        test_dataset = test_dataset.map(tokenize_fn, batched=True)
        log_message(output_log, "โœ… Tokenization + labels done")

        # ===== Training arguments =====
        output_dir = "./qwen-gita-lora"
        training_args = TrainingArguments(
            output_dir=output_dir,
            num_train_epochs=num_epochs,
            per_device_train_batch_size=batch_size,
            gradient_accumulation_steps=2,
            warmup_steps=10,
            logging_steps=5,
            save_strategy="epoch",
            fp16=device == "cuda",
            optim="adamw_torch",
            learning_rate=learning_rate,
            max_steps=500,  # Limit for demo is 100
        )

        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset,
            eval_dataset=test_dataset,
            tokenizer=tokenizer,
        )

        # ===== Train =====
        log_message(output_log, "\n๐Ÿš€ Starting training...")
        trainer.train()
        log_message(output_log, "\n๐Ÿ’พ Saving trained model locally...")
        trainer.save_model(output_dir)
        tokenizer.save_pretrained(output_dir)

        # ===== Async upload =====
        log_message(output_log, f"\nโ˜๏ธ Initiating async upload to {hf_repo}")
        start_async_upload(output_dir, hf_repo, output_log)

        log_message(output_log, "โœ… Training complete & async upload started!")

    except Exception as e:
        log_message(output_log, f"\nโŒ Error during training: {e}")

    return "\n".join(output_log)
# ==== Gradio Interface ====
def create_interface():
    with gr.Blocks(title="PromptWizard โ€” Qwen Trainer") as demo:
        gr.Markdown("""
        # 🧘 PromptWizard Qwen Fine-tuning  
        Fine-tune Qwen on any dataset and upload to any Hugging Face repo.
        """)

        with gr.Row():
            with gr.Column():
                gr.Textbox(label="GPU Status", value=check_gpu_status(), interactive=False)
                base_model = gr.Textbox(label="Base Model", value="Qwen/Qwen2.5-0.5B")
                dataset_name = gr.Textbox(label="Dataset Name", value="rahul7star/Gita")
                hf_repo = gr.Textbox(label="HF Repo for Upload", value="rahul7star/Qwen0.5-3B-Gita")
                num_epochs = gr.Slider(1, 3, value=1, step=1, label="Epochs")
                batch_size = gr.Slider(1, 4, value=2, step=1, label="Batch Size")
                learning_rate = gr.Number(value=5e-5, label="Learning Rate")
                train_btn = gr.Button("๐Ÿš€ Start Fine-tuning", variant="primary")

            with gr.Column():
                output = gr.Textbox(
                    label="Training Log",
                    lines=25,
                    max_lines=40,
                    value="Click 'Start Fine-tuning' to train and upload your model.",
                )

        train_btn.click(
            fn=train_model,
            inputs=[base_model, dataset_name, num_epochs, batch_size, learning_rate, hf_repo],
            outputs=output,
        )

    return demo


if __name__ == "__main__":
    demo = create_interface()
    demo.launch(server_name="0.0.0.0", server_port=7860)