Sebastien De Greef committed
Commit ecb8937 · Parent(s): 16c10c3
detect logged in user
app.py
CHANGED
@@ -19,6 +19,15 @@ logger = logging.getLogger(__name__)
 log_contents = log_stream.getvalue()
 print(log_contents)
 logger.debug('This is a debug message')
+
+hf_user = None
+hfApi = HfApi()
+try:
+    hf_user = hfApi.whoami()
+except Exception as e:
+    hf_user = "not logged in"
+
+
 # Dropdown options
 model_options = [
     "unsloth/mistral-7b-v0.3-bnb-4bit", # New Mistral v3 2x faster!
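The new block stores the raw return value of `HfApi().whoami()`, which is a dict describing the authenticated user rather than a plain username, so the UI below renders the whole payload. A minimal sketch of the same detection that extracts just the account name (the `detect_hf_user` helper is hypothetical, not part of this commit):

```python
# Hypothetical helper illustrating the login detection added above.
# HfApi.whoami() returns a dict (keys such as "name" and "type") and
# raises if no valid Hugging Face token is configured.
from huggingface_hub import HfApi

def detect_hf_user() -> str:
    try:
        info = HfApi().whoami()
        return info.get("name", str(info))
    except Exception:
        return "not logged in"

print(detect_hf_user())  # account name, or "not logged in"
```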
@@ -53,7 +62,7 @@ class PrinterCallback(TrainerCallback):
     def on_step_end(self, args, state, control, **kwargs):
         if state.is_local_process_zero:
             self.step = state.global_step
-            self.progress
+            self.progress(self.step/60, desc=f"Training {self.step}/60")
             #print("**Step ", state.global_step)
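The deleted line evaluated `self.progress` without calling it, a no-op; the replacement invokes the `gr.Progress` tracker on every step. A self-contained sketch of the callback, assuming the `__init__` implied by the `PrinterCallback(progress)` call later in this file (only `on_step_end` appears in the hunk):

```python
# Sketch of the fixed callback; the __init__ body is assumed from how
# PrinterCallback(progress) is constructed in train_model. The hard-coded
# 60 mirrors max_steps = 60 in this app's TrainingArguments.
from transformers import TrainerCallback

class PrinterCallback(TrainerCallback):
    def __init__(self, progress):
        self.progress = progress  # gr.Progress injected by the click handler
        self.step = 0

    def on_step_end(self, args, state, control, **kwargs):
        if state.is_local_process_zero:
            self.step = state.global_step
            # gr.Progress objects are callable: fraction first, then a label
            self.progress(self.step / 60, desc=f"Training {self.step}/60")
```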
@@ -106,55 +115,6 @@ def inference(prompt, input_text):
     result = tokenizer.batch_decode(outputs)
     return result[0], gr.update(visible=True, interactive=True)
 
-
-async def train_model(model_name: str, lora_r: int, lora_alpha: int, lora_dropout: float, per_device_train_batch_size: int, warmup_steps: int, max_steps: int,
-                      gradient_accumulation_steps: int, logging_steps: int, log_to_tensorboard: bool, optim, learning_rate, weight_decay, lr_scheduler_type, seed: int, output_dir, progress= gr.Progress()):
-    global model, tokenizer
-    print(f"$$$ Training model {model_name} with {lora_r} R, {lora_alpha} alpha, {lora_dropout} dropout, {per_device_train_batch_size} per device train batch size, {warmup_steps} warmup steps, {max_steps} max steps, {gradient_accumulation_steps} gradient accumulation steps, {logging_steps} logging steps, {log_to_tensorboard} log to tensorboard, {optim} optimizer, {learning_rate} learning rate, {weight_decay} weight decay, {lr_scheduler_type} lr scheduler type, {seed} seed, {output_dir} output dir")
-    iseed = seed
-    model = FastLanguageModel.get_peft_model(
-        model,
-        r = lora_r,
-        target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
-                          "gate_proj", "up_proj", "down_proj",],
-        lora_alpha = lora_alpha,
-        lora_dropout = lora_dropout,
-        bias = "none",
-        use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
-        random_state=iseed,
-        use_rslora = False, # We support rank stabilized LoRA
-        loftq_config = None, # And LoftQ
-    )
-
-    trainer = SFTTrainer(
-        model = model,
-        tokenizer = tokenizer,
-        train_dataset = dataset,
-        dataset_text_field = "text",
-        max_seq_length = max_seq_length,
-        dataset_num_proc = 2,
-        packing = False, # Can make training 5x faster for short sequences.
-        callbacks = [PrinterCallback(progress)],
-        args = TrainingArguments(
-            per_device_train_batch_size = per_device_train_batch_size,
-            gradient_accumulation_steps = gradient_accumulation_steps,
-            warmup_steps = warmup_steps,
-            max_steps = 60, # Set num_train_epochs = 1 for full training runs
-            learning_rate = learning_rate,
-            fp16 = not is_bfloat16_supported(),
-            bf16 = is_bfloat16_supported(),
-            logging_steps = logging_steps,
-            optim = "adamw_8bit",
-            weight_decay = weight_decay,
-            lr_scheduler_type = "linear",
-            seed = iseed,
-            report_to="tensorboard" if log_to_tensorboard else None,
-            output_dir = output_dir
-        ),
-    )
-    trainer.train()
-    return "Model training",gr.update(visible=True, interactive=False), gr.update(visible=True, interactive=True), gr.update(interactive=True)
-
 def save_model(model_name, hub_model_name, hub_token, gguf_16bit, gguf_8bit, gguf_4bit, gguf_custom, gguf_custom_value, merge_16bit, merge_4bit, just_lora, push_to_hub):
     global model, tokenizer
     if gguf_custom:
@@ -190,7 +150,7 @@ with gr.Blocks(title="Unsloth fine-tuning") as demo:
         with gr.Column():
             gr.Image("unsloth.png", width="300px", interactive=False, show_download_button=False, show_label=False)
         with gr.Column():
-            gr.Markdown(f"**GPU Information:** {gpu_stats.name} ({max_memory} GB)\n\n[Unsloth Docs](http://docs.unsloth.com/)\n\n[Unsloth GitHub](https://github.com/unslothai/unsloth)")
+            gr.Markdown(f"**User:** {hf_user}\n\n**GPU Information:** {gpu_stats.name} ({max_memory} GB)\n\n[Unsloth Docs](http://docs.unsloth.com/)\n\n[Unsloth GitHub](https://github.com/unslothai/unsloth)")
     with gr.Tab("Base Model Parameters"):
 
         with gr.Row():
@@ -265,6 +225,59 @@ with gr.Blocks(title="Unsloth fine-tuning") as demo:
 
     train_output = gr.Textbox(label="Training Status", value="Model not trained", interactive=False)
     train_btn = gr.Button("Train", visible=True)
+
+    def train_model(model_name: str, lora_r: int, lora_alpha: int, lora_dropout: float, per_device_train_batch_size: int, warmup_steps: int, max_steps: int,
+                    gradient_accumulation_steps: int, logging_steps: int, log_to_tensorboard: bool, optim, learning_rate, weight_decay, lr_scheduler_type, seed: int, output_dir, progress= gr.Progress()):
+        global model, tokenizer
+        print(f"$$$ Training model {model_name} with {lora_r} R, {lora_alpha} alpha, {lora_dropout} dropout, {per_device_train_batch_size} per device train batch size, {warmup_steps} warmup steps, {max_steps} max steps, {gradient_accumulation_steps} gradient accumulation steps, {logging_steps} logging steps, {log_to_tensorboard} log to tensorboard, {optim} optimizer, {learning_rate} learning rate, {weight_decay} weight decay, {lr_scheduler_type} lr scheduler type, {seed} seed, {output_dir} output dir")
+        iseed = seed
+        model = FastLanguageModel.get_peft_model(
+            model,
+            r = lora_r,
+            target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
+                              "gate_proj", "up_proj", "down_proj",],
+            lora_alpha = lora_alpha,
+            lora_dropout = lora_dropout,
+            bias = "none",
+            use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
+            random_state=iseed,
+            use_rslora = False, # We support rank stabilized LoRA
+            loftq_config = None, # And LoftQ
+        )
+        progress(0.0, desc="Loading Trainer")
+        time.sleep(1)
+        trainer = SFTTrainer(
+            model = model,
+            tokenizer = tokenizer,
+            train_dataset = dataset,
+            dataset_text_field = "text",
+            max_seq_length = max_seq_length,
+            dataset_num_proc = 2,
+            packing = False, # Can make training 5x faster for short sequences.
+            callbacks = [PrinterCallback(progress)],
+            args = TrainingArguments(
+                per_device_train_batch_size = per_device_train_batch_size,
+                gradient_accumulation_steps = gradient_accumulation_steps,
+                warmup_steps = warmup_steps,
+                max_steps = 60, # Set num_train_epochs = 1 for full training runs
+                learning_rate = learning_rate,
+                fp16 = not is_bfloat16_supported(),
+                bf16 = is_bfloat16_supported(),
+                logging_steps = logging_steps,
+                optim = "adamw_8bit",
+                weight_decay = weight_decay,
+                lr_scheduler_type = "linear",
+                seed = iseed,
+                report_to="tensorboard" if log_to_tensorboard else None,
+                output_dir = output_dir
+            ),
+        )
+        trainer.train()
+        progress(1, desc="Training completed")
+        time.sleep(1)
+        return "Model trained 100%",gr.update(visible=True, interactive=False), gr.update(visible=True, interactive=True), gr.update(interactive=True)
+
+
     train_btn.click(train_model, inputs=[model_name, lora_r, lora_alpha, lora_dropout, per_device_train_batch_size, warmup_steps, max_steps, gradient_accumulation_steps, logging_steps, log_to_tensorboard, optim, learning_rate, weight_decay, lr_scheduler_type, seed, output_dir], outputs=[train_output, train_btn])
 
     with gr.Tab("Save & Push Options"):
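Moving `train_model` inside the `gr.Blocks` scope (and dropping `async`) keeps it next to the components it updates; the `progress=gr.Progress()` default argument is what lets Gradio inject a live progress tracker when the button fires. A stripped-down, runnable sketch of that wiring, with a hypothetical `slow_task` standing in for the real trainer:

```python
# Minimal sketch of the click-handler-with-progress pattern used above;
# slow_task is a hypothetical stand-in for train_model.
import time
import gradio as gr

def slow_task(steps: float, progress=gr.Progress()):
    progress(0.0, desc="Loading")
    for i in range(int(steps)):
        time.sleep(0.1)  # stands in for one training step
        progress((i + 1) / steps, desc=f"Training {i + 1}/{int(steps)}")
    return "done"

with gr.Blocks() as demo:
    steps = gr.Slider(1, 60, value=10, step=1, label="Steps")
    status = gr.Textbox(label="Status", interactive=False)
    gr.Button("Run").click(slow_task, inputs=[steps], outputs=[status])

demo.launch()
```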